Error: Module 'gluonbook' has no attribute 'accuracy'



I receive an error:
AttributeError: module 'gluonbook' has no attribute 'accuracy'

I am running the code in D2L (Kaggle Competition: Image Classification CIFAR-10).

import gluonbook as gb
from mxnet import autograd, gluon, init
from mxnet.gluon import data as gdata, loss as gloss, nn
import os
import pandas as pd
import shutil
import time

DIR = 'C:/Users/prati/OneDrive/aaFlyingVixen/Tutorials/AI Mxnet/data_mxnet'
# If you use the full data set downloaded for the Kaggle competition,
# change the demo variable to False.
demo = True
if demo:
    import zipfile
    # Demo archives shipped with the book: tiny train/test sets plus the
    # label CSV.  (The pasted code had an empty filename list and no body
    # for the `with` block; reconstructed from the D2L original.)
    for f in ['train_tiny.zip', 'test_tiny.zip', 'trainLabels.csv.zip']:
        with zipfile.ZipFile(DIR + '/data/kaggle_cifar10/' + f, 'r') as z:
            z.extractall(DIR + '/data/kaggle_cifar10/')

def read_label_file(data_dir, label_file, train_dir, valid_ratio):
    """Parse the Kaggle label CSV and size the train/valid split.

    Returns (n_train_per_label, idx_label) where idx_label maps the integer
    image index to its class-name string.
    """
    idx_label = {}
    with open(os.path.join(data_dir, label_file), 'r') as f:
        next(f)  # skip the header line (column names)
        for line in f:
            idx, label = line.rstrip().split(',')
            idx_label[int(idx)] = label
    labels = set(idx_label.values())
    n_train_valid = len(os.listdir(os.path.join(data_dir, train_dir)))
    n_train = int(n_train_valid * (1 - valid_ratio))
    assert 0 < n_train < n_train_valid
    # Per-label quota for the training split.
    return n_train // len(labels), idx_label

def mkdir_if_not_exist(path):  # This function is saved in the gluonbook package for future use.
    """Create the directory formed by joining the parts of *path*, if absent.

    BUG FIX: the pasted code lost the `if` body, leaving a syntax error;
    restored the `os.makedirs` call from the D2L original.
    """
    if not os.path.exists(os.path.join(*path)):
        os.makedirs(os.path.join(*path))

def reorg_train_valid(data_dir, train_dir, input_dir, n_train_per_label, idx_label):
    """Split the raw training images into train/<label> and valid/<label>
    folders, and copy every image into train_valid/<label>.

    The first *n_train_per_label* images of each label go to train/; the
    remainder go to valid/.
    """
    label_count = {}
    for train_file in os.listdir(os.path.join(data_dir, train_dir)):
        # Filenames look like "<idx>.png"; the index keys into idx_label.
        idx = int(train_file.split('.')[0])
        label = idx_label[idx]
        # Every image goes into train_valid/<label>.
        mkdir_if_not_exist([data_dir, input_dir, 'train_valid', label])
        shutil.copy(os.path.join(data_dir, train_dir, train_file),
                    os.path.join(data_dir, input_dir, 'train_valid', label))
        if label not in label_count or label_count[label] < n_train_per_label:
            mkdir_if_not_exist([data_dir, input_dir, 'train', label])
            shutil.copy(os.path.join(data_dir, train_dir, train_file),
                        os.path.join(data_dir, input_dir, 'train', label))
            label_count[label] = label_count.get(label, 0) + 1
        else:
            # BUG FIX: the pasted code lost this `else:`, so quota images
            # were copied to BOTH train/ and valid/, and over-quota images
            # were copied to neither.
            mkdir_if_not_exist([data_dir, input_dir, 'valid', label])
            shutil.copy(os.path.join(data_dir, train_dir, train_file),
                        os.path.join(data_dir, input_dir, 'valid', label))

def reorg_test(data_dir, test_dir, input_dir):
    """Stage all test images under test/unknown (labels are unknown at test time)."""
    mkdir_if_not_exist([data_dir, input_dir, 'test', 'unknown'])
    dst = os.path.join(data_dir, input_dir, 'test', 'unknown')
    src_root = os.path.join(data_dir, test_dir)
    for name in os.listdir(src_root):
        shutil.copy(os.path.join(src_root, name), dst)

def reorg_cifar10_data(data_dir, label_file, train_dir, test_dir, input_dir, valid_ratio):
    """End-to-end reorganization: read labels, split train/valid, stage test."""
    per_label, idx_label = read_label_file(data_dir, label_file, train_dir,
                                           valid_ratio)
    reorg_train_valid(data_dir, train_dir, input_dir, per_label, idx_label)
    reorg_test(data_dir, test_dir, input_dir)

if demo:
    # Note: Here, we use small training sets and small testing sets and the
    # batch size should be set smaller.
    train_dir, test_dir, batch_size = 'train_tiny', 'test_tiny', 1
else:
    # BUG FIX: the pasted code lost this `else:`, so the full-data settings
    # unconditionally overwrote the demo ones.  When using the complete data
    # set for the Kaggle competition, the batch size can be a large integer.
    train_dir, test_dir, batch_size = 'train', 'test', 128
data_dir, label_file = DIR + '/data/kaggle_cifar10', 'trainLabels.csv'
input_dir, valid_ratio = 'train_valid_test', 0.1
reorg_cifar10_data(data_dir, label_file, train_dir, test_dir, input_dir, valid_ratio)

# The pasted code lost the transform calls, leaving only comment fragments;
# reconstructed from the D2L original.
transform_train = gdata.vision.transforms.Compose([
    # Magnify the image to a square of 40 pixels in both height and width.
    gdata.vision.transforms.Resize(40),
    # Randomly crop a square of 0.64 to 1 times the area of the original
    # image, then shrink it to a square of 32 pixels in height and width.
    gdata.vision.transforms.RandomResizedCrop(32, scale=(0.64, 1.0),
                                              ratio=(1.0, 1.0)),
    gdata.vision.transforms.RandomFlipLeftRight(),
    gdata.vision.transforms.ToTensor(),
    # Normalize each channel of the image (CIFAR-10 per-channel mean / std).
    gdata.vision.transforms.Normalize([0.4914, 0.4822, 0.4465],
                                      [0.2023, 0.1994, 0.2010])])

# Test-time pipeline: no augmentation, only tensor conversion + normalization.
transform_test = gdata.vision.transforms.Compose([
    gdata.vision.transforms.ToTensor(),
    gdata.vision.transforms.Normalize([0.4914, 0.4822, 0.4465],
                                      [0.2023, 0.1994, 0.2010])])

# Read the original image files. flag=1 indicates that the input image has
# three channels (color).  BUG FIX: the pasted code lost the
# `gdata.vision.ImageFolderDataset(` call on every line (syntax error);
# restored from the D2L original.
train_ds = gdata.vision.ImageFolderDataset(
    os.path.join(data_dir, input_dir, 'train'), flag=1)
valid_ds = gdata.vision.ImageFolderDataset(
    os.path.join(data_dir, input_dir, 'valid'), flag=1)
train_valid_ds = gdata.vision.ImageFolderDataset(
    os.path.join(data_dir, input_dir, 'train_valid'), flag=1)
test_ds = gdata.vision.ImageFolderDataset(
    os.path.join(data_dir, input_dir, 'test'), flag=1)

# Wrap each dataset in a DataLoader; last_batch='keep' retains the final
# partial batch instead of dropping it.
def _make_loader(dataset, transform, shuffle):
    # transform_first applies the transform to the image only, not the label.
    return gdata.DataLoader(dataset.transform_first(transform), batch_size,
                            shuffle=shuffle, last_batch='keep')

train_data = _make_loader(train_ds, transform_train, True)
valid_data = _make_loader(valid_ds, transform_test, True)
train_valid_data = _make_loader(train_valid_ds, transform_train, True)
test_data = _make_loader(test_ds, transform_test, False)

class Residual(nn.HybridBlock):
    """ResNet residual block: two 3x3 conv+BN layers with a skip connection.

    When use_1x1conv is True, a 1x1 convolution reshapes the shortcut so it
    matches the main path (used when strides > 1 or channels change).
    """

    def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):
        super(Residual, self).__init__(**kwargs)
        self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1,
                               strides=strides)
        self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)
        if use_1x1conv:
            self.conv3 = nn.Conv2D(num_channels, kernel_size=1,
                                   strides=strides)
        else:
            # BUG FIX: the pasted code unconditionally reset conv3 to None
            # right after creating it (the `else:` was lost), which disabled
            # the 1x1 shortcut and would break shape matching.
            self.conv3 = None
        self.bn1 = nn.BatchNorm()
        self.bn2 = nn.BatchNorm()

    def hybrid_forward(self, F, X):
        Y = F.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)
        # Element-wise residual addition, then the final activation.
        return F.relu(Y + X)

def resnet18(num_classes):
    """Build a ResNet-18 sized for 32x32 CIFAR input (3x3 stem, no max-pool)."""
    net = nn.HybridSequential()
    net.add(nn.Conv2D(64, kernel_size=3, strides=1, padding=1),
            nn.BatchNorm(), nn.Activation('relu'))

    def resnet_block(num_channels, num_residuals, first_block=False):
        # One stage of residual blocks.  The first block of every stage
        # except the first halves the spatial size and adjusts channels
        # via the 1x1 shortcut.
        blk = nn.HybridSequential()
        for i in range(num_residuals):
            if i == 0 and not first_block:
                blk.add(Residual(num_channels, use_1x1conv=True, strides=2))
            else:
                # BUG FIX: the pasted code lost this `else:` branch, so each
                # stage got at most one block and the first stage was empty.
                blk.add(Residual(num_channels))
        return blk

    net.add(resnet_block(64, 2, first_block=True), resnet_block(128, 2),
            resnet_block(256, 2), resnet_block(512, 2))
    net.add(nn.GlobalAvgPool2D(), nn.Dense(num_classes))
    return net

def get_net(ctx):
    """Return a 10-class ResNet-18 with Xavier-initialized weights on *ctx*."""
    net = resnet18(10)  # CIFAR-10 has ten classes
    net.initialize(ctx=ctx, init=init.Xavier())
    return net

loss = gloss.SoftmaxCrossEntropyLoss()

def train(net, train_data, valid_data, num_epochs, lr, wd, ctx, lr_period, lr_decay):
    """Train *net* with SGD (momentum 0.9, weight decay *wd*), multiplying the
    learning rate by *lr_decay* every *lr_period* epochs, and print per-epoch
    loss/accuracy stats.  If *valid_data* is None, validation is skipped.
    """
    trainer = gluon.Trainer(net.collect_params(), 'sgd',
                            {'learning_rate': lr, 'momentum': 0.9, 'wd': wd})
    for epoch in range(num_epochs):
        train_l, train_acc, start = 0.0, 0.0, time.time()
        if epoch > 0 and epoch % lr_period == 0:
            trainer.set_learning_rate(trainer.learning_rate * lr_decay)
        for X, y in train_data:
            y = y.astype('float32').as_in_context(ctx)
            with autograd.record():
                y_hat = net(X.as_in_context(ctx))
                l = loss(y_hat, y)
            # BUG FIX: the pasted code never back-propagated or stepped the
            # optimizer, so no learning happened; restored from the original.
            l.backward()
            trainer.step(batch_size)
            train_l += l.mean().asscalar()
            # BUG FIX: gluonbook 0.8.x has no `accuracy` attribute (the
            # AttributeError from the question); compute it inline.  y was
            # already cast to float32 above, matching argmax's float output.
            train_acc += (y_hat.argmax(axis=1) == y).mean().asscalar()
        time_s = "time %.2f sec" % (time.time() - start)
        if valid_data is not None:
            valid_acc = gb.evaluate_accuracy(valid_data, net, ctx)
            epoch_s = ("epoch %d, loss %f, train acc %f, valid acc %f, "
                       % (epoch + 1, train_l / len(train_data),
                          train_acc / len(train_data), valid_acc))
        else:
            # BUG FIX: this must be the `else` branch; the pasted code
            # unconditionally overwrote the validation line above.
            epoch_s = ("epoch %d, loss %f, train acc %f, "
                       % (epoch + 1, train_l / len(train_data),
                          train_acc / len(train_data)))
        print(epoch_s + time_s + ', lr ' + str(trainer.learning_rate))

# Hyperparameters: a single epoch for the demo run; lr decays by 0.1 every
# 80 epochs (so never, at num_epochs=1).
ctx = gb.try_gpu()
num_epochs, lr, wd = 1, 0.1, 5e-4
lr_period, lr_decay = 80, 0.1
net = get_net(ctx)
train(net, train_data, valid_data, num_epochs, lr, wd, ctx, lr_period, lr_decay)

I am using gluonbook 0.8.10, Python 3.7.2, and MXNet 1.4.0.


I solved it! My version of gluonbook does not provide `accuracy`, so I defined the function myself:


def accuracy(y_hat, y):
    """Fraction of rows in y_hat whose argmax matches the label in y."""
    predictions = y_hat.argmax(axis=1)
    hits = predictions == y.astype('float32')
    return hits.mean().asscalar()