There are some question during the training process


#1

from mxnet import gluon,nd,init,autograd
from mxnet.gluon import nn,data as gdata,loss as gloss
import numpy as np
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = nd.random.normal(scale=1, shape=(num_examples, num_inputs))
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += nd.random.normal(scale=0.01, shape=labels.shape)

print(features.shape)
net = nn.Sequential()
net.add(nn.Dense(1))
def optimize(batch_size,trainer,num_epochs,decay_epoch,log_interval,
features,labels,net):
dataset = gdata.ArrayDataset(features,labels)
data_iter = gdata.DataLoader(dataset,batch_size,shuffle=True)
loss = gloss.L2Loss()
ls = [loss(net(features),labels).mean().asnumpy]
for epoch in range(1,num_epochs):
if decay_epoch and epoch > decay_epoch:
trainer.set_learning_rate(trainer.learning_rate0.1)
for batch_i,(data,label) in enumerate(data_iter):
with autograd.record():
outputs2 = [net(X) for X in data]
losses = [loss(yhat, y) for yhat, y in zip(outputs2, label)]
for l in losses:
l.backward()
‘’‘outputs1 = net(data)
l = loss(outputs1,label)
l.backward()’’’
trainer.step(batch_size)
if batch_i
batch_size%log_interval ==0:
ls.append(loss(net(features),labels).mean().asnumpy())
print(‘w:’,net[0].weight.data(),’\nb:’,net[0].bias.data(),’\n’)
net.initialize(init.Normal(sigma=0.01), force_reinit=True)
trainer = gluon.Trainer(net.collect_params(), ‘sgd’, {‘learning_rate’: 0.2})
optimize(batch_size=1, trainer=trainer, num_epochs=3, decay_epoch=2,
log_interval=10, features=features, labels=labels, net=net)

Why can’t write outpus2 like this?and How is the difference between outpus1 and outpus2?


#2

Hi @barricade,

when copy pasting your code, can you make sure you put it in

  codeblocks:  ``` ````

So that it is formatted properly. Also make sure you edit your code in ascii format, here you have symbols like : , make sure it is '.

That will ensure that other users can help you best.