import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets
import os

# suppress irrelevant log output
# (note: must be set before importing tensorflow to take effect)
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# x: [60k,28,28], x_test: [10k,28,28]
# y: [60k],       y_test: [10k]
(x, y), (x_test, y_test) = datasets.mnist.load_data()

# convert to Tensor
# x: [0~255] ==> [0~1.]
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.
y = tf.convert_to_tensor(y, dtype=tf.int32)
x_test = tf.convert_to_tensor(x_test, dtype=tf.float32) / 255.
y_test = tf.convert_to_tensor(y_test, dtype=tf.int32)
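As a quick sanity check (a minimal sketch, not part of the original listing), the shapes and the rescaled pixel range can be verified:

# sanity check: dataset shapes and pixel range after scaling
print(x.shape, y.shape)            # (60000, 28, 28) (60000,)
print(float(tf.reduce_min(x)),
      float(tf.reduce_max(x)))     # 0.0 1.0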
# build datasets with a batch size of 128
train_db = tf.data.Dataset.from_tensor_slices((x, y)).batch(128)
test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(128)
train_iter = iter(train_db)
sample = next(train_iter)
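One batch can be pulled from the iterator to confirm the batching, a minimal sketch:

# peek at the first batch: 128 images and 128 labels
print('batch:', sample[0].shape, sample[1].shape)  # batch: (128, 28, 28) (128,)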
# [b,784] ==> [b,256] ==> [b,128] ==> [b,10]
# [dim_in,dim_out], [dim_out]
w1 = tf.Variable(tf.random.truncated_normal([784, 256], stddev=0.1))
b1 = tf.Variable(tf.zeros([256]))
w2 = tf.Variable(tf.random.truncated_normal([256, 128], stddev=0.1))
b2 = tf.Variable(tf.zeros([128]))
w3 = tf.Variable(tf.random.truncated_normal([128, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]))
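The small `stddev=0.1` matters: with a larger initial standard deviation the initial outputs can be large enough that the loss below blows up to NaN. A minimal check that the parameter shapes chain together as 784 -> 256 -> 128 -> 10:

# verify the parameter shape chain
for t in (w1, b1, w2, b2, w3, b3):
    print(t.shape)  # (784, 256) (256,) (256, 128) (128,) (128, 10) (10,)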
# learning rate
lr = 1e-3
for epoch in range(10):  # iterate over the dataset for 10 epochs

    # train on every batch of train_db
    for step, (x, y) in enumerate(train_db):
        # x: [128,28,28]
        # y: [128]

        # [b,28,28] ==> [b,28*28]
        x = tf.reshape(x, [-1, 28 * 28])

        with tf.GradientTape() as tape:  # only tf.Variable objects are tracked by default
            # x: [b,28*28]
            # h1 = x@w1 + b1
            # [b,784]@[784,256] + [256] ==> [b,256] + [256] ==> [b,256] + [b,256]
            h1 = x @ w1 + tf.broadcast_to(b1, [x.shape[0], 256])
            h1 = tf.nn.relu(h1)
            # [b,256] ==> [b,128]
            # h2 = h1@w2 + b2, where b2 broadcasts automatically
            h2 = h1 @ w2 + b2
            h2 = tf.nn.relu(h2)
            # [b,128] ==> [b,10]
            out = h2 @ w3 + b3

            # compute loss
            # out: [b,10]
            # y: [b] ==> [b,10]
            y_onehot = tf.one_hot(y, depth=10)

            # mse = mean((y-out)^2)
            # [b,10]
            loss = tf.square(y_onehot - out)
            # mean: scalar
            loss = tf.reduce_mean(loss)

        # compute gradients
        grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])
        # w1 = w1 - lr * w1_grad
        # w1 = w1 - lr * grads[0]  # not an in-place update
        # in-place updates keep the tf.Variable type
        w1.assign_sub(lr * grads[0])
        b1.assign_sub(lr * grads[1])
        w2.assign_sub(lr * grads[2])
        b2.assign_sub(lr * grads[3])
        w3.assign_sub(lr * grads[4])
        b3.assign_sub(lr * grads[5])

        if step % 100 == 0:
            print(f'epoch: {epoch}, step: {step}, loss: {float(loss)}')

    # evaluate with the current [w1,b1,w2,b2,w3,b3]
    total_correct, total_num = 0, 0
    for step, (x, y) in enumerate(test_db):
        # [b,28,28] ==> [b,28*28]
        x = tf.reshape(x, [-1, 28 * 28])

        # [b,784] ==> [b,256] ==> [b,128] ==> [b,10]
        h1 = tf.nn.relu(x @ w1 + b1)
        h2 = tf.nn.relu(h1 @ w2 + b2)
        out = h2 @ w3 + b3

        # out: [b,10] ~ R
        # prob: [b,10] ~ (0,1)
        prob = tf.nn.softmax(out, axis=1)
        # [b,10] ==> [b]
        pred = tf.argmax(prob, axis=1)
        pred = tf.cast(pred, dtype=tf.int32)
        # y: [b]
        # correct: [b], int32
        correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
        correct = tf.reduce_sum(correct)

        total_correct += int(correct)
        total_num += x.shape[0]

    acc = total_correct / total_num
    print(f'test acc: {acc}')
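Two variations worth noting (sketches, not from the original post). The six `assign_sub` calls implement plain SGD by hand, so the same step could be delegated to a built-in optimizer; and for classification, softmax cross-entropy is the more conventional loss than MSE on one-hot targets:

# equivalent SGD step via a built-in optimizer (sketch):
optimizer = tf.keras.optimizers.SGD(learning_rate=lr)
# inside the training loop this would replace the six assign_sub calls:
#   optimizer.apply_gradients(zip(grads, [w1, b1, w2, b2, w3, b3]))

# a more conventional classification loss than MSE (sketch):
#   loss = tf.reduce_mean(
#       tf.nn.softmax_cross_entropy_with_logits(labels=y_onehot, logits=out))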
Original post: https://www.cnblogs.com/tszr/p/12142058.html