Generally speaking, five parts of Keras get used in TF:
- datasets
- layers
- losses
- metrics
- optimizers
Metrics
During training we constantly need to compute the loss and accuracy. The Metrics classes handle this measurement: you simply feed new values into a meter at each step, and it accumulates the running loss and accuracy for you.
```python
# create the two meters
acc_meter = metrics.Accuracy()
loss_meter = metrics.Mean()

#### inside the training loop
# update
loss_meter.update_state(loss)
acc_meter.update_state(y, pred)

# read out the results
print(loss_meter.result().numpy())
print(acc_meter.result().numpy())

# clear the buffers
loss_meter.reset_states()
acc_meter.reset_states()
```
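To make the streaming behavior concrete, here is a minimal self-contained sketch (the values fed in are made-up placeholders): a meter keeps a running result over every update_state call since the last reset_states.

```python
import tensorflow as tf
from tensorflow.keras import metrics

loss_meter = metrics.Mean()
loss_meter.update_state(2.0)
loss_meter.update_state(4.0)
print(loss_meter.result().numpy())  # 3.0 -- running mean over all updates so far
loss_meter.reset_states()           # clear the buffer to start a new window
loss_meter.update_state(1.0)
print(loss_meter.result().numpy())  # 1.0 -- only values after the reset count

acc_meter = metrics.Accuracy()
acc_meter.update_state([0, 1, 2], [0, 1, 1])  # (y_true, y_pred)
print(acc_meter.result().numpy())   # ~0.6667 -- 2 of 3 predictions match
```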
A complete example:
```python
import tensorflow as tf
from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics


def preprocess(x, y):
    x = tf.cast(x, dtype=tf.float32) / 255.
    y = tf.cast(y, dtype=tf.int32)
    return x, y


batchsz = 128
(x, y), (x_val, y_val) = datasets.mnist.load_data()
print('datasets:', x.shape, y.shape, x.min(), x.max())

db = tf.data.Dataset.from_tensor_slices((x, y))
db = db.map(preprocess).shuffle(60000).batch(batchsz).repeat(10)

ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val))
ds_val = ds_val.map(preprocess).batch(batchsz)

network = Sequential([layers.Dense(256, activation='relu'),
                      layers.Dense(128, activation='relu'),
                      layers.Dense(64, activation='relu'),
                      layers.Dense(32, activation='relu'),
                      layers.Dense(10)])
network.build(input_shape=(None, 28 * 28))
network.summary()

optimizer = optimizers.Adam(learning_rate=0.01)

acc_meter = metrics.Accuracy()
loss_meter = metrics.Mean()

for step, (x, y) in enumerate(db):

    with tf.GradientTape() as tape:
        # [b, 28, 28] => [b, 784]
        x = tf.reshape(x, (-1, 28 * 28))
        # [b, 784] => [b, 10]
        out = network(x)
        # [b] => [b, 10]
        y_onehot = tf.one_hot(y, depth=10)
        # [b]
        loss = tf.reduce_mean(tf.losses.categorical_crossentropy(y_onehot, out, from_logits=True))

        loss_meter.update_state(loss)

    grads = tape.gradient(loss, network.trainable_variables)
    optimizer.apply_gradients(zip(grads, network.trainable_variables))

    if step % 100 == 0:
        print(step, 'loss:', loss_meter.result().numpy())
        loss_meter.reset_states()

    # evaluate (fresh names so the training-loop x, y, step are not shadowed)
    if step % 500 == 0:
        total, total_correct = 0., 0
        acc_meter.reset_states()

        for x_test, y_test in ds_val:
            # [b, 28, 28] => [b, 784]
            x_test = tf.reshape(x_test, (-1, 28 * 28))
            # [b, 784] => [b, 10]
            out = network(x_test)
            # [b, 10] => [b]
            pred = tf.argmax(out, axis=1)
            pred = tf.cast(pred, dtype=tf.int32)
            # bool tensor
            correct = tf.equal(pred, y_test)
            # bool tensor => int tensor => numpy
            total_correct += tf.reduce_sum(tf.cast(correct, dtype=tf.int32)).numpy()
            total += x_test.shape[0]

            acc_meter.update_state(y_test, pred)

        print(step, 'Evaluate Acc:', total_correct / total, acc_meter.result().numpy())
```
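Note that the evaluation loop measures accuracy two ways on purpose: once by hand with total_correct / total and once through acc_meter. The two printed numbers should agree, which makes this a convenient sanity check that the meter is being updated and reset correctly.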
Compile & Fit
- Compile plays a loading/configuration role: it fixes the choice of optimizer, the loss, and the evaluation metrics
- Fit runs a standard training loop
- Evaluate measures the model on a dataset
- Predict runs inference to produce predictions (a minimal sketch of all four calls follows this list)
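Before the full CIFAR-10 example, here is a minimal sketch of the four calls on MNIST; the layer sizes, learning rate, and epoch count are arbitrary placeholders rather than tuned values.

```python
import tensorflow as tf
from tensorflow.keras import datasets, layers, optimizers, Sequential

(x, y), (x_val, y_val) = datasets.mnist.load_data()
x, x_val = x.reshape(-1, 28 * 28) / 255., x_val.reshape(-1, 28 * 28) / 255.

model = Sequential([layers.Dense(128, activation='relu'),
                    layers.Dense(10)])

# Compile: fix the optimizer, the loss, and the evaluation metrics
model.compile(optimizer=optimizers.Adam(learning_rate=1e-3),
              loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

# Fit: run the standard training loop, validating once per epoch
model.fit(x, y, batch_size=128, epochs=2, validation_data=(x_val, y_val))

# Evaluate: report loss and metrics on held-out data
model.evaluate(x_val, y_val)

# Predict: plain forward pass; returns one row of logits per sample
logits = model.predict(x_val[:3])
print(tf.argmax(logits, axis=1).numpy())
```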
Next is an example that trains on CIFAR-10. No convolutions are used here; the images are simply flattened and fed through dense layers. The whole dataset is around 160 MB.
```python
import tensorflow as tf
from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics
from tensorflow import keras
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


def preprocess(x, y):
    # [0~255] => [-1~1]
    x = 2 * tf.cast(x, dtype=tf.float32) / 255. - 1.
    y = tf.cast(y, dtype=tf.int32)
    return x, y


batchsz = 128
# [50k, 32, 32, 3], [10k, 1]
(x, y), (x_val, y_val) = datasets.cifar10.load_data()
y = tf.squeeze(y)
y_val = tf.squeeze(y_val)
y = tf.one_hot(y, depth=10)          # [50k, 10]
y_val = tf.one_hot(y_val, depth=10)  # [10k, 10]
print('datasets:', x.shape, y.shape, x_val.shape, y_val.shape, x.min(), x.max())

train_db = tf.data.Dataset.from_tensor_slices((x, y))
train_db = train_db.map(preprocess).shuffle(10000).batch(batchsz)
test_db = tf.data.Dataset.from_tensor_slices((x_val, y_val))
test_db = test_db.map(preprocess).batch(batchsz)

sample = next(iter(train_db))
print('batch:', sample[0].shape, sample[1].shape)


class MyDense(layers.Layer):
    # to replace the standard layers.Dense()
    def __init__(self, inp_dim, outp_dim):
        super(MyDense, self).__init__()
        self.kernel = self.add_weight('w', [inp_dim, outp_dim])
        # self.bias = self.add_weight('b', [outp_dim])

    def call(self, inputs, training=None):
        x = inputs @ self.kernel
        return x


class MyNetwork(keras.Model):

    def __init__(self):
        super(MyNetwork, self).__init__()
        self.fc1 = MyDense(32 * 32 * 3, 256)
        self.fc2 = MyDense(256, 128)
        self.fc3 = MyDense(128, 64)
        self.fc4 = MyDense(64, 32)
        self.fc5 = MyDense(32, 10)

    def call(self, inputs, training=None):
        """
        :param inputs: [b, 32, 32, 3]
        :param training:
        :return:
        """
        x = tf.reshape(inputs, [-1, 32 * 32 * 3])
        # [b, 32*32*3] => [b, 256]
        x = self.fc1(x)
        x = tf.nn.relu(x)
        # [b, 256] => [b, 128]
        x = self.fc2(x)
        x = tf.nn.relu(x)
        # [b, 128] => [b, 64]
        x = self.fc3(x)
        x = tf.nn.relu(x)
        # [b, 64] => [b, 32]
        x = self.fc4(x)
        x = tf.nn.relu(x)
        # [b, 32] => [b, 10]
        x = self.fc5(x)
        return x


network = MyNetwork()
network.compile(optimizer=optimizers.Adam(learning_rate=1e-3),
                loss=tf.losses.CategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])
network.fit(train_db, epochs=15, validation_data=test_db, validation_freq=1)

network.evaluate(test_db)
network.save_weights('ckpt/weights.ckpt')
del network
print('saved to ckpt/weights.ckpt')

network = MyNetwork()
network.compile(optimizer=optimizers.Adam(learning_rate=1e-3),
                loss=tf.losses.CategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])
network.load_weights('ckpt/weights.ckpt')
print('loaded weights from file.')
network.evaluate(test_db)
```
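One thing worth noticing in the last block: save_weights stores only the parameter values, not the architecture, so the model has to be rebuilt and compiled in code before load_weights is called, exactly as done above. The heavier-weight network.save() also persists the model structure, though it comes with caveats for subclassed models like MyNetwork.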