TensorFlow 2.3.0
Last updated: 2023-01-18
Sample code for an MNIST image-classification task based on the TensorFlow framework. To download the training dataset, click here.
Single-node training (one compute node), sample code as follows:
Python
1"""
2tf train demo
3"""
4import tensorflow as tf
5import os
6mnist = tf.keras.datasets.mnist
7work_path = os.getcwd()
8(x_train, y_train), (x_test, y_test) = mnist.load_data('%s/train_data/mnist.npz' % work_path)
9x_train, x_test = x_train / 255.0, x_test / 255.0
10model = tf.keras.models.Sequential([
11tf.keras.layers.Flatten(input_shape=(28, 28)),
12tf.keras.layers.Dense(128, activation='relu'),
13tf.keras.layers.Dropout(0.2),
14tf.keras.layers.Dense(10, activation='softmax')
15])
16model.compile(optimizer='adam',
17 loss='sparse_categorical_crossentropy',
18 metrics=['accuracy'])
19model.fit(x_train, y_train, epochs=5)
20model.evaluate(x_test, y_test, verbose=2)
21model.save('./output/')
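The SavedModel directory written by model.save('./output/') can later be loaded back for inference. A minimal sketch, assuming the script above has already run and produced ./output/ (the dummy input is only an illustration):
Python
import numpy as np
import tensorflow as tf

# Restore the model exported by the training script above.
restored = tf.keras.models.load_model('./output/')
# Run inference on a dummy batch of one 28x28 grayscale image.
dummy = np.zeros((1, 28, 28), dtype=np.float32)
probs = restored.predict(dummy)
print(probs.argmax(axis=1))  # predicted class index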
Distributed training (more than one compute node), sample code as follows:
Note: this distributed demo does not shard the training data across workers and is for reference only; a sharding sketch follows the code.
Python
1"""
2tf horovod train demo
3"""
4import tensorflow as tf
5import horovod.tensorflow as hvd
6import os
7# Horovod: initialize Horovod.
8hvd.init()
9# Horovod: pin GPU to be used to process local rank (one GPU per process)
10gpus = tf.config.experimental.list_physical_devices('GPU')
11for gpu in gpus:
12 tf.config.experimental.set_memory_growth(gpu, True)
13if gpus:
14 tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
15work_path = os.getcwd()
16(mnist_images, mnist_labels), _ = \
17 tf.keras.datasets.mnist.load_data('%s/train_data/mnist.npz' % work_path)
18dataset = tf.data.Dataset.from_tensor_slices(
19 (tf.cast(mnist_images[..., tf.newaxis] / 255.0, tf.float32),
20 tf.cast(mnist_labels, tf.int64))
21)
22dataset = dataset.repeat().shuffle(10000).batch(128)
23mnist_model = tf.keras.Sequential([
24 tf.keras.layers.Conv2D(32, [3, 3], activation='relu'),
25 tf.keras.layers.Conv2D(64, [3, 3], activation='relu'),
26 tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
27 tf.keras.layers.Dropout(0.25),
28 tf.keras.layers.Flatten(),
29 tf.keras.layers.Dense(128, activation='relu'),
30 tf.keras.layers.Dropout(0.5),
31 tf.keras.layers.Dense(10, activation='softmax')
32])
33loss = tf.losses.SparseCategoricalCrossentropy()
34# Horovod: adjust learning rate based on number of GPUs.
35opt = tf.optimizers.Adam(0.001 * hvd.size())
36@tf.function
37def training_step(images, labels, first_batch):
38 """
39 :param images:
40 :param labels:
41 :param first_batch:
42 :return:
43 """
44 with tf.GradientTape() as tape:
45 probs = mnist_model(images, training=True)
46 loss_value = loss(labels, probs)
47 # Horovod: add Horovod Distributed GradientTape.
48 tape = hvd.DistributedGradientTape(tape)
49 grads = tape.gradient(loss_value, mnist_model.trainable_variables)
50 opt.apply_gradients(zip(grads, mnist_model.trainable_variables))
51 # Horovod: broadcast initial variable states from rank 0 to all other processes.
52 # This is necessary to ensure consistent initialization of all workers when
53 # training is started with random weights or restored from a checkpoint.
54 #
55 # Note: broadcast should be done after the first gradient step to ensure optimizer
56 # initialization.
57 if first_batch:
58 hvd.broadcast_variables(mnist_model.variables, root_rank=0)
59 hvd.broadcast_variables(opt.variables(), root_rank=0)
60 return loss_value
61# Horovod: adjust number of steps based on number of GPUs.
62for batch, (images, labels) in enumerate(dataset.take(10000 // hvd.size())):
63 loss_value = training_step(images, labels, batch == 0)
64 if batch % 10 == 0 and hvd.local_rank() == 0:
65 print('Step #%d\tLoss: %.6f' % (batch, loss_value))
66# Horovod: save model only on worker 0 to prevent other workers from
67# corrupting it.
68if hvd.rank() == 0:
69 mnist_model.save('./output/')
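As noted above, this demo does not shard the data: every worker iterates over the full dataset, so workers may process overlapping examples. A minimal sketch of per-worker sharding using tf.data's Dataset.shard, built on the same pipeline as the demo (shard before repeat/shuffle so each worker gets a disjoint slice of the raw examples):
Python
import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()
(mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data()
dataset = tf.data.Dataset.from_tensor_slices(
    (tf.cast(mnist_images[..., tf.newaxis] / 255.0, tf.float32),
     tf.cast(mnist_labels, tf.int64))
)
# Each Horovod worker reads a disjoint 1/size slice of the data.
dataset = dataset.shard(num_shards=hvd.size(), index=hvd.rank())
dataset = dataset.repeat().shuffle(10000).batch(128)
On platforms without a built-in launcher, such a script is typically started with one process per GPU, for example: horovodrun -np 4 python train.py.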