from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from tensorflow.keras.layers import (Conv2D, Dense, Embedding, Flatten,
                                     Input, LSTM, MaxPooling2D)
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import AUC, Precision, Recall
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam

# A vision model.
# Encode an image into a vector.
vision_model = Sequential()
vision_model.add(Conv2D(64, (3, 3),
                        activation='relu',
                        input_shape=(224, 224, 3)))
vision_model.add(MaxPooling2D())
vision_model.add(Flatten())

# Get a tensor with the output of your vision model.
image_input = Input(shape=(224, 224, 3))
encoded_image = vision_model(image_input)
# A language model.
# Encode the question into a vector.
question_input = Input(shape=(100,),
                       dtype='int32',
                       name="Question")
embedded_question = Embedding(input_dim=10000,
                              output_dim=256,
                              input_length=100)(question_input)
encoded_question = LSTM(256)(embedded_question)
# Concatenate the encoded image and question.
merged = layers.concatenate([encoded_image,
                             encoded_question])

# Train a classifier on top.
output = Dense(1000,
               activation='softmax')(merged)
# You can train with .fit, .train_on_batch,
# or with a GradientTape.
vqa_model = Model(inputs=[image_input,
                          question_input],
                  outputs=output)
vqa_model.compile(optimizer=Adam(),
                  loss=CategoricalCrossentropy(),  # matches the 1,000-way softmax output
                  metrics=[AUC(), Precision(), Recall()])
# data / val_data: datasets yielding ([images, questions], one-hot answers).
vqa_model.fit(data,
              epochs=10,
              validation_data=val_data,
              callbacks=[EarlyStopping(),
                         TensorBoard(),
                         ModelCheckpoint('vqa_model.keras')])  # filepath is required
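
As the comment above notes, the same model can also be trained with a manual GradientTape loop instead of .fit. A minimal sketch, reusing the imports above; the batch names and the train_step helper are illustrative, and answer_batch is assumed to be one-hot encoded:

import tensorflow as tf

loss_fn = CategoricalCrossentropy()
optimizer = Adam()

@tf.function
def train_step(image_batch, question_batch, answer_batch):
    # One optimization step: forward pass, loss, gradients, update.
    with tf.GradientTape() as tape:
        predictions = vqa_model([image_batch, question_batch], training=True)
        loss = loss_fn(answer_batch, predictions)
    gradients = tape.gradient(loss, vqa_model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, vqa_model.trainable_variables))
    return loss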
Switch to distributed training (single-machine multi-GPU and multi-machine multi-GPU) without changing a single line of code.
# Single-device training.
import tensorflow as tf

model = tf.keras.applications.ResNet50()
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
model.compile(..., optimizer=optimizer)
model.fit(train_dataset, epochs=10)
# The same code under a distribution strategy scope
# (MirroredStrategy: single machine, multiple GPUs).
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    model = tf.keras.applications.ResNet50()
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
    model.compile(..., optimizer=optimizer)
model.fit(train_dataset, epochs=10)
The model code truly does not need to change by a single character.
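
For the multi-machine multi-GPU case, the pattern is the same with tf.distribute.MultiWorkerMirroredStrategy. A minimal sketch, assuming each machine sets a TF_CONFIG environment variable describing the cluster (the worker addresses below are placeholders), that train_dataset is the same placeholder dataset as above, and that the loss shown is only illustrative:

import tensorflow as tf

# TF_CONFIG must be set on each worker before the strategy is created, e.g.:
# {"cluster": {"worker": ["host1:12345", "host2:12345"]},
#  "task": {"type": "worker", "index": 0}}
strategy = tf.distribute.MultiWorkerMirroredStrategy()
with strategy.scope():
    model = tf.keras.applications.ResNet50()
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
    model.compile(loss='sparse_categorical_crossentropy',  # illustrative loss choice
                  optimizer=optimizer)
model.fit(train_dataset, epochs=10)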
# Convert the model to the TFLite format without quantization
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# Save the model to disk
with open("gesture_model.tflite", "wb") as f:
    f.write(tflite_model)
# Check the size
import os
basic_model_size = os.path.getsize("gesture_model.tflite")
print("Model is %d bytes" % basic_model_size)
import json
import os

import tensorflowjs as tfjs

metadata = {
    'word_index': tokenizer.word_index,  # tokenizer: a previously fitted text tokenizer
    # …
}

# Save metadata (e.g. the tokenizer's word index) next to the model artifacts.
metadata_json_path = os.path.join(FLAGS.artifacts_dir, 'metadata.json')
with open(metadata_json_path, 'wt') as f:
    json.dump(metadata, f)

# Convert your model to the TF.js format.
tfjs.converters.save_keras_model(model, FLAGS.artifacts_dir)