Keras guide

    1,Sequential model

    import tensorflow as tf
    from tensorflow.keras import layers
    import numpy as np

    model = tf.keras.Sequential()

    # Adds a densely-connected layer with 64 units to the model:
    model.add(layers.Dense(64, activation='relu'))
    # Add another:
    model.add(layers.Dense(64, activation='relu'))
    # Add a softmax layer with 10 output units:
    model.add(layers.Dense(10, activation='softmax'))

    2,Layers

    # Create a sigmoid layer:
    layers.Dense(64, activation='sigmoid')
    # Or:
    layers.Dense(64, activation=tf.sigmoid)

    # A linear layer with L1 regularization of factor 0.01 applied to the kernel matrix:
    layers.Dense(64, kernel_regularizer=tf.keras.regularizers.l1(0.01))

    # A linear layer with L2 regularization of factor 0.01 applied to the bias vector:
    layers.Dense(64, bias_regularizer=tf.keras.regularizers.l2(0.01))

    # A linear layer with a kernel initialized to a random orthogonal matrix:
    layers.Dense(64, kernel_initializer='orthogonal')

    # A linear layer with a bias vector initialized to 2.0s:
    layers.Dense(64, bias_initializer=tf.keras.initializers.constant(2.0))

    3,Train the model
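
    The `fit` calls below assume the model has already been compiled. A minimal compile sketch (the choice of optimizer and loss here is illustrative, in the same TF 1.x style used later in this guide):

    model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])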

    # Toy NumPy data to feed the model:
    data = np.random.random((1000, 32))
    labels = np.random.random((1000, 10))

    # Instantiates a toy dataset instance:
    dataset = tf.data.Dataset.from_tensor_slices((data, labels))
    dataset = dataset.batch(32)
    dataset = dataset.repeat()

    # Don't forget to specify `steps_per_epoch` when calling `fit` on a dataset.
    model.fit(dataset, epochs=10, steps_per_epoch=30)

    4,Validation, evaluation, and prediction

    dataset = tf.data.Dataset.from_tensor_slices((data, labels))
    dataset = dataset.batch(32).repeat()

    # Toy validation data:
    val_data = np.random.random((100, 32))
    val_labels = np.random.random((100, 10))

    val_dataset = tf.data.Dataset.from_tensor_slices((val_data, val_labels))
    val_dataset = val_dataset.batch(32).repeat()

    model.fit(dataset, epochs=10, steps_per_epoch=30,
              validation_data=val_dataset,
              validation_steps=3)

    data = np.random.random((1000, 32))
    labels = np.random.random((1000, 10))

    model.evaluate(data, labels, batch_size=32)

    model.evaluate(dataset, steps=30)

    result = model.predict(data, batch_size=32)
    print(result.shape)

    5,Functional API

    • Multi-input models,
    • Multi-output models,
    • Models with shared layers (the same layer called several times),
    • Models with non-sequential data flows (e.g. residual connections).

    Building a model with the functional API works like this:

    1. A layer instance is callable and returns a tensor.
    2. Input tensors and output tensors are used to define a tf.keras.Model instance.
    3. This model is trained just like the Sequential model.

    5.1,Input and output

    inputs = tf.keras.Input(shape=(32,)) # Returns a placeholder tensor

    # A layer instance is callable on a tensor, and returns a tensor.
    x = layers.Dense(64, activation='relu')(inputs)
    x = layers.Dense(64, activation='relu')(x)
    predictions = layers.Dense(10, activation='softmax')(x)

    5.2,Wrap in a model

    model = tf.keras.Model(inputs=inputs, outputs=predictions)

    # The compile step specifies the training configuration.
    model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Trains for 5 epochs
    model.fit(data, labels, batch_size=32, epochs=5)

    6,Model subclassing

    Build a fully-customizable model by subclassing tf.keras.Model and defining your own forward pass. Create layers in the __init__ method and set them as attributes of the class instance. Define the forward pass in the call method.

    Model subclassing is particularly useful when eager execution is enabled since the forward pass can be written imperatively.

    class MyModel(tf.keras.Model):

        def __init__(self, num_classes=10):
            super(MyModel, self).__init__(name='my_model')
            self.num_classes = num_classes
            # Define your layers here.
            self.dense_1 = layers.Dense(32, activation='relu')
            self.dense_2 = layers.Dense(num_classes, activation='sigmoid')

        def call(self, inputs):
            # Define your forward pass here,
            # using layers you previously defined (in `__init__`).
            x = self.dense_1(inputs)
            return self.dense_2(x)

        def compute_output_shape(self, input_shape):
            # You need to override this function if you want to use the subclassed
            # model as part of a functional-style model.
            # Otherwise, this method is optional.
            shape = tf.TensorShape(input_shape).as_list()
            shape[-1] = self.num_classes
            return tf.TensorShape(shape)

    model = MyModel(num_classes=10)

    # The compile step specifies the training configuration.
    model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Trains for 5 epochs.
    model.fit(data, labels, batch_size=32, epochs=5)

    7,Custom layers

    class MyLayer(layers.Layer):

        def __init__(self, output_dim, **kwargs):
            self.output_dim = output_dim
            super(MyLayer, self).__init__(**kwargs)

        def build(self, input_shape):
            shape = tf.TensorShape((input_shape[1], self.output_dim))
            # Create a trainable weight variable for this layer.
            self.kernel = self.add_weight(name='kernel',
                                          shape=shape,
                                          initializer='uniform',
                                          trainable=True)
            # Make sure to call the `build` method at the end.
            super(MyLayer, self).build(input_shape)

        def call(self, inputs):
            return tf.matmul(inputs, self.kernel)

        def compute_output_shape(self, input_shape):
            shape = tf.TensorShape(input_shape).as_list()
            shape[-1] = self.output_dim
            return tf.TensorShape(shape)

        def get_config(self):
            base_config = super(MyLayer, self).get_config()
            base_config['output_dim'] = self.output_dim
            return base_config

        @classmethod
        def from_config(cls, config):
            return cls(**config)

    model = tf.keras.Sequential([
        MyLayer(10),
        layers.Activation('softmax')])

    # The compile step specifies the training configuration
    model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Trains for 5 epochs.
    model.fit(data, labels, batch_size=32, epochs=5)

    8,Callbacks

    • tf.keras.callbacks.ModelCheckpoint: Save checkpoints of your model at regular intervals.
    • tf.keras.callbacks.LearningRateScheduler: Dynamically change the learning rate.
    • tf.keras.callbacks.EarlyStopping: Interrupt training when validation performance has stopped improving.
    • tf.keras.callbacks.TensorBoard: Monitor the model's behavior using TensorBoard.

    callbacks = [
        # Interrupt training if `val_loss` stops improving for over 2 epochs
        tf.keras.callbacks.EarlyStopping(patience=2, monitor='val_loss'),
        # Write TensorBoard logs to the `./logs` directory
        tf.keras.callbacks.TensorBoard(log_dir='./logs')
    ]
    model.fit(data, labels, batch_size=32, epochs=5, callbacks=callbacks,
              validation_data=(val_data, val_labels))
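
    The other two callbacks from the list above are wired up the same way; a minimal sketch (the checkpoint path and the schedule function are illustrative, not part of the original example):

    def schedule(epoch):
        # Halve the learning rate every 10 epochs (illustrative schedule).
        return 0.001 * (0.5 ** (epoch // 10))

    callbacks = [
        # Save the model's weights after every epoch to an illustrative path.
        tf.keras.callbacks.ModelCheckpoint('./weights.h5', save_weights_only=True),
        # Set the learning rate for each epoch from `schedule(epoch)`.
        tf.keras.callbacks.LearningRateScheduler(schedule)
    ]
    model.fit(data, labels, batch_size=32, epochs=5, callbacks=callbacks)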

    9,Save and restore

    9.1,Weights only

    # Save weights to an HDF5 file
    model.save_weights('my_model.h5', save_format='h5')

    # Restore the model's state
    model.load_weights('my_model.h5')

    9.2,Configuration only

    # Serialize a model to JSON format
    json_string = model.to_json()
    json_string

    import json
    import pprint
    pprint.pprint(json.loads(json_string))

    fresh_model = tf.keras.models.model_from_json(json_string)

    # Serialize a model to YAML format (requires the pyyaml package)
    yaml_string = model.to_yaml()
    print(yaml_string)

    fresh_model = tf.keras.models.model_from_yaml(yaml_string)

    9.3,Entire model

    # Create a trivial model
    model = tf.keras.Sequential([
        layers.Dense(10, activation='softmax', input_shape=(32,)),
        layers.Dense(10, activation='softmax')
    ])
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(data, labels, batch_size=32, epochs=5)


    # Save the entire model to an HDF5 file
    model.save('my_model.h5')

    # Recreate the exact same model, including weights and optimizer.
    model = tf.keras.models.load_model('my_model.h5')

    10,Eager execution

    Eager execution is an imperative programming environment that evaluates operations immediately. This is not required for Keras, but is supported by tf.keras and useful for inspecting your program and debugging.

    All of the tf.keras model-building APIs are compatible with eager execution. And while the Sequential and functional APIs can be used, eager execution especially benefits model subclassing and building custom layers—the APIs that require you to write the forward pass as code (instead of the APIs that create models by assembling existing layers).
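
    A minimal sketch, assuming the TF 1.x APIs used throughout this guide, of what eager execution looks like in practice (it must be enabled once at program startup):

    import tensorflow as tf

    tf.enable_eager_execution()

    x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    y = tf.matmul(x, x)
    # With eager execution the result is a concrete value right away,
    # no session or graph building required.
    print(y.numpy())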

    11,Estimators

    11.1,Convert to an Estimator

    A tf.keras.Model can be trained with the tf.estimator API by converting the model to a tf.estimator.Estimator object with tf.keras.estimator.model_to_estimator.

    model = tf.keras.Sequential([layers.Dense(10, activation='softmax'),
                                 layers.Dense(10, activation='softmax')])

    model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    estimator = tf.keras.estimator.model_to_estimator(model)

    11.2,Multiple GPUs

    tf.keras models can run on multiple GPUs by training through an Estimator with tf.contrib.distribute.MirroredStrategy, as shown below. First, define a simple model:

    model = tf.keras.Sequential()
    model.add(layers.Dense(16, activation='relu', input_shape=(10,)))
    model.add(layers.Dense(1, activation='sigmoid'))

    optimizer = tf.train.GradientDescentOptimizer(0.2)

    model.compile(loss='binary_crossentropy', optimizer=optimizer)
    model.summary()

    Define an input pipeline

    def input_fn():
        x = np.random.random((1024, 10))
        y = np.random.randint(2, size=(1024, 1))
        x = tf.cast(x, tf.float32)
        dataset = tf.data.Dataset.from_tensor_slices((x, y))
        dataset = dataset.repeat(10)
        dataset = dataset.batch(32)
        return dataset

    Next, create a tf.estimator.RunConfig and set its train_distribute argument to a tf.contrib.distribute.MirroredStrategy instance. When creating MirroredStrategy, you can specify a list of devices or set the num_gpus argument.

    strategy = tf.contrib.distribute.MirroredStrategy()
    config = tf.estimator.RunConfig(train_distribute=strategy)

    Convert the Keras model to a tf.estimator.Estimator instance

    keras_estimator = tf.keras.estimator.model_to_estimator(
        keras_model=model,
        config=config,
        model_dir='/tmp/model_dir')

    keras_estimator.train(input_fn=input_fn, steps=10)
