  • Building CNN+RNN, BRNN, DRNN and similar models with Keras

    Keras APIs to know up front:

    • BatchNormalization: normalizes the activations of the previous layer at each batch, i.e. applies a transformation that keeps the mean activation close to 0 and the activation standard deviation close to 1.

    • TimeDistributed: in short, a TimeDistributed layer applies the same Dense operation at every time step, which lets the model pick up more complex patterns in the data than a single Dense operation (see the shape sketch after this list).
      For a basic understanding:
    1. Usage of TimeDistributed in Keras (keras中TimeDistributed的用法)
      For a deeper understanding:
    2. How to Use the TimeDistributed Layer for Long Short-Term Memory Networks in Python
    3. A Chinese translation of the English article above
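
    To make the shapes concrete, here is a minimal sketch (with made-up sizes: 10 time steps, 16 features per step) that runs both layers on a sequence input; the same Dense weights are reused at every time step, so the time dimension is preserved:

    from keras.models import Model
    from keras.layers import Input, Dense, TimeDistributed, BatchNormalization

    # Hypothetical sequence input: 10 time steps, 16 features per step
    seq_in = Input(shape=(10, 16))
    # BatchNormalization keeps each feature's mean near 0 and std near 1 per batch
    normed = BatchNormalization()(seq_in)
    # The same Dense(8) is applied independently at every time step
    per_step = TimeDistributed(Dense(8))(normed)
    demo = Model(inputs=seq_in, outputs=per_step)
    demo.summary()  # final output shape: (None, 10, 8) -- time dimension kept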

    Relevant Keras imports

    from keras import backend as K
    from keras.models import Model
    from keras.layers import (BatchNormalization, Conv1D, Conv2D, Dense, Input, Dropout,
        TimeDistributed, Activation, Bidirectional, SimpleRNN, GRU, LSTM, MaxPooling1D, Flatten, MaxPooling2D)
    

    RNN

    def simple_rnn_model(input_dim, output_dim=29):
        """ Build a recurrent network for speech 
        """
        # Main acoustic input
        input_data = Input(name='the_input', shape=(None, input_dim))
        # Add recurrent layer
        simp_rnn = GRU(output_dim, return_sequences=True, 
                     implementation=2, name='rnn')(input_data)
        # Add softmax activation layer
        y_pred = Activation('softmax', name='softmax')(simp_rnn)
        # Specify the model
        model = Model(inputs=input_data, outputs=y_pred)
        model.output_length = lambda x: x
        print(model.summary())
        return model
    
    

    Or use Keras SimpleRNN directly, as in the sketch below.
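
    For reference, here is a minimal variant of simple_rnn_model that swaps the GRU for a SimpleRNN (it reuses the Keras imports listed above; the function name is just illustrative, and the 29-class output and identity output_length are kept as in the original):

    def simple_rnn_only_model(input_dim, output_dim=29):
        """ Variant of simple_rnn_model that uses SimpleRNN instead of GRU """
        # Main acoustic input
        input_data = Input(name='the_input', shape=(None, input_dim))
        # Vanilla recurrent layer, one output vector per time step
        simp_rnn = SimpleRNN(output_dim, return_sequences=True, name='rnn')(input_data)
        # Add softmax activation layer
        y_pred = Activation('softmax', name='softmax')(simp_rnn)
        # Specify the model
        model = Model(inputs=input_data, outputs=y_pred)
        model.output_length = lambda x: x
        print(model.summary())
        return model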

    RNN + TimeDistributed

    def rnn_model(input_dim, units, activation, output_dim=29):
        """ Build a recurrent network for speech 
        """
        # Main acoustic input
        input_data = Input(name='the_input', shape=(None, input_dim))
        # Add recurrent layer
        simp_rnn = LSTM(units, activation=activation,
            return_sequences=True, implementation=2, name='rnn')(input_data)
        # Add batch normalization
        bn_rnn = BatchNormalization()(simp_rnn)
        # Add a TimeDistributed(Dense(output_dim)) layer
        time_dense = TimeDistributed(Dense(output_dim))(bn_rnn)
        # Add softmax activation layer
        y_pred = Activation('softmax', name='softmax')(time_dense)
        # Specify the model
        model = Model(inputs=input_data, outputs=y_pred)
        model.output_length = lambda x: x
        print(model.summary())
        return model
    
    

    CNN + RNN + TimeDistributed

    def cnn_output_length(input_length, filter_size, border_mode, stride,
                           dilation=1):
        """ Compute the length of the output sequence after 1D convolution along
            time. Note that this function is in line with the function used in
            Convolution1D class from Keras.
        Params:
            input_length (int): Length of the input sequence.
            filter_size (int): Width of the convolution kernel.
            border_mode (str): One of `same`, `valid`, `causal`, or `full`.
            stride (int): Stride size used in 1D convolution.
            dilation (int): Dilation rate of the convolution kernel.
        """
        if input_length is None:
            return None
        assert border_mode in {'same', 'valid', 'causal', 'full'}
        dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
        if border_mode == 'same':
            output_length = input_length
        elif border_mode == 'valid':
            output_length = input_length - dilated_filter_size + 1
        elif border_mode == 'causal':
            output_length = input_length
        elif border_mode == 'full':
            output_length = input_length + dilated_filter_size - 1
        return (output_length + stride - 1) // stride
    
    def cnn_rnn_model(input_dim, filters, kernel_size, conv_stride,
        conv_border_mode, units, output_dim=29):
        """ Build a recurrent + convolutional network for speech
        """
        # Main acoustic input
        input_data = Input(name='the_input', shape=(None, input_dim))
        # Add convolutional layer
        conv_1d = Conv1D(filters, kernel_size, 
                         strides=conv_stride, 
                         padding=conv_border_mode,
                         activation='relu',
                         name='conv1d')(input_data)
        # Add batch normalization
        bn_cnn = BatchNormalization(name='bn_conv_1d')(conv_1d)
        # Add a recurrent layer
        simp_rnn = SimpleRNN(units, activation='relu',
            return_sequences=True, implementation=2, name='rnn')(bn_cnn)
        # Add batch normalization
        bn_rnn = BatchNormalization()(simp_rnn)
        # Add a TimeDistributed(Dense(output_dim)) layer
        time_dense = TimeDistributed(Dense(output_dim))(bn_rnn)
        # Add softmax activation layer
        y_pred = Activation('softmax', name='softmax')(time_dense)
        # Specify the model
        model = Model(inputs=input_data, outputs=y_pred)
        model.output_length = lambda x: cnn_output_length(
            x, kernel_size, conv_border_mode, conv_stride)
        print(model.summary())
        return model
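
    As a quick check of the length formula above, with made-up hyperparameters (kernel_size=11, 'valid' padding, conv_stride=2, and an assumed 161-dimensional acoustic input), a 100-step input sequence is reduced to (100 - 11 + 1 + 2 - 1) // 2 = 45 time steps:

    # Hypothetical hyperparameters, used only to check the output-length arithmetic
    model = cnn_rnn_model(input_dim=161, filters=200, kernel_size=11,
                          conv_stride=2, conv_border_mode='valid', units=200)
    print(model.output_length(100))  # (100 - 11 + 1 + 2 - 1) // 2 = 45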
    

    Deep RNN + TimeDistributed

    def deep_rnn_model(input_dim, units, recur_layers, output_dim=29):
        """ Build a deep recurrent network for speech 
        """
        # Main acoustic input
        input_data = Input(name='the_input', shape=(None, input_dim))
        # Stack recurrent layers, each followed by batch normalization
        for i in range(recur_layers):
            layer_input = input_data if i == 0 else bn_rnn
            simp_rnn = GRU(units, return_sequences=True,
                           implementation=2, name='rnn_{}'.format(i + 1))(layer_input)
            # Batch normalization after each recurrent layer
            bn_rnn = BatchNormalization(name='bn_rnn_{}'.format(i + 1))(simp_rnn)
        # Add a TimeDistributed(Dense(output_dim)) layer
        time_dense = TimeDistributed(Dense(output_dim))(bn_rnn)
        # Add softmax activation layer
        y_pred = Activation('softmax', name='softmax')(time_dense)
        # Specify the model
        model = Model(inputs=input_data, outputs=y_pred)
        model.output_length = lambda x: x
        print(model.summary())
        return model
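
    For recur_layers=2, the loop above builds the graph shown in the unrolled sketch below (for illustration only; it reuses the imports at the top, and the function name is hypothetical):

    def deep_rnn_model_unrolled(input_dim, units, output_dim=29):
        """ Unrolled equivalent of deep_rnn_model(input_dim, units, recur_layers=2) """
        input_data = Input(name='the_input', shape=(None, input_dim))
        # First GRU + batch normalization
        rnn_1 = GRU(units, return_sequences=True, implementation=2, name='rnn_1')(input_data)
        bn_1 = BatchNormalization(name='bn_rnn_1')(rnn_1)
        # Second GRU + batch normalization, stacked on top of the first
        rnn_2 = GRU(units, return_sequences=True, implementation=2, name='rnn_2')(bn_1)
        bn_2 = BatchNormalization(name='bn_rnn_2')(rnn_2)
        # Per-time-step classifier followed by softmax
        time_dense = TimeDistributed(Dense(output_dim))(bn_2)
        y_pred = Activation('softmax', name='softmax')(time_dense)
        model = Model(inputs=input_data, outputs=y_pred)
        model.output_length = lambda x: x
        return model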
    

    Bidirectional RNN + TimeDistributed

    def bidirectional_rnn_model(input_dim, units, output_dim=29):
        """ Build a bidirectional recurrent network for speech
        """
        # Main acoustic input
        input_data = Input(name='the_input', shape=(None, input_dim))
        # Add bidirectional recurrent layer
        bidir_rnn = Bidirectional(GRU(units, return_sequences=True))(input_data)
        bidir_rnn = BatchNormalization()(bidir_rnn)
        # Add a TimeDistributed(Dense(output_dim)) layer
        time_dense = TimeDistributed(Dense(output_dim))(bidir_rnn)
        # Add softmax activation layer
        y_pred = Activation('softmax', name='softmax')(time_dense)
        # Specify the model
        model = Model(inputs=input_data, outputs=y_pred)
        model.output_length = lambda x: x
        print(model.summary())
        return model
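
    Note that Bidirectional uses merge_mode='concat' by default, so the forward and backward GRU outputs are concatenated and every time step carries 2 * units features before TimeDistributed(Dense(output_dim)) maps it down to output_dim. A quick shape check with made-up sizes:

    # Hypothetical sizes, used only to inspect the layer shapes in the summary
    model = bidirectional_rnn_model(input_dim=161, units=200)
    # bidirectional layer output: (None, None, 400) = 2 * units
    # softmax output:             (None, None, 29)  = output_dim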
    

    Other references:
    Deep Learning with Keras (5): RNN and Bidirectional RNN Explained with Practice (使用Keras进行深度学习:(五)RNN和双向RNN讲解及实践)

  • Original post: https://www.cnblogs.com/linyihai/p/10587992.html