  • Summary of Handwritten Chinese Character Recognition Models

    Handwritten Chinese character recognition models:
    The first-place entry used GoogLeNet and reached 97.3% accuracy.
    # Imports assumed by this snippet (TF 1.x with contrib slim):
    import tensorflow as tf
    import tensorflow.contrib.slim as slim
    relu = tf.nn.relu

    def GoogleLeNetSlim(x, num_classes, keep_prob=0.5):
        with tf.variable_scope('main'):
            # Stem: batch-normalized 3x3 convolutions with 2x2 max pooling in between
            t = slim.conv2d(x, 64, [3, 3], [1, 1], padding='SAME', activation_fn=relu, normalizer_fn=slim.batch_norm, scope='conv1')
            t = slim.max_pool2d(t, [2, 2], [2, 2], padding='SAME')
            t = slim.conv2d(t, 96, [3, 3], [1, 1], padding='SAME', activation_fn=relu, normalizer_fn=slim.batch_norm, scope='conv2')
            t = slim.conv2d(t, 192, [3, 3], [1, 1], padding='SAME', activation_fn=relu, normalizer_fn=slim.batch_norm, scope='conv3')
            t = slim.max_pool2d(t, [2, 2], [2, 2], padding='SAME')
    
        with tf.variable_scope('block1'):
            t = block_slim(t, [64, 96, 128, 16, 32, 32], name='block1')       # [?, 16, 16, 256]
    
        with tf.variable_scope('block2'):
            t = block_slim(t, [128, 128, 192, 32, 96, 64], name='block1')     # [?, 16, 16, 480]
            t = tf.nn.max_pool(t, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    
        with tf.variable_scope('block3'):
            t = block_slim(t, [192, 96, 208, 16, 48, 64], name='block1')
            t = block_slim(t, [160, 112, 224, 24, 64, 64], name='block2')
            t = block_slim(t, [128, 128, 256, 24, 64, 64], name='block3')
            t = block_slim(t, [112, 144, 288, 32, 64, 64], name='block4')
            t = block_slim(t, [256, 160, 320, 32, 128, 128], name='block5')    # [?, 8, 8, 832]
            t = tf.nn.max_pool(t, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    
        with tf.variable_scope('block4'):
            t = block_slim(t, [256, 160, 320, 32, 128, 128], name='block1')
            t = block_slim(t, [384, 192, 384, 48, 128, 128], name='block2')    # [?, 8, 8, 1024]
            t = tf.nn.max_pool(t, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    
        with tf.variable_scope('fc'):
            t = slim.flatten(t)
            t = slim.fully_connected(slim.dropout(t, keep_prob), 1024, activation_fn=relu, normalizer_fn=slim.batch_norm, scope='fc1')
            t = slim.fully_connected(slim.dropout(t, keep_prob), num_classes, activation_fn=None, scope='logits')
    
        return t
    TODO: experiment with https://github.com/tflearn/tflearn/blob/master/examples/images/googlenet.py
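
    The block_slim helper is not shown in the original post. Judging by the six-element size lists and the channel counts in the shape comments (256, 480, 832, 1024), it is the classic GoogLeNet Inception module (1x1 branch, 3x3-reduce + 3x3 branch, 5x5-reduce + 5x5 branch, pool + 1x1 projection branch). A minimal sketch under that assumption; the scope names are mine:
    def block_slim(t, sizes, name):
        """Inception module: sizes = [n1x1, n3x3_reduce, n3x3, n5x5_reduce, n5x5, n_pool_proj]."""
        n1, n3r, n3, n5r, n5, npp = sizes
        with tf.variable_scope(name):
            b0 = slim.conv2d(t, n1, [1, 1], activation_fn=relu, normalizer_fn=slim.batch_norm, scope='b0_1x1')
            b1 = slim.conv2d(t, n3r, [1, 1], activation_fn=relu, normalizer_fn=slim.batch_norm, scope='b1_1x1')
            b1 = slim.conv2d(b1, n3, [3, 3], activation_fn=relu, normalizer_fn=slim.batch_norm, scope='b1_3x3')
            b2 = slim.conv2d(t, n5r, [1, 1], activation_fn=relu, normalizer_fn=slim.batch_norm, scope='b2_1x1')
            b2 = slim.conv2d(b2, n5, [5, 5], activation_fn=relu, normalizer_fn=slim.batch_norm, scope='b2_5x5')
            b3 = slim.max_pool2d(t, [3, 3], [1, 1], padding='SAME', scope='b3_pool')
            b3 = slim.conv2d(b3, npp, [1, 1], activation_fn=relu, normalizer_fn=slim.batch_norm, scope='b3_1x1')
            # Output depth = n1 + n3 + n5 + npp, which matches the shape comments above.
            return tf.concat([b0, b1, b2, b3], 3)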
    
    
    There is also an entry that uses Inception v3:
    def build_graph_all(top_k, scope=None):
        # image_size (input resolution) and char_size (number of character classes) are
        # assumed to be module-level constants defined elsewhere in the original project.
        keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name='keep_prob')
        images = tf.placeholder(dtype=tf.float32, shape=[None, image_size, image_size, 1], name='image_batch')
        labels = tf.placeholder(dtype=tf.int64, shape=[None], name='label_batch')
    
        with tf.variable_scope(scope,'Incept_Net',[images]):
            with slim.arg_scope([slim.conv2d,slim.max_pool2d,slim.avg_pool2d],stride=1,padding='VALID'):
    
                net = slim.conv2d(images,32,[3,3],scope='conv2d_1a_3x3')
                print('tensor 1:' + str(net.get_shape().as_list()))
    
                net = slim.conv2d(net,32,[3,3],scope='conv2d_2a_3x3')
                print('tensor 2:' + str(net.get_shape().as_list()))
    
                net = slim.conv2d(net,64,[3,3],padding='SAME',scope='conv2d_2b_3x3')
                print('tensor 3:' + str(net.get_shape().as_list()))
    
                net = slim.max_pool2d(net,[3,3],stride=2,scope='maxpool_3a_3x3')
                print('tensor 4:' + str(net.get_shape().as_list()))
    
                net = slim.conv2d(net,80,[1,1],scope='conv2d_3b_1x1')
                print('tensor 5:' + str(net.get_shape().as_list()))
    
                net = slim.conv2d(net,192,[3,3],scope='conv2d_4a_3x3')
                print('tensor 6:' + str(net.get_shape().as_list()))
    
                net = slim.max_pool2d(net,[3,3],stride=2,scope='maxpool_5a_3x3')
                print('tensor 7:' + str(net.get_shape().as_list()))
    
            with slim.arg_scope([slim.conv2d,slim.max_pool2d,slim.avg_pool2d],stride=1,padding='SAME'):
                with tf.variable_scope('mixed_5b'):
                    with tf.variable_scope('branch_0'):
                        branch_0 = slim.conv2d(net,64,[1,1],scope='conv2d_0a_1x1')
                    with tf.variable_scope('branch_1'):
                        branch_1 = slim.conv2d(net,48,[1,1],scope='conv2d_0a_1x1')
                        branch_1 = slim.conv2d(branch_1,64,[5,5],scope='conv2d_0b_5x5')
                    with tf.variable_scope('branch_2'):
                        branch_2 = slim.conv2d(net,64,[1,1],scope='conv2d_0a_1x1')
                        branch_2 = slim.conv2d(branch_2,96,[3,3],scope='conv2d_0b_3x3')
                        branch_2 = slim.conv2d(branch_2,96,[3,3],scope='conv2d_0c_3x3')
                    with tf.variable_scope('branch_3'):
                        branch_3 = slim.avg_pool2d(net,[3,3],scope='avgpool_0a_3x3')
                        branch_3 = slim.conv2d(branch_3,32,[1,1],scope='conv2d_0b_1x1')
    
                    net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
                    print('tensor 8:' + str(net.get_shape().as_list()))
    
    
                with tf.variable_scope('mixed_5c'):
                    with tf.variable_scope('branch_0'):
                        branch_0 = slim.conv2d(net,64,[1,1],scope='conv2d_0a_1x1')
                    with tf.variable_scope('branch_1'):
                        branch_1 = slim.conv2d(net,48,[1,1],scope='conv2d_0b_1x1')
                        branch_1 = slim.conv2d(branch_1,64,[5,5],scope='conv2d_0c_5x5')
                    with tf.variable_scope('branch_2'):
                        branch_2 = slim.conv2d(net,64,[1,1],scope='conv2d_0a_1x1')
                        branch_2 = slim.conv2d(branch_2,96,[3,3],scope='conv2d_0b_3x3')
                        branch_2 = slim.conv2d(branch_2,96,[3,3],scope='conv2d_0c_3x3')
                    with tf.variable_scope('branch_3'):
                        branch_3 = slim.avg_pool2d(net,[3,3],scope='avgpool_0a_3x3')
                        branch_3 = slim.conv2d(branch_3,64,[1,1],scope='conv2d_0b_1x1')
    
                    net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
                    print('tensor 9:' + str(net.get_shape().as_list()))
    
    
                with tf.variable_scope('mixed_5d'):
                    with tf.variable_scope('branch_0'):
                        branch_0 = slim.conv2d(net,64,[1,1],scope='conv2d_0a_1x1')
                    with tf.variable_scope('branch_1'):
                        branch_1 = slim.conv2d(net,48,[1,1],scope='conv2d_0a_1x1')
                        branch_1 = slim.conv2d(branch_1,64,[5,5],scope='conv2d_0b_5x5')
                    with tf.variable_scope('branch_2'):
                        branch_2 = slim.conv2d(net,64,[1,1],scope='conv2d_0a_1x1')
                        branch_2 = slim.conv2d(branch_2,96,[3,3],scope='conv2d_0b_3x3')
                        branch_2 = slim.conv2d(branch_2,96,[3,3],scope='conv2d_0c_3x3')
                    with tf.variable_scope('branch_3'):
                        branch_3 = slim.avg_pool2d(net,[3,3],scope='avgpool_0a_3x3')
                        branch_3 = slim.conv2d(branch_3,64,[1,1],scope='conv2d_0b_1x1')
    
                    net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
                    print('tensor 10:' + str(net.get_shape().as_list()))
    
    
                with tf.variable_scope('mixed_6a'):
                    with tf.variable_scope('branch_0'):
                        branch_0 = slim.conv2d(net,384,[3,3],stride=2,padding='VALID',scope='conv2d_1a_1x1')
                    with tf.variable_scope('branch_1'):
                        branch_1 = slim.conv2d(net,64,[1,1],scope='conv2d_0a_1x1')
                        branch_1 = slim.conv2d(branch_1,96,[3,3],scope='conv2d_0b_3x3')
                        branch_1 = slim.conv2d(branch_1,96,[3,3],stride=2,padding='VALID',scope='conv2d_1a_1x1')
                    with tf.variable_scope('branch_2'):
                        branch_2 = slim.max_pool2d(net,[3,3],stride=2,padding='VALID',scope='maxpool_1a_3x3')
    
                    net = tf.concat([branch_0,branch_1,branch_2],3)
                    print('tensor 11:' + str(net.get_shape().as_list()))
    
    
                with tf.variable_scope('mixed_6b'):
                    with tf.variable_scope('branch_0'):
                        branch_0 = slim.conv2d(net,192,[1,1],scope='conv2d_0a_1x1')
                    with tf.variable_scope('branch_1'):
                        branch_1 = slim.conv2d(net,128,[1,1],scope='conv2d_0a_1x1')
                        branch_1 = slim.conv2d(branch_1,128,[1,7],scope='conv2d_0b_1x7')
                        branch_1 = slim.conv2d(branch_1,192,[7,1],scope='conv2d_0c_7x1')
                    with tf.variable_scope('branch_2'):
                        branch_2 = slim.conv2d(net,128,[1,1],scope='conv2d_0a_1x1')
                        branch_2 = slim.conv2d(branch_2,128,[7,1],scope='conv2d_0b_7x1')
                        branch_2 = slim.conv2d(branch_2,128,[1,7],scope='conv2d_0c_1x7')
                        branch_2 = slim.conv2d(branch_2,128,[7,1],scope='conv2d_0d_7x1')
                        branch_2 = slim.conv2d(branch_2,192,[1,7],scope='conv2d_0e_1x7')
                    with tf.variable_scope('branch_3'):
                        branch_3 = slim.avg_pool2d(net,[3,3],scope='avgpool_0a_3x3')
                        branch_3 = slim.conv2d(branch_3,192,[1,1],scope='conv2d_0b_1x1')
    
                    net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
                    print('tensor 12:' + str(net.get_shape().as_list()))
    
    
                with tf.variable_scope('mixed_6c'):
                    with tf.variable_scope('branch_0'):
                        branch_0 = slim.conv2d(net,192,[1,1],scope='conv2d_0a_1x1')
                    with tf.variable_scope('branch_1'):
                        branch_1 = slim.conv2d(net,160,[1,1],scope='conv2d_0a_1x1')
                        branch_1 = slim.conv2d(branch_1,160,[1,7],scope='conv2d_0b_1x7')
                        branch_1 = slim.conv2d(branch_1,192,[7,1],scope='conv2d_0c_7x1')
                    with tf.variable_scope('branch_2'):
                        branch_2 = slim.conv2d(net,160,[1,1],scope='conv2d_0a_1x1')
                        branch_2 = slim.conv2d(branch_2,160,[7,1],scope='conv2d_0b_7x1')
                        branch_2 = slim.conv2d(branch_2,160,[1,7],scope='conv2d_0c_1x7')
                        branch_2 = slim.conv2d(branch_2,160,[7,1],scope='conv2d_0d_7x1')
                        branch_2 = slim.conv2d(branch_2,192,[1,7],scope='conv2d_0e_1x7')
                    with tf.variable_scope('branch_3'):
                        branch_3 = slim.avg_pool2d(net,[3,3],scope='avgpool_0a_3x3')
                        branch_3 = slim.conv2d(branch_3,192,[1,1],scope='conv2d_0b_1x1')
    
                    net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
                    print('tensor 13:' + str(net.get_shape().as_list()))
    
    
                with tf.variable_scope('mixed_6d'):
                    with tf.variable_scope('branch_0'):
                        branch_0 = slim.conv2d(net,192,[1,1],scope='conv2d_0a_1x1')
                    with tf.variable_scope('branch_1'):
                        branch_1 = slim.conv2d(net,160,[1,1],scope='conv2d_0a_1x1')
                        branch_1 = slim.conv2d(branch_1,160,[1,7],scope='conv2d_0b_1x7')
                        branch_1 = slim.conv2d(branch_1,192,[7,1],scope='conv2d_0c_7x1')
                    with tf.variable_scope('branch_2'):
                        branch_2 = slim.conv2d(net,160,[1,1],scope='conv2d_0a_1x1')
                        branch_2 = slim.conv2d(branch_2,160,[7,1],scope='conv2d_0b_7x1')
                        branch_2 = slim.conv2d(branch_2,160,[1,7],scope='conv2d_0c_1x7')
                        branch_2 = slim.conv2d(branch_2,160,[7,1],scope='conv2d_0d_7x1')
                        branch_2 = slim.conv2d(branch_2,192,[1,7],scope='conv2d_0e_1x7')
                    with tf.variable_scope('branch_3'):
                        branch_3 = slim.avg_pool2d(net,[3,3],scope='avgpool_0a_3x3')
                        branch_3 = slim.conv2d(branch_3,192,[1,1],scope='conv2d_0b_1x1')
    
                    net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
                    print('tensor 14:' + str(net.get_shape().as_list()))
    
    
                with tf.variable_scope('mixed_6e'):
                    with tf.variable_scope('branch_0'):
                        branch_0 = slim.conv2d(net,192,[1,1],scope='conv2d_0a_1x1')
                    with tf.variable_scope('branch_1'):
                        branch_1 = slim.conv2d(net,192,[1,1],scope='conv2d_0a_1x1')
                        branch_1 = slim.conv2d(branch_1,192,[1,7],scope='conv2d_0b_1x7')
                        branch_1 = slim.conv2d(branch_1,192,[7,1],scope='conv2d_0c_7x1')
                    with tf.variable_scope('branch_2'):
                        branch_2 = slim.conv2d(net,192,[1,1],scope='conv2d_0a_1x1')
                        branch_2 = slim.conv2d(branch_2,192,[7,1],scope='conv2d_0b_7x1')
                        branch_2 = slim.conv2d(branch_2,192,[1,7],scope='conv2d_0c_1x7')
                        branch_2 = slim.conv2d(branch_2,192,[7,1],scope='conv2d_0d_7x1')
                        branch_2 = slim.conv2d(branch_2,192,[1,7],scope='conv2d_0e_1x7')
                    with tf.variable_scope('branch_3'):
                        branch_3 = slim.avg_pool2d(net,[3,3],scope='avgpool_0a_3x3')
                        branch_3 = slim.conv2d(branch_3,192,[1,1],scope='conv2d_0b_1x1')
    
                    net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
                    print('tensor 15:' + str(net.get_shape().as_list()))
    
    
                with tf.variable_scope('mixed_7a'):
                    with tf.variable_scope('branch_0'):
                        branch_0 = slim.conv2d(net,192,[1,1],scope='conv2d_0a_1x1')
                        branch_0 = slim.conv2d(branch_0,320,[3,3],stride=2,padding='VALID',scope='conv2d_1a_3x3')
                    with tf.variable_scope('branch_1'):
                        branch_1 = slim.conv2d(net,192,[1,1],scope='conv2d_0a_1x1')
                        branch_1 = slim.conv2d(branch_1,192,[1,7],scope='conv2d_0b_1x7')
                        branch_1 = slim.conv2d(branch_1,192,[7,1],scope='conv2d_0c_7x1')
                        branch_1 = slim.conv2d(branch_1,192,[3,3],stride=2,padding='VALID',scope='conv2d_1a_3x3')
                    with tf.variable_scope('branch_2'):
                        branch_2 = slim.max_pool2d(net,[3,3],stride=2,padding='VALID',scope='maxpool_1a_3x3')
    
                    net = tf.concat([branch_0,branch_1,branch_2],3)
                    print('tensor 16:' + str(net.get_shape().as_list()))
    
    
                with tf.variable_scope('mixed_7b'):
                    with tf.variable_scope('branch_0'):
                        branch_0 = slim.conv2d(net,320,[1,1],scope='conv2d_0a_1x1')
                    with tf.variable_scope('branch_1'):
                        branch_1 = slim.conv2d(net,384,[1,1],scope='conv2d_0a_1x1')
                        branch_1 = tf.concat([
                            slim.conv2d(branch_1,384,[1,3],scope='conv2d_0b_1x3'),
                            slim.conv2d(branch_1,384,[3,1],scope='conv2d_0b_3x1')
                        ],3)
                    with tf.variable_scope('branch_2'):
                        branch_2 = slim.conv2d(net,448,[1,1],scope='conv2d_0a_1x1')
                        branch_2 = slim.conv2d(branch_2,384,[3,3],scope='conv2d_0b_3x3')
                        branch_2 = tf.concat([
                            slim.conv2d(branch_2,384,[1,3],scope='conv2d_0c_1x3'),
                            slim.conv2d(branch_2,384,[3,1],scope='conv2d_0d_3x1')
                        ],3)
                    with tf.variable_scope('branch_3'):
                        branch_3 = slim.avg_pool2d(net,[3,3],scope='avgpool_0a_3x3')
                        branch_3 = slim.conv2d(branch_3,192,[1,1],scope='conv2d_0b_1x1')
    
                    net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
                    print('tensor 17:' + str(net.get_shape().as_list()))
    
    
                with tf.variable_scope('mixed_7c'):
                    with tf.variable_scope('branch_0'):
                        branch_0 = slim.conv2d(net,320,[1,1],scope='conv2d_0a_1x1')
                    with tf.variable_scope('branch_1'):
                        branch_1 = slim.conv2d(net,384,[1,1],scope='conv2d_0a_1x1')
                        branch_1 = tf.concat([
                            slim.conv2d(branch_1,384,[1,3],scope='conv2d_0b_1x3'),
                            slim.conv2d(branch_1,384,[3,1],scope='conv2d_0c_3x1')],3)
                    with tf.variable_scope('branch_2'):
                        branch_2 = slim.conv2d(net,448,[1,1],scope='conv2d_0a_1x1')
                        branch_2 = slim.conv2d(branch_2,384,[3,3],scope='conv2d_0b_3x3')
                        branch_2 = tf.concat([
                            slim.conv2d(branch_2,384,[1,3],scope='conv2d_0c_1x3'),
                            slim.conv2d(branch_2,384,[3,1],scope='conv2d_0d_3x1')],3)
                    with tf.variable_scope('branch_3'):
                        branch_3 = slim.avg_pool2d(net,[3,3],scope='avgpool_0a_3x3')
                        branch_3 = slim.conv2d(branch_3,192,[1,1],scope='conv2d_0b_1x1')
    
                    net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
                    print('tensor 18:' + str(net.get_shape().as_list()))
    
        with slim.arg_scope([slim.conv2d,slim.max_pool2d,slim.avg_pool2d],stride=1,padding='SAME'):
            with tf.variable_scope('logits'):
                net = slim.avg_pool2d(net,[3,3],padding='VALID',scope='avgpool_1a_3x3')
                print('tensor 19:' + str(net.get_shape().as_list()))
    
                net = slim.dropout(net,keep_prob=keep_prob,scope='dropout_1b')
    
                logits = slim.conv2d(net, char_size,[2,2],padding='VALID',activation_fn=None,normalizer_fn=None,
                                     scope='conv2d_1c_2x2')
                print('logits 1:' + str(logits.get_shape().as_list()))
    
                logits = tf.squeeze(logits,[1,2],name='spatialsqueeze')
                print('logits 2:' + str(logits.get_shape().as_list()))
    
        regularization_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
        loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
    
        total_loss = loss + regularization_loss
        print('get total_loss')
    
        accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), labels), tf.float32))
    
        global_step = tf.get_variable("step", [], initializer=tf.constant_initializer(0.0), trainable=False)
        rate = tf.train.exponential_decay(2e-3, global_step, decay_steps=2000, decay_rate=0.97, staircase=True)
    
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = tf.train.AdamOptimizer(learning_rate=rate).minimize(total_loss, global_step=global_step)
    
        probabilities = tf.nn.softmax(logits)
    
        tf.summary.scalar('loss', loss)
        tf.summary.scalar('accuracy', accuracy)
        merged_summary_op = tf.summary.merge_all()
        predicted_val_top_k, predicted_index_top_k = tf.nn.top_k(probabilities, k=top_k)
        accuracy_in_top_k = tf.reduce_mean(tf.cast(tf.nn.in_top_k(probabilities, labels, top_k), tf.float32))
    
        return {'images': images,
                'labels': labels,
                'keep_prob': keep_prob,
                'top_k': top_k,
                'global_step': global_step,
                'train_op': train_op,
                'loss': total_loss,
                'accuracy': accuracy,
                'accuracy_top_k': accuracy_in_top_k,
                'merged_summary_op': merged_summary_op,
                'predicted_distribution': probabilities,
                'predicted_index_top_k': predicted_index_top_k,
                'predicted_val_top_k': predicted_val_top_k}
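
    A rough sketch of how the returned dictionary could be driven in a TF 1.x training session; get_next_batch and max_steps are hypothetical stand-ins for the project's data pipeline:
    graph = build_graph_all(top_k=3)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(max_steps):
            batch_images, batch_labels = get_next_batch()  # hypothetical data feeder
            _, loss_val, acc_val = sess.run(
                [graph['train_op'], graph['loss'], graph['accuracy']],
                feed_dict={graph['images']: batch_images,
                           graph['labels']: batch_labels,
                           graph['keep_prob']: 0.8})
            if step % 100 == 0:
                print('step %d  loss %.4f  accuracy %.4f' % (step, loss_val, acc_val))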
    
    And one that uses ResNet v2:
    # resnet_v2 and resnet_v2_block below are the TF-slim implementations
    # (nets/resnet_v2.py in the tensorflow/models slim library).
    resnet_v2.default_image_size = 128
    
    
    def resnet_v2_50(inputs,
                     num_classes=None,
                     is_training=True,
                     global_pool=True,
                     output_stride=None,
                     spatial_squeeze=True,
                     reuse=None,
                     scope='resnet_v2_50'):
        """ResNet-50 model of [1]. See resnet_v2() for arg and return description."""
        blocks = [
            resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
            resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
            resnet_v2_block('block3', base_depth=256, num_units=6, stride=2),
            resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
        ]
        return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
                         global_pool=global_pool, output_stride=output_stride,
                         include_root_block=True, spatial_squeeze=spatial_squeeze,
                         reuse=reuse, scope=scope)
    
    
    resnet_v2_50.default_image_size = resnet_v2.default_image_size
    
    
    def resnet_v2_101(inputs,
                      num_classes=None,
                      is_training=True,
                      global_pool=True,
                      output_stride=None,
                      spatial_squeeze=True,
                      reuse=None,
                      scope='resnet_v2_101'):
        """ResNet-101 model of [1]. See resnet_v2() for arg and return description."""
        blocks = [
            resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
            resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
            resnet_v2_block('block3', base_depth=256, num_units=23, stride=2),
            resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
        ]
        return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
                         global_pool=global_pool, output_stride=output_stride,
                         include_root_block=True, spatial_squeeze=spatial_squeeze,
                         reuse=reuse, scope=scope)
    					 
    def build_graph(top_k, is_training):
        # with tf.device('/cpu:0'):
        keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name='keep_prob')
        images = tf.placeholder(dtype=tf.float32, shape=[None, 128, 128, 1], name='image_batch')
        labels = tf.placeholder(dtype=tf.int64, shape=[None], name='label_batch')
    
        logits, _ = resnet_v2_50(images, num_classes=3755, is_training=is_training, global_pool=True,
                                 output_stride=None, spatial_squeeze=True, reuse=None)	
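
    The build_graph function is cut off here in the original post. Its remainder presumably mirrors build_graph_all above; a hedged sketch of how it could continue:
        loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
        accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), labels), tf.float32))
        global_step = tf.get_variable('step', [], initializer=tf.constant_initializer(0.0), trainable=False)
        rate = tf.train.exponential_decay(2e-3, global_step, decay_steps=2000, decay_rate=0.97, staircase=True)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)  # batch-norm moving-average updates
        with tf.control_dependencies(update_ops):
            train_op = tf.train.AdamOptimizer(learning_rate=rate).minimize(loss, global_step=global_step)
        probabilities = tf.nn.softmax(logits)
        predicted_val_top_k, predicted_index_top_k = tf.nn.top_k(probabilities, k=top_k)
        return {'images': images, 'labels': labels, 'keep_prob': keep_prob,
                'train_op': train_op, 'loss': loss, 'accuracy': accuracy,
                'predicted_val_top_k': predicted_val_top_k,
                'predicted_index_top_k': predicted_index_top_k}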
    	
    -----------------------------	
    

      
