  • Keras study notes (3)

    1. Building a custom cat-vs-dog dataset

    import tensorflow as tf
    from tensorflow import keras
    import matplotlib.pyplot as plt
    %matplotlib inline
    import numpy as np
    import glob
    import os
    
    print('Tensorflow version: {}'.format(tf.__version__))
    
    train_image_path = glob.glob('./dc/train/*/*.jpg')  # on Windows the matched subpath uses '\\' separators
    
    len(train_image_path)
    
    train_image_path[:5]
    
    train_image_path[-5:]
    
    p = './dc/train\\dog\\dog.995.jpg'
    
    int(p.split('\\')[1] == 'cat')
    
    train_image_label = [int(p.split('\\')[1] == 'cat') for p in train_image_path]
    
    train_image_label[-5:]
    
    def load_preprosess_image(path, label):
        image = tf.io.read_file(path)
        image = tf.image.decode_jpeg(image, channels=3)
        image = tf.image.resize(image, [360, 360])           # upsize first,
        image = tf.image.random_crop(image, [256, 256, 3])   # then random-crop as augmentation
        image = tf.image.random_flip_left_right(image)
        image = tf.image.random_flip_up_down(image)
        image = tf.image.random_brightness(image, 0.5)
        image = tf.image.random_contrast(image, 0, 1)
        image = tf.cast(image, tf.float32)
        image = image/255                                    # scale to [0, 1]
        label = tf.reshape(label, [1])                       # scalar label -> shape (1,)
        return image, label
    
    # after the reshape, batched labels stack as [1, 2, 3]  -->  [[1], [2], [3]]
    
    # alternative: tf.image.convert_image_dtype casts and rescales in one step
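
    As an aside, a minimal sketch of that alternative (not part of the original pipeline): tf.image.convert_image_dtype can replace the cast-and-divide pair above.

    # casts uint8 -> float32 and rescales [0, 255] to [0.0, 1.0] in one call
    img_f32 = tf.image.convert_image_dtype(tf.zeros([2, 2, 3], dtype=tf.uint8), tf.float32)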
    
    train_image_ds = tf.data.Dataset.from_tensor_slices((train_image_path, train_image_label))
    
    AUTOTUNE = tf.data.experimental.AUTOTUNE
    
    train_image_ds = train_image_ds.map(load_preprosess_image, num_parallel_calls=AUTOTUNE)
    
    train_image_ds
    
    for img, label in train_image_ds.take(1):
        plt.imshow(img)
    
    
    
    BATCH_SIZE = 32
    train_count = len(train_image_path)
    
    train_image_ds = train_image_ds.shuffle(train_count).batch(BATCH_SIZE)
    train_image_ds = train_image_ds.prefetch(AUTOTUNE)
    
    test_image_path = glob.glob('./dc/test/*/*.jpg')
    test_image_label = [int(p.split('\\')[1] == 'cat') for p in test_image_path]
    test_image_ds = tf.data.Dataset.from_tensor_slices((test_image_path, test_image_label))
    test_image_ds = test_image_ds.map(load_preprosess_image, num_parallel_calls=AUTOTUNE)
    test_image_ds = test_image_ds.batch(BATCH_SIZE)
    test_image_ds = test_image_ds.prefetch(AUTOTUNE)
    
    len(test_image_path)
    
    imgs, labels = next(iter(train_image_ds))  # pull one batch to inspect
    
    imgs.shape
    
    labels.shape
    
    plt.imshow(imgs[0])
    
    labels[0]
    
    model = keras.Sequential([
        tf.keras.layers.Conv2D(64, (3, 3), input_shape=(256, 256, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(256, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(256, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),
        tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(1024, (3, 3), activation='relu'),
        tf.keras.layers.Conv2D(1024, (3, 3), activation='relu'),
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(256, activation='relu'),
        tf.keras.layers.Dense(1)
    ])
    
    model.summary()
    
    pred = model(imgs)
    
    pred.shape
    
    np.array([p[0].numpy() for p in tf.cast(pred > 0, tf.int32)])
    
    np.array([l[0].numpy() for l in labels])
    
    ls = tf.keras.losses.BinaryCrossentropy()
    
    ls([0.,0.,1.,1.], [1.,1.,1.,1.])
    
    ls([[0.],[0.],[1.],[1.]], [[1.],[1.],[1.],[1.]])
    
    tf.keras.losses.binary_crossentropy([0.,0.,1.,1.], [1.,1.,1.,1.])
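
    The model above ends in Dense(1) with no activation, so its outputs are logits; that is why the training step below builds the loss with from_logits=True. A small illustration with made-up logits:

    # with from_logits=True the loss applies the sigmoid internally
    ls_logits = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    ls_logits([[0.],[0.],[1.],[1.]], [[-3.],[-3.],[3.],[3.]])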
    
    optimizer = tf.keras.optimizers.Adam()
    
    epoch_loss_avg = tf.keras.metrics.Mean('train_loss')
    train_accuracy = tf.keras.metrics.Accuracy()
    
    epoch_loss_avg_test = tf.keras.metrics.Mean('test_loss')
    test_accuracy = tf.keras.metrics.Accuracy()
    
    train_accuracy([1,0,1], [1,1,1])  # demo call; Keras metrics accumulate state
    train_accuracy.reset_states()     # clear the demo state so epoch 1 starts fresh
    
    def train_step(model, images, labels):
        with tf.GradientTape() as t:
            pred = model(images, training=True)  # training=True so BatchNorm uses batch statistics
            loss_step = tf.keras.losses.BinaryCrossentropy(from_logits=True)(labels, pred)
        grads = t.gradient(loss_step, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        epoch_loss_avg(loss_step)
        train_accuracy(labels, tf.cast(pred>0, tf.int32))
    
    def test_step(model, images, labels):
        pred = model(images, training=False)
        loss_step = tf.keras.losses.BinaryCrossentropy(from_logits=True)(labels, pred)
        epoch_loss_avg_test(loss_step)
        test_accuracy(labels, tf.cast(pred>0, tf.int32))
    
    train_loss_results = []
    train_acc_results = []
    
    test_loss_results = []
    test_acc_results = []
    
    num_epochs = 30
    
    for epoch in range(num_epochs):
        for imgs_, labels_ in train_image_ds:
            train_step(model, imgs_, labels_)
            print('.', end='')
        print()
        
        train_loss_results.append(epoch_loss_avg.result())
        train_acc_results.append(train_accuracy.result())
        
        
        for imgs_, labels_ in test_image_ds:
            test_step(model, imgs_, labels_)
            
        test_loss_results.append(epoch_loss_avg_test.result())
        test_acc_results.append(test_accuracy.result())
        
        print('Epoch:{}: loss: {:.3f}, accuracy: {:.3f}, test_loss: {:.3f}, test_accuracy: {:.3f}'.format(
            epoch + 1,
            epoch_loss_avg.result(),
            train_accuracy.result(),
            epoch_loss_avg_test.result(),
            test_accuracy.result()
        ))
        
        epoch_loss_avg.reset_states()
        train_accuracy.reset_states()
        
        epoch_loss_avg_test.reset_states()
        test_accuracy.reset_states()
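
    The four result lists collected above are never visualized in the notebook; a minimal sketch to plot them once the loop has finished, reusing the matplotlib import from above:

    plt.plot(range(1, num_epochs + 1), train_loss_results, label='train_loss')
    plt.plot(range(1, num_epochs + 1), test_loss_results, label='test_loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()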
    

     2. Transfer learning

    import tensorflow as tf
    import matplotlib.pyplot as plt
    %matplotlib inline
    import numpy as np
    import glob 
    import os
    
    tf.test.is_gpu_available()
    
    keras=tf.keras
    layers=tf.keras.layers
    
    train_image_path=glob.glob('./dc/train/*/*.jpg')
    
    len(train_image_path)
    
    train_image_label=[int(p.split('\\')[1]=='cat') for p in train_image_path]
    
    def load_preprocess_image(path,label):
        image=tf.io.read_file(path)
        image=tf.image.decode_jpeg(image,channels=3)
        image=tf.image.resize(image,[256,256])
        image=tf.cast(image,tf.float32)
        image=image/255
        return image, label
    
    
    
    train_image_ds=tf.data.Dataset.from_tensor_slices((train_image_path,train_image_label))
    
    AUTOTUNE=tf.data.experimental.AUTOTUNE
    
    train_image_ds=train_image_ds.map(load_preprocess_image,num_parallel_calls=AUTOTUNE)
    
    BATCH_SIZE=32
    train_count=len(train_image_path)
    
    train_image_ds=train_image_ds.shuffle(train_count).repeat().batch(BATCH_SIZE)
    
    test_image_path=glob.glob('./dc/test/*/*.jpg')
    test_image_label=[int(p.split('\\')[1]=='cat') for p in test_image_path]
    test_image_ds=tf.data.Dataset.from_tensor_slices((test_image_path,test_image_label))
    test_image_ds=test_image_ds.map(load_preprocess_image,num_parallel_calls=AUTOTUNE)
    test_image_ds=test_image_ds.repeat().batch(BATCH_SIZE)
    
    test_count=len(test_image_path)
    
    # use the VGG16 convolutional base together with its ImageNet weights
    conv_base=keras.applications.VGG16(weights='imagenet',include_top=False)
    
    conv_base.summary()
    
    model=keras.Sequential()
    model.add(conv_base)
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dense(512,activation='relu'))
    model.add(layers.Dense(1,activation='sigmoid'))
    
    model.summary()
    
    conv_base.trainable=False
    
    model.summary()
    
    model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001),loss='binary_crossentropy',metrics=['acc'])
    
    history=model.fit(train_image_ds,steps_per_epoch=train_count//BATCH_SIZE,epochs=15,validation_data=test_image_ds,validation_steps=test_count//BATCH_SIZE)
    
    # fine-tuning: unfreeze the convolutional base
    conv_base.trainable=True
    
    len(conv_base.layers)
    
    fine_tune_at=-3
    
    for layer in conv_base.layers[:fine_tune_at]:  # freeze everything except the last three layers
        layer.trainable=False
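
    To verify which layers are now frozen, a quick check (not in the original notebook):

    for layer in conv_base.layers:
        print(layer.name, layer.trainable)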
    
    model.compile(loss='binary_crossentropy',optimizer=tf.keras.optimizers.Adam(learning_rate=0.0005/10),metrics=['accuracy'])
    
    initial_epochs=1
    fine_tune_epochs=1
    total_epochs=initial_epochs+fine_tune_epochs
    history=model.fit(train_image_ds,steps_per_epoch=train_count//BATCH_SIZE,epochs=total_epochs,initial_epoch=initial_epochs,validation_data=test_image_ds,validation_steps=test_count//BATCH_SIZE)
    

     3. A multi-output model example

    import tensorflow as tf
    
    print('Tensorflow version: {}'.format(tf.__version__))
    
    from tensorflow import keras
    import matplotlib.pyplot as plt
    %matplotlib inline
    import numpy as np
    import pathlib
    
    data_dir = './dataset'
    
    data_root = pathlib.Path(data_dir)
    
    data_root
    
    for item in data_root.iterdir():
        print(item)
    
    all_image_paths = list(data_root.glob('*/*'))
    
    image_count = len(all_image_paths)
    image_count
    
    all_image_paths[:3]
    
    all_image_paths[-3:]
    
    import random
    all_image_paths = [str(path) for path in all_image_paths]
    random.shuffle(all_image_paths)
    
    all_image_paths[:5]
    
    label_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir())
    label_names
    
    color_label_names = set(name.split('_')[0] for name in label_names)
    color_label_names
    
    item_label_names = set(name.split('_')[1] for name in label_names)
    item_label_names
    
    color_label_to_index = dict((name, index) for index,name in enumerate(color_label_names))
    color_label_to_index
    
    item_label_to_index = dict((name, index) for index,name in enumerate(item_label_names))
    item_label_to_index
    
    all_image_labels = [pathlib.Path(path).parent.name for path in all_image_paths]
    all_image_labels[:5]
    
    color_labels = [color_label_to_index[label.split('_')[0]] for label in all_image_labels]
    
    color_labels[:5]
    
    item_labels = [item_label_to_index[label.split('_')[1]] for label in all_image_labels]
    
    item_labels[:10]
    
    import IPython.display as display
    
    def caption_image(label):
        # leftover helper from a different (airplane/lake) dataset; unused below
        return {0: 'airplane', 1: 'lake'}.get(label)
    
    for n in range(3):
        image_index = random.choice(range(len(all_image_paths)))
        display.display(display.Image(all_image_paths[image_index], width=100, height=100))
        print(all_image_labels[image_index])
        print()
    
    Load and format the images
    
    img_path = all_image_paths[0]
    img_path
    
    img_raw = tf.io.read_file(img_path)
    print(repr(img_raw)[:100]+"...")
    
    img_tensor = tf.image.decode_image(img_raw)
    
    print(img_tensor.shape)
    print(img_tensor.dtype)
    
    img_tensor = tf.cast(img_tensor, tf.float32)
    img_tensor = tf.image.resize(img_tensor, [224, 224])
    img_final = img_tensor/255.0
    print(img_final.shape)
    print(img_final.numpy().min())
    print(img_final.numpy().max())
    
    def load_and_preprocess_image(path):
        image = tf.io.read_file(path)
        image = tf.image.decode_jpeg(image, channels=3)
        image = tf.image.resize(image, [224, 224])
        image = tf.cast(image, tf.float32)
        image = image/255.0  # scale to the [0, 1] range
        image = 2*image-1    # then shift to [-1, 1], the range MobileNetV2 expects
        return image
    
    import matplotlib.pyplot as plt
    
    image_path = all_image_paths[0]
    label = all_image_labels[0]
    
    plt.imshow((load_and_preprocess_image(image_path) + 1)/2)  # map [-1, 1] back to [0, 1] for display
    plt.grid(False)
    plt.xlabel(label)
    print()
    
    path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
    
    AUTOTUNE = tf.data.experimental.AUTOTUNE
    image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
    
    label_ds = tf.data.Dataset.from_tensor_slices((color_labels, item_labels))
    
    for ele in label_ds.take(3):
        print(ele[0].numpy(), ele[1].numpy())
    
    image_label_ds = tf.data.Dataset.zip((image_ds, label_ds))
    
    image_label_ds
    
    test_count = int(image_count*0.2)
    train_count = image_count - test_count
    
    train_data = image_label_ds.skip(test_count)
    
    test_data = image_label_ds.take(test_count)
    
    BATCH_SIZE = 16
    
    train_data = train_data.shuffle(buffer_size=train_count).repeat(-1)
    train_data = train_data.batch(BATCH_SIZE)
    train_data = train_data.prefetch(buffer_size=AUTOTUNE)
    train_data
    
    test_data = test_data.batch(BATCH_SIZE)
    
    # Build the model
    
    mobile_net = tf.keras.applications.MobileNetV2(input_shape=(224, 224, 3), 
                                                   include_top=False,
                                                   weights='imagenet')
    
    mobile_net.trainable = False
    
    inputs = tf.keras.Input(shape=(224, 224, 3))
    
    x = mobile_net(inputs)
    
    x.get_shape()
    
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    
    x.get_shape()
    
    x1 = tf.keras.layers.Dense(1024, activation='relu')(x)
    out_color = tf.keras.layers.Dense(len(color_label_names), 
                                      activation='softmax',
                                      name='out_color')(x1)
    
    x2 = tf.keras.layers.Dense(1024, activation='relu')(x)
    out_item = tf.keras.layers.Dense(len(item_label_names), 
                                     activation='softmax',
                                     name='out_item')(x2)
    
    model = tf.keras.Model(inputs=inputs,
                           outputs=[out_color, out_item])
    
    model.summary()
    
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
                  loss={'out_color':'sparse_categorical_crossentropy',
                        'out_item':'sparse_categorical_crossentropy'},
                  metrics=['acc']
    )
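
    If one head should matter more than the other, compile also accepts per-output loss weights; a variant of the call above with illustrative values:

    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
                  loss={'out_color':'sparse_categorical_crossentropy',
                        'out_item':'sparse_categorical_crossentropy'},
                  loss_weights={'out_color':1.0, 'out_item':1.0},  # illustrative weights
                  metrics=['acc']
    )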
    
    train_steps = train_count//BATCH_SIZE
    test_steps = test_count//BATCH_SIZE
    
    model.fit(train_data,
              epochs=15,
              steps_per_epoch=train_steps,
              validation_data=test_data,
              validation_steps=test_steps
    )
    
    model.evaluate(test_data)
    
    my_image = load_and_preprocess_image(r'D:\163\tf20jk\dataset\moc\blue_jeans\0000004.jpg')  # a local test image
    
    my_image.shape
    
    my_image = tf.expand_dims(my_image, 0)
    
    my_image.shape
    
    pred = model.predict(my_image)
    
    np.argmax(pred[0])
    
    np.argmax(pred[1])
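
    np.argmax only returns class indices; inverting the two lookup dictionaries built earlier maps them back to names (index_to_color and index_to_item are helper names introduced here):

    index_to_color = dict((v, k) for k, v in color_label_to_index.items())
    index_to_item = dict((v, k) for k, v in item_label_to_index.items())
    print(index_to_color[np.argmax(pred[0])], index_to_item[np.argmax(pred[1])])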
    

     4. Saving the model

    import tensorflow as tf
    import os
    import pandas as pd
    import numpy as np
    import matplotlib.pyplot as plt
    %matplotlib inline
    
    (train_image,train_label),(test_image,test_label)=tf.keras.datasets.fashion_mnist.load_data()
    
    plt.imshow(train_image[0])
    
    train_image=train_image/255
    test_image=test_image/255
    
    model=tf.keras.Sequential()
    model.add(tf.keras.layers.Flatten(input_shape=(28,28)))
    model.add(tf.keras.layers.Dense(128,activation='relu'))
    model.add(tf.keras.layers.Dense(10,activation='softmax'))
    
    model.summary()
    
    model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['acc'])
    
    model.fit(train_image,train_label,epochs=3)
    
    model.evaluate(test_image,test_label,verbose=0)  # verbose=0 hides the progress bar; returns [loss, acc]
    
    # save the whole model (architecture + weights + optimizer state)
    model.save('less_model.h5')
    
    new_model=tf.keras.models.load_model('less_model.h5')
    
    new_model.summary()
    
    new_model.evaluate(test_image,test_label,verbose=0)
    
    # save only the architecture: no compile state, no weights
    json_config=model.to_json()
    
    json_config
    
    reinitialized_model=tf.keras.models.model_from_json(json_config)
    
    reinitialized_model.summary()
    
    # save only the model weights
    weights=model.get_weights()
    
    weights
    
    reinitialized_model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['acc'])
    
    reinitialized_model.set_weights(weights)
    
    reinitialized_model.evaluate(test_image,test_label,verbose=0)
    
    model.save_weights('less_weights.h5')  # write the weights to disk
    
    reinitialized_model.load_weights('less_weights.h5')
    
    # save checkpoints during training
    checkpoint_path='training_cp/cp.ckpt'
    cp_callback=tf.keras.callbacks.ModelCheckpoint(checkpoint_path,save_weights_only=True)
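
    ModelCheckpoint can also keep only the best weights seen so far; a sketch of that variant (it needs validation data in fit so that val_loss exists):

    cp_best=tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                               save_weights_only=True,
                                               save_best_only=True,  # overwrite only when the monitored value improves
                                               monitor='val_loss')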
    
    model.fit(train_image,train_label,epochs=3,callbacks=[cp_callback])
    
    # load the checkpoint back
    model.load_weights(checkpoint_path)
    
    model.evaluate(test_image,test_label,verbose=0)
    
    # checkpointing a custom training loop
    model=tf.keras.Sequential()
    model.add(tf.keras.layers.Flatten(input_shape=(28,28)))
    model.add(tf.keras.layers.Dense(128,activation='relu'))
    model.add(tf.keras.layers.Dense(10))
    
    optimizer=tf.keras.optimizers.Adam()
    
    loss_func=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    
    def loss(model,x,y):
        y_=model(x)
        return loss_func(y,y_)
    
    train_loss=tf.keras.metrics.Mean('train_loss',dtype=tf.float32)
    train_accuracy=tf.keras.metrics.SparseCategoricalAccuracy('train_accuracy')
    test_loss=tf.keras.metrics.Mean('test_loss',dtype=tf.float32)
    test_accuracy=tf.keras.metrics.SparseCategoricalAccuracy('test_accuracy')
    
    def train_step(model,images,labels):
        with tf.GradientTape() as t:
            pred=model(images)
            loss_step=loss_func(labels,pred)
        grads=t.gradient(loss_step,model.trainable_variables)
        optimizer.apply_gradients(zip(grads,model.trainable_variables))
        train_loss(loss_step)
        train_accuracy(labels,pred)
    
    dataset=tf.data.Dataset.from_tensor_slices((train_image,train_label))
    
    dataset=dataset.shuffle(10000).batch(32)
    
    cp_dir='./customtrain_cp'
    cp_prefix=os.path.join(cp_dir,'ckpt')
    
    checkpoint=tf.train.Checkpoint(optimizer=optimizer,model=model)
    
    def train():
        for epoch in range(5):
            for (batch,(images,labels)) in enumerate(dataset):
                train_step(model,images,labels)
            print('Epoch{} loss is {}'.format(epoch,train_loss.result()))
            print('Epoch{} accuracy is {}'.format(epoch,train_accuracy.result()))
            train_loss.reset_states()
            train_accuracy.reset_states()
            if(epoch+1)%2==0:
                checkpoint.save(file_prefix=cp_prefix)
    
    train()
    
    checkpoint.restore(tf.train.latest_checkpoint(cp_dir))  # locates and restores the newest checkpoint
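
    After restoring, a quick sanity check that the weights came back, reusing train_image/train_label from above (the model outputs logits, hence the argmax):

    pred=tf.argmax(model(train_image, training=False), axis=-1).numpy()
    (pred==train_label).mean()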
    

     5. Image task processing

    # image localization
    import tensorflow as tf
    import matplotlib.pyplot as plt
    %matplotlib inline
    from lxml import etree  # for parsing the XML annotations
    import numpy as np
    import glob
    from matplotlib.patches import Rectangle  # for drawing bounding boxes
    
    img=tf.io.read_file(r'.\images\Abyssinian_1.jpg')
    
    img=tf.image.decode_jpeg(img)
    
    img.shape
    
    plt.imshow(img)
    
    xml=open(r'.\annotations\xmls\Abyssinian_1.xml').read()
    
    # build an XPath selector over the annotation
    sel=etree.HTML(xml)
    
    width=int(sel.xpath('//size/width/text()')[0])  # parse the image width
    
    width
    
    height=int(sel.xpath('//size/height/text()')[0])  # parse the image height
    
    height
    
    # parse the head bounding-box coordinates
    xmin=int(sel.xpath('//bndbox/xmin/text()')[0])
    xmax=int(sel.xpath('//bndbox/xmax/text()')[0])
    ymin=int(sel.xpath('//bndbox/ymin/text()')[0])
    ymax=int(sel.xpath('//bndbox/ymax/text()')[0])
    
    xmin,xmax,ymin,ymax
    
    plt.imshow(img)
    rect=Rectangle((xmin,ymin),(xmax-xmin),(ymax-ymin),fill=False,color='red')  # (xmin, ymin) corner, width, height, unfilled
    ax=plt.gca()  # get the current axes
    ax.axes.add_patch(rect)
    
    img=tf.image.resize(img,[224,224])
    
    img=img/255
    
    plt.imshow(img)
    
    xmin=(xmin/width)*224
    xmax=(xmax/width)*224
    ymin=(ymin/height)*224
    ymax=(ymax/height)*224
    
    plt.imshow(img)
    rect=Rectangle((xmin,ymin),(xmax-xmin),(ymax-ymin),fill=False,color='red')  # (xmin, ymin) corner, width, height, unfilled
    ax=plt.gca()  # get the current axes
    ax.axes.add_patch(rect)
    
    # build the input pipeline
    images=glob.glob('./images/*.jpg')
    
    images[:5]
    
    len(images)
    
    xmls=glob.glob('./annotations/xmls/*.xml')
    
    xmls[:3]
    
    len(xmls)
    
    names=[x.split('\\')[-1].split('.xml')[0] for x in xmls]
    
    len(names)
    
    imgs_train=[img for img in images if (img.split('\\')[-1].split('.jpg')[0]) in names]  # keep only images that have an XML annotation
    
    len(imgs_train)
    
    imgs_test=[img for img in images if (img.split('\\')[-1].split('.jpg')[0]) not in names]
    
    # sort both lists so images and annotations line up
    imgs_train.sort(key=lambda x : x.split('\\')[-1].split('.jpg')[0])
    
    xmls.sort(key=lambda x : x.split('\\')[-1].split('.xml')[0])
    
    # parse the target coordinates from one annotation file
    def to_labels(path):
        xml=open('{}'.format(path)).read()
        sel=etree.HTML(xml)
        width=int(sel.xpath('//size/width/text()')[0])    # image width
        height=int(sel.xpath('//size/height/text()')[0])  # image height
        # head bounding-box corners
        xmin=int(sel.xpath('//bndbox/xmin/text()')[0])
        xmax=int(sel.xpath('//bndbox/xmax/text()')[0])
        ymin=int(sel.xpath('//bndbox/ymin/text()')[0])
        ymax=int(sel.xpath('//bndbox/ymax/text()')[0])
        return [xmin/width,ymin/height,xmax/width,ymax/height]  # scaled to [0, 1]
    
    labels=[to_labels(path) for path in xmls]
    
    out1,out2,out3,out4=zip(*labels)  # zip(*) is the inverse of zip; out1 holds the xmin values
    
    out1=np.array(out1)
    out2=np.array(out2)
    out3=np.array(out3)
    out4=np.array(out4)
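
    A tiny standalone illustration of the zip(*) unzip idiom used above:

    pairs=[(0.1,0.2,0.6,0.7),(0.3,0.1,0.8,0.5)]
    a,b,c,d=zip(*pairs)  # a=(0.1,0.3), b=(0.2,0.1), c=(0.6,0.8), d=(0.7,0.5)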
    
    label_datasets=tf.data.Dataset.from_tensor_slices((out1,out2,out3,out4))
    
    label_datasets
    
    # the image input pipeline
    def load_image(path):
        img=tf.io.read_file(path)
        img=tf.image.decode_jpeg(img,channels=3)
        img=tf.image.resize(img,[224,224])
        img=img/127.5-1  # scale to [-1, 1]
        return img
    
    image_dataset=tf.data.Dataset.from_tensor_slices((imgs_train))
    
    image_dataset=image_dataset.map(load_image)
    
    image_dataset
    
    # zip images and labels together
    dataset=tf.data.Dataset.zip((image_dataset,label_datasets))
    
    dataset
    
    # split into train and test sets
    test_count=int(len(images)*0.2)
    train_count=len(images)-test_count
    dataset_train=dataset.skip(test_count)
    dataset_test=dataset.take(test_count)
    
    BATCH_SIZE=8
    BUFFER_SIZE=300
    STEPS_PER_EPOCH=train_count//BATCH_SIZE
    VALIDATION_STEPS=test_count//BATCH_SIZE
    
    train_dataset=dataset_train.repeat().shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
    train_dataset=train_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    test_dataset=dataset_test.batch(BATCH_SIZE)
    
    # preview one batch and draw the first image's bounding box
    for img,label in train_dataset.take(1):
        plt.imshow(tf.keras.preprocessing.image.array_to_img(img[0]))  # first image in the batch
        out1,out2,out3,out4=label
        xmin,ymin,xmax,ymax=out1[0].numpy()*224,out2[0].numpy()*224,out3[0].numpy()*224,out4[0].numpy()*224
        
        rect=Rectangle((xmin,ymin),(xmax-xmin),(ymax-ymin),fill=False,color='red')  # (xmin, ymin) corner, width, height, unfilled
        ax=plt.gca()  # get the current axes
        ax.axes.add_patch(rect)
        plt.show()
    
    # build the model
    xception=tf.keras.applications.Xception(weights='imagenet',include_top=False,input_shape=(224,224,3))
    
    inputs=tf.keras.layers.Input(shape=(224,224,3))
    x=xception(inputs)
    x=tf.keras.layers.GlobalAveragePooling2D()(x)
    x=tf.keras.layers.Dense(2048,activation='relu')(x)
    x=tf.keras.layers.Dense(256,activation='relu')(x)
    
    out1=tf.keras.layers.Dense(1)(x)
    out2=tf.keras.layers.Dense(1)(x)
    out3=tf.keras.layers.Dense(1)(x)
    out4=tf.keras.layers.Dense(1)(x)
    prediction=[out1,out2,out3,out4]  # the model returns its four outputs as a list
    model=tf.keras.models.Model(inputs=inputs,outputs=prediction)
    
    model.compile(tf.keras.optimizers.Adam(learning_rate=0.0001),loss='mse',metrics=['mae'])
    
    EPOCHS=5
    
    
    history=model.fit(train_dataset,epochs=EPOCHS,steps_per_epoch=STEPS_PER_EPOCH,validation_steps=VALIDATION_STEPS,validation_data=test_dataset)
    
    loss=history.history['loss']
    val_loss=history.history['val_loss']
    epochs=range(EPOCHS)
    plt.figure()
    plt.plot(epochs,loss,'r',label='Training loss')
    plt.plot(epochs,val_loss,'bo',label='Validation loss')
    plt.title('Training and Validation Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss Value')
    plt.ylim([0,1])
    plt.legend()
    plt.show()
    
    
    # save the model
    model.save('detect_v1.h5')
    
    new_model=tf.keras.models.load_model('detect_v1.h5')
    
    plt.figure(figsize=(8,24))
    for img,_ in test_dataset.take(1):  # one batch of test data
        out1,out2,out3,out4=new_model.predict(img)
        for i in range(3):
            plt.subplot(3,1,i+1)
            plt.imshow(tf.keras.preprocessing.image.array_to_img(img[i]))
            xmin,ymin,xmax,ymax=out1[i]*224,out2[i]*224,out3[i]*224,out4[i]*224
            rect=Rectangle((xmin,ymin),(xmax-xmin),(ymax-ymin),fill=False,color='red')  # (xmin, ymin) corner, width, height, unfilled
            ax=plt.gca()  # get the current axes
            ax.axes.add_patch(rect)
    

      
