  • yolo-v3

    1. Download the code

    git clone https://github.com/pjreddie/darknet
    cd darknet

    vi Makefile    # open the Makefile

    GPU=1    # build with GPU support for training
    CUDNN=0
    make
    
    Problem encountered: nvcc was not found. Add CUDA to your PATH and library path:

    vi ~/.bashrc

    export PATH=$PATH:/usr/local/cuda-10.0/bin
    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-10.0/lib64
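
    After saving (this assumes CUDA 10.0 lives under /usr/local/cuda-10.0; adjust the version to match your install), reload the profile and check that nvcc is visible before re-running make:

    source ~/.bashrc
    nvcc --version    # should print the CUDA release info
    make clean && make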

    Under the scripts folder create a VOCdevkit folder, and inside it a VOC2019 folder. Inside VOC2019 create three folders: Annotations, ImageSets, and JPEGImages. Inside ImageSets create a Main folder.
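
    The whole tree can be created in one step (a bash sketch, run from the darknet root):

    mkdir -p scripts/VOCdevkit/VOC2019/{Annotations,ImageSets/Main,JPEGImages}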

    Create dir.py inside VOC2019 to split the data into training, validation, and test sets:
    import os
    import random

    # Fraction of the data kept for trainval (train + val) and, within
    # trainval, for train; the remainder becomes the test set.
    trainval_percent = 0.9995
    train_percent = 0.9995
    xmlfilepath = 'Annotations'
    txtsavepath = 'ImageSets/Main'
    total_xml = os.listdir(xmlfilepath)

    num = len(total_xml)
    indices = range(num)
    tv = int(num * trainval_percent)
    tr = int(tv * train_percent)
    trainval = random.sample(indices, tv)
    train = random.sample(trainval, tr)

    ftrainval = open('ImageSets/Main/trainval.txt', 'w')
    ftest = open('ImageSets/Main/test.txt', 'w')
    ftrain = open('ImageSets/Main/train.txt', 'w')
    fval = open('ImageSets/Main/val.txt', 'w')

    for i in indices:
        # Strip the .xml extension; each line is one image ID.
        name = total_xml[i][:-4] + '\n'
        if i in trainval:
            ftrainval.write(name)
            if i in train:
                ftrain.write(name)
            else:
                fval.write(name)
        else:
            ftest.write(name)

    ftrainval.close()
    ftrain.close()
    fval.close()
    ftest.close()
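
    Run it from inside VOC2019; the four ID lists are written to ImageSets/Main:

    cd scripts/VOCdevkit/VOC2019
    python dir.py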
    

      

    In the scripts folder, edit voc_label.py as follows:

    sets=[('2019', 'train'), ('2019', 'val')]
    classes = ["eye", "closed", "hand", "phone", "smoke", "mouth"]
    os.system("cat 2019_train.txt 2019_val.txt > train.txt")
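
    For reference, the core of voc_label.py is a small helper that turns a VOC box (pixel xmin, xmax, ymin, ymax) into YOLO's normalized (x_center, y_center, width, height); in the upstream script it looks essentially like this:

    def convert(size, box):
        # size = (image_width, image_height); box = (xmin, xmax, ymin, ymax)
        dw = 1.0 / size[0]
        dh = 1.0 / size[1]
        x = (box[0] + box[1]) / 2.0    # box center x, pixels
        y = (box[2] + box[3]) / 2.0    # box center y, pixels
        w = box[1] - box[0]            # box width, pixels
        h = box[3] - box[2]            # box height, pixels
        return (x * dw, y * dh, w * dw, h * dh)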
    

      

    Run python voc_label.py to convert the VOC annotations into yolov3-format labels.

    This generates a labels folder plus 2019_train.txt, 2019_val.txt, and train.txt.
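
    Each file under labels/ holds one line per object, class_id x_center y_center width height, with every coordinate normalized to [0, 1]. A hypothetical line for a phone (class 3 in voc.names below):

    3 0.481250 0.522917 0.112500 0.287500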

    Download the pretrained darknet53 backbone weights:

    wget https://pjreddie.com/media/files/darknet53.conv.74 
    

      

    Edit cfg/voc.data:

    classes= 6
    train  = /home/***/workspace/yolov3/darknet-master/scripts/2019_train.txt
    valid  = /home/***/workspace/yolov3/darknet-master/scripts/2019_val.txt
    names = data/voc.names
    backup = backup
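
    Note that train and valid must point at the 2019_train.txt and 2019_val.txt image lists generated by voc_label.py in the previous step; absolute paths are the safest choice.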
    

    Edit data/voc.names:

    eye
    closed
    hand
    phone
    smoke
    mouth
    

     

    Edit cfg/yolov3-voc.cfg:

    [net]
    # Testing
    # batch=1
    # subdivisions=1
    # Training
    batch=64
    subdivisions=16
    width=416
    height=416
    channels=3
    momentum=0.9
    decay=0.0005
    angle=0
    saturation = 1.5
    exposure = 1.5
    hue=.1
    
    learning_rate=0.001
    burn_in=1000
    max_batches = 50200
    policy=steps
    steps=40000,45000
    scales=.1,.1
    
    
    
    [convolutional]
    batch_normalize=1
    filters=32
    size=3
    stride=1
    pad=1
    activation=leaky
    
    # Downsample
    
    [convolutional]
    batch_normalize=1
    filters=64
    size=3
    stride=2
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=32
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=64
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    # Downsample
    
    [convolutional]
    batch_normalize=1
    filters=128
    size=3
    stride=2
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=64
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=128
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    [convolutional]
    batch_normalize=1
    filters=64
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=128
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    # Downsample
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=2
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    
    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    # Downsample
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=2
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    # Downsample
    
    [convolutional]
    batch_normalize=1
    filters=1024
    size=3
    stride=2
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=1024
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=1024
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=1024
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=1024
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [shortcut]
    from=-3
    activation=linear
    
    ######################
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=1024
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=1024
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=1024
    activation=leaky
    
    [convolutional]
    size=1
    stride=1
    pad=1
    filters=33
    activation=linear
    
    [yolo]
    mask = 6,7,8
    anchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
    classes=6
    num=9
    jitter=.3
    ignore_thresh = .5
    truth_thresh = 1
    random=1
    
    [route]
    layers = -4
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [upsample]
    stride=2
    
    [route]
    layers = -1, 61
    
    
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=512
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=512
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=512
    activation=leaky
    
    [convolutional]
    size=1
    stride=1
    pad=1
    filters=33
    activation=linear
    
    [yolo]
    mask = 3,4,5
    anchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
    classes=6
    num=9
    jitter=.3
    ignore_thresh = .5
    truth_thresh = 1
    random=1
    
    [route]
    layers = -4
    
    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [upsample]
    stride=2
    
    [route]
    layers = -1, 36
    
    
    
    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=256
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=256
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=256
    activation=leaky
    
    [convolutional]
    size=1
    stride=1
    pad=1
    filters=33
    activation=linear
    
    [yolo]
    mask = 0,1,2
    anchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
    classes=6
    num=9
    jitter=.3
    ignore_thresh = .5
    truth_thresh = 1
    random=1
    

      

    Summary of the cfg edits (line numbers refer to the stock yolov3-voc.cfg; the three filters lines are the convolutional layers directly before each [yolo] block):

    1. Line 605: filters=75 → 3*(classes+5) = 3*(6+5) = 33
    2. Line 689: filters=75 → 3*(classes+5) = 33
    3. Line 773: filters=75 → 3*(classes+5) = 33

    4. Line 3 (Testing): comment out batch=1 and subdivisions=1, and enable the Training values batch=64, subdivisions=16

    5. Line 611: classes=20 → classes=6 (your own class count)
    6. Line 695: classes=20 → classes=6
    7. Line 779: classes=20 → classes=6
    

      

    Training

    ./darknet detector train cfg/voc.data cfg/yolov3-voc.cfg scripts/darknet53.conv.74 -gpus 0,1
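
    If training is interrupted, it can be resumed from the checkpoint that darknet periodically writes to backup/ (a sketch; darknet names the rolling checkpoint after the cfg file):

    ./darknet detector train cfg/voc.data cfg/yolov3-voc.cfg backup/yolov3-voc.backup -gpus 0,1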
    

      

    Testing

    Weight files are written to the backup folder during training; test one against an image:

    ./darknet detector test cfg/voc.data cfg/yolov3-voc.cfg backup/yolov3-voc_600.weights data/210.jpg
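
    By default, detections below a confidence of 0.5 are hidden; pass -thresh to change this:

    ./darknet detector test cfg/voc.data cfg/yolov3-voc.cfg backup/yolov3-voc_600.weights data/210.jpg -thresh 0.25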
    

      
