  • Writing an exploit collector in Python

    Preface:

    Going by astronomy, geography, and feng shui, a quick pinch-of-the-fingers divination led me to a pretty good article about organizing exploits.

    I immediately thought: why not write an exploit collector myself? No sooner said than done, I started writing.

    Code:

    Idea: crawl http://expku.com and observe how its URLs change from page to page.
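
    For example, each category listing paginates in a predictable way. A minimal sketch of the pattern (the page counts are the ones hard-coded in the script below and may have changed since):

    import requests

    # listing-page URL templates observed on expku.com
    sections = {
        'web':    'http://expku.com/web/list_6_{}.html',     # 88 pages
        'remote': 'http://expku.com/remote/list_4_{}.html',  # 75 pages
        'local':  'http://expku.com/local/list_5_{}.html',   # 56 pages
    }
    for name, tpl in sections.items():
        r = requests.get(tpl.format(0))
        print(name, r.status_code, r.url)

    The full script: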

    import requests
    from bs4 import BeautifulSoup
    import re
    import optparse
    
    urlsd=[]
    
    def main():
        parser=optparse.OptionParser()
        parser.add_option('-m',action='store_true',dest='home',help='Save the homepage exploit list locally')
        parser.add_option('-w',action='store_true',dest='web',help='Save all web exploits')
        parser.add_option('-s',dest='search',help='Search for an exploit')
        parser.add_option('-y',action='store_true',dest='long',help='Save all remote exploits')
        parser.add_option('-b',action='store_true',dest='local',help='Save all local exploits')
        (options,args)=parser.parse_args()
        if options.home:
            poc()
        elif options.web:
            web()
        elif options.search:
            searchs=options.search
            searchexploit(searchs)
        elif options.long:
            logins()
        elif options.local:
            local()
        else:
            parser.print_help()
            exit()
    # Crawl the expku.com homepage and save each exploit's title and URL to exploitcs1.txt.
    def poc():
        global headers
        print('[+]Emptying exploitcs1.txt')
        kw=open('exploitcs1.txt','w')
        kw.close()
        print('[+]Emptying complete')
        print('[+]Generating a new exploit list')
        url='http://expku.com/'
        headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'}
        rest=requests.get(url=url,headers=headers)
        qinx=rest.content.decode('gbk')
        kop=BeautifulSoup(qinx,'html.parser')
        for x in kop.find_all('a'):
            a=re.findall('<a href=".*?" target="_blank">.*?</a>',str(x))
            neiron="".join(a)
            nei=BeautifulSoup(neiron,'html.parser')
    
            uw=nei.find_all('a')
            for u in uw:
                u1=u.get('href')
                urlsd.append('http://expku.com/'+u1)
    
        urlsd.remove(urlsd[0])  # drop the first collected link
        lk=list(set(urlsd))     # de-duplicate
        for m in lk:
            rest2=requests.get(url=m,headers=headers)
            pl=BeautifulSoup(rest2.content.decode('gbk'),'html.parser')
            for l in pl.find_all('h1'):
                ks='title:{} url:{}'.format(l.get_text(),rest2.url)
                print(ks)
                pw=open('exploitcs1.txt','a')
                pw.write(ks)
                pw.write('\n')
                pw.close()
    # Crawl every page of the web-exploit listing and save titles and URLs to exploitweb.txt.
    def web():
        print('[+]Emptying exploitweb.txt')
        odw=open('exploitweb.txt','w')
        odw.close()
        print('[+]Emptying complete')
        print('[+]Start writing the collected web exploits')
        urlsd=[]
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'}
        # iterate over the web listing pages (88 pages hard-coded)
        for h in range(88):
            url='http://expku.com/web/list_6_{}.html'.format(h)
            reques=requests.get(url=url,headers=headers)
            kw=BeautifulSoup(reques.content.decode('gbk'),'html.parser')
            vb=kw.find_all('a')
            for q in vb:
                pq=q.get('href')
                urls='http://expku.com'+pq
                kq=re.findall(r'http://expku.com/web/.*\.html',urls)
                for k in kq:
                    urlsd.append(k)
            kc=list(set(urlsd))
            for b in kc:
                tfs=requests.get(url=b,headers=headers)
                bds=BeautifulSoup(tfs.content.decode('gbk'),'html.parser')
                for t in bds.find_all('h1'):
                    print(t.get_text(), '', tfs.url)
                    print(t.get_text(),'',tfs.url,file=open('exploitweb.txt','a'))
    
    
    # Query the site's search page for a keyword and print matching titles with URLs.
    def searchexploit(searchs):
        print('[+]Search results are as follows:')
        jg=[]
        rs=[]
        urlsh=[]
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'}
        urls='http://expku.com/search.php?keyword={}'.format(searchs)
        resq=requests.get(url=urls,headers=headers)
        weq=BeautifulSoup(resq.content.decode('gbk'),'html.parser')
        oeq=weq.find_all('a')
        for r in oeq:
            ds=r.get('title')
            durl=r.get('href')
            burw=re.findall(r'/.*/.*\.html',durl)
            op="".join(burw)
            rs.append(op)
            kdw = '{}'.format(ds)
            jg.append(kdw.replace('None', ''))
        while '' in rs:
            rs.remove('')
        for g in rs:
            uw='http://expku.com'+g
            urlsh.append(uw)
    
        while '' in jg:
            jg.remove('')
    
        for g in range(min(len(jg),len(urlsh))):
            print(jg[g],urlsh[g])
    
    # Crawl the remote-exploit listing pages and save titles and URLs to exploitlong.txt.
    def logins():
        print('[+]Emptying exploitlong.txt')
        lwe=open('exploitlong.txt','w')
        lwe.close()
        print('[+]Fetching all remote exploits')
        urls=[]
        zj=[]
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'}
        # iterate over the remote listing pages (75 pages hard-coded)
        for i in range(75):
            url='http://expku.com/remote/list_4_{}.html'.format(i)
            regw=requests.get(url=url,headers=headers)
            lvq=BeautifulSoup(regw.content.decode('gbk'),'html.parser')
            fwq=lvq.find_all('a')
            for d in fwq:
                eq=d.get('href')
                oeq=re.findall(r'/remote/.*\.html',eq)
                for b in oeq:
                    ks='http://expku.com'+b
                    urls.append(ks)
            qwe=list(set(urls))
            for asd in lvq.find_all('a'):
                kl=re.findall('<a href=".*" target="_blank">.*</a>',str(asd))
                for n in kl:
                    vk=n
                    peq=BeautifulSoup(vk,'html.parser')
                    for t in qwe:
                        zj.append(peq.get_text()+' '+t)
            jb=list(set(zj))
            for j in jb:
                print(j)
                print(j,file=open('exploitlong.txt','a'))
    
    
    # Crawl the local-exploit listing pages and save titles and URLs to exploitlocal.txt.
    def local():
        print('[+]Emptying exploitlocal.txt')
        wd=open('exploitlocal.txt','w')
        wd.close()
        print('[+]Fetching all local exploits')
        # iterate over the local listing pages (56 pages hard-coded)
        for j in range(56):
            uk=[]
            url='http://expku.com/local/list_5_{}.html'.format(j)
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'}
            rwqe=requests.get(url=url,headers=headers)
            vdq=BeautifulSoup(rwqe.content.decode('gbk'),'html.parser')
            hk=vdq.find_all('a')
            for f in hk:
                ddo=f.get('href')
                kio=re.findall(r'/local/.*\.html',str(ddo))
                for g in kio:
                    url='http://expku.com'+g
                    uk.append(url)
            yf=list(set(uk))
            for c in yf:
                rtq=requests.get(url=c,headers=headers)
                vdq=BeautifulSoup(rtq.content.decode('gbk'),'html.parser')
                for a in vdq.find_all('h1'):
                    print(a.get_text(),'',rtq.url)
                    print(a.get_text(), '', rtq.url,file=open('exploitlocal.txt','a'))
    
    
    if __name__ == '__main__':
        main()
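
    Usage (the script name exploitsearch.py is the one used in the GitHub link below):

    python exploitsearch.py -m              # save the homepage exploit list
    python exploitsearch.py -w              # save all web exploits
    python exploitsearch.py -s wordpress    # search, e.g. for "wordpress"
    python exploitsearch.py -y              # save all remote exploits
    python exploitsearch.py -b              # save all local exploits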

    Results:

    Saving the crawled exploits:

    Used for searching:

    GitHub download link: https://github.com/422926799/python/blob/master/exploitsearch.py
