
# Install dependencies
$ sudo apt-get update
$ sudo apt-get upgrade
$ sudo apt-get install unzip git tree
$ sudo apt install python3                        # Python3
$ sudo apt install python3-pip                    # Pip3
$ pip3 install simplejson requests fake_useragent

# Download crawlergo_x_XRAY
$ git clone https://github.com/timwhitez/crawlergo_x_XRAY.git
$ cd crawlergo_x_XRAY/
# Edit launcher.py: change the Chrome path in the crawlergo command
$ vim launcher.py

# launcher.py
[-] cmd = ["./crawlergo", "-c", "C:\Program Files (x86)\Google\Chrome\Application\chrome.exe","-t", "20","-f","smart","--fuzz-path", "--output-mode", "json", target]
[+] cmd = ["./crawlergo", "-c", "/snap/bin/chromium","-t", "20","-f","smart","--fuzz-path", "--output-mode", "json", target]

# Remove extra files
$ rm launcher_new.py README.md
$ rm -rf img/
# Move the two files into the crawlergo directory
$ mv launcher.py crawlergo/
$ mv targets.txt crawlergo/

# Download and unzip crawlergo_linux_amd64
$ cd crawlergo/
$ wget https://github.com/0Kee-Team/crawlergo/releases/download/v0.4.0/crawlergo_linux_amd64.zip
$ unzip crawlergo_linux_amd64.zip && rm crawlergo_linux_amd64.zip
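
launcher.py is the glue here: it reads targets.txt, builds the crawlergo command shown in the diff above for each target, and gets the crawl results into xray. A minimal, hypothetical sketch of that idea, using crawlergo's --push-to-proxy flag to hand crawled requests to xray on 127.0.0.1:7777 (the repo's actual launcher.py is more elaborate):

# launcher_sketch.py - simplified, hypothetical; not the repo's launcher.py
import simplejson
import subprocess

XRAY_PROXY = "http://127.0.0.1:7777"  # assumption: xray's webscan --listen address

def crawl(target):
    # --push-to-proxy hands every crawled request to xray's passive proxy
    cmd = ["./crawlergo", "-c", "/snap/bin/chromium", "-t", "20",
           "-f", "smart", "--fuzz-path", "--output-mode", "json",
           "--push-to-proxy", XRAY_PROXY, target]
    rsp = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    output, _ = rsp.communicate()
    # crawlergo prints its JSON result after a "--[Mission Complete]--" marker
    result = simplejson.loads(output.decode().split("--[Mission Complete]--")[1])
    return [req["url"] for req in result["req_list"]]

if __name__ == '__main__':
    with open('targets.txt') as f:
        for line in f:
            target = line.strip()
            if target:
                print(f'[+] {target}: {len(crawl(target))} requests crawled')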
# Download and unzip xray_linux_amd64
$ cd ../xray/
$ wget https://github.com/chaitin/xray/releases/download/1.5.0/xray_linux_amd64.zip 
$ unzip xray_linux_amd64.zip && rm xray_linux_amd64.zip
# Generate the CA certificate
$ ./xray_linux_amd64 genca

# Install Chromium
$ sudo apt install chromium-browser
# Install the certificate
$ sudo cp ca.crt /usr/local/share/ca-certificates/xray.crt
$ sudo update-ca-certificates

# httprobe
$ cd ../
# Download and extract httprobe
$ wget https://github.com/tomnomnom/httprobe/releases/download/v0.1.2/httprobe-linux-amd64-0.1.2.tgz
$ tar zxvf httprobe-linux-amd64-0.1.2.tgz
$ rm httprobe-linux-amd64-0.1.2.tgz
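
httprobe reads bare domain names on stdin and prints the http:// and https:// URLs that actually respond; check.py below simply pipes the domain file through it. An illustrative Python equivalent, with hypothetical domains:

# probe_sketch.py - illustrative only; check.py below does the same via os.popen
import subprocess

domains = "example.com\nexample.org\n"  # hypothetical input, no http(s):// scheme
out = subprocess.run(["./httprobe"], input=domains.encode(),
                     capture_output=True).stdout.decode()
print(out)  # alive targets, e.g. http://example.com and https://example.com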
# Create the scan script
$ vim check.py

Before running check.py, fill in the chromium and httprobe paths, then run it with python3 check.py -f domains.txt. Domains in domains.txt must not carry the http(s):// scheme, otherwise alive detection will not work correctly.

check.py:

# coding: utf-8
import os
import argparse
# Path
chrome_path = r'/snap/bin/chromium'
httprobe_path = r'/root/crawlergo_x_XRAY/httprobe'
save_dir_name = './'
def parse_args():
    usage = "python3 check.py -f domains.txt"
    parser = argparse.ArgumentParser(usage=usage)
    parser.add_argument('-f', '--file', help='Input Domains File', type=str)
    return parser.parse_args()
def do_httprobe():
    # Pipe the domain file into httprobe; its output is the list of alive http(s) targets
    path = args.file
    if os.name == 'nt':
        httprobe_result = os.popen(f'type {path} | {httprobe_path}').read()
    elif os.name == 'posix':
        httprobe_result = os.popen(f'cat {path} | {httprobe_path}').read()
    else:
        print('[-] Unable to identify operating system')
        return
    save_path = os.path.join(save_dir_name, 'targets.txt')
    with open(save_path, 'w', encoding='utf-8') as file_obj:
        file_obj.write(httprobe_result)
    print('[+] Alive subdomains are saved in %s' % save_path)
def main():
    if not os.path.exists(args.file):
        print(f'[-] {args.file} does not exist, please check.')
    else:
        do_httprobe()
if __name__ == '__main__':
    args = parse_args()
    main()

In the crawlergo_x_XRAY/ directory, create a run.sh script:

# Alive detection
if [[ $1 == "check" ]]
then
        if [ -n "$2" ]
        then
                python3 check.py -f "$2"
        else
                echo "[-] No Domain File"
                echo "Example: bash run.sh check domain.txt"
        fi
# Start an HTTP server
elif [[ $1 == "http" ]]
then
        python3 -m http.server 80
# Vulnerability scanning
elif [[ $1 == "start" ]]
then
        today=$(date +%Y%m%d-%H%M%S)
        echo "[+] Start at $today"
        # Enter the xray directory and run xray in the background; scan results go to an HTML report, runtime output to logs.xray
        cd xray/
        nohup ./xray_linux_amd64 webscan --listen 127.0.0.1:7777 --html-output $today.html >../logs.xray 2>&1 &
        echo "[+] Xray Run Success..."
        sleep 3
        # Enter the crawlergo directory and run launcher.py
        cd ../crawlergo/
        nohup python3 launcher.py >../logs.crawlergo 2>&1 &
        echo "[+] Crawler_X_Xray Run Success..."
# Usage
else
        echo "Usage:
        Alive detection:        bash run.sh check <Domain_File>
        Start HTTP server:      bash run.sh http
        Vulnerability scanning: bash run.sh start
        "
fi
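
The start branch works because xray webscan --listen runs xray as a passive proxy: every request routed through 127.0.0.1:7777 gets scanned. crawlergo supplies that traffic here, but any HTTP client can. A minimal sketch, with a deliberately vulnerable public test site as a stand-in target:

# feed_xray.py - push one request through xray's passive proxy (sketch)
import requests

proxies = {"http": "http://127.0.0.1:7777",
           "https": "http://127.0.0.1:7777"}
# For https targets, trust xray's MITM CA from the genca step, e.g.
# requests.get(url, proxies=proxies, verify="/usr/local/share/ca-certificates/xray.crt")
r = requests.get("http://testphp.vulnweb.com/", proxies=proxies, timeout=10)
print(r.status_code)  # xray scans the proxied request and logs any findings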
The final directory structure:

├── check.py
├── crawlergo
│   ├── crawlergo
│   └── launcher.py
├── httprobe
├── run.sh
└── xray
    ├── ca.crt
    ├── ca.key
    ├── config.yaml
    └── xray_linux_amd64

Back in the project root, verify that everything runs:

$ cd ../
$ which chromium
$ ./xray/xray_linux_amd64 version
$ ./crawlergo/crawlergo -c /snap/bin/chromium -t 5 http://www.baidu.com

Write the target domains into domains.txt and run alive detection. The domains must not carry the http(s):// scheme.

$ bash run.sh check domains.txt
# Or: python3 check.py -f domains.txt

Move the generated targets.txt into the crawlergo directory, then start scanning:

$ mv targets.txt crawlergo/
$ bash run.sh start

To inspect or stop the background processes:

$ ps -ef         # List processes
$ kill -9 <pid>  # Kill a process

To view the generated HTML reports, serve the directory over HTTP:
$ bash run.sh http
# Or: python3 -m http.server 8080

To detect vulnerabilities that rely on out-of-band callbacks, enable the reverse platform in xray's config.yaml:

# Reverse platform configuration; see https://docs.xray.cool/#/configration/reverse for more details
# Note: the reverse platform is disabled by default, which means vulnerabilities that depend on it cannot be found; these include fastjson, ssrf, and any PoC that relies on an out-of-band connection
reverse:
  http:
    enabled: true
    listen_ip: <IP>
    listen_port: <PORT>
  client:
    http_base_url: "http://<IP>:<PORT>"  # Generated from listen_ip and listen_port by default; this is the address vulnerable targets connect back to. Set it manually when the reverse platform sits behind a reverse proxy, a bound domain name, or port mapping
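
After enabling reverse and restarting xray, it is worth confirming that the reverse HTTP service actually answers before relying on it; a quick sketch, keeping the placeholder address from the config above:

# reverse_check.py - confirm the reverse platform answers over HTTP (sketch)
import requests

REVERSE_URL = "http://<IP>:<PORT>"  # fill in: must match reverse.client.http_base_url
try:
    r = requests.get(REVERSE_URL, timeout=5)
    print("[+] reverse platform reachable, HTTP", r.status_code)
except requests.RequestException as e:
    print("[-] reverse platform unreachable:", e)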