一.前言
1.实验内容
实验内容主要来源于论文:Byte-level malware classification based on markov images and deep learning,该论文通过恶意软件的字节序列将恶意软件可视化为马尔可夫图像作为输入数据,用来训练基于改良的VGG16模型构建的分类器。实验中所用的数据集依然是基于深度学习的恶意软件分类器(一)一文中介绍过的微软公开数据集。
2.实验环境
Python版本:3.6.13
Pytorch版本:1.8.1
CUDA版本:11.4
二.数据处理
马尔可夫图像是基于下图所示的字节传输矩阵生成的,矩阵中的每一个元素,即字节传输概率Pi,j都代表了一个像素点。
Pi,j的大小由如下的公式计算得到:
其中,f(m, n)代表大小为m的字节后跟大小为n的字节出现的频数。由公式可知,每一个像素点P(n|m)的值即为"字节m后跟字节n"的次数除以"字节m作为转移起点"的总次数。这些值的范围在[0, 1]之间,为了使生成的图像可以用来训练分类器,需要将所有的值归一化到[0, 255]的范围内。
假设现在有一恶意软件的字节序列为:0x00, 0x01, 0x00, 0x00, 0x02, 0x03, 0x00, 0x04, 0x00, 0x00, 0x05。此时,通过上述公式计算P(0|0)的时候,由于字节0x00后跟0x00的情况出现了2次,分子f(m, n)就等于2;而分母在求和相加之后等于字节0x00作为转移起点出现的次数,即为6。因此,该恶意软件可视化为马尔可夫图像后的第一个像素点P(0|0)的值为2/6,即3分之1。
根据上面的思路,不难写出以下针对微软数据集提供的恶意软件二进制序列可视化为马尔可夫图像的代码:
import numpy as np
# Read the hex byte text out of a .bytes file.
def read_bytes(path_byte):
    """Parse a Microsoft-dataset .bytes file into a list of byte values.

    Each line looks like "ADDRESS b0 b1 ... b15". The 8-hex-digit
    address parses to a value > 0xFF and is dropped by the range check;
    "??" placeholder tokens are skipped outright.

    Args:
        path_byte: path to the .bytes text file.

    Returns:
        list[int]: byte values in file order, each in [0, 255].
    """
    res_bytes = []
    # 'with' guarantees the handle is closed even if parsing raises.
    with open(path_byte, mode='r') as fp_byte:
        # Iterate the file lazily instead of readlines() — no need to
        # hold the whole (potentially large) file in memory.
        for byte_line in fp_byte:
            # split() (no argument) also discards empty tokens produced
            # by repeated spaces, which would crash str_byte[0] below.
            for str_byte in byte_line.split():
                if str_byte[0] == "?":  # "??" marks unreadable bytes
                    continue
                byte = int(str_byte, 16)
                # Keep real bytes only; the leading address token is
                # larger than 0xFF and is filtered out here.
                if byte <= 255:
                    res_bytes.append(byte)
    return res_bytes
# Compute the Markov image from a byte array.
def get_markov_image(array_byte):
    """Build a 256x256 Markov image from a sequence of byte values.

    Counts transitions between adjacent bytes, converts each row of the
    count matrix to conditional probabilities
    P(n|m) = f(m, n) / sum_k f(m, k), then scales every probability by
    255 / max(P) so the brightest pixel is exactly 255.

    Args:
        array_byte: sequence of ints in [0, 255].

    Returns:
        np.ndarray of shape (256, 256), dtype float64. All zeros when
        the sequence has fewer than two bytes (no transitions exist;
        the original code divided by a zero maximum in that case).
    """
    byte_frequency_map = np.zeros([256, 256])
    if len(array_byte) < 2:
        return byte_frequency_map  # no transitions -> all-black image
    seq = np.asarray(array_byte)
    # Vectorized transition counting: np.add.at handles repeated
    # (m, n) index pairs correctly, unlike plain fancy-index +=.
    np.add.at(byte_frequency_map, (seq[:-1], seq[1:]), 1)
    # Row sums equal the number of times each byte starts a pair.
    row_sums = byte_frequency_map.sum(axis=1)
    nonzero = row_sums > 0
    byte_frequency_map[nonzero] /= row_sums[nonzero, np.newaxis]
    byte_max = np.max(byte_frequency_map)
    if byte_max > 0:
        # Stretch to [0, 255]. The original "% 256" was a no-op: the
        # largest value maps to exactly 255, never reaching 256.
        byte_frequency_map = byte_frequency_map * 255 / byte_max
    return byte_frequency_map
马尔可夫图像是基于字节传输概率生成的,字节大小范围为[0, 255]。因此,生成图像大小均为256,下图为将恶意软件可视化为马尔可夫图像的结果:
完成可视化工作之后,就可以将马尔可夫图像作为输入数据来训练分类器,以下是数据类的参考代码:
import torch
from torch.utils.data import Dataset
import glob
import os
import pandas as pd
from torchvision import transforms
import cv2
class MalwareDataset(Dataset):
    """Markov-image dataset.

    Training mode yields (image_tensor, class_label); test mode yields
    (image_tensor, file_name) so predictions can be matched back to
    the submission CSV.
    """

    def __init__(self, root_path, is_train):
        self.is_train = is_train
        self.file_path = glob.glob(os.path.join(root_path, "*.png"))
        self.len = len(self.file_path)
        self.transforms_data = transforms.Compose([transforms.ToTensor()])
        if is_train:
            # trainLabels.csv lives one level above the image folder.
            label_csv = os.path.join(root_path, "..", "trainLabels.csv")
            labels = get_train_label(self.file_path, pd.read_csv(label_csv))
            self.y_data = torch.Tensor(labels)

    def __getitem__(self, index):
        path = self.file_path[index]
        # Load with OpenCV, force 256x256, then convert to a CHW tensor.
        raw = cv2.resize(cv2.imread(path), (256, 256))
        image = self.transforms_data(raw)
        if self.is_train:
            return image, self.y_data[index]
        return image, get_file_name(path)

    def __len__(self):
        return self.len
# Look up the class for each file name in trainLabels.csv.
def get_train_label(file_path, df):
    """Map image file paths to zero-based class labels.

    Args:
        file_path: iterable of image paths whose stem is the sample Id.
        df: DataFrame with "Id" and "Class" columns (classes are 1-9
            in the Microsoft dataset).

    Returns:
        list[int]: zero-based class index per path, in input order.

    Raises:
        KeyError: if a file name has no matching "Id" row (the original
            code raised a bare IndexError from .values[0] instead).
    """
    # Build the Id -> Class mapping once: O(1) lookups per file instead
    # of a full DataFrame scan per file.
    label_of = dict(zip(df["Id"], df["Class"].astype(int)))
    # CSV classes are 1-based; the loss function expects 0-based.
    return [label_of[get_file_name(fp)] - 1 for fp in file_path]
# Derive the file name without directory or extension from a path.
def get_file_name(file_path):
    """Return the bare file name (no directory, no extension).

    Uses os.path instead of a hard-coded "/" so the function works with
    the platform's separator, and returns the whole name when the file
    has no extension (the old rfind(".") == -1 silently truncated the
    final character in that case).
    """
    return os.path.splitext(os.path.basename(file_path))[0]
三.模型
下图为构建的分类器所选用的模型,该模型将VGG16的全连接层改成只有单层1024个神经元的神经网络与输出层的全连接层,减少了模型的参数,加快模型的训练同时降低过拟合对分类器泛化能力的影响。
以下是与该模型对应的代码:
import torch.nn as nn
from torch.hub import load_state_dict_from_url
model_urls = {
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
}
class VGG(nn.Module):
    """VGG backbone with a slimmed-down classifier head.

    The stock VGG16 head (two 4096-unit hidden layers) is replaced by a
    single 1024-unit hidden layer plus the output layer, cutting
    parameters to speed up training and reduce overfitting.

    Args:
        features: convolutional feature extractor (see make_layers).
        num_classes: number of output classes.
        init_weights: whether to initialize all weights; pass False
            when pretrained weights will be loaded afterwards.
    """

    def __init__(self, features, num_classes=1000, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        # Fixed 7x7 spatial output regardless of input resolution.
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier1 = nn.Sequential(
            nn.Linear(512 * 7 * 7, 1024),
            # Fix: without a nonlinearity, two stacked Linear layers
            # collapse into a single linear map, defeating the purpose
            # of the 1024-unit hidden layer.
            nn.ReLU(inplace=True),
            nn.Linear(1024, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 512*7*7)
        x = self.classifier1(x)
        return x

    def _initialize_weights(self):
        """He init for convs, N(0, 0.01) for linears, unit BatchNorm."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
# Translate a VGG configuration list into a Sequential feature stack.
def make_layers(cfg, batch_norm=False):
    """Build the convolutional part of a VGG network.

    Each cfg entry is either 'M' (a 2x2 max-pool) or an int (a 3x3 conv
    producing that many channels, followed by ReLU, with an optional
    BatchNorm in between when batch_norm is True).
    """
    modules = []
    channels = 3  # the network input is an RGB image
    for item in cfg:
        if item == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        modules.append(nn.Conv2d(channels, item, kernel_size=3, padding=1))
        if batch_norm:
            modules.append(nn.BatchNorm2d(item))
        modules.append(nn.ReLU(inplace=True))
        channels = item
    return nn.Sequential(*modules)
cfgs = {
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']
}
def vgg16(pretrained=False, progress=True, **kwargs):
    """Build a VGG16 model (configuration 'D', no batch norm).

    Args:
        pretrained: load ImageNet weights from torchvision's URL.
        progress: show a download progress bar while fetching weights.
    """
    return _vgg(arch='vgg16', cfg='D', batch_norm=False,
                pretrained=pretrained, progress=progress, **kwargs)
def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
    """Construct a VGG model, optionally loading pretrained weights.

    When pretrained weights are requested, random initialization is
    skipped and the checkpoint is loaded with strict=False, because the
    slimmed classifier head ("classifier1") does not match the stock
    torchvision key names and must keep its fresh weights.
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
    if not pretrained:
        return model
    weights = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(weights, strict=False)
    return model
四.参数
本文选用cross_entropy作为损失函数,选用Adam优化器来更新模型的梯度,相关参数设置如下:
learning Rate: 1e-3
weight_decay: 1e-6
batch_size: 32
epoch: 20
因此,Configure中保存的信息如下:
import os
class Configure:
    """Central place for every path and training hyper-parameter."""

    # Directory layout relative to base_dir.
    base_dir = ""
    train_dir = os.path.join(base_dir, "train_markov_images")
    test_dir = os.path.join(base_dir, "test_markov_images")
    result_dir = os.path.join(base_dir, "submit.csv")

    # Adam optimizer settings.
    lr = 1e-3
    batch_size = 32
    decay = 1e-6
    epochs = 20

    # Persistence and task mode.
    model_path = "markov.pth"
    num_classes = 9  # the Microsoft dataset has 9 malware families
    is_train = True  # True: train then test; False: load model, test only
五.分类结果
对分类器进行训练和测试的代码如下:
import os
import sys
from Configure import Configure
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from MalwareDataset import MalwareDataset
import pandas as pd
from VGG import vgg16
def load_model(model_path, map_location=None):
    """Load a whole serialized model from disk.

    Args:
        model_path: path to the .pth file produced by save_model.
        map_location: optional device remapping forwarded to
            torch.load, e.g. "cpu" to load a GPU-trained checkpoint on
            a CPU-only host (new, backward-compatible parameter).

    Returns:
        The deserialized object.
    """
    if not os.path.exists(model_path):
        print("模型路径错误,模型加载失败")
        # Exit with a non-zero status: this is a failure, and the
        # original sys.exit(0) wrongly reported success to the shell.
        sys.exit(1)
    return torch.load(model_path, map_location=map_location)
def save_model(target_model, model_path):
    """Serialize the whole model to model_path.

    torch.save truncates and overwrites an existing file by itself, so
    the old "delete first" step was redundant — and risky: if the save
    then failed, the previous checkpoint was already gone.
    """
    torch.save(target_model, model_path)
def train(epoch):
    """Run one training epoch over train_loader.

    Relies on module globals: train_loader, optimizer, modeler, device.
    Prints the current loss periodically (batches 96, 196, 296, ...).
    """
    for batch_idx, (inputs, labels) in enumerate(train_loader):
        optimizer.zero_grad()
        inputs = inputs.to(device)
        labels = labels.to(device)
        predictions = modeler(inputs)
        # Labels are stored as float tensors; cross_entropy needs long.
        loss = F.cross_entropy(predictions, labels.long())
        if batch_idx % 100 == 96:
            print(epoch, loss.item())
        loss.backward()
        optimizer.step()
def test():
    """Predict class probabilities for the test set and append them to
    the submission CSV at conf.result_dir.

    Relies on module globals: test_loader, modeler, device, conf.
    Writes one row per sample: Id plus Prediction1..Prediction9
    (softmax probabilities for the 9 malware families).
    """
    df = pd.read_csv(conf.result_dir)
    rows = []
    with torch.no_grad():
        for inputs, file_name in test_loader:
            inputs = inputs.to(device)
            # .data dropped: gradients are already disabled here.
            outputs = modeler(inputs)
            predicted = F.softmax(outputs, dim=1)
            for i in range(len(inputs)):
                row = {"Id": file_name[i]}
                for j in range(9):
                    row["Prediction" + str(j + 1)] = predicted[i][j].item()
                rows.append(row)
    # Fix: DataFrame.append was removed in pandas 2.0 (deprecated since
    # 1.4). Collect all rows and concatenate once — also O(n) instead
    # of the quadratic per-row append.
    df = pd.concat([df, pd.DataFrame(rows)], ignore_index=True)
    df.to_csv(conf.result_dir, index=False)
if __name__ == '__main__':
    # Pin GPU enumeration to PCI bus order and expose only physical
    # GPU 5 to this process; it then appears as "cuda:0" below.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "5"
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    conf = Configure()
    # The test loader is needed in both modes: training also ends with
    # a full test pass that writes the submission file.
    test_dataset = MalwareDataset(conf.test_dir, False)
    test_loader = DataLoader(test_dataset, batch_size=conf.batch_size, shuffle=False, num_workers=2)
    if conf.is_train:
        # Fresh model seeded from ImageNet weights, plus training data.
        modeler = vgg16(pretrained=True, num_classes=conf.num_classes)
        train_dataset = MalwareDataset(conf.train_dir, True)
        train_loader = DataLoader(train_dataset, batch_size=conf.batch_size, shuffle=True, num_workers=2)
    else:
        # Inference-only mode: reuse a previously saved model.
        print("===============开始加载模型===============")
        modeler = load_model(conf.model_path)
        print("===============模型加载完成===============")
    modeler.to(device)
    if conf.is_train:
        optimizer = torch.optim.Adam(modeler.parameters(), lr=conf.lr, weight_decay=conf.decay)
        print("===============开始训练模型===============")
        for i in range(conf.epochs):
            train(i)
        print("===============模型训练完成===============")
        # Persist the trained model before evaluating it.
        save_model(modeler, conf.model_path)
    print("===============开始测试模型===============")
    test()
    print("===============模型测试完成===============")
最终分类结果如下,其中Private Score为0.06680,Public Score为0.05867。
[培训]二进制漏洞攻防(第3期);满10人开班;模糊测试与工具使用二次开发;网络协议漏洞挖掘;Linux内核漏洞挖掘与利用;AOSP漏洞挖掘与利用;代码审计。
最后于 2022-5-5 15:43
被1900编辑
,原因: