[Share] Fitting Mathematical Functions with a Neural Network, v2

Posted: 2025-2-5 18:35

For learning purposes, I asked DeepSeek R1 to rewrite the code from my earlier article "Fitting Mathematical Functions with a Neural Network" using PyTorch. I really didn't have to write a single line of code myself, and it ran perfectly. Today's AI is impressive!

Environment

[Screenshot: environment details]
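The original screenshot isn't reproduced here. As a rough sketch (assuming a standard Python 3 + PyTorch install; the exact versions in the screenshot are unknown), the environment can be recorded like this:

import sys
import torch

# Print the interpreter and PyTorch versions used for this experiment
print("Python :", sys.version.split()[0])
print("PyTorch:", torch.__version__)
print("CUDA   :", torch.cuda.is_available())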

torch_nn_fx.py

import torch
import torch.nn as nn
import torch.optim as optim
 
class RegressionNet(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(RegressionNet, self).__init__()
        self.fc1 = nn.Linear(input_dim, 10)
        self.sigmoid1 = nn.Sigmoid()
        self.fc2 = nn.Linear(10, 10)
        self.sigmoid2 = nn.Sigmoid()
        self.fc3 = nn.Linear(10, output_dim)
         
        # Initialize all weights and biases from a standard normal distribution
        nn.init.normal_(self.fc1.weight)
        nn.init.normal_(self.fc1.bias)
        nn.init.normal_(self.fc2.weight)
        nn.init.normal_(self.fc2.bias)
        nn.init.normal_(self.fc3.weight)
        nn.init.normal_(self.fc3.bias)
 
    def forward(self, x):
        x = self.fc1(x)
        x = self.sigmoid1(x)
        x = self.fc2(x)
        x = self.sigmoid2(x)
        x = self.fc3(x)
        return x
 
class TorchFitFx:
    def __init__(self):
        self.model = None
        self.max_x = 1.0
        self.max_y = 1.0
 
    def _normalize(self, data, max_val):
        return torch.FloatTensor(data) / max_val
 
    def _denormalize(self, data, max_val):
        return data * max_val
 
    def _get_max_abs(self, tensor):
        return max(tensor.abs().max().item(), 1e-7)
 
    def train(self, X, Y, epochs=100000, lr=0.01):
        # Data preprocessing
        X_tensor = torch.FloatTensor(X)
        Y_tensor = torch.FloatTensor(Y)
         
        self.max_x = self._get_max_abs(X_tensor)
        self.max_y = self._get_max_abs(Y_tensor)
         
        X_normalized = X_tensor / self.max_x
        Y_normalized = Y_tensor / self.max_y
 
        # Initialize the model
        input_dim = len(X[0])  # changed: infer input dimension from the nested list
        output_dim = len(Y[0]) # changed: infer output dimension from the nested list
        self.model = RegressionNet(input_dim, output_dim)
         
        criterion = nn.MSELoss()
        optimizer = optim.SGD(self.model.parameters(), lr=lr)
 
        # Training loop
        self.model.train()
        for epoch in range(epochs):
            optimizer.zero_grad()
             
            outputs = self.model(X_normalized)
            loss = criterion(outputs, Y_normalized)
             
            loss.backward()
            optimizer.step()
 
            if epoch % 10000 == 0:
                print(f"Epoch {epoch}, Loss: {loss.item():.6f}")
                if loss.item() < 0.0001:
                    break
 
    def predict(self, X):
        if self.model is None:
            raise RuntimeError("Model not trained yet!")
             
        self.model.eval()
        with torch.no_grad():
            X_tensor = torch.FloatTensor(X)
            X_normalized = X_tensor / self.max_x
            preds = self.model(X_normalized)
            return self._denormalize(preds, self.max_y).numpy()
 
# Example usage
if __name__ == "__main__":
    def fx2(x):
        return (x - 5)**2
 
    # Generate training data (make sure X and Y are both 2-D lists)
    X = [[i] for i in range(0, 11)]  # 2-D list
    Y = [[fx2(x[0])] for x in X]     # 2-D list
 
    # Train the model
    fit_fx = TorchFitFx()
    fit_fx.train(X, Y, epochs=100000, lr=0.01)
 
    # Predict at test points
    test_points = [[i/2] for i in range(0, 21)]  # keep the 2-D structure
    predictions = fit_fx.predict(test_points)
 
    # Print predicted vs actual values
    print("\nPredictions vs Actual:")
    for x, pred in zip(test_points, predictions):
        actual = fx2(x[0])
        print(f"x={x[0]:4.1f}, Predicted: {pred[0]:6.3f}, Actual: {actual:6.1f}")

Results:
[Screenshot: console output of the training run and predictions]
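The screenshot only shows the printed numbers. If you want to see the fitted curve, a minimal plotting sketch (assuming matplotlib is installed and fit_fx has been trained as in the __main__ block above) could look like this:

import numpy as np
import matplotlib.pyplot as plt

# Evaluate the trained model on a dense grid and compare with (x-5)^2
xs = np.linspace(0, 10, 200).reshape(-1, 1)
preds = fit_fx.predict(xs.tolist())   # numpy array of shape (200, 1)

plt.plot(xs, (xs - 5) ** 2, label="actual (x-5)^2")
plt.plot(xs, preds, "--", label="network prediction")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.show()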

torch_nn_fx_Sequential.py

Asking it to optimize the code was handled just as easily.
[Screenshot]

import torch
import torch.nn as nn
import torch.optim as optim
 
class RegressionNet(nn.Module):
    def __init__(self, input_dim, output_dim):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_dim, 10),
            nn.Sigmoid(),
            nn.Linear(10, 10),
            nn.Sigmoid(),
            nn.Linear(10, output_dim)
        )
        self._init_weights()
 
    def _init_weights(self):
        for layer in self.net:
            if isinstance(layer, nn.Linear):
                nn.init.normal_(layer.weight)
                nn.init.normal_(layer.bias)
 
    def forward(self, x):
        return self.net(x)
 
class TorchFitFx:
    def __init__(self):
        self.model = None
        self.max_x = 1.0
        self.max_y = 1.0
 
    def train(self, X, Y, epochs=100000, lr=0.01):
        X_tensor = torch.as_tensor(X, dtype=torch.float32)
        Y_tensor = torch.as_tensor(Y, dtype=torch.float32)
         
        self.max_x = X_tensor.abs().max().clamp_min(1e-7)
        self.max_y = Y_tensor.abs().max().clamp_min(1e-7)
         
        X_normalized = X_tensor / self.max_x
        Y_normalized = Y_tensor / self.max_y
 
        self.model = RegressionNet(X_tensor.shape[1], Y_tensor.shape[1])
        optimizer = optim.SGD(self.model.parameters(), lr=lr)
        criterion = nn.MSELoss()
 
        self.model.train()
        for epoch in range(epochs):
            optimizer.zero_grad()
            outputs = self.model(X_normalized)
            loss = criterion(outputs, Y_normalized)
            loss.backward()
            optimizer.step()
 
            if epoch % 10000 == 0 or loss < 0.0001:
                print(f"Epoch {epoch:5d}, Loss: {loss.item():.6f}")
                if loss < 0.0001:
                    break
 
    def predict(self, X):
        if self.model is None:
            raise RuntimeError("Model not trained!")
         
        self.model.eval()
        with torch.no_grad():
            X_tensor = torch.as_tensor(X, dtype=torch.float32)
            return self.model(X_tensor / self.max_x) * self.max_y
 
# Example usage
if __name__ == "__main__":
    def fx2(x): return (x - 5)**2
 
    # Generate training data (2-D structure)
    X = [[i] for i in range(11)]
    Y = [[fx2(x[0])] for x in X]
 
    # Train the model
    fit_fx = TorchFitFx()
    fit_fx.train(X, Y, epochs=100000, lr=0.01)
 
    # Predict at test points
    test_points = [[i/2] for i in range(21)]
    predictions = fit_fx.predict(test_points)
 
    # Compare predicted vs actual values
    print("\nPredictions vs Actual:")
    for x, pred in zip(test_points, predictions):
        print(f"x={x[0]:4.1f}, Predicted: {pred.item():6.3f}, Actual: {fx2(x[0]):6.1f}")

Results:
[Screenshot: console output of the training run and predictions]
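As an extra check that is not in the original post, the same TorchFitFx class should also handle other one-dimensional functions. A sketch using sin(x); the hyperparameters here are guesses and may need tuning:

import math

def fx_sin(x):
    return math.sin(x)

# Sample sin(x) on roughly one period
X = [[i * 0.2] for i in range(32)]
Y = [[fx_sin(x[0])] for x in X]

fit_sin = TorchFitFx()
fit_sin.train(X, Y, epochs=100000, lr=0.05)

# Spot-check a few points that were not on the training grid
for x in ([1.1], [2.7], [4.3]):
    pred = fit_sin.predict([x])
    print(f"x={x[0]:.1f}, Predicted: {pred.item():6.3f}, Actual: {math.sin(x[0]):6.3f}")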

