The complete code below is available in a GitHub repository: link


1. Viewing Shape and Type

import numpy as np
import torch

# Inspect shape and dtype
a = np.array([[1, 2], [3, 4]])
print(a.shape, np.shape(a), a.dtype)

a = a.astype(np.float32)
print(a.dtype)

b = torch.tensor([[1, 2], [3, 4]])
print(b.shape, b.size(), b.dtype)
print(b.dtype)
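
As a side note, the PyTorch counterpart of NumPy's astype is Tensor.to (or the float() shorthand); a minimal sketch:

# Sketch: casting dtypes in PyTorch, the counterpart of ndarray.astype
b2 = b.to(torch.float32)  # explicit cast, like a.astype(np.float32)
print(b2.dtype)           # torch.float32
print(b.float().dtype)    # shorthand for the same cast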

2. Swapping Axes

import numpy as np
import torch

# Axis swapping
c = np.arange(12).reshape([2, 2, 3])
# print(c)
c1 = torch.arange(12).reshape([2, 2, 3])
print(c1)

d1 = torch.transpose(c1, 2, 0)
d2 = torch.transpose(d1, 1, 2)
print(d2)
d3 = c1.permute([2, 0, 1])
print(d3)
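
For reference, NumPy has the same pair of operations: np.swapaxes swaps exactly two axes (like torch.transpose), while np.transpose with an axis list reorders all axes at once (like permute). A small sketch using the array c from above:

# Sketch: NumPy equivalents of the two PyTorch calls above
d1_np = np.swapaxes(c, 2, 0)        # like torch.transpose(c1, 2, 0)
d3_np = np.transpose(c, [2, 0, 1])  # like c1.permute([2, 0, 1])
print(d1_np.shape, d3_np.shape)     # (3, 2, 2) (3, 2, 2)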

3. Reshaping Dimensions

import torch

a = torch.randn([2, 3, 2])
print(a)

b = a.reshape(3, 4)
print(b)

c = a.reshape(1, 12)
print(c)

d = a.reshape(12, 1)
print(d)

e = a.reshape(12)
print(e)
# e = a.unsqueeze(0)
t = torch.unsqueeze(e, 1)
print(t)

g = torch.squeeze(t, 1)
print(g)
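
One detail worth knowing: every reshape above must keep the element count at 12, and one dimension can be written as -1 so PyTorch infers it. A small sketch:

# Sketch: letting reshape infer one dimension with -1
print(a.reshape(-1).shape)     # torch.Size([12]), same as a.reshape(12)
print(a.reshape(3, -1).shape)  # torch.Size([3, 4]); the -1 is inferred as 4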

4. Determinant

import numpy as np
import torch

# Compute the determinant
a = np.array([[1, 4, 7], [2, 5, 8], [3, 6, 9]])  # singular matrix (determinant 0)
print(np.linalg.det(a))

b = torch.tensor([[1, 5, 7], [2, 5, 8], [3, 6, 9]], dtype=torch.float32)
print(torch.det(b))
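
Because a is singular, np.linalg.det returns (up to floating-point rounding) 0; the matrix rank makes this explicit. A quick check:

# Sketch: confirming that a is singular
print(np.linalg.matrix_rank(a))         # 2 < 3, so the matrix is singular
print(np.isclose(np.linalg.det(a), 0))  # True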

5. Transpose

import numpy as np
import torch

a = np.arange(1, 12, 2).reshape(2, 3)
# print(a)

b = a.T
# print(b)

c = np.arange(12).reshape(2, 2, 3)
# print(c)

a1 = torch.arange(12).reshape(3, 4)
print(a1)
# q = a1.T
q = a1.t()
print(q)


d = np.transpose(c, [2, 0, 1])  # reorders the axes (and hence the element order)
# print(d.shape)
print(d)

e = np.reshape(c, [3, 2, 2])
print(e)
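
Note that d and e end up with the same shape but different contents: transpose actually reorders the elements, while reshape only regroups the existing row-major order. A quick way to see the difference:

# Sketch: transpose moves elements, reshape only regroups them
print(np.array_equal(d, e))                  # False: same shape, different order
print(np.array_equal(e.ravel(), c.ravel()))  # True: reshape keeps the flat order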

6. Plotting Activation Functions

import numpy as np
import matplotlib.pyplot as plt

x = np.arange(-10, 10, 0.1)
# print(x)

# sigmoid: 1/(1+e^-x)
y1 = 1/(1+np.exp(-x))

# tanh: (e^x-e^-x) / (e^x+e^-x)
y2 = (np.exp(x)-np.exp(-x))/(np.exp(x)+np.exp(-x))

plt.plot(x, y1)
plt.plot(x, y2)
plt.show()
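
Both formulas have library built-ins: np.tanh matches y2 directly, and scipy.special.expit computes the sigmoid (torch.sigmoid and torch.tanh are the PyTorch versions). A quick consistency check for the tanh curve:

# Sketch: the hand-written tanh matches NumPy's built-in
print(np.allclose(y2, np.tanh(x)))  # True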

7. Implementing the BP Algorithm with Random Initialization

import random
import math


# Define the network class
class Neural:

    # Sample the initial weights w from a normal distribution
    def __init__(self):
        self.w1 = random.normalvariate(0, 0.1)
        self.w2 = random.normalvariate(0, 0.1)
        self.b = 0

    # Forward pass
    def forward(self, x1, x2):
        nety = x1 * self.w1 + x2 * self.w2 + self.b
        return nety, self.outy(nety)  # return the net input and the activated output

    # Backward pass: compute the loss and update the weights
    def backward(self, x1, x2, outy, target):
        # Squared-error loss for a single sample
        Etotal = (1 / 2) * (target - outy) ** 2
        print(Etotal)

        # Gradient descent with learning rate 0.1. Since outy = sigmoid(nety),
        # sigmoid'(nety) = outy * (1 - outy), and by the chain rule
        # dE/dw1 = -(target - outy) * outy * (1 - outy) * x1 (likewise for w2 and b),
        # so subtracting the gradient means adding 0.1 * grad * x below.
        grad = (target - outy) * outy * (1 - outy)
        self.w1 = self.w1 + 0.1 * grad * x1
        self.w2 = self.w2 + 0.1 * grad * x2
        self.b = self.b + 0.1 * grad

    # Train on random samples
    def train(self):
        # Instantiate the sample class
        sample = Sample()

        # Train the network 100,000 times
        for i in range(100000):
            x1, x2, target = sample.getSingle()  # draw a random sample: inputs and target output
            nety, outy = self.forward(x1, x2)  # forward pass: compute nety and outy
            self.backward(x1, x2, outy, target)  # backward pass: compute the loss and update the weights

    # Evaluate the network on test inputs
    def verify(self):
        print("-------------------------------------------------")
        # Feed test inputs through the forward pass and inspect nety and outy
        print(self.forward(0, 0))
        print(self.forward(1, 0))
        print(self.forward(0, 1))
        print(self.forward(1, 1))
        print(self.forward(1, 2))
        print(self.forward(2, 2))

    # Sigmoid activation of nety: squashes the value into (0, 1)
    def outy(self, nety):
        return 1 / (1 + math.exp(-nety))

    # Derivative of the sigmoid with respect to nety (kept for reference;
    # backward computes outy * (1 - outy) directly)
    def _outy(self, nety):
        return self.outy(nety) * (1 - self.outy(nety))


# Define the sample class
class Sample:
    # Training-set samples
    def __init__(self):
        # Input and target samples for the logical AND problem: [x1, x2, target]
        self.s = [[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [1., 1., 1.]]

    # Draw one training sample at random
    def getSingle(self):
        return self.s[random.randint(0, 3)]


# Entry point
if __name__ == '__main__':
    neural = Neural()  # instantiate the network
    neural.train()     # train
    neural.verify()    # evaluate on test inputs
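
If you want to double-check the hand-derived gradient dE/dw1 = -(target - outy) * outy * (1 - outy) * x1 used in backward, PyTorch's autograd can confirm it. A minimal sketch (the input values are arbitrary, chosen only for the check):

# Sketch: verifying the BP gradient with torch.autograd
import torch

x1, x2, target = 1.0, 0.0, 1.0
w1 = torch.tensor(0.05, requires_grad=True)
w2 = torch.tensor(-0.03, requires_grad=True)
b = torch.tensor(0.0, requires_grad=True)

outy = torch.sigmoid(x1 * w1 + x2 * w2 + b)
loss = 0.5 * (target - outy) ** 2
loss.backward()

manual = (-(target - outy) * outy * (1 - outy) * x1).detach()
print(w1.grad.item(), manual.item())  # the two values agree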

8. Building a Linear Classifier

import numpy as np
import matplotlib.pyplot as plt
import random

'''Build a linear classifier from scratch with NumPy'''

x = np.array([0, 20]).reshape(2, 1)
y = np.array([20, 0]).reshape(2, 1)


class Net:
    def __init__(self, a, b, c, d):
        self.w1 = np.random.normal(0, 0.01, (a, b))  # [1, 10]
        self.b1 = np.zeros(b)  # [10]
        self.w2 = np.random.normal(0, 0.01, (b, c))  # [10, 100]
        self.b2 = np.zeros(c)  # [100]
        self.w3 = np.random.normal(0, 0.01, (c, d))  # [100, 1]
        self.b3 = np.zeros(d)  # [1]

    def __call__(self, x):
        return self.forward(x)

    def forward(self, x):  # x: [2, 1]
        # Forward pass through the network
        # [2,1]@[1,10]+[10]=[2,10]
        self.y1 = np.dot(x, self.w1) + self.b1  # [2, 10]
        # [2,10]@[10,100]+[100] = [2,100]
        self.y2 = np.dot(self.y1, self.w2) + self.b2 # [2, 100]
        # [2, 100]@[100,1]+[1] = [2,1]
        self.y3 = np.dot(self.y2, self.w3) + self.b3  # [2, 1]

        return self.y3

    def optimizer(self, output, label, wlr, blr):
        # Compute (approximate) gradients: np.mean collapses each
        # chain-rule factor to a scalar, so this is not exact backprop
        # output = f(x) = ((w1x+b1)*w2+b2)w3+b3
        # y1 = w1x+b1, y2 = y1*w2+b2, y3 = y2*w3+b3
        # y3 = output
        # loss = 1/2(y3-y)**2
        # dloss/dw3 = (dloss/dy3)*(dy3/dw3)
        # dloss/db3 = (dloss/dy3)*(dy3/db3)
        # the leading minus stores the negative gradient, so the update below adds it
        dw3 = np.mean(-(output - label) * self.y2)
        db3 = np.mean(-(output - label) * 1)

        # dloss/dw2 = (dloss/dy3)*(dy3/dy2)*(dy2/dw2)
        # dloss/db2 = (dloss/dy3)*(dy3/dy2)*(dy2/db2)
        dw2 = np.mean(-(output - label) * np.mean(self.w3) * self.y1)
        db2 = np.mean(-(output - label) * 1)

        # dloss/dw1 = (dloss/dy3)*(dy3/dy2)*(dy2/dy1)*(dy1/dw1)
        # dloss/db1 = (dloss/dy3)*(dy3/dy2)*(dy2/dy1)*(dy1/db1)
        dw1 = np.mean(-(output - label) * np.mean(self.w3) * np.mean(self.w2) * x)
        db1 = np.mean(-(output - label) * 1)

        # Update the parameters (dw/db already hold the negative gradients)
        self.w3 = self.w3 + wlr * dw3
        self.b3 = self.b3 + blr * db3
        self.w2 = self.w2 + wlr * dw2
        self.b2 = self.b2 + blr * db2
        self.w1 = self.w1 + wlr * dw1
        self.b1 = self.b1 + blr * db1


class MSE:
    def lossfunction(self, output, label):
        return np.mean(1 / 2 * (output - label) ** 2)

    def __call__(self, output, label):
        return self.lossfunction(output, label)


if __name__ == '__main__':
    net = Net(1, 10, 100, 1)
    loss_func = MSE()

    plt.ion()  # interactive plotting
    for i in range(10000):
        output = net(x)
        loss = loss_func(output, y)
        net.optimizer(output, y, 0.01, 0.01)

        if i % 5 == 0:
            plt.clf()
            print(loss)
            a = np.random.uniform(0, 10, 100)
            b = np.random.uniform(0, 10, 100)
            c = np.random.uniform(10, 20, 100)
            d = np.random.uniform(10, 20, 100)

            plt.scatter(a, b)
            plt.scatter(c, d)
            plt.plot(x, y)
            plt.plot(x, output)
            plt.text(0, 0, "loss:{:.3f}".format(loss), fontdict={"size":20, "color":"red"})

            plt.pause(0.01)

    plt.ioff()
    plt.show()
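
The optimizer above collapses each chain-rule factor with np.mean, so every weight matrix is nudged by a single scalar; this keeps the code short, but it is a rough approximation rather than exact backprop. For comparison, a sketch (not part of the original script) of the shape-exact gradient for the last layer, reusing net, x, and y from above, with loss = mean(1/2 (y3 - y)^2) over the N = 2 outputs:

# Sketch (assumption, not the author's code): shape-exact last-layer gradient
N = y.size                  # number of outputs (2)
dy3 = (net.y3 - y) / N      # dloss/dy3, shape [2, 1]
dw3_exact = net.y2.T @ dy3  # dloss/dw3, shape [100, 1], matches w3
db3_exact = dy3.sum(axis=0) # dloss/db3, shape [1]
print(dw3_exact.shape, db3_exact.shape)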

9. Linear Regression

import random
import matplotlib.pyplot as plt

# Linear regression trained by per-sample gradient descent
_x = [i/100 for i in range(100)]
_y = [3*j+4+random.random() for j in _x]

w = random.random()
b = random.random()
plt.ion()

for i in range(100):
    for x, y in zip(_x, _y):
        z = w*x + b
        o = z - y
        loss = 1/2*o**2
        dw = -o*x
        db = -o*1
        w = w + 0.1*dw
        b = b + 0.1*db
        print(w, b, loss)

        plt.clf()
        # plt.plot(_x, _y)
        plt.scatter(_x, _y)

        v = [w*e+b for e in _x]
        # plt.plot(_x, v)
        plt.scatter(_x, v)

        plt.pause(0.01)

plt.ioff()
plt.show()
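
Since the data is generated as y = 3x + 4 + noise, and random.random() has mean 0.5, the learned parameters should land near w = 3 and b = 4.5. A quick closed-form comparison with NumPy's least-squares fit:

# Sketch: comparing the SGD estimates with the closed-form fit
import numpy as np

slope, intercept = np.polyfit(_x, _y, 1)
print(slope, intercept)  # close to 3 and 4.5
print(w, b)              # the SGD estimates above should agree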

10. Converting ndarray to Tensor

import torch
import numpy as np


a = torch.tensor([[1, 2], [3, 4]], dtype=torch.float32)
print(a, type(a), a.dtype)

b = np.array([[1, 2], [3, 4]])
print(b, type(b), b.dtype)

c = a.numpy()
print(c)

d = torch.from_numpy(b)
print(d, type(d), d.dtype)
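
One caveat worth knowing: torch.from_numpy shares memory with the source ndarray (and Tensor.numpy shares memory with its tensor), so in-place changes propagate in both directions. A small demonstration:

# Sketch: from_numpy does not copy, it shares the underlying buffer
b[0, 0] = 100
print(d)  # the tensor sees the change
d[0, 1] = 200
print(b)  # and the ndarray sees changes made through the tensor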

11. Linear Classifier with PyTorch

import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn

# Linear classifier implemented with PyTorch

x = np.array([0, 20]).reshape(2, 1)
y = np.array([20, 0]).reshape(2, 1)

class Net(nn.Module):
    def __init__(self, a, b, c, d):
        super(Net, self).__init__()
        self.conv = nn.Sequential(
            nn.Linear(a, b),
            nn.Linear(b, c),
            nn.Linear(c, d)

        )

    def forward(self, x):
        self.y = self.conv(x)
        return self.y

if __name__ == '__main__':

    net = Net(1, 10, 100, 1)
    loss_func = nn.MSELoss()
    optim = torch.optim.SGD(net.parameters(), 0.0001)
    # for params in net.parameters():
    #     print(params)
    # exit()

    # Convert the NumPy inputs to tensors once, before the loop
    # (re-wrapping a tensor with torch.tensor on every iteration raises a warning)
    x = torch.tensor(x, dtype=torch.float32)
    y = torch.tensor(y, dtype=torch.float32)

    plt.ion()  # interactive plotting
    for i in range(10000):
        output = net(x)
        loss = loss_func(output, y)

        optim.zero_grad()
        loss.backward()
        optim.step()

        if i % 5 == 0:
            plt.clf()
            print(loss)
            a = np.random.uniform(0, 10, 100)
            b = np.random.uniform(0, 10, 100)
            c = np.random.uniform(10, 20, 100)
            d = np.random.uniform(10, 20, 100)

            plt.scatter(a, b)
            plt.scatter(c, d)
            plt.plot(x, y)
            plt.plot(x.detach(), output.detach())
            plt.text(0, 0, "loss:{:.3f}".format(loss.item()), fontdict={"size": 20, "color": "red"})

            plt.pause(0.01)

    plt.ioff()
    plt.show()

12. Linear Regression with PyTorch

import matplotlib.pyplot as plt
import torch
from torch import nn

# Linear regression implemented with PyTorch
x = torch.linspace(-1, 1, 100).reshape([100, 1])
w = torch.rand(x.size())  # sampled per element, so y is a noisy linear trend
b = torch.rand(x.size())

y = w*x + b

class Net(nn.Module):
    def __init__(self, a, b, c, d):
        super(Net, self).__init__()
        self.conv = nn.Sequential(
            nn.Linear(a, b),
            nn.Linear(b, c),
            nn.Linear(c, d)

        )

    def forward(self, x):
        self.y = self.conv(x)
        return self.y


if __name__ == '__main__':
    net = Net(1, 10, 100, 1)
    loss_fun = torch.nn.MSELoss()
    optim = torch.optim.SGD(net.parameters(), lr=0.005)

    plt.ion()
    for i in range(100):

        z = net(x)
        loss = loss_fun(z, y)

        optim.zero_grad()
        loss.backward()
        optim.step()

        print(loss)
        plt.clf()
        plt.plot(x.detach(), z.detach())
        # plt.scatter(x, y)
        # plt.scatter(x.detach(), z.detach())
        plt.scatter(x.detach(), y.detach())


        plt.pause(0.01)

    plt.ioff()
    plt.show()
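
Once training finishes, the standard way to persist the network is to save its state_dict and load it into a fresh instance; a minimal sketch (the file name net.pt is just an example):

# Sketch: saving and restoring the trained parameters
torch.save(net.state_dict(), "net.pt")

net2 = Net(1, 10, 100, 1)
net2.load_state_dict(torch.load("net.pt"))
net2.eval()  # switch to inference mode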
