import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
class Activate(object):
    """Three ReLU implementations: pure NumPy, the Tensor.relu() method,
    and torch.nn.functional.relu."""

    @staticmethod
    def relu(X):
        # NumPy version: flatten, clamp negatives to zero element-wise,
        # then restore the original shape.
        X = np.array(X)
        shape = X.shape
        flat = X.reshape(-1)
        out = np.array([0 if v < 0 else v for v in flat])
        return out.reshape(shape)

    @staticmethod
    def relu2(X):
        # Tensor-method version: convert to a float64 tensor, call .relu().
        X = torch.tensor(data=X, dtype=torch.float64)
        return X.relu()

    @staticmethod
    def relu3(X):
        # Functional version. inplace=True rewrites the tensor it receives;
        # torch.tensor() already made a fresh copy of X here, so the
        # caller's array is not modified.
        X = torch.tensor(data=X)
        return F.relu(input=X, inplace=True)
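# Aside (a sketch): the Python-level loop in relu() can be replaced by a
# single vectorized call; np.maximum broadcasts the scalar 0 over the whole
# array, so no flatten/reshape is needed.
def relu_vectorized(X):
    return np.maximum(np.asarray(X), 0)
# np.allclose(Activate.relu(X=A), relu_vectorized(A))  # True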
np.random.seed(73)
A = np.random.randn(7, 3)
# print(A)
"""
[[ 0.57681305 2.1311088 2.44021967]
[ 0.26332687 -1.49612065 -0.03673531]
[ 0.43069579 -1.52947433 -0.73025968]
[ 1.05131524 1.61979267 -1.60501337]
[ 0.33100953 -0.21095236 0.2981767 ]
[-1.14607352 0.57536202 -0.36390663]
[ 0.03639919 -0.52056399 -0.01576433]]
"""
# B = Activate.relu(X=A)
# print(B)
"""
[[0.57681305 2.1311088 2.44021967]
[0.26332687 0. 0. ]
[0.43069579 0. 0. ]
[1.05131524 1.61979267 0. ]
[0.33100953 0. 0.2981767 ]
[0. 0.57536202 0. ]
[0.03639919 0. 0. ]]
"""
# B = Activate.relu2(X=A)
# print(B)
"""可以看出torch.relu()只保留了四位有效数字
tensor([[0.5768, 2.1311, 2.4402],
[0.2633, 0.0000, 0.0000],
[0.4307, 0.0000, 0.0000],
[1.0513, 1.6198, 0.0000],
[0.3310, 0.0000, 0.2982],
[0.0000, 0.5754, 0.0000],
[0.0364, 0.0000, 0.0000]], dtype=torch.float64)
"""
B = Activate.relu3(X=A)
print(B)
"""F.relu同样改变了数据的精度
tensor([[0.5768, 2.1311, 2.4402],
[0.2633, 0.0000, 0.0000],
[0.4307, 0.0000, 0.0000],
[1.0513, 1.6198, 0.0000],
[0.3310, 0.0000, 0.2982],
[0.0000, 0.5754, 0.0000],
[0.0364, 0.0000, 0.0000]], dtype=torch.float64)
"""