Set `bias` argument in PyTorch

Super Kai (Kazuya Ito) - Sep 19 - Dev Community


You can set the `bias` argument as shown below:

*Memos:

Linear(). *My post explains Linear():

import torch
from torch import nn

my_tensor = torch.tensor([8., -3., 0., 1., 5., -2.])

torch.manual_seed(42)

linear = nn.Linear(in_features=6, out_features=3, bias=True)  # bias=True is the default

linear.bias
# Parameter containing:
# tensor([-0.1906, 0.1041, -0.1881], requires_grad=True)

linear(input=my_tensor)
# tensor([1.0529, -0.8833,  3.4542], grad_fn=<ViewBackward0>)

torch.manual_seed(42)

linear = nn.Linear(in_features=6, out_features=3, bias=False)

print(linear.bias)
# None

linear(input=my_tensor)
# tensor([1.2434, -0.9874, 3.6423], grad_fn=<SqueezeBackward4>)
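Because both runs above reuse the same seed, the weight matrix is identical in both cases, so the only difference is the added bias vector. A minimal sketch (my own check, not part of the snippet above) confirming that `Linear` computes `weight @ input + bias`:

import torch
from torch import nn

torch.manual_seed(42)
linear = nn.Linear(in_features=6, out_features=3, bias=True)

my_tensor = torch.tensor([8., -3., 0., 1., 5., -2.])

# Reproduce the layer's output by hand: y = W @ x + b.
manual = linear.weight @ my_tensor + linear.bias

print(torch.allclose(manual, linear(input=my_tensor)))
# True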

Conv1d(). *My post explains Conv1d():

import torch
from torch import nn

my_tensor = torch.tensor([[8., -3., 0., 1., 5., -2.]])

torch.manual_seed(42)

conv1d = nn.Conv1d(in_channels=1, out_channels=3,
                   kernel_size=1, bias=True)
conv1d.bias
# Parameter containing:
# tensor([0.9186, -0.2191, 0.2018], requires_grad=True)

conv1d(input=my_tensor)
# tensor([[7.0349, -1.3750, 0.9186, 1.6831, 4.7413, -0.6105],
#         [6.4210, -2.7091, -0.2191, 0.6109, 3.9309, -1.8791],
#         [-1.6724, 0.9046, 0.2018, -0.0325, -0.9696, 0.6703]],
#        grad_fn=<SqueezeBackward1>)

torch.manual_seed(42)

conv1d = nn.Conv1d(in_channels=1, out_channels=3,
                   kernel_size=1, bias=False)
print(conv1d.bias)
# None

conv1d(input=my_tensor)
# tensor([[6.1163, -2.2936, 0.0000, 0.7645, 3.8227, -1.5291],
#         [6.6401, -2.4900, 0.0000, 0.8300, 4.1500, -1.6600],
#         [-1.8742, 0.7028, -0.0000, -0.2343, -1.1714, 0.4685]],
#        grad_fn=<SqueezeBackward1>)
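Since `kernel_size=1` and `in_channels=1`, each output channel is just the input scaled by one weight plus that channel's bias, which is why every `bias=False` row above is an exact multiple of the input. A minimal sketch (the reshapes are my own) verifying this:

import torch
from torch import nn

torch.manual_seed(42)
conv1d = nn.Conv1d(in_channels=1, out_channels=3,
                   kernel_size=1, bias=True)

my_tensor = torch.tensor([[8., -3., 0., 1., 5., -2.]])

# With kernel_size=1, channel c is weight[c] * input + bias[c].
manual = conv1d.weight.view(3, 1) * my_tensor + conv1d.bias.view(3, 1)

print(torch.allclose(manual, conv1d(input=my_tensor)))
# True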

ConvTranspose1d(). *My post explains ConvTranspose1d():

import torch
from torch import nn

my_tensor = torch.tensor([[8., -3., 0., 1., 5., -2.]])

torch.manual_seed(42)

convtran1d = nn.ConvTranspose1d(in_channels=1, out_channels=3,
                                kernel_size=1, bias=True)
convtran1d.bias
# Parameter containing:
# tensor([0.5304, -0.1265, 0.1165], requires_grad=True)

convtran1d(input=my_tensor)
# tensor([[4.0616, -0.7939, 0.5304, 0.9718, 2.7374, -0.3525],
#         [3.7071, -1.5641, -0.1265, 0.3527, 2.2695, -1.0849],
#         [-0.9656, 0.5223, 0.1165, -0.0188, -0.5598, 0.3870]],
#        grad_fn=<SqueezeBackward1>)

torch.manual_seed(42)

convtran1d = nn.ConvTranspose1d(in_channels=1, out_channels=3,
                                kernel_size=1, bias=False)
print(convtran1d.bias)
# None

convtran1d(input=my_tensor)
# tensor([[3.5313, -1.3242, 0.0000, 0.4414, 2.2070, -0.8828],
#         [3.8336, -1.4376, 0.0000, 0.4792, 2.3960, -0.9584],
#         [-1.0821, 0.4058, 0.0000, -0.1353, -0.6763, 0.2705]],
#        grad_fn=<SqueezeBackward1>)
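Setting `bias=False` also shrinks the parameter count, since the bias tensor is simply never created. A quick sketch (my own, using the same layer settings) comparing the two:

import torch
from torch import nn

with_bias = nn.ConvTranspose1d(in_channels=1, out_channels=3,
                               kernel_size=1, bias=True)
without_bias = nn.ConvTranspose1d(in_channels=1, out_channels=3,
                                  kernel_size=1, bias=False)

# The weight has 1*3*1 = 3 elements; bias=True adds 3 more.
print(sum(p.numel() for p in with_bias.parameters()))
# 6
print(sum(p.numel() for p in without_bias.parameters()))
# 3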

LayerNorm(). *My post explains LayerNorm():

import torch
from torch import nn

my_tensor = torch.tensor([8., -3., 0., 1., 5., -2.])

torch.manual_seed(42)

layernorm = nn.LayerNorm(normalized_shape=6, bias=True)

layernorm.bias
# Parameter containing:
# tensor([0., 0., 0., 0., 0., 0.], requires_grad=True)

layernorm(input=my_tensor)
# tensor([1.6830, -1.1651, -0.3884, -0.1295, 0.9062, -0.9062],
#        grad_fn=<NativeLayerNormBackward0>)

torch.manual_seed(42)

layernorm = nn.LayerNorm(normalized_shape=6, bias=False)

print(layernorm.bias)
# None

layernorm(input=my_tensor)
# tensor([1.6830, -1.1651, -0.3884, -0.1295, 0.9062, -0.9062],
#        grad_fn=<NativeLayerNormBackward0>)
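The two `LayerNorm` outputs above match because a fresh bias is initialized to all zeros; they only diverge once training updates the bias. A minimal sketch (my own check; assumes a PyTorch version new enough to accept LayerNorm's `bias` argument) making that explicit:

import torch
from torch import nn

my_tensor = torch.tensor([8., -3., 0., 1., 5., -2.])

ln_bias = nn.LayerNorm(normalized_shape=6, bias=True)
ln_nobias = nn.LayerNorm(normalized_shape=6, bias=False)

# The zero-initialized bias adds nothing, so both outputs agree.
print(torch.allclose(ln_bias(input=my_tensor), ln_nobias(input=my_tensor)))
# True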