برنامه نویسی

LSTM در PyTorch – انجمن DEV

‏LSTM()‎ می‌تواند از یک تانسور دو بعدی یا سه بعدی با صفر یا چند عنصر، سه تانسور دو بعدی یا سه بعدی با یک یا چند عنصر (محاسبه‌شده توسط LSTM) را مانند زیر برگرداند:
# LSTM demo: feed a 2-D unbatched tensor of shape (seq_len=1, input_size=6)
# through nn.LSTM.
# NOTE(review): the "# ..." lines are REPL outputs recorded by the article;
# the grad_fn class names (e.g. <MmBackward0>) were stripped during HTML
# scraping, leaving bare "grad_fn=".
import torch
from torch import nn

tensor1 = torch.tensor([[8., -3., 0., 1., 5., -2.]])

tensor1.requires_grad  # a freshly created tensor does not track gradients
# False

torch.manual_seed(42)  # fix the RNG so the weights/outputs below reproduce

lstm1 = nn.LSTM(input_size=6, hidden_size=3)

# An unbatched call returns (output, (h_n, c_n)).
tensor2 = lstm1(input=tensor1)
tensor2
# (tensor([[-0.2443, 0.0840, 0.4837]], grad_fn=),
# (tensor([[-0.2443, 0.0840, 0.4837]], grad_fn=),
# tensor([[-0.2616, 0.0902, 0.5288]], grad_fn=)))

# Results computed from trainable parameters carry requires_grad=True.
tensor2[0].requires_grad
tensor2[1][0].requires_grad
tensor2[1][1].requires_grad
# True

lstm1
# LSTM(6, 3)

# Constructor arguments exposed as attributes (all defaults except the
# first two, which were passed explicitly).
lstm1.input_size
# 6

lstm1.hidden_size
# 3

lstm1.num_layers
# 1

lstm1.bias
# True

lstm1.batch_first
# False

lstm1.dropout
# 0.0

lstm1.bidirectional
# False

lstm1.proj_size
# 0

# Learnable parameters of layer 0. Biases have 4*hidden_size = 12 entries
# (the four gates concatenated); the weight shapes are
# (4*hidden_size, input_size) and (4*hidden_size, hidden_size).
lstm1.bias_ih_l0
# Parameter containing:
# tensor([-0.5053, -0.3676, 0.5771, 0.1090, 0.1779, -0.5385,
# -0.3792, -0.1922, 0.0903, -0.5080, -0.2488, -0.3456],
# requires_grad=True)

lstm1.bias_hh_l0
# Parameter containing:
# tensor([0.0016, -0.2148, -0.0400, -0.3912, -0.3963, -0.3368,
# -0.1976, -0.4557, 0.4841, -0.1146, 0.4968, 0.1799],
# requires_grad=True)

lstm1.weight_ih_l0
# Parameter containing:
# tensor([[0.4414, 0.4792, -0.1353, 0.5304, -0.1265, 0.1165],
# [-0.2811, 0.3391, 0.5090, -0.4236, 0.5018, 0.1081],
# [0.4266, 0.0782, 0.2784, -0.0815, 0.4451, 0.0853],
# [-0.2695, 0.1472, -0.2660, -0.0677, -0.2345, 0.3830],
# [-0.4557, -0.2662, -0.1630, -0.3471, 0.0545, -0.5702],
# [0.5214, -0.4904, 0.4457, 0.0961, -0.1875, 0.3568],
# [0.0900, 0.4665, 0.0631, -0.1821, 0.1551, -0.1566],
# [0.2430, 0.5155, 0.3337, -0.2524, 0.3333, 0.1033],
# [0.2932, -0.3519, -0.5715, -0.2231, -0.4428, 0.4737],
# [0.1663, 0.2391, 0.1826, -0.0100, 0.4518, -0.4102],
# [0.0364, -0.3941, 0.1780, -0.1988, 0.1769, -0.1203],
# [0.4788, -0.3422, -0.3443, -0.3444, 0.5193, 0.1924]],
# requires_grad=True)

lstm1.weight_hh_l0
# Parameter containing:
# tensor([[0.5556, -0.4765, -0.5727],
# [-0.4517, -0.3884, 0.2339],
# [0.2067, 0.4797, -0.2982],
# [-0.3936, 0.3063, -0.2334],
# [0.3504, -0.1370, 0.3303],
# [-0.4486, -0.2914, 0.1760],
# [0.1221, -0.1472, 0.3441],
# [0.3925, -0.4187, -0.3082],
# [0.5287, -0.1948, -0.2047],
# [-0.5586, -0.3306, 0.1442],
# [-0.0762, -0.4191, 0.0135],
# [-0.3944, -0.4898, -0.3179]], requires_grad=True)

# Chain a second LSTM over the first one's outputs (all shaped (1, 3)).
torch.manual_seed(42)

lstm2 = nn.LSTM(input_size=3, hidden_size=3)

# NOTE(review): only one output is recorded for these two calls; the values
# shown belong to the second call.
lstm2(input=tensor2[0])
lstm2(input=tensor2[1][0])
# (tensor([[0.0832, 0.0755, 0.0117]], grad_fn=),
# (tensor([[0.0832, 0.0755, 0.0117]], grad_fn=),
# tensor([[0.1856, 0.1574, 0.0294]], grad_fn=)))

lstm2(input=tensor2[1][1])
# (tensor([[0.0803, 0.0776, 0.0118]], grad_fn=),
# (tensor([[0.0803, 0.0776, 0.0118]], grad_fn=),
# tensor([[0.1810, 0.1612, 0.0293]], grad_fn=)))

# NOTE(review): tensor2[1] is the (h_n, c_n) tuple, not a tensor; the
# recorded result is a plain two-tensor pair — confirm this call actually
# works on the PyTorch version the article used.
lstm2(input=tensor2[1])
# (tensor([[-0.4656, 0.5770, 0.0342]], grad_fn=),
# tensor([[-0.4656, 0.5770, 0.0342]], grad_fn=))

torch.manual_seed(42)

# Same module as lstm1 but with every constructor argument spelled out
# explicitly; the output matches lstm1's.
lstm = nn.LSTM(input_size=6, hidden_size=3, num_layers=1, bias=True,
batch_first=False, dropout=0.0, bidirectional=False,
proj_size=0, device=None, dtype=None)
lstm(input=tensor1)
# (tensor([[-0.2443, 0.0840, 0.4837]], grad_fn=),
# (tensor([[-0.2443, 0.0840, 0.4837]], grad_fn=),
# tensor([[-0.2616, 0.0902, 0.5288]], grad_fn=)))

# Unbatched 2-D input, two time steps of three features: shape (2, 3).
my_tensor = torch.tensor([[8., -3., 0.],
[1., 5., -2.]])
torch.manual_seed(42)

lstm = nn.LSTM(input_size=3, hidden_size=3)
lstm(input=my_tensor)
# (tensor([[-0.0367, 0.4198, -0.0198],
# [0.1306, -0.0044, -0.0685]], grad_fn=),
# (tensor([[0.1306, -0.0044, -0.0685]], grad_fn=),
# tensor([[0.1737, -0.1024, -0.5664]], grad_fn=)))

# Unbatched 2-D input, six time steps of one feature: shape (6, 1).
my_tensor = torch.tensor([[8.], [-3.], [0.],
[1.], [5.], [-2.]])
torch.manual_seed(42)

lstm = nn.LSTM(input_size=1, hidden_size=3)
lstm(input=my_tensor)
# (tensor([[-0.0166, 0.7330, 0.1206],
# [0.1032, 0.1525, -0.1500],
# [0.1405, 0.0611, -0.1123],
# [0.1237, 0.0956, -0.0111],
# [-0.0165, 0.6493, 0.1319],
# [0.1539, 0.1300, -0.0973]], grad_fn=),
# (tensor([[0.1539, 0.1300, -0.0973]], grad_fn=),
# tensor([[0.2453, 0.2261, -0.2197]], grad_fn=)))

# Batched 3-D input of shape (2, 3, 1); with the default batch_first=False
# this is read as (seq_len=2, batch=3, input_size=1), so the output is
# (2, 3, 3) and h_n/c_n are (1, 3, 3).
my_tensor = torch.tensor([[[8.], [-3.], [0.]],
[[1.], [5.], [-2.]]])
torch.manual_seed(42)

lstm = nn.LSTM(input_size=1, hidden_size=3)
lstm(input=my_tensor)
# (tensor([[[-0.0166, 0.7330, 0.1206],
# [0.1708, -0.0301, -0.1611],
# [0.1316, -0.0240, -0.0288]],
# [[-0.1315, 0.3354, 0.1798],
# [-0.0265, 0.6095, -0.0510],
# [0.2242, -0.0561, -0.1505]]], grad_fn=),
# (tensor([[[-0.1315, 0.3354, 0.1798],
# [-0.0265, 0.6095, -0.0510],
# [0.2242, -0.0561, -0.1505]]], grad_fn=),
# tensor([[[-0.4131, 0.4074, 0.3478],
# [-0.3588, 0.7394, -0.0908],
# [0.3819, -0.1140, -0.4157]]], grad_fn=)))

# Complex-valued LSTM: requesting dtype=torch.complex64 makes the
# parameters (and hence the outputs) complex.
my_tensor = torch.tensor([[[8.+0.j], [-3.+0.j], [0.+0.j]],
[[1.+0.j], [5.+0.j], [-2.+0.j]]])
torch.manual_seed(42)

lstm = nn.LSTM(input_size=1, hidden_size=3, dtype=torch.complex64)
lstm(input=my_tensor)
# (tensor([[[0.0470+0.0483j, -0.0209+0.0511j, -0.0295+0.0117j],
# [0.0148+0.3332j, -0.1477+0.6714j, -0.3587+0.2890j],
# [-0.1949+0.0649j, 0.0522-0.1214j, -0.3186-0.1003j]],
# [[-0.0046-0.0199j, -0.0496-0.0433j, 0.1316-0.2021j],
# [0.1221+0.0741j, 0.1806+0.7882j, -0.0890-0.0377j],
# [-0.2860+0.3574j, -0.0367+0.2503j, -0.3736+0.1591j]]],
# grad_fn=),
# (tensor([[[-0.0046-0.0199j, -0.0496-0.0433j, 0.1316-0.2021j],
# [0.1221+0.0741j, 0.1806+0.7882j, -0.0890-0.0377j],
# [-0.2860+0.3574j, -0.0367+0.2503j, -0.3736+0.1591j]]],
# grad_fn=),
# tensor([[[-0.0076-0.0525j, -0.0391-0.1021j, 0.3074-0.3711j],
# [0.9092+0.1833j, -0.1939+1.1534j, -0.2418+0.1878j],
# [-0.5759+0.6338j, -0.1179+0.3004j, -0.5170+0.0107j]]],
# grad_fn=)))

وارد حالت تمام صفحه شوید

از حالت تمام صفحه خارج شوید

‏LSTM()‎ می‌تواند از یک تانسور دو بعدی یا سه بعدی با صفر یا چند عنصر، سه تانسور دو بعدی یا سه بعدی با یک یا چند عنصر (محاسبه‌شده توسط LSTM) را مانند زیر برگرداند:

# NOTE(review): this section is a duplicate of the code earlier in the page
# (a scraping artifact), this time with the original comment indentation
# preserved. grad_fn class names were still stripped ("grad_fn=").
# Feed a 2-D unbatched tensor of shape (seq_len=1, input_size=6) through
# nn.LSTM.
import torch
from torch import nn

tensor1 = torch.tensor([[8., -3., 0., 1., 5., -2.]])

tensor1.requires_grad  # a freshly created tensor does not track gradients
# False

torch.manual_seed(42)  # fix the RNG so the weights/outputs below reproduce

lstm1 = nn.LSTM(input_size=6, hidden_size=3)

# An unbatched call returns (output, (h_n, c_n)).
tensor2 = lstm1(input=tensor1)
tensor2
# (tensor([[-0.2443, 0.0840, 0.4837]], grad_fn=),
# (tensor([[-0.2443, 0.0840, 0.4837]], grad_fn=),
#  tensor([[-0.2616, 0.0902, 0.5288]], grad_fn=)))

# Results computed from trainable parameters carry requires_grad=True.
tensor2[0].requires_grad
tensor2[1][0].requires_grad
tensor2[1][1].requires_grad
# True

lstm1
# LSTM(6, 3)

# Constructor arguments exposed as attributes (all defaults except the
# first two, which were passed explicitly).
lstm1.input_size
# 6

lstm1.hidden_size
# 3

lstm1.num_layers
# 1

lstm1.bias
# True

lstm1.batch_first
# False

lstm1.dropout
# 0.0

lstm1.bidirectional
# False

lstm1.proj_size
# 0

# Learnable parameters of layer 0. Biases have 4*hidden_size = 12 entries
# (the four gates concatenated); the weight shapes are
# (4*hidden_size, input_size) and (4*hidden_size, hidden_size).
lstm1.bias_ih_l0
# Parameter containing:
# tensor([-0.5053, -0.3676, 0.5771, 0.1090, 0.1779, -0.5385,
#         -0.3792, -0.1922, 0.0903, -0.5080, -0.2488, -0.3456],
#        requires_grad=True)

lstm1.bias_hh_l0
# Parameter containing:
# tensor([0.0016, -0.2148, -0.0400, -0.3912, -0.3963, -0.3368,
#         -0.1976, -0.4557, 0.4841, -0.1146,  0.4968,  0.1799],
#        requires_grad=True)

lstm1.weight_ih_l0
# Parameter containing:
# tensor([[0.4414, 0.4792, -0.1353, 0.5304, -0.1265, 0.1165],
#         [-0.2811, 0.3391, 0.5090, -0.4236, 0.5018, 0.1081],
#         [0.4266, 0.0782, 0.2784, -0.0815, 0.4451, 0.0853],
#         [-0.2695, 0.1472, -0.2660, -0.0677, -0.2345, 0.3830],
#         [-0.4557, -0.2662, -0.1630, -0.3471, 0.0545, -0.5702],
#         [0.5214, -0.4904, 0.4457, 0.0961, -0.1875, 0.3568],
#         [0.0900, 0.4665, 0.0631, -0.1821, 0.1551, -0.1566],
#         [0.2430, 0.5155, 0.3337, -0.2524, 0.3333, 0.1033],
#         [0.2932, -0.3519, -0.5715, -0.2231, -0.4428, 0.4737],
#         [0.1663, 0.2391, 0.1826, -0.0100, 0.4518, -0.4102],
#         [0.0364, -0.3941, 0.1780, -0.1988, 0.1769, -0.1203],
#         [0.4788, -0.3422, -0.3443, -0.3444, 0.5193, 0.1924]],
#        requires_grad=True)

lstm1.weight_hh_l0
# Parameter containing:
# tensor([[0.5556, -0.4765, -0.5727],
#         [-0.4517, -0.3884, 0.2339],
#         [0.2067, 0.4797, -0.2982],
#         [-0.3936, 0.3063, -0.2334],
#         [0.3504, -0.1370, 0.3303],
#         [-0.4486, -0.2914, 0.1760],
#         [0.1221, -0.1472, 0.3441],
#         [0.3925, -0.4187, -0.3082],
#         [0.5287, -0.1948, -0.2047],
#         [-0.5586, -0.3306, 0.1442],
#         [-0.0762, -0.4191, 0.0135],
#         [-0.3944, -0.4898, -0.3179]], requires_grad=True)

# Chain a second LSTM over the first one's outputs (all shaped (1, 3)).
torch.manual_seed(42)

lstm2 = nn.LSTM(input_size=3, hidden_size=3)

# NOTE(review): only one output is recorded for these two calls; the values
# shown belong to the second call.
lstm2(input=tensor2[0])
lstm2(input=tensor2[1][0])
# (tensor([[0.0832, 0.0755, 0.0117]], grad_fn=),
# (tensor([[0.0832, 0.0755, 0.0117]], grad_fn=),
#  tensor([[0.1856, 0.1574, 0.0294]], grad_fn=)))

lstm2(input=tensor2[1][1])
# (tensor([[0.0803, 0.0776, 0.0118]], grad_fn=),
# (tensor([[0.0803, 0.0776, 0.0118]], grad_fn=),
#  tensor([[0.1810, 0.1612, 0.0293]], grad_fn=)))

# NOTE(review): tensor2[1] is the (h_n, c_n) tuple, not a tensor; the
# recorded result is a plain two-tensor pair — confirm this call actually
# works on the PyTorch version the article used.
lstm2(input=tensor2[1])
# (tensor([[-0.4656, 0.5770, 0.0342]], grad_fn=),
#  tensor([[-0.4656, 0.5770, 0.0342]], grad_fn=))

torch.manual_seed(42)

# Same module as lstm1 but with every constructor argument spelled out
# explicitly; the output matches lstm1's.
lstm = nn.LSTM(input_size=6, hidden_size=3, num_layers=1, bias=True, 
               batch_first=False, dropout=0.0, bidirectional=False,
               proj_size=0, device=None, dtype=None)
lstm(input=tensor1)
# (tensor([[-0.2443, 0.0840, 0.4837]], grad_fn=),
# (tensor([[-0.2443, 0.0840, 0.4837]], grad_fn=),
#  tensor([[-0.2616, 0.0902, 0.5288]], grad_fn=)))

# Unbatched 2-D input, two time steps of three features: shape (2, 3).
my_tensor = torch.tensor([[8., -3., 0.],
                          [1., 5., -2.]])
torch.manual_seed(42)

lstm = nn.LSTM(input_size=3, hidden_size=3)
lstm(input=my_tensor)
# (tensor([[-0.0367, 0.4198, -0.0198],
#          [0.1306, -0.0044, -0.0685]], grad_fn=),
# (tensor([[0.1306, -0.0044, -0.0685]], grad_fn=),
#  tensor([[0.1737, -0.1024, -0.5664]], grad_fn=)))

# Unbatched 2-D input, six time steps of one feature: shape (6, 1).
my_tensor = torch.tensor([[8.], [-3.], [0.],
                          [1.], [5.], [-2.]])
torch.manual_seed(42)

lstm = nn.LSTM(input_size=1, hidden_size=3)
lstm(input=my_tensor)
# (tensor([[-0.0166, 0.7330, 0.1206],
#          [0.1032, 0.1525, -0.1500],
#          [0.1405, 0.0611, -0.1123],
#          [0.1237, 0.0956, -0.0111],
#          [-0.0165, 0.6493, 0.1319],
#          [0.1539, 0.1300, -0.0973]], grad_fn=),
# (tensor([[0.1539, 0.1300, -0.0973]], grad_fn=),
#  tensor([[0.2453, 0.2261, -0.2197]], grad_fn=)))

# Batched 3-D input of shape (2, 3, 1); with the default batch_first=False
# this is read as (seq_len=2, batch=3, input_size=1), so the output is
# (2, 3, 3) and h_n/c_n are (1, 3, 3).
my_tensor = torch.tensor([[[8.], [-3.], [0.]],
                          [[1.], [5.], [-2.]]])
torch.manual_seed(42)

lstm = nn.LSTM(input_size=1, hidden_size=3)
lstm(input=my_tensor)
# (tensor([[[-0.0166, 0.7330, 0.1206],
#           [0.1708, -0.0301, -0.1611],
#           [0.1316, -0.0240, -0.0288]],
#          [[-0.1315, 0.3354, 0.1798],
#           [-0.0265, 0.6095, -0.0510],
#           [0.2242, -0.0561, -0.1505]]], grad_fn=),
# (tensor([[[-0.1315, 0.3354, 0.1798],
#           [-0.0265, 0.6095, -0.0510],
#           [0.2242, -0.0561, -0.1505]]], grad_fn=),
#  tensor([[[-0.4131, 0.4074, 0.3478],
#           [-0.3588, 0.7394, -0.0908],
#           [0.3819, -0.1140, -0.4157]]], grad_fn=)))

# Complex-valued LSTM: requesting dtype=torch.complex64 makes the
# parameters (and hence the outputs) complex.
my_tensor = torch.tensor([[[8.+0.j], [-3.+0.j], [0.+0.j]],
                          [[1.+0.j], [5.+0.j], [-2.+0.j]]])
torch.manual_seed(42)

lstm = nn.LSTM(input_size=1, hidden_size=3, dtype=torch.complex64)
lstm(input=my_tensor)
# (tensor([[[0.0470+0.0483j, -0.0209+0.0511j, -0.0295+0.0117j],
#           [0.0148+0.3332j, -0.1477+0.6714j, -0.3587+0.2890j],
#           [-0.1949+0.0649j, 0.0522-0.1214j, -0.3186-0.1003j]],
#          [[-0.0046-0.0199j, -0.0496-0.0433j, 0.1316-0.2021j],
#           [0.1221+0.0741j, 0.1806+0.7882j, -0.0890-0.0377j],
#           [-0.2860+0.3574j, -0.0367+0.2503j, -0.3736+0.1591j]]],
#         grad_fn=),
# (tensor([[[-0.0046-0.0199j, -0.0496-0.0433j, 0.1316-0.2021j],
#           [0.1221+0.0741j, 0.1806+0.7882j, -0.0890-0.0377j],
#           [-0.2860+0.3574j, -0.0367+0.2503j, -0.3736+0.1591j]]],
#         grad_fn=),
#  tensor([[[-0.0076-0.0525j, -0.0391-0.1021j, 0.3074-0.3711j],
#           [0.9092+0.1833j, -0.1939+1.1534j, -0.2418+0.1878j],
#           [-0.5759+0.6338j, -0.1179+0.3004j, -0.5170+0.0107j]]],
#         grad_fn=)))
وارد حالت تمام صفحه شوید

از حالت تمام صفحه خارج شوید

نوشته های مشابه

دیدگاهتان را بنویسید

نشانی ایمیل شما منتشر نخواهد شد. بخش‌های موردنیاز علامت‌گذاری شده‌اند *

همچنین ببینید
بستن
دکمه بازگشت به بالا