# -*- coding: utf-8 -*-
"""
(Bi)LSTM for text data
Credit: https://github.com/zachAlbus/pyTorch-text-classification/blob/master/Zhang/model.py
"""
import torch
from torch import nn
from transformers import BertTokenizer
class LSTM(nn.Module):
    """Unidirectional LSTM over BERT wordpiece ids, max pooling over time, then an MLP head."""

    def __init__(self, embedding_dim=300, hidden_dim=1000, lstm_layers=2, fc_dim=512, output_dim=1):
        super(LSTM, self).__init__()
        self.use_gpu = torch.cuda.is_available()
        self.hidden_dim = hidden_dim
        self.batch_size = 64
        self.lstm_layers = lstm_layers
        # Reuse the BERT wordpiece vocabulary so inputs can be produced with the same tokenizer pipeline.
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
        self.embedding = nn.Embedding(len(self.tokenizer), embedding_dim)
        self.lstm = nn.LSTM(
            embedding_dim,
            hidden_dim,
            num_layers=lstm_layers,
        )
        self.fc = nn.Linear(hidden_dim, fc_dim)
        self.classifier = nn.Linear(fc_dim, output_dim)
        self.relu = nn.ReLU()
        self.hidden = self.init_hidden()

    def init_hidden(self, batch_size=None):
        """Zero-initialised (h0, c0) of shape (lstm_layers, batch_size, hidden_dim)."""
        if batch_size is None:
            batch_size = self.batch_size
        h0 = torch.zeros(self.lstm_layers, batch_size, self.hidden_dim)
        c0 = torch.zeros(self.lstm_layers, batch_size, self.hidden_dim)
        if self.use_gpu:
            h0, c0 = h0.cuda(), c0.cuda()
        return (h0, c0)

    def forward(self, batch_seqs, batch_seq_masks=None, batch_seq_segments=None):
        x = self.embedding(batch_seqs)  # batch x seq_len x emb_size
        x = x.permute(1, 0, 2)          # seq_len x batch x emb_size
        self.hidden = self.init_hidden(batch_seqs.size(0))  # lstm_layers x batch x hidden_dim
        x, _ = self.lstm(x, self.hidden)
        # Pool over the sequence dimension by taking the element-wise maximum.
        x = x.permute(1, 0, 2)          # batch x seq_len x hidden_dim
        x = torch.amax(x, 1)            # batch x hidden_dim
        # Fully connected head.
        x = self.relu(self.fc(x))
        out = self.classifier(x)
        return out
class BiLSTM(nn.Module):
    """Bidirectional LSTM; forward and backward states are concatenated back to hidden_dim."""

    def __init__(self, embedding_dim=300, hidden_dim=500, lstm_layers=2, fc_dim=512, output_dim=1):
        super(BiLSTM, self).__init__()
        self.use_gpu = torch.cuda.is_available()
        self.hidden_dim = hidden_dim
        self.batch_size = 64
        self.lstm_layers = lstm_layers
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
        self.embedding = nn.Embedding(len(self.tokenizer), embedding_dim)
        # Each direction gets hidden_dim // 2 units so the concatenated output stays hidden_dim wide.
        self.lstm = nn.LSTM(
            embedding_dim,
            hidden_dim // 2,
            num_layers=lstm_layers,
            bidirectional=True,
        )
        self.fc = nn.Linear(hidden_dim, fc_dim)
        self.classifier = nn.Linear(fc_dim, output_dim)
        self.relu = nn.ReLU()
        self.hidden = self.init_hidden()

    def init_hidden(self, batch_size=None):
        """Zero-initialised (h0, c0) of shape (2 * lstm_layers, batch_size, hidden_dim // 2)."""
        if batch_size is None:
            batch_size = self.batch_size
        h0 = torch.zeros(2 * self.lstm_layers, batch_size, self.hidden_dim // 2)
        c0 = torch.zeros(2 * self.lstm_layers, batch_size, self.hidden_dim // 2)
        if self.use_gpu:
            h0, c0 = h0.cuda(), c0.cuda()
        return (h0, c0)

    def forward(self, batch_seqs, batch_seq_masks=None, batch_seq_segments=None):
        x = self.embedding(batch_seqs)  # batch x seq_len x emb_size
        x = x.permute(1, 0, 2)          # seq_len x batch x emb_size
        self.hidden = self.init_hidden(batch_seqs.size(0))  # 2*lstm_layers x batch x hidden_dim // 2
        x, _ = self.lstm(x, self.hidden)
        # Pool over the sequence dimension by taking the element-wise maximum.
        x = x.permute(1, 0, 2)          # batch x seq_len x hidden_dim
        x = torch.amax(x, 1)            # batch x hidden_dim
        # Fully connected head.
        x = self.relu(self.fc(x))
        out = self.classifier(x)
        return out
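

# A minimal smoke test sketch, not part of the original training pipeline: it assumes
# inputs are BERT wordpiece id tensors of shape (batch, seq_len); the example sentences
# and the max_length of 32 are illustrative choices.
if __name__ == "__main__":
    model = BiLSTM(output_dim=1)
    encoded = model.tokenizer(
        ["a short example sentence", "another one"],
        padding="max_length",
        max_length=32,
        truncation=True,
        return_tensors="pt",
    )
    input_ids = encoded["input_ids"]
    if torch.cuda.is_available():
        model, input_ids = model.cuda(), input_ids.cuda()
    logits = model(input_ids)  # (batch, output_dim)
    print(logits.shape)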