From d3b65f9b78c5c98098e5f8889924657b285ceb5b Mon Sep 17 00:00:00 2001
From: Tong Guo <779222056@qq.com>
Date: Tue, 29 Oct 2019 11:34:39 +0800
Subject: [PATCH 1/2] Fix Python 3 division for LSTM hidden_size and add CPU
 fallback in loss()

---
 models/desasc_limit_predictor.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/models/desasc_limit_predictor.py b/models/desasc_limit_predictor.py
index 6b140eb..6e57d88 100644
--- a/models/desasc_limit_predictor.py
+++ b/models/desasc_limit_predictor.py
@@ -14,15 +14,15 @@ def __init__(self, N_word, N_h, N_depth, gpu, use_hs):
         self.gpu = gpu
         self.use_hs = use_hs
 
-        self.q_lstm = nn.LSTM(input_size=N_word, hidden_size=N_h/2,
+        self.q_lstm = nn.LSTM(input_size=N_word, hidden_size=int(N_h/2),
                 num_layers=N_depth, batch_first=True,
                 dropout=0.3, bidirectional=True)
 
-        self.hs_lstm = nn.LSTM(input_size=N_word, hidden_size=N_h/2,
+        self.hs_lstm = nn.LSTM(input_size=N_word, hidden_size=int(N_h/2),
                 num_layers=N_depth, batch_first=True,
                 dropout=0.3, bidirectional=True)
 
-        self.col_lstm = nn.LSTM(input_size=N_word, hidden_size=N_h/2,
+        self.col_lstm = nn.LSTM(input_size=N_word, hidden_size=int(N_h/2),
                 num_layers=N_depth, batch_first=True,
                 dropout=0.3, bidirectional=True)
 
@@ -83,7 +83,10 @@ def forward(self, q_emb_var, q_len, hs_emb_var, hs_len, col_emb_var, col_len, co
     def loss(self, score, truth):
         loss = 0
         data = torch.from_numpy(np.array(truth))
-        truth_var = Variable(data.cuda())
+        if self.gpu:
+            truth_var = Variable(data.cuda())
+        else:
+            truth_var = Variable(data)
         loss = self.CE(score, truth_var)
         return loss
 

From f1f95516f5b194a424e2169f45e54c130158030d Mon Sep 17 00:00:00 2001
From: Tong Guo <779222056@qq.com>
Date: Tue, 29 Oct 2019 15:00:10 +0800
Subject: [PATCH 2/2] Set explicit Softmax dim and cast loss labels to long

---
 models/desasc_limit_predictor.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/models/desasc_limit_predictor.py b/models/desasc_limit_predictor.py
index 6e57d88..f159dc9 100644
--- a/models/desasc_limit_predictor.py
+++ b/models/desasc_limit_predictor.py
@@ -34,7 +34,7 @@ def __init__(self, N_word, N_h, N_depth, gpu, use_hs):
         self.dat_out_c = nn.Linear(N_h, N_h)
         self.dat_out = nn.Sequential(nn.Tanh(), nn.Linear(N_h, 4)) #for 4 desc/asc limit/none combinations
 
-        self.softmax = nn.Softmax() #dim=1
+        self.softmax = nn.Softmax(dim=1)
         self.CE = nn.CrossEntropyLoss()
         self.log_softmax = nn.LogSoftmax()
         self.mlsml = nn.MultiLabelSoftMarginLoss()
@@ -83,6 +83,7 @@ def forward(self, q_emb_var, q_len, hs_emb_var, hs_len, col_emb_var, col_len, co
     def loss(self, score, truth):
         loss = 0
         data = torch.from_numpy(np.array(truth))
+        data = data.long()
         if self.gpu:
             truth_var = Variable(data.cuda())
         else:
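
Reviewer note: the standalone sketch below is not part of either patch; it only illustrates why each change is needed. It assumes PyTorch >= 0.4, and the N_h value, tensor shapes, and label values are made up for illustration.

# Minimal sketch of the three fixes (hypothetical values, not the patched module).
import numpy as np
import torch
import torch.nn as nn

N_h = 300  # illustrative hidden size

# In Python 3, "/" is true division, so N_h/2 == 150.0 (a float), which
# nn.LSTM rejects for hidden_size; int(N_h/2) (or N_h // 2) fixes this.
lstm = nn.LSTM(input_size=50, hidden_size=int(N_h / 2),
               num_layers=2, batch_first=True, bidirectional=True)

# An explicit dim=1 makes Softmax normalize over the class axis of a
# (batch, classes) score matrix instead of the deprecated implicit dim.
softmax = nn.Softmax(dim=1)
score = torch.randn(4, 4)  # batch of 4, 4 desc/asc limit/none classes
probs = softmax(score)

# nn.CrossEntropyLoss requires long (int64) class indices; labels arriving
# via numpy as int32 or float would raise a dtype error, hence the cast.
# On CPU the .cuda() call is skipped, matching the gpu flag in the patch.
truth = [0, 3, 1, 2]
data = torch.from_numpy(np.array(truth)).long()
loss = nn.CrossEntropyLoss()(score, data)
print(loss.item())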