Commit 2fb7ff26 authored by waystogetthere

new Model2

parent 08b35be0
import torch
import torchvision
from torch import optim, nn
import math
@@ -85,67 +85,48 @@ class CustomizedLSTM(nn.Module):
h_last = init_h
c_last = init_c
-AllOnesTensor = torch.ones(batch_size, 1)
-# Embed_E = torch.Tensor([])
+Bias_aim = torch.ones(batch_size, 1) # Column of ones: folds the bias into a single matrix multiplication, [x, 1] @ W
-Embed_E = self.embedding(Event.long())
+Embed_E = self.embedding(Event.long()) # Embed events from shape [batch_size, seq_len] to [batch_size, seq_len, embed_size]
-'''
-for event in Event:
-    # event is the event list for one entity.
-    e_lst = torch.Tensor([])
-    for single_event in event:
-        if single_event != 0:
-            embedding_event = self.embedding(single_event.to(torch.int64))
-        else:
-            embedding_event = torch.zeros(self.embedding.embedding_dim)
-        e_lst = torch.cat((e_lst, embedding_event.unsqueeze(1)), dim=1)
-    Embed_E = torch.cat((Embed_E, e_lst.unsqueeze(0)), dim=0)
-'''
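The surviving batched call replaces this quoted-out per-event loop. A minimal sketch of the equivalence (not from this commit; it assumes the embedding layer is built with padding_idx=0, which reproduces the loop's zero vector for event id 0):

import torch
from torch import nn

emb = nn.Embedding(num_embeddings=10, embedding_dim=4, padding_idx=0)  # padding_idx=0 is an assumption
Event = torch.tensor([[0, 3, 5], [2, 0, 1]])  # [batch_size, seq_len]
batched = emb(Event.long())                   # [batch_size, seq_len, embedding_dim]
looped = torch.stack([torch.stack([emb(e) for e in row]) for row in Event])
assert torch.equal(batched, looped)
assert torch.equal(batched[0, 0], torch.zeros(4))  # event id 0 -> zero vector, as in the removed loop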
for t in range(sequence_len):
e = Embed_E[:, t, :]
x_t = X[:, :, t]
d_time = D_time[:, t].reshape(-1, 1)
d_x = D_x[:, :, t]
-combine = torch.cat((x_t, h_last, AllOnesTensor), dim=1).float()
-x_t_bias = torch.cat((x_t, AllOnesTensor), dim=1).float()
+xhb = torch.cat((x_t, h_last, Bias_aim), dim=1).float()
+xb = torch.cat((x_t, Bias_aim), dim=1).float()
-d_time_bias = torch.cat((d_time, AllOnesTensor), dim=1).float()
-d_x_bias = torch.cat((d_x, AllOnesTensor), dim=1).float()
+dtb = torch.cat((d_time, Bias_aim), dim=1).float()
+dxb = torch.cat((d_x, Bias_aim), dim=1).float()
-Embed_E_bias = torch.cat((e, AllOnesTensor), dim=1).float()
+eb = torch.cat((e, Bias_aim), dim=1).float()
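All of these concatenations use the bias-folding trick named by Bias_aim: appending a column of ones lets a single torch.mm apply both the weight and the bias. A minimal self-contained check (toy shapes, not from this commit):

import torch

batch_size, in_dim, out_dim = 4, 3, 2
x = torch.randn(batch_size, in_dim)
W = torch.randn(in_dim, out_dim)
b = torch.randn(1, out_dim)
ones = torch.ones(batch_size, 1)  # plays the role of Bias_aim
Wb = torch.cat((W, b), dim=0)     # [in_dim + 1, out_dim]: weight rows with the bias as the last row
assert torch.allclose(torch.mm(torch.cat((x, ones), dim=1), Wb), x @ W + b)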
# Forget gate:
-f = torch.sigmoid(torch.mm(combine, self.Wf))
+f = torch.sigmoid(torch.mm(xhb, self.Wf))
# Input gate:
-i = torch.sigmoid(torch.mm(combine, self.Wi))
+i = torch.sigmoid(torch.mm(xhb, self.Wi))
# Time gate:
-T = torch.sigmoid(torch.mm(x_t_bias, self.Wtx) + torch.sigmoid(torch.mm(d_time_bias, self.Wt)))
+T = torch.sigmoid(torch.mm(xb, self.Wtx) + torch.sigmoid(torch.mm(dtb, self.Wt)))
# Event Gate:
-E = torch.sigmoid(torch.mm(x_t_bias, self.Wex) + torch.sigmoid(torch.mm(Embed_E_bias, self.We)))
+E = torch.sigmoid(torch.mm(xb, self.Wex) + torch.sigmoid(torch.mm(eb, self.We)))
# Delta Feature Gate:
self.Wd = nn.Parameter((-1) * torch.relu_((-1) * self.Wd)) # Constraint: Wd <= 0
-D = 2 * torch.sigmoid(torch.mm(x_t_bias, self.Wdx) + torch.sigmoid(torch.mm(d_x_bias, self.Wd)))
+D = 2 * torch.sigmoid(torch.mm(xb, self.Wdx) + torch.sigmoid(torch.mm(dxb, self.Wd)))
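Side note on the constraint two lines up: (-1) * relu((-1) * Wd) is the elementwise min(Wd, 0), a projection onto nonpositive values. A sketch (not from this commit) showing torch.clamp computes the same projection:

import torch

w = torch.randn(5)
assert torch.allclose((-1) * torch.relu((-1) * w), torch.clamp(w, max=0))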
# C helper:
-c_helper = torch.tanh(torch.mm(combine, self.Wc))
+c_helper = torch.tanh(torch.mm(xhb, self.Wc))
# Output Gate:
-allvariable = torch.cat((x_t, h_last, d_time, e, d_x, AllOnesTensor), dim=1).float()
-o = torch.sigmoid(torch.mm(allvariable, self.Wo))
+all_var = torch.cat((x_t, h_last, d_time, e, d_x, Bias_aim), dim=1).float()
+o = torch.sigmoid(torch.mm(all_var, self.Wo))
# C short term memory:
C_s = f * c_last + i * D * c_helper
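Collected, the per-step update in the visible lines of this hunk (σ = sigmoid, ⊙ = elementwise product, [·] = concatenation along the feature axis; the ones column supplies each bias) is:

f_t = σ([x_t, h_{t-1}, 1] W_f)
i_t = σ([x_t, h_{t-1}, 1] W_i)
T_t = σ([x_t, 1] W_tx + σ([Δt_t, 1] W_t))
E_t = σ([x_t, 1] W_ex + σ([e_t, 1] W_e))
D_t = 2 σ([x_t, 1] W_dx + σ([Δx_t, 1] W_d)),  with W_d ≤ 0
c̃_t = tanh([x_t, h_{t-1}, 1] W_c)
o_t = σ([x_t, h_{t-1}, Δt_t, e_t, Δx_t, 1] W_o)
C^s_t = f_t ⊙ c_{t-1} + i_t ⊙ D_t ⊙ c̃_t

T_t, E_t and o_t enter the long-term cell and hidden-state updates in the collapsed lines that follow.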
@@ -231,7 +212,7 @@ class CustomizedLSTM(nn.Module):
# time_loss = (t - t_j) ** 2
# print('time_loss:type', type(time_loss))
-loss = torch.mean(time_loss) # + torch.mean(event_loss)
+loss = torch.mean(time_loss) + torch.mean(event_loss)
# print('loss:type', type(loss))
return loss, log_f_t, log_lambda
@@ -293,9 +274,9 @@ class CustomizedLSTM(nn.Module):
v_t = self.v_t.detach().numpy()
w_t = self.softplus(self.w_t).detach().item()
-combine = np.concatenate((h_j, c_j), axis=0)[np.newaxis, :]
+xhb = np.concatenate((h_j, c_j), axis=0)[np.newaxis, :]
-S = np.dot(combine, v_t)
+S = np.dot(xhb, v_t)
log_f_t = S + w_t * (t - t_j) + 1 / w_t * (np.exp(S) - np.exp(S + w_t * (t - t_j)))
@@ -307,7 +288,7 @@ class CustomizedLSTM(nn.Module):
'''
This function is used to evaluate the definite integral in equation (13).
First we generate f^{*}(t) from equation (12),
-Then return t*f^{*}(t) to scipy.integrate.quad to get the result.
+Then return t*f^{*}(t) to scipy.integrate.quad and get the result.
:param t: t in equation (12)
:param t_j: t_j in equation (12)
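A sketch of the procedure this docstring describes (toy scalar values and a hypothetical helper name, not from this commit): exponentiate the log f^{*}(t) form used above and hand t * f^{*}(t) to scipy.integrate.quad:

import numpy as np
from scipy.integrate import quad

def f_star(t, t_j, S, w_t):
    # density from equation (12): exp of the log f^{*}(t) expression used in this file
    return np.exp(S + w_t * (t - t_j) + 1 / w_t * (np.exp(S) - np.exp(S + w_t * (t - t_j))))

S, w_t, t_j = -1.0, 0.5, 2.0  # toy stand-ins for np.dot(xhb, v_t) and softplus(w_t)
expected_time, abs_err = quad(lambda t: t * f_star(t, t_j, S, w_t), t_j, np.inf)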
@@ -330,16 +311,16 @@ class CustomizedLSTM(nn.Module):
Deltat = umax / 100
dt = torch.linspace(0, umax, 100 + 1)
-combine = torch.cat((h_j, c_j), axis=0).unsqueeze(0)
+xhb = torch.cat((h_j, c_j), axis=0).unsqueeze(0)
-S = torch.mm(combine, self.v_t)
+S = torch.mm(xhb, self.v_t)
self.w_t = nn.Parameter(self.softplus(self.w_t))
log_f_t = S + self.w_t * dt + 1 / self.w_t * (torch.exp(S) - torch.exp(S + self.w_t * dt))
f_t = torch.exp(log_f_t)
-# print('hj+cj: {}, S: {}, v_t: {}'.format(combine, S, self.v_t))
+# print('hj+cj: {}, S: {}, v_t: {}'.format(xhb, S, self.v_t))
df = dt * f_t
# normalization factor
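The dt grid above evaluates the same density pointwise; the df = dt * f_t products plus the normalization factor amount to a numerical expectation over [0, umax]. A compact sketch of that ratio with torch.trapz (toy values, not from this commit):

import torch

umax = 10.0
dt = torch.linspace(0, umax, 100 + 1)
S, w_t = torch.tensor(-1.0), torch.tensor(0.5)  # toy stand-ins for torch.mm(xhb, self.v_t) and softplus(w_t)
f_t = torch.exp(S + w_t * dt + 1 / w_t * (torch.exp(S) - torch.exp(S + w_t * dt)))
expected_dt = torch.trapz(dt * f_t, dt) / torch.trapz(f_t, dt)  # E[dt] restricted to [0, umax]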
@@ -365,10 +346,10 @@ class CustomizedLSTM(nn.Module):
batch_size, feature_size, seq_len = X.shape
-D_time = time - torch.cat((torch.zeros(batch_size, 1), time[:, :-1]), dim=1)
+D_time = time - torch.cat((torch.zeros(batch_size, 1), time[:, :-1]), dim=1) # D_time[:, i] = t_i - t_{i-1}: prepend a zero, then subtract the shifted sequence
D_time[:, 0] = 0 # Time interval is 0 at first event.
-D_X = X - torch.cat((torch.zeros(batch_size, feature_size, 1), X[:, :, :-1]), dim=2)
+D_X = X - torch.cat((torch.zeros(batch_size, feature_size, 1), X[:, :, :-1]), dim=2) # D_X[:, :, i] = x_i - x_{i-1}
D_X[:, :, 0] = 0 # Feature difference is 0 at first event
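A quick check (not from this commit) that the shifted concatenation produces inter-event differences with a zero at the first event:

import torch

time = torch.tensor([[1.0, 3.0, 6.0, 10.0]])
D_time = time - torch.cat((torch.zeros(1, 1), time[:, :-1]), dim=1)
D_time[:, 0] = 0  # no interval before the first event
assert torch.equal(D_time, torch.tensor([[0.0, 2.0, 3.0, 4.0]]))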
h_seq, c_seq = self.forward(X, event, D_time, D_X)
@@ -417,6 +398,6 @@ class CustomizedLSTM(nn.Module):
predicted_time_list.append(pred_time)
LOSS /= seq_len
print('The offset between predicted time and gt time is:', OFFSET)
-print(pred_inter_event_time_lst)
+# print(pred_inter_event_time_lst)
return LOSS, log_f_t, predicted_time_list, event_acc, pred_inter_event_time_lst, gt_inter_event_time_lst