Zizhuo Meng / DeltaLSTM_RMTPP · Commits

Commit afd471d4, authored Nov 23, 2020 by 蒙律师
revised Main, Model, Ogata
Parent: fdb5d302

Showing 3 changed files with 137 additions and 36 deletions
Main.py            +64  -20
Model.py           +60  -10
Ogata thining.py   +13   -6
Main.py  (view file @ afd471d4)
-from Model import CustomizedLSTM
+from Model2 import CustomizedLSTM
 import matplotlib.pyplot as plt
 import pandas as pd
 import numpy as np
 import torch
 # from import_files import CustomDataset
+from import_RMTPP_dataset import import_Dataset
 # from RNN import NaiveCustomRNN
 from tqdm import tqdm
 from argparse import ArgumentParser
...
@@ -12,19 +12,28 @@ import torch.nn.utils.rnn as rnn_utils
 from torch.utils.data import Dataset, DataLoader
 from torchvision import transforms

 if __name__ == "__main__":
+    coef = 10
+    EPOCHS = 100
+    LR = 1e-2
+    # Data = pd.read_csv('result_B=100.csv', header=None).to_numpy()
     # Data = pd.read_csv('SHP lambda=1, length=34 random seed=0.csv', header=None).to_numpy()
     # X = (Data[:, :-2].T)[np.newaxis, :, :]
     # x_size = X.shape[1]
-    Data = pd.read_csv('result_B=100.csv', header=None).to_numpy()
+    # event = Data[:, -1][np.newaxis, :]-1
+    # time = Data[:, -2][np.newaxis, :]
-    X = (Data[:, :-2].T)[np.newaxis, :, :]
-    x_size = X.shape[1]
+    time, event = import_Dataset('exp')
+    event = event[np.newaxis, :] - 1
+    time = time[np.newaxis, :]
-    event = Data[:, -1][np.newaxis, :]
-    time = Data[:, -2][np.newaxis, :]
     seq_len = time.shape[1]
+    X = np.zeros((1, 5, seq_len))
+    x_size = 5
     hidden_size = 5
...
@@ -32,7 +41,7 @@ if __name__ == "__main__":
     emb_dim = 5
     model = CustomizedLSTM(x_size, hidden_size, event_class, emb_dim)
-    model.set_optimizer(lr=1e-3, weight_decay=1e-3)
+    model.set_optimizer(lr=LR, weight_decay=1e-3)
     train_X = X[:, :, :-1]
     train_event = event[:, :-1]
...
@@ -41,11 +50,41 @@ if __name__ == "__main__":
     epoch = 0
     train_loss_list = []
     test_loss_list = []
-    while epoch < 30:
-        train_loss = model.train_batch_all([train_X, train_event, train_time])
+    model.init_Parameter()
+    while epoch < EPOCHS:
+        print("At epoch: {}".format(epoch))
+        train_loss, tr_log_ft_lst, tr_log_lambda_lst = model.train_batch_all([train_X, train_event, train_time])
         # train_loss = model.train_batch([train_X, train_event, train_time])
-        test_loss, log_f_t, predicted_time_list, event_acc = model.predict_batch([X, event, time])
+        test_loss, log_f_t, predicted_time_list, event_acc, pred_inter_event_time_lst, gt_inter_event_time_lst = model.predict_batch(
+            [X, event, time])
+        '''
+        plt.figure()
+        plt.title('DeltaLSTM lambda vs time index')
+        plt.plot(np.exp(tr_log_lambda_lst))  # label='Training loss')
+        plt.xlabel('Time index')
+        plt.ylabel('lambda')
+        plt.savefig('./lambda/epoch {}, lr {} lambda.png'.format(epoch, LR))
+        plt.figure()
+        plt.title('DeltaLSTM ft vs time index')
+        plt.plot(np.exp(tr_log_ft_lst))  # label='Training loss')
+        plt.xlabel('Time index')
+        plt.ylabel('ft')
+        plt.savefig('./ft/epoch {}, lr {} lambda.png'.format(epoch, LR))
+        '''
+        plt.figure()
+        plt.title('DeltaLSTM inter event time')
+        plt.plot(pred_inter_event_time_lst, label='DeltaLSTM', color='red')
+        plt.plot(gt_inter_event_time_lst, label='Data', color='grey')
+        plt.legend()
+        plt.xlabel('Time index')
+        plt.ylabel('Inter-event Time')
+        plt.savefig('./Inter-event Time/epoch {}, lr {} Inter-event Time.png'.format(epoch, LR))
         epoch += 1
         train_loss_c, test_loss_c = train_loss.clone().detach().numpy(), test_loss.clone().detach().numpy()
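Note that the savefig calls above write into ./Inter-event Time/ (and into ./lambda/ and ./ft/ if the quoted-out block is re-enabled); matplotlib does not create missing directories, so on a fresh checkout a small guard such as the following (a hypothetical addition, not part of this commit) avoids a FileNotFoundError:

 import os
 for d in ('lambda', 'ft', 'Inter-event Time'):
     os.makedirs(d, exist_ok=True)  # ensure the plot output folders exist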
...
@@ -53,15 +92,20 @@ if __name__ == "__main__":
         train_loss_list.append(train_loss_c)
         test_loss_list.append(test_loss_c)
-    import matplotlib.pyplot as plt
-    plt.figure()
-    plt.plot(np.arange(len(train_loss_list)), np.log(train_loss_list), np.log(test_loss_list))
-    plt.show()
+    plt.figure()
+    plt.title('DeltaLSTM_RMTPP')
+    plt.plot(np.arange(len(train_loss_list)), train_loss_list, label='Training loss')
+    plt.plot(np.arange(len(train_loss_list)), test_loss_list, label='Testing loss')
+    plt.legend()
+    plt.xlabel('epoch')
+    plt.ylabel('loss')
+    plt.savefig('epoch:{}, lr:{}.png'.format(EPOCHS, LR))
     print(train_loss_list)
     print(test_loss_list)
     print(predicted_time_list)
     print(event_acc)
     # predicted_time_list = np.array(predicted_time_list)
     # np.savetxt('noting really matters.txt',predicted_time_list)
Model.py  (view file @ afd471d4)
...
@@ -153,7 +153,7 @@ class CustomizedLSTM(nn.Module):
         return h_seq, c_seq

-    def set_optimizer(self, lr=1e-3, weight_decay=1e-5):
+    def set_optimizer(self, lr=1e-3, weight_decay=1e-3):
         # from torch.optim import Adam
         # self.optimizer = Adam(self.parameters(), lr=lr, weight_decay=weight_decay)
...
@@ -174,8 +174,9 @@ class CustomizedLSTM(nn.Module):
             weight.data.uniform_(-stdv, stdv)

+    def init_Parameter(self):
+        stdv = 1.0 / math.sqrt(self.hidden_size)
+        for weight in self.parameters():
+            torch.nn.init.normal_(weight, mean=0, std=1)
+            weight.data.uniform_(-stdv, stdv)
+
     def softplus(self, x):
         return torch.log(1 + torch.exp(x))
...
@@ -207,12 +208,16 @@ class CustomizedLSTM(nn.Module):
+        self.w_t = nn.Parameter(self.softplus(self.w_t))
         S = torch.mm(combine, self.v_t)
         # S = torch.mm(h_j, self.v_t)
         log_lambda = S + self.w_t * (t - t_j)
+        print('The lambda is: {}'.format(torch.exp(log_lambda)))
         log_f_t = S + self.w_t * (t - t_j) + (1 / self.w_t) * (torch.exp(S) - torch.exp(S + self.w_t * (t - t_j)))
         time_loss = (-1) * log_f_t
-        loss = torch.mean(time_loss) + torch.mean(event_loss)
+        loss = torch.mean(time_loss)  # + torch.mean(event_loss)
         return loss, log_f_t
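For reference, the time loss here is the negative log of the closed-form RMTPP-style density that the log_f_t line implements. Writing S = combine · v_t and w = softplus(w_t), with t_j the last event time, this is (my reconstruction from the code above):

\lambda^*(t) = e^{S + w\,(t - t_j)}, \qquad
f^*(t) = \exp\!\Big( S + w\,(t - t_j) + \tfrac{1}{w}\big( e^{S} - e^{S + w\,(t - t_j)} \big) \Big)

so log f*(t) = log λ*(t) − (1/w)(e^{S + w(t − t_j)} − e^{S}), which is exactly the expression assigned to log_f_t.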
...
@@ -243,8 +248,12 @@ class CustomizedLSTM(nn.Module):
             loss, log_f_t = self.my_loss(target_event, target_t, t_j, h_j, c_j)
             loss.backward()
             self.optimizer.step()
             self.optimizer.zero_grad()
...
@@ -287,9 +296,17 @@ class CustomizedLSTM(nn.Module):
             LOSS += loss
+        LOSS /= seq_len
         # print(self.parameters())
         # print("grad before clip:" + str(self.linear.weight.grad))
         LOSS.backward()
         self.optimizer.step()
+        # print('what is the v_t grad?', self.v_t.grad)
+        # print('what is the w_t?', self.w_t)
+        # print('what is the w_t grad?', self.w_t.grad)
+        # for weight in self.parameters():
+        #     print('the weight is {}, and the grad is {}'.format(weight, weight.grad))
         self.optimizer.zero_grad()
         return LOSS
...
@@ -299,6 +316,7 @@ class CustomizedLSTM(nn.Module):
         w_t = self.softplus(self.w_t).detach().item()
         combine = np.concatenate((h_j, c_j), axis=0)[np.newaxis, :]
         S = np.dot(combine, v_t)
         log_f_t = S + w_t * (t - t_j) + 1 / w_t * (np.exp(S) - np.exp(S + w_t * (t - t_j)))
...
@@ -322,16 +340,38 @@ class CustomizedLSTM(nn.Module):
         v_t = self.v_t.clone().detach().numpy()
         w_t = self.softplus(self.w_t.clone()).detach().item()
         combine = np.concatenate((h_j, c_j), axis=0)[np.newaxis, :]
         S = np.dot(combine, v_t)
         log_f_t = S + w_t * (t - t_j) + 1 / w_t * (np.exp(S) - np.exp(S + w_t * (t - t_j)))
         f_t = np.exp(log_f_t)
         return (t * f_t)

+    def next_time(self, t_j, h_j, c_j):
+        umax = 100  # self.umax  # maximum time
+        Deltat = umax / 100
+        dt = torch.linspace(0, umax, 100 + 1)
+        combine = torch.cat((h_j, c_j), axis=0).unsqueeze(0)
+        S = torch.mm(combine, self.v_t)
+        self.w_t = nn.Parameter(self.softplus(self.w_t))
+        log_f_t = S + self.w_t * dt + 1 / self.w_t * (torch.exp(S) - torch.exp(S + self.w_t * dt))
+        f_t = torch.exp(log_f_t)
+        # print('hj+cj: {}, S: {}, v_t: {}'.format(combine, S, self.v_t))
+        df = dt * f_t
+        # normalization factor
+        integrand_ = ((df[1:] + df[:-1]) * 0.5) * Deltat
+        integral_ = torch.sum(integrand_)
+        return t_j + integral_
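next_time therefore estimates the expected next timestamp by integrating u·f*(u) over a fixed grid with the trapezoidal rule. A minimal NumPy sketch of the same computation, assuming scalar S and w (the helper name expected_next_time is illustrative, not from the repo):

 import numpy as np

 def expected_next_time(t_j, S, w, umax=100.0, n=100):
     u = np.linspace(0.0, umax, n + 1)  # candidate inter-event gaps
     # same closed-form log-density as in Model.py, for a scalar S and w
     log_f = S + w * u + (1.0 / w) * (np.exp(S) - np.exp(S + w * u))
     gap = np.trapz(u * np.exp(log_f), u)  # trapezoidal estimate of E[gap]
     return t_j + gap

The estimate is only as good as the grid: it truncates the integral at umax and assumes f*(u) carries essentially all of its mass inside [0, umax].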
     def predict_batch(self, batch):
         '''
...
@@ -369,16 +409,26 @@ class CustomizedLSTM(nn.Module):
         pred_time = []
         from scipy import integrate
         for idx, t_j in enumerate(current_t):
-            t_j = t_j.clone().detach().numpy()
-            h_j = current_h[idx].clone().detach().numpy()
-            c_j = current_c[idx].clone().detach().numpy()
+            t_j = t_j  # .clone().detach().numpy()
+            h_j = current_h[idx]  # .clone().detach().numpy()
+            c_j = current_c[idx]  # .clone().detach().numpy()
+            predicted_time = self.next_time(t_j, h_j, c_j)
+            '''
+            predicted_time, err = integrate.quad(self.f_t, t_j, np.inf,
+                                                 args=(t_j, h_j, c_j))  # equation (13), do the integration
+            print('the current time is, {}, the target time is, {}, the predicted time is {}'
+                  .format(t_j, target_t[idx], predicted_time))
+            '''
             pred_time.append(predicted_time)
...
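The quoted-out quad call kept above for reference evaluates what the in-code comment calls equation (13): the predicted timestamp is the expectation under the model density,

\hat{t}_{j+1} = \int_{t_j}^{\infty} t \, f^*(t) \, dt,

where f_t returns the integrand t·f*(t). next_time replaces this improper scipy integral with the fixed-grid trapezoidal approximation sketched earlier.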
Ogata thining.py  (view file @ afd471d4)
...
@@ -41,13 +41,14 @@ def Generate_lambda(base, s, X_history, Event_history, Event_time, y, alpha_x, A
     return result_lambda

-def Thining(base, max_T, Event_class, X_history, X_time, alpha_x, A, B):
+def Ogata_Thining(base, max_T, Event_class, X_history, X_time, alpha_x, A, B):
     s = 0  # Current Time
     Event_list = []  # Event list
     T_e = []  # Event time list
     while s < max_T:
         T_candidate = []
+        lambda_candidate = []
         [latest_x_t_id] = np.argwhere(X_time <= s)[-1]
         sub_Xhistory = X_history[:latest_x_t_id + 1]
...
@@ -58,14 +59,20 @@ def Thining(base, max_T, Event_class, X_history, X_time, alpha_x, A, B):
             w = -np.log(u) / lambda_y_s
             s_temp = s + w
             D = np.random.uniform(low=0, high=1)
-            if D * lambda_y_s <= Generate_lambda(base, s_temp, sub_Xhistory, Event_list, T_e, y, alpha_x, A, B):
-                T_candidate.append(s_temp)
+            new_lambda = Generate_lambda(base, s_temp, sub_Xhistory, Event_list, T_e, y, alpha_x, A, B)
+            if D * lambda_y_s <= new_lambda:
+                T_candidate.append(s_temp)
+                lambda_candidate.append(new_lambda)
+            else:
+                T_candidate.append(np.inf)
+                lambda_candidate.append(0)
+        if len(T_candidate) == 0:
+            s = s_temp
+            continue
         min_index = np.argmin(T_candidate)
         t_n = np.min(T_candidate)
-        [[y_star]] = np.argwhere(T_candidate == t_n) + 1
         # print('hey this y_star: {}'.format(y_star))
+        lambda_n = lambda_candidate[min_index]
+        y_star = min_index + 1
         flag = 1
         for t_i in X_time:
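For intuition, the acceptance step above is standard Ogata thinning: propose a waiting time from an exponential with the dominating rate, then accept the candidate with probability λ(s)/λ_max. A self-contained single-mark sketch under that assumption (thinning_1d, lambda_fn and lambda_max are illustrative names, not from this file):

 import numpy as np

 def thinning_1d(lambda_fn, lambda_max, max_T, seed=0):
     # Ogata thinning for one intensity bounded above by lambda_max on [0, max_T].
     rng = np.random.default_rng(seed)
     s, events = 0.0, []
     while True:
         s += rng.exponential(1.0 / lambda_max)  # candidate gap from the dominating rate
         if s >= max_T:
             return events
         if rng.uniform() * lambda_max <= lambda_fn(s):  # accept w.p. lambda(s) / lambda_max
             events.append(s)

 # e.g. an inhomogeneous Poisson process with rate 1 + 0.5*sin(t) <= 1.5:
 events = thinning_1d(lambda t: 1.0 + 0.5 * np.sin(t), 1.5, max_T=20.0)

Ogata_Thining generalizes this by proposing one candidate per event class y and keeping the earliest accepted one as the next marked event.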
...
@@ -122,7 +129,7 @@ if __name__ == '__main__':
     X, X_time = Simulating_X(Dim=X_dim, max_T=max_T, scale=inverse_lambda)  # Generate feature, the time to record feature
-    Event_list, T_e = Thining(base=0.1, max_T=max_T, Event_class=Event_class, X_history=X, X_time=X_time,
+    Event_list, T_e = Ogata_Thining(base=0.1, max_T=max_T, Event_class=Event_class, X_history=X, X_time=X_time,
                               alpha_x=alpha_x, A=A, B=B)  # Generate Event, the time that event occurs
...