@@ -214,9 +214,9 @@ def forward(self, e, r, er_e2, direction="tail"):
         emb_hr_r = self.rel_embeddings(r)  # [m, k]
 
         if direction == "tail":
-            ere2_sigmoid = self.g(torch.dropout(self.f1(emb_hr_e, emb_hr_r), p=self.hidden_dropout, train=True), self.ent_embeddings.weight)
+            ere2_sigmoid = ProjE_pointwise.g(torch.dropout(self.f1(emb_hr_e, emb_hr_r), p=self.hidden_dropout, train=True), self.ent_embeddings.weight)
         else:
-            ere2_sigmoid = self.g(torch.dropout(self.f2(emb_hr_e, emb_hr_r), p=self.hidden_dropout, train=True), self.ent_embeddings.weight)
+            ere2_sigmoid = ProjE_pointwise.g(torch.dropout(self.f2(emb_hr_e, emb_hr_r), p=self.hidden_dropout, train=True), self.ent_embeddings.weight)
 
         ere2_loss_left = -torch.sum((torch.log(torch.clamp(ere2_sigmoid, 1e-10, 1.0)) * torch.max(torch.FloatTensor([0]).to(self.device), er_e2)))
         ere2_loss_right = -torch.sum((torch.log(torch.clamp(1 - ere2_sigmoid, 1e-10, 1.0)) * torch.max(torch.FloatTensor([0]).to(self.device), torch.neg(er_e2))))
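Side note on the two loss lines above: together they implement the pointwise ProjE objective, a clamped binary cross-entropy over the candidate-entity scores. A minimal standalone sketch of the same computation (the tensor values and the +1/-1 labelling of er_e2 are illustrative assumptions, not taken from this commit):

import torch

# Illustrative scores and labels: assume er_e2 holds +1 for observed
# tails, -1 for sampled negatives, and 0 for entities to ignore.
ere2_sigmoid = torch.tensor([0.9, 0.2, 0.6])
er_e2 = torch.tensor([1.0, -1.0, 0.0])
zero = torch.zeros(1)

# Positive term: -log(p) wherever er_e2 > 0; the clamp guards against log(0).
loss_left = -torch.sum(torch.log(torch.clamp(ere2_sigmoid, 1e-10, 1.0)) * torch.max(zero, er_e2))
# Negative term: -log(1 - p) wherever er_e2 < 0.
loss_right = -torch.sum(torch.log(torch.clamp(1 - ere2_sigmoid, 1e-10, 1.0)) * torch.max(zero, torch.neg(er_e2)))

loss = loss_left + loss_right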
@@ -243,21 +243,11 @@ def f2(self, t, r):
         """
         return torch.tanh(t * self.De2.weight + r * self.Dr2.weight + self.bc2.weight)
 
-    def g(self, f, w):
-        """Defines activation layer.
-
-        Args:
-            f (Tensor): output of the forward layers.
-            w (Tensor): Matrix for multiplication.
-        """
-        # [b, k] [k, tot_ent]
-        return torch.sigmoid(torch.matmul(f, w.T))
-
     def predict_tail_rank(self, h, r, topk=-1):
         emb_h = self.ent_embeddings(h)  # [1, k]
         emb_r = self.rel_embeddings(r)  # [1, k]
 
-        hrt_sigmoid = -self.g(self.f1(emb_h, emb_r), self.ent_embeddings.weight)
+        hrt_sigmoid = -ProjE_pointwise.g(self.f1(emb_h, emb_r), self.ent_embeddings.weight)
         _, rank = torch.topk(hrt_sigmoid, k=topk)
 
         return rank
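The self.g -> ProjE_pointwise.g change in these hunks anticipates the final hunk below, where g becomes a @staticmethod: it reads no instance state, only its two tensor arguments. A minimal sketch of the resulting call pattern (class body reduced to the one relevant method; shapes follow the docstring comments):

import torch

class ProjE_pointwise:
    @staticmethod
    def g(f, w):
        # f: [b, k] hidden states; w: [tot_ent, k] embedding table.
        # Returns a [b, tot_ent] matrix of per-entity sigmoid scores.
        return torch.sigmoid(torch.matmul(f, w.T))

f = torch.randn(2, 8)                # combined (e, r) representations
w = torch.randn(5, 8)                # 5 entity embeddings of size 8
scores = ProjE_pointwise.g(f, w)     # [2, 5]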
@@ -266,11 +256,21 @@ def predict_head_rank(self, t, r, topk=-1):
         emb_t = self.ent_embeddings(t)  # [m, k]
         emb_r = self.rel_embeddings(r)  # [m, k]
 
-        hrt_sigmoid = -self.g(self.f2(emb_t, emb_r), self.ent_embeddings.weight)
+        hrt_sigmoid = -ProjE_pointwise.g(self.f2(emb_t, emb_r), self.ent_embeddings.weight)
         _, rank = torch.topk(hrt_sigmoid, k=topk)
 
         return rank
 
+    @staticmethod
+    def g(f, w):
+        """Defines activation layer.
+
+        Args:
+            f (Tensor): output of the forward layers.
+            w (Tensor): Matrix for multiplication.
+        """
+        # [b, k] [k, tot_ent]
+        return torch.sigmoid(torch.matmul(f, w.T))
 
 class TuckER(ProjectionModel):
     """