From 42831bd654d030b71bca88578d041279018f836c Mon Sep 17 00:00:00 2001
From: =?utf8?q?Fran=C3=A7ois=20Fleuret?=
Date: Tue, 9 Jan 2024 18:49:12 +0100
Subject: [PATCH] Update.

---
 fridge   | 36 ++++++++++++++++++++++++++++++++++++
 mygpt.py | 15 ++++++++++++---
 2 files changed, 48 insertions(+), 3 deletions(-)

diff --git a/fridge b/fridge
index ac7f86c..d28cc89 100644
--- a/fridge
+++ b/fridge
@@ -81,3 +81,39 @@ def insert_flash_back(rec_V, V, rec_K, K, t0, t1, CL, proba):
 
 # insert_flash_back(self.rec_V,V,self.rec_K,K,t0,t1,CL,proba=self.proba_flashback / CL,)
 
+
+######################################################################
+
+2024 Jan 09 14:24:42 (from mygpt.py)
+
+        # This piece of code makes the assumption that there is
+        # nothing informative before t0, otherwise we'd have to
+        # implement a cache for V and K too. This should not be
+        # too much of a problem since this is used only during
+        # train, where full sequence are available
+
+        # n = torch.arange(N, device=X.device)[:, None, None, None]
+        # t = torch.arange(t0, t1, device=X.device)[None, None, :, None]
+        # dv = torch.arange(DV, device=X.device)[None, None, None, :]
+        # dk = torch.arange(DK, device=X.device)[None, None, None, :]
+
+        # u = (
+        #     torch.rand(N, CH, t1 - t0, 1, device=X.device).mul(t).long() // CL
+        # ) * CL
+
+        # src_time = t - u - t0
+        # src_head = torch.randint(H, (N, CH, t1 - t0, 1), device=X.device)
+
+        # mask = (
+        #     torch.rand(N, CH, t1 - t0, DV, device=X.device) <= self.proba_flashback
+        # ).long()
+
+        # self.rec_V[:, :, t0:t1] = (
+        #     mask * V[n, src_head, src_time, dv]
+        #     + (1 - mask) * self.rec_V[:, :, t0:t1]
+        # )
+
+        # self.rec_K[:, :, t0:t1] = (
+        #     mask * K[n, src_head, src_time, dk]
+        #     + (1 - mask) * self.rec_K[:, :, t0:t1]
+        # )
diff --git a/mygpt.py b/mygpt.py
index e7362b7..b885e21 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -481,8 +481,8 @@ class Caterpillar(nn.Module):
         self.caterpillar_height = caterpillar_height
         self.attention_dropout = attention_dropout
 
-        warnings.warn("flash back", RuntimeWarning)
-        self.proba_flashback = 1e-2
+        self.proba_flashback = 0.0
+        self.proba_gate_dropout = 0.0
 
         self.w_G = randw(nb_heads, caterpillar_height, dim_model)
         self.b_G = nn.Parameter(
@@ -551,7 +551,11 @@ class Caterpillar(nn.Module):
             torch.einsum("ntc,hec->nhet", X, self.w_G) + self.b_G[None, :, :, None]
         ).sigmoid()
 
-        # That bas a bad idea
+        if self.training and self.proba_gate_dropout > 0.0:
+            warnings.warn("gate droupout", RuntimeWarning)
+            epsilon = 0.5
+
+        # That was a bad idea
         # G = F.dropout(G, self.attention_dropout, self.training)
 
         V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
@@ -559,6 +563,10 @@ class Caterpillar(nn.Module):
 
         # We prepare the arguments for the parallel scan
 
+        # Clip the gating
+        warnings.warn("gating clipping", RuntimeWarning)
+        G = G / G.sum(1, keepdim=True).clamp(min=1)
+
         A = 1 - G.sum(1)
         gated_V = torch.einsum("nhet,nhtd->netd", G, V)
         gated_K = torch.einsum("nhet,nhtd->netd", G, K)
@@ -585,6 +593,7 @@ class Caterpillar(nn.Module):
         self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
 
         if self.training and self.proba_flashback > 0.0:
+            warnings.warn("flash back", RuntimeWarning)
            # This piece of code makes the assumption that there is
            # nothing informative before t0, otherwise we'd have to
            # implement a cache for V and K too. This should not be
-- 
2.39.5
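
Note (not part of the patch): the sketch below illustrates in isolation what the new "Clip the gating" hunk in mygpt.py computes. It is a minimal toy example under assumptions, not the author's code; the shape names N (batch), H (heads), E (caterpillar height) and T (time steps) are read off the einsum string "ntc,hec->nhet" in the surrounding code, and the concrete sizes are arbitrary.

    import torch

    # Hypothetical dimensions: batch, heads, caterpillar height, time steps.
    N, H, E, T = 2, 4, 3, 7

    # Stand-in for the sigmoid gates G computed in Caterpillar.forward().
    G = torch.rand(N, H, E, T)

    # "Clip the gating": rescale the gates so that their sum over the head
    # dimension (dim 1) never exceeds 1.  Sums already <= 1 are left unchanged
    # because of clamp(min=1); sums > 1 are scaled down to exactly 1.
    G = G / G.sum(1, keepdim=True).clamp(min=1)

    # This keeps the coefficient A of the subsequent parallel scan in [0, 1]
    # (small tolerance only for floating-point rounding).
    A = 1 - G.sum(1)
    assert (A >= -1e-6).all() and (A <= 1 + 1e-6).all()

One possible reading of this choice is that it keeps the recurrence update a convex-combination-like mix of the previous recurrent state and the gated contribution; the patch itself only labels the step "gating clipping".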