if __name__ == "__main__":
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-    mazes, paths = create_maze_data(8)
+    mazes, paths, policies = create_maze_data(8)
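+    # create_maze_data now also returns policies; they are not used in
+    # this quick check, so only the mazes and paths are moved to the device.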
     mazes, paths = mazes.to(device), paths.to(device)
-    save_image("test.png", mazes, paths, paths)
+    save_image("test.png", mazes=mazes, target_paths=paths, predicted_paths=paths)
     print(path_correctness(mazes, paths))
 
 ######################################################################
 
     def slice(self):
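+        # Return the visible window: columns [first, first + nb) of x.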
         return self.x[:, self.first : self.first + self.nb]
 
+    def complete(self):
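+        # True when the window covers the whole of x, i.e. slice()
+        # would return the full underlying tensor.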
+        return self.first == 0 and self.nb == self.x.size(1)
+
 
 ######################################################################
 
         def randw(*d):
             return nn.Parameter(torch.randn(*d) / math.sqrt(d[-1]))
 
-        assert causal, "TODO: Switch off the cache when non-causal!!!"
         self.causal = causal
         self.attention_dropout = attention_dropout
 
     def forward(self, bs_q):
         x_q = bs_q.x
 
+        assert (
+            self.causal or bs_q.complete()
+        ), "Partial evaluation is only possible for causal models"
+
         if bs_q.first == 0:
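+            # A slice starting at position 0 begins a new sequence, so the
+            # key cache is reallocated to cover the full underlying tensor.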
             self.cache_k = x_q.new_zeros(
                 x_q.size(0), self.w_k.size(0), x_q.size(1), self.w_k.size(1)