         while result == None or max(result.values()) > 100:
             l = length
             if l > 5 and randomize_length:
-                l = 5 + torch.randint(l-5, (1,)).item()
+                l = 5 + torch.randint(l - 5, (1,)).item()
             p, v = generate_program(nb_variables, l)
             v = ", ".join(['"' + v + '": ' + v for v in v])
             ldict = {}
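
As a side note on the hunk above: torch.randint(n, (1,)) draws uniformly from 0..n-1, so with randomize_length set and length > 5 the effective program length is uniform over 5..length-1. A minimal, self-contained sketch (not part of the diff, with toy values):

import torch

length, randomize_length = 12, True
l = length
if l > 5 and randomize_length:
    # torch.randint(l - 5, (1,)) is uniform over {0, ..., l - 6},
    # so l ends up uniform over {5, ..., length - 1}
    l = 5 + torch.randint(l - 5, (1,)).item()
assert 5 <= l < length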
 
         self.device = device
 
         train_sequences = expr.generate_sequences(
-            nb_train_samples, nb_variables=nb_variables, length=2*sequence_length, randomize_length=True,
+            nb_train_samples,
+            nb_variables=nb_variables,
+            length=2 * sequence_length,
+            randomize_length=True,
         )
         test_sequences = expr.generate_sequences(
-            nb_test_samples, nb_variables=nb_variables, length=sequence_length,
+            nb_test_samples,
+            nb_variables=nb_variables,
+            length=sequence_length,
         )
         self.char2id = dict(
             [
             input.split(self.batch_size), dynamic_ncols=True, desc=desc
         ):
             if split == "train":
-                last=(batch!=self.filler).max(0).values.nonzero().max()+1
-                batch=batch[:,:last]
+                last = (batch != self.filler).max(0).values.nonzero().max() + 1
+                batch = batch[:, :last]
             yield batch
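
For the train-time trimming above, a small sketch (not from the diff, using a toy filler id in place of self.filler): (batch != filler).max(0).values flags the columns where at least one sequence still holds a non-filler token, and cutting at the last such column + 1 drops the trailing all-filler padding shared by the whole batch.

import torch

filler = 0  # stand-in for self.filler, the padding token id
batch = torch.tensor([[3, 1, 0, 0],
                      [2, 5, 4, 0]])
# per-column flag: does any row hold a non-filler token here?
last = (batch != filler).max(0).values.nonzero().max() + 1  # -> 3
batch = batch[:, :last]
assert batch.size(1) == 3  # the all-filler trailing column is gone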
 
     def vocabulary_size(self):
             )
             correct = (1 - ar_mask) * self.space + ar_mask * input
             for n in range(result.size(0)):
-                comment="GOOD" if (result[n]-input[n]).abs().max()==0 else ""
+                comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
                 log_string(f"test_after  {self.seq2str(result[n])} {comment}")
                 log_string(f"correct     {self.seq2str(correct[n])}")
             ##############################################################
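
Finally, the GOOD flag above is just an exact-match test: the maximum absolute difference over all token positions is zero only when the regenerated sequence equals the reference everywhere. A tiny sketch (not part of the diff, with toy tensors):

import torch

result_n = torch.tensor([4, 7, 9, 2])
input_n = torch.tensor([4, 7, 9, 2])
comment = "GOOD" if (result_n - input_n).abs().max() == 0 else ""
assert comment == "GOOD"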