else:
         return str(n)
 
class vignette_logger():
    """Throttled progress logger for batch sample generation.

    Instances are callable as ``logger(n, m)`` where ``n`` is the total
    number of samples to generate and ``m`` is the number generated so
    far. A progress line with an ETA is emitted via ``log_string`` at
    most once every ``delay_min`` seconds.
    """

    def __init__(self, delay_min = 60):
        # t0 is the fixed origin used to estimate the per-sample rate;
        # last_t only throttles how often a line is emitted.
        self.t0 = time.time()
        self.last_t = self.t0
        self.delay_min = delay_min

    def __call__(self, n, m):
        t = time.time()
        # Guard m > 0: on the first call m is 0 and the per-sample rate
        # is undefined (the original code divided by zero here when the
        # first batch alone exceeded delay_min).
        if m > 0 and t > self.last_t + self.delay_min:
            # Average seconds per sample since the start. Using the
            # fixed origin t0 (rather than resetting it at every log
            # line, as the original did) keeps the rate — and hence the
            # ETA — consistent with the cumulative count m.
            dt = (t - self.t0) / m
            log_string('sample_generation {:d} / {:d}'.format(
                m,
                n), ' [ETA ' + time.ctime(time.time() + dt * (n - m)) + ']'
            )
            self.last_t = t
+
 ######################################################################
 
 if args.nb_train_samples%args.batch_size > 0 or args.nb_test_samples%args.batch_size > 0:
 
         train_set = VignetteSet(problem_number,
                                 args.nb_train_samples, args.batch_size,
-                                cuda = torch.cuda.is_available())
+                                cuda = torch.cuda.is_available(),
+                                logger = vignette_logger())
 
         log_string('data_generation {:0.2f} samples / s'.format(
             train_set.nb_samples / (time.time() - t))
 
 
 class VignetteSet:
 
-    def __init__(self, problem_number, nb_samples, batch_size, cuda = False):
+    def __init__(self, problem_number, nb_samples, batch_size, cuda = False, logger = None):
 
         if nb_samples%batch_size > 0:
             print('nb_samples must be a multiple of batch_size')
         self.data = []
         for b in range(0, self.nb_batches):
             self.data.append(generate_one_batch(mp_args[b]))
+            if logger is not None: logger(self.nb_batches * self.batch_size, b * self.batch_size)
 
         # Weird thing going on with the multi-processing, waiting for more info
 
 ######################################################################
 
 class CompressedVignetteSet:
-    def __init__(self, problem_number, nb_samples, batch_size, cuda = False):
+    def __init__(self, problem_number, nb_samples, batch_size, cuda = False, logger = None):
 
         if nb_samples%batch_size > 0:
             print('nb_samples must be a multiple of batch_size')
             acc_sq += input.float().pow(2).sum() /  input.numel()
             self.targets.append(target)
             self.input_storages.append(svrt.compress(input.storage()))
+            if logger is not None: logger(self.nb_batches * self.batch_size, b * self.batch_size)
 
         self.mean = acc / self.nb_batches
         self.std = sqrt(acc_sq / self.nb_batches - self.mean * self.mean)