#!/usr/bin/env python

"""Compute the minimal input size that a conv / conv-transpose
auto-encoder keeps unchanged.

A stack of ``Conv2d(kernel=k, stride=s)`` layers followed by the mirrored
stack of ``ConvTranspose2d`` layers only reproduces the input's spatial
size when that size is exactly representable as ``(n - 1) * s + k`` at
every level. This script computes, for a given target size, the smallest
size >= target with that property, then demonstrates it on a random
tensor.
"""

import math

import torch
from torch import nn

######################################################################


def minimal_input_size(w, layer_specs):
    """Return the smallest size >= ``w`` preserved by the auto-encoder.

    Args:
        w: Desired (minimum) spatial size, must be > 0.
        layer_specs: List of ``(kernel_size, stride)`` pairs, one per
            encoder layer, outermost first. The decoder is assumed to be
            the mirrored ``ConvTranspose2d`` stack.

    Returns:
        int: The minimal input size ``>= w`` that round-trips exactly
        through the encoder/decoder pair.

    Raises:
        ValueError: If ``w`` is not strictly positive (the input is too
            small for the layer stack).
    """
    if w <= 0:
        raise ValueError('The input is too small')
    if not layer_specs:
        return w
    k, s = layer_specs[0]
    # Minimal number of output positions so that the conv covers >= w:
    # conv output size is floor((w - k) / s) + 1, so taking the ceiling
    # yields the smallest count whose reconstruction is >= w.
    n = math.ceil((w - k) / s) + 1
    # Recurse so the inner layers' constraint is satisfied too.
    n = minimal_input_size(n, layer_specs[1:])
    # ConvTranspose2d reconstruction: (n - 1) * s + k.
    return int((n - 1) * s + k)


######################################################################


def main():
    """Build the auto-encoder and check the computed sizes round-trip."""
    layer_specs = [(11, 5), (5, 2), (3, 2), (3, 2)]

    encoder = [nn.Conv2d(1, 1, k, s) for k, s in layer_specs]
    decoder = [nn.ConvTranspose2d(1, 1, k, s) for k, s in reversed(layer_specs)]
    model = nn.Sequential(*encoder, *decoder)

    # Smallest height/width >= 240 x 320 that the model preserves.
    h = minimal_input_size(240, layer_specs)
    w = minimal_input_size(320, layer_specs)

    x = torch.randn(1, 1, h, w)

    # The two printed sizes must be identical.
    print(x.size(), model(x).size())


if __name__ == "__main__":
    main()