
--- model/discriminator.py
+++ /dev/null
@@ -1,1 +0,0 @@
-from torch import nn
\ No newline at end of file
--- model/generator.py
+++ model/generator.py
@@ -63,7 +63,7 @@
 class ConvLSTM(nn.Module):
     def __init__(self, ch, kernel_size=3):
         super(ConvLSTM, self).__init__()
-        self.padding = (kernel_size-1)/2
+        self.padding = (len(kernel_size)-1)/2
         self.conv_i = nn.Conv2d(in_channels=ch, out_channels=ch, kernel_size=kernel_size, stride=1, padding=1,
                                 bias=False)
         self.conv_f = nn.Conv2d(in_channels=ch, out_channels=ch, kernel_size=kernel_size, stride=1, padding=1,
@@ -181,7 +181,7 @@
         )
         self.generator_blocks = nn.ModuleList()
         for repetition in range(repetition):
-            self.conv_hidden.append(
+            self.generator_blocks.append(
                 self.generator_block
             )

@@ -248,3 +248,9 @@
         fc_out = torch.clamp(fc_out, min=1e-7, max=1 - 1e-7)

         return fc_out, attention_map, fc2
+
+if __name__ == "__main__":
+    from torchinfo import summary
+    generator = Generator(3)
+    batch_size = 10
+    summary(generator, input_size=(batch_size, 3, 1920, 1080))
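For reference, a minimal sketch of exercising the new entry point directly, assuming torchinfo is installed, that Generator takes the channel count as its first argument (as in the summary call above), and that the return fc_out, attention_map, fc2 shown in the last hunk is Generator.forward's return value:

import torch
from model.generator import Generator

# Build the generator with 3 input channels, as in the added __main__ block.
generator = Generator(3)

# A single 1920x1080 RGB frame, mirroring the input_size passed to summary().
# A full-resolution forward pass can be memory-heavy; a smaller spatial size
# works the same way for a quick smoke test.
x = torch.randn(1, 3, 1920, 1080)
fc_out, attention_map, fc2 = generator(x)
print(fc_out.shape, attention_map.shape)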