yjyoon / Raindrop_Detection
Raindrop_Detection / model / Generator.py
Repository root:

File name           Commit message                       Commit date
data                updating loss function               2023-06-23
model               Added comment                        2023-06-30
tools               working on parser                    2023-06-30
README.md           readme update                        2023-06-21
batchmix.png        theorizing train code for GAN        2023-06-22
datasetmananger.py  theorizing about dataset management  2023-06-23
main.py             Hello YONA                           2023-06-21
train.py            working on parser                    2023-06-30
model directory:

File name         Commit message                                       Commit date
AttentiveRNN.py   integrated loss functions into neural network class  2023-06-28
Autoencoder.py    integrated loss functions into neural network class  2023-06-28
Discriminator.py  Added comment                                        2023-06-30
Generator.py      Added comment                                        2023-06-30
Latest commit: 5304dc8 by juni on 2023-06-30, "Added comment" (UNIX line endings)
import torch
from torch import nn

from AttentiveRNN import AttentiveRNN
from Autoencoder import AutoEncoder


class Generator(nn.Module):
    """Raindrop-removal generator: an attentive RNN produces attention maps
    that highlight raindrop regions, and an autoencoder reconstructs the
    clean image from the attention-weighted input."""

    def __init__(self, repetition, blocks=3, layers=1, input_ch=3, out_ch=32,
                 kernel_size=None, stride=1, padding=1, groups=1, dilation=1):
        super(Generator, self).__init__()
        if kernel_size is None:
            kernel_size = [3, 3]
        self.attentiveRNN = AttentiveRNN(
            repetition, blocks=blocks, layers=layers, input_ch=input_ch,
            out_ch=out_ch, kernel_size=kernel_size, stride=stride,
            padding=padding, groups=groups, dilation=dilation
        )
        self.autoencoder = AutoEncoder()
        self.blocks = blocks
        self.layers = layers
        self.input_ch = input_ch
        self.out_ch = out_ch
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.groups = groups
        self.dilation = dilation
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        attentive_rnn_results = self.attentiveRNN(x)
        # Weight the input by the final attention map before reconstruction.
        x = self.autoencoder(
            attentive_rnn_results['x'] * attentive_rnn_results['attention_map_list'][-1]
        )
        ret = {
            'x': x,
            'attention_maps': attentive_rnn_results['attention_map_list'],
        }
        return ret

    def binary_diff_mask(self, clean, dirty, threshold=0.1):
        # Gamma-correct both images first; remember that sRGB values are not
        # linear in light intensity.
        clean = torch.pow(clean, 0.45)
        dirty = torch.pow(dirty, 0.45)
        # Per-pixel absolute difference, summed over the channel dimension,
        # then thresholded into a binary raindrop mask.
        diff = torch.abs(clean - dirty)
        diff = torch.sum(diff, dim=1)
        bin_diff = (diff > threshold).to(clean.dtype)
        return bin_diff

    def loss(self, clean, dirty, threshold=0.1):
        # Check that the diff mask works as intended: it marks raindrop pixels
        # and serves as the supervision target for the attention maps.
        diff_mask = self.binary_diff_mask(clean, dirty, threshold)
        attentive_rnn_loss = self.attentiveRNN.loss(clean, diff_mask)
        autoencoder_loss = self.autoencoder.loss(clean, dirty)
        ret = {
            "attentive_rnn_loss": attentive_rnn_loss,
            "autoencoder_loss": autoencoder_loss,
        }
        return ret


if __name__ == "__main__":
    from torchinfo import summary

    torch.set_default_tensor_type(torch.FloatTensor)
    generator = Generator(3, blocks=2)
    batch_size = 2
    summary(generator, input_size=(batch_size, 3, 720, 720))
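Note that loss returns the attentive-RNN and autoencoder terms separately rather than as a single scalar, so a training script has to combine them itself. Below is a minimal sketch of a generator-only training step under that interface; the Adam optimizer, learning rate, weighting factor lambda_ae, and the assumption that both dictionary entries are scalar tensors are illustrative choices, not something this repository specifies (a full GAN setup would also train the network in Discriminator.py adversarially).

import torch

from Generator import Generator

# Hypothetical training step: combines the two loss terms returned by
# Generator.loss into one scalar and backpropagates through the generator.
generator = Generator(3, blocks=2)
optimizer = torch.optim.Adam(generator.parameters(), lr=2e-4)  # placeholder choice

def train_step(clean, dirty, lambda_ae=1.0):
    # lambda_ae weights the autoencoder term against the attention term;
    # the value is a placeholder, not taken from the repository.
    optimizer.zero_grad()
    losses = generator.loss(clean, dirty)
    total = losses["attentive_rnn_loss"] + lambda_ae * losses["autoencoder_loss"]
    total.backward()
    optimizer.step()
    return total.item()

# Smoke test with random tensors standing in for a clean/rainy image pair.
clean = torch.rand(2, 3, 720, 720)
dirty = torch.rand(2, 3, 720, 720)
print(train_step(clean, dirty))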