import torch
from torch import nn
from torch.nn import functional as F


def conv3x3(in_ch, out_ch, stride=1, padding=1, groups=1, dilation=1):
    """3x3 convolution with padding."""
    return nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=stride, padding=padding, groups=groups, dilation=dilation)


def conv1x1(in_ch, out_ch, stride=1):
    """1x1 convolution."""
    return nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=stride)


class ResNetBlock(nn.Module):
    def __init__(self, blocks=3, layers=1, input_ch=3, out_ch=32, kernel_size=None, stride=1, padding=1, groups=1,
                 dilation=1):
        """
        :type kernel_size: iterator or int
        """
        super(ResNetBlock, self).__init__()
        if kernel_size is None:
            kernel_size = [3, 3]
        self.conv1 = nn.Conv2d(
            input_ch, out_ch,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            dilation=dilation
        )
        # Each hidden layer is its own Conv2d + ReLU pair, so weights are not
        # shared between layers.
        self.conv_hidden = nn.ModuleList()
        for block in range(blocks):
            for layer in range(layers):
                self.conv_hidden.append(
                    nn.Sequential(
                        nn.Conv2d(
                            out_ch, out_ch,
                            kernel_size=kernel_size,
                            stride=stride,
                            padding=padding,
                            groups=groups,
                            dilation=dilation
                        ),
                        nn.ReLU()
                    )
                )
        self.relu = nn.ReLU()
        self.blocks = blocks
        self.layers = layers

    def forward(self, x):
        x = self.conv1(x)
        # Take the shortcut after conv1 so the residual addition happens on
        # matching (out_ch) channel counts.
        shortcut = x
        for i, hidden_layer in enumerate(self.conv_hidden):
            x = hidden_layer(x)
            if i % self.layers == 0 and i != 0:
                x = self.relu(x)
                x = x + shortcut
                shortcut = x
        return x
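

# Illustrative smoke test (not part of the original pipeline): a minimal
# sketch showing ResNetBlock's expected shapes under an assumed 64x64 input.
def _demo_resnet_block():
    block = ResNetBlock(blocks=3, layers=1, input_ch=3, out_ch=32)
    x = torch.randn(1, 3, 64, 64)
    out = block(x)  # the default stride/padding preserve the spatial size
    assert out.shape == (1, 32, 64, 64)
    return out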


class ConvLSTM(nn.Module):
    def __init__(self, ch, kernel_size=3):
        super(ConvLSTM, self).__init__()
        # Same-padding for odd kernel sizes; supports an int or a pair.
        if isinstance(kernel_size, (tuple, list)):
            self.padding = tuple((k - 1) // 2 for k in kernel_size)
        else:
            self.padding = (kernel_size - 1) // 2
        self.conv_i = nn.Conv2d(in_channels=ch, out_channels=ch, kernel_size=kernel_size, stride=1,
                                padding=self.padding, bias=False)
        self.conv_f = nn.Conv2d(in_channels=ch, out_channels=ch, kernel_size=kernel_size, stride=1,
                                padding=self.padding, bias=False)
        self.conv_c = nn.Conv2d(in_channels=ch, out_channels=ch, kernel_size=kernel_size, stride=1,
                                padding=self.padding, bias=False)
        self.conv_o = nn.Conv2d(in_channels=ch, out_channels=ch, kernel_size=kernel_size, stride=1,
                                padding=self.padding, bias=False)
        self.conv_attention_map = nn.Conv2d(in_channels=ch, out_channels=1, kernel_size=kernel_size, stride=1,
                                            padding=self.padding, bias=False)
        self.ch = ch

    def init_hidden(self, batch_size, image_size, init=0.5):
        height, width = image_size
        return torch.ones(batch_size, self.ch, height, width).to(self.conv_i.weight.device) * init

    def forward(self, input_tensor, input_cell_state=None):
        if input_cell_state is None:
            batch_size, _, height, width = input_tensor.size()
            input_cell_state = self.init_hidden(batch_size, (height, width))

        # Gates are computed from the current input only (a simplified
        # ConvLSTM with no recurrent hidden-state term in the gates).
        sigmoid_i = torch.sigmoid(self.conv_i(input_tensor))  # input gate
        sigmoid_f = torch.sigmoid(self.conv_f(input_tensor))  # forget gate

        # New cell state: forget-gated previous state plus input-gated candidate.
        cell_state = sigmoid_f * input_cell_state + sigmoid_i * torch.tanh(self.conv_c(input_tensor))

        sigmoid_o = torch.sigmoid(self.conv_o(input_tensor))  # output gate
        lstm_feats = sigmoid_o * torch.tanh(cell_state)

        attention_map = self.conv_attention_map(lstm_feats)
        attention_map = torch.sigmoid(attention_map)

        return attention_map, cell_state, lstm_feats
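

# Illustrative sketch of chaining ConvLSTM steps (names and sizes assumed):
# the cell state returned by one call is fed back into the next, which is
# exactly how GeneratorBlock threads state through Generator's recurrence.
def _demo_conv_lstm():
    lstm = ConvLSTM(ch=32, kernel_size=3)
    feats = torch.randn(2, 32, 64, 64)
    attention_map, cell_state, _ = lstm(feats)               # state auto-initialised to 0.5
    attention_map, cell_state, _ = lstm(feats, cell_state)   # second step reuses the state
    assert attention_map.shape == (2, 1, 64, 64)
    return attention_map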


class GeneratorBlock(nn.Module):
    def __init__(self, blocks=3, layers=1, input_ch=3, out_ch=32, kernel_size=None, stride=1, padding=1, groups=1,
                 dilation=1):
        """
        :type kernel_size: iterator or int
        """
        super(GeneratorBlock, self).__init__()
        if kernel_size is None:
            kernel_size = [3, 3]
        self.blocks = blocks
        self.layers = layers
        self.input_ch = input_ch
        self.out_ch = out_ch
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.groups = groups
        self.dilation = dilation
        self.sigmoid = nn.Sigmoid()
        self.resnet = ResNetBlock(
            blocks=self.blocks,
            layers=self.layers,
            input_ch=self.input_ch,
            out_ch=self.out_ch,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            groups=self.groups,
            dilation=self.dilation
        )
        # ConvLSTM's forward takes (input, cell_state), so it cannot be
        # wrapped in nn.Sequential, whose forward accepts a single argument.
        self.LSTM = ConvLSTM(
            ch=out_ch, kernel_size=kernel_size
        )

    def forward(self, original_image, prev_cell_state=None):
        x = self.resnet(original_image)
        attention_map, cell_state, lstm_feats = self.LSTM(x, prev_cell_state)
        # Re-weight the input image by the predicted attention map.
        x = attention_map * original_image
        return x, attention_map, cell_state, lstm_feats
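

# Illustrative sketch of a single GeneratorBlock pass (sizes assumed): the
# returned image is the input re-weighted by a 1-channel attention map, so
# its shape matches the input's.
def _demo_generator_block():
    block = GeneratorBlock(blocks=3, layers=1, input_ch=3, out_ch=32)
    image = torch.randn(1, 3, 64, 64)
    x, attention_map, cell_state, lstm_feats = block(image)
    assert x.shape == image.shape and attention_map.shape == (1, 1, 64, 64)
    return x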


class Generator(nn.Module):
    def __init__(self, repetition, blocks=3, layers=1, input_ch=3, out_ch=32, kernel_size=None, stride=1, padding=1,
                 groups=1, dilation=1):
        """
        :type kernel_size: iterator or int
        """
        super(Generator, self).__init__()
        if kernel_size is None:
            kernel_size = [3, 3]
        self.blocks = blocks
        self.layers = layers
        self.input_ch = input_ch
        self.out_ch = out_ch
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.groups = groups
        self.dilation = dilation
        self.repetition = repetition
        # One GeneratorBlock per recurrent step, each with its own parameters;
        # the ConvLSTM cell state carries information between the steps.
        self.generator_blocks = nn.ModuleList([
            GeneratorBlock(blocks=blocks,
                           layers=layers,
                           input_ch=input_ch,
                           out_ch=out_ch,
                           kernel_size=kernel_size,
                           stride=stride,
                           padding=padding,
                           groups=groups,
                           dilation=dilation)
            for _ in range(repetition)
        ])

    def forward(self, x):
        cell_state = None
        attention_maps = []
        for generator_block in self.generator_blocks:
            x, attention_map, cell_state, lstm_feats = generator_block(x, cell_state)
            # Keep every step's map so the loss can supervise all of them.
            attention_maps.append(attention_map)
        return x, attention_maps
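

# Illustrative end-to-end sketch of the Generator (sizes assumed): four
# recurrent steps, returning the final re-weighted image plus one attention
# map per step for the loss defined below.
def _demo_generator():
    generator = Generator(repetition=4, blocks=3, layers=1, input_ch=3, out_ch=32)
    image = torch.randn(1, 3, 64, 64)
    x, attention_maps = generator(image)
    assert len(attention_maps) == 4 and x.shape == image.shape
    return attention_maps
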

class AttentiveRNNLoss(nn.Module):
    """Attention loss over the recurrent steps: the MSE between each step's
    attention map and the rain-mask label, weighted so that later (more
    refined) maps contribute more."""

    def __init__(self, theta=0.8):
        super(AttentiveRNNLoss, self).__init__()
        self.theta = theta
        self.mse = nn.MSELoss()

    def forward(self, attention_maps, label_tensor):
        loss = 0.0
        n = len(attention_maps)
        for index, attention_map in enumerate(attention_maps):
            # index is 0-based, so the final map gets weight theta ** 0 = 1.
            loss = loss + (self.theta ** (n - index - 1)) * self.mse(attention_map, label_tensor)
        return loss, attention_maps[-1]
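

# Illustrative sketch wiring Generator and AttentiveRNNLoss together; the
# rain mask here is random and purely a stand-in for a real binary label.
def _demo_attentive_rnn_loss():
    generator = Generator(repetition=4)
    image = torch.randn(1, 3, 64, 64)
    mask = (torch.rand(1, 1, 64, 64) > 0.5).float()  # stand-in rain mask
    _, attention_maps = generator(image)
    loss, final_map = AttentiveRNNLoss(theta=0.8)(attention_maps, mask)
    return loss
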

class DiscriminativeNet(nn.Module):
    def __init__(self, W, H):
        super(DiscriminativeNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=5, stride=1, padding=2)
        self.conv2 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=5, stride=1, padding=2)
        self.conv3 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2)
        self.conv4 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2)
        self.conv5 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=1, padding=2)
        self.conv6 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=5, stride=1, padding=2)
        self.conv_map = nn.Conv2d(in_channels=128, out_channels=1, kernel_size=5, stride=1, padding=2, bias=False)
        self.conv7 = nn.Conv2d(in_channels=128, out_channels=64, kernel_size=5, stride=4, padding=2)
        self.conv8 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=5, stride=4, padding=2)
        self.conv9 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=5, stride=4, padding=2)
        # Three stride-4 convolutions downsample by 64 in each dimension, so
        # the flattened feature size is 32 * (W // 64) * (H // 64); W and H
        # are assumed to be multiples of 64.
        self.fc1 = nn.Linear(32 * (W // 64) * (H // 64), 1024)
        self.fc2 = nn.Linear(1024, 1)

    def forward(self, x):
        x1 = F.leaky_relu(self.conv1(x))
        x2 = F.leaky_relu(self.conv2(x1))
        x3 = F.leaky_relu(self.conv3(x2))
        x4 = F.leaky_relu(self.conv4(x3))
        x5 = F.leaky_relu(self.conv5(x4))
        x6 = F.leaky_relu(self.conv6(x5))
        # Single-channel attention map, broadcast over the 128 feature channels.
        attention_map = self.conv_map(x6)
        x7 = F.leaky_relu(self.conv7(attention_map * x6))
        x8 = F.leaky_relu(self.conv8(x7))
        x9 = F.leaky_relu(self.conv9(x8))
        x9 = x9.view(x9.size(0), -1)  # flatten for the fully connected head
        # A nonlinearity between the linear layers keeps them from collapsing
        # into a single affine map.
        fc1 = F.leaky_relu(self.fc1(x9))
        fc2 = self.fc2(fc1)
        fc_out = torch.sigmoid(fc2)

        # Ensure fc_out is not exactly 0 or 1 for stability of log operation in loss
        fc_out = torch.clamp(fc_out, min=1e-7, max=1 - 1e-7)

        return fc_out, attention_map, fc2
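

# Illustrative smoke test for DiscriminativeNet (sizes assumed): W and H must
# be multiples of 64 for the fully connected head to line up.
def _demo_discriminator():
    disc = DiscriminativeNet(W=64, H=64)
    image = torch.randn(1, 3, 64, 64)
    fc_out, attention_map, logits = disc(image)
    assert fc_out.shape == (1, 1) and attention_map.shape == (1, 1, 64, 64)
    return fc_out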
