# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import os
from pathlib import Path
from typing import Optional, Union, Tuple
from types import MethodType

import torch
import logging
import warnings
from torch import nn

from transformers import AutoProcessor, BlipForImageTextRetrieval
from transformers import logging as t_logging
from transformers.models.blip.modeling_blip import BlipOutput, blip_loss

from towhee import register
from towhee.operator.base import NNOperator, OperatorFlag
from towhee.types.arg import arg, to_image_color
try:
    from towhee import accelerate
except ImportError:
    # Fall back to a no-op decorator when towhee's accelerate extension is unavailable.
    def accelerate(func):
        return func

log = logging.getLogger('run_op')
warnings.filterwarnings('ignore')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
t_logging.set_verbosity_error()

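# CLIP-style contrastive forward pass. It is bound onto the HF BLIP ITM model via
# MethodType in `Blip.train`, so the trainer can compute an image-text contrastive loss.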
def _forward(
    self,
    input_ids: Optional[torch.LongTensor] = None,
    pixel_values: Optional[torch.FloatTensor] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    return_loss: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BlipOutput]:

    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    vision_outputs = self.vision_model(
        pixel_values=pixel_values,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )

    text_outputs = self.text_encoder(
        input_ids=input_ids,
        attention_mask=attention_mask,
        position_ids=position_ids,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )

    image_embeds = vision_outputs[1]
    image_embeds = self.vision_proj(image_embeds)

    text_embeds = text_outputs[0]
    text_embeds = self.text_proj(text_embeds[:, 0, :])

    # normalized features
    image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
    text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)

    # cosine similarity as logits
    logit_scale = self.logit_scale.exp()
    logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
    logits_per_image = logits_per_text.t()

    loss = None
    if return_loss:
        loss = blip_loss(logits_per_text)

    if not return_dict:
        output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
        return ((loss,) + output) if loss is not None else output

    return BlipOutput(
        loss=loss,
        logits_per_image=logits_per_image,
        logits_per_text=logits_per_text,
        text_embeds=text_embeds,
        image_embeds=image_embeds,
        text_model_output=text_outputs,
        vision_model_output=vision_outputs,
    )


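# Build a modality-specific wrapper around the pretrained HF BLIP ITM model,
# optionally loading a fine-tuned checkpoint on top of the released weights.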
def create_model(model_name, modality, checkpoint_path, device):
    hf_blip_model = BlipForImageTextRetrieval.from_pretrained(model_name)
    if checkpoint_path:
        try:
            state_dict = torch.load(checkpoint_path, map_location=device)
            hf_blip_model.load_state_dict(state_dict)
        except Exception as e:
            log.error(f'Failed to load state dict from {checkpoint_path}: {e}')
    hf_blip_model.to(device)
    hf_blip_model.eval()

    if modality == 'image':
        blip = BLIPModelVision(hf_blip_model)
    elif modality == 'text':
        blip = BLIPModelText(hf_blip_model)
    else:
        raise ValueError('modality[{}] not implemented.'.format(modality))
    return blip

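# Vision tower wrapper: returns the projected embedding of the image [CLS] token.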
@accelerate
class BLIPModelVision(nn.Module):
    def __init__(self, model):
        super().__init__()
        self.backbone = model

    def forward(self, pixel_values):
        image_embeds = self.backbone.vision_model(pixel_values)[0]
        image_embeds = self.backbone.vision_proj(image_embeds[:, 0, :])
        return image_embeds

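# Text tower wrapper: returns the projected embedding of the text [CLS] token.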
@accelerate
class BLIPModelText(nn.Module):
    def __init__(self, model):
        super().__init__()
        self.backbone = model

    def forward(self, input_ids, attention_mask):
        text_features = self.backbone.text_encoder(input_ids, attention_mask=attention_mask,
                                                   return_dict=False)[0]
        text_features = self.backbone.text_proj(text_features[:, 0, :])
        return text_features

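# Thin device-dispatch wrapper: moves every tensor argument to the target device
# before calling the (possibly accelerated) modality model.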
class Model:
    def __init__(self, model_name, modality, checkpoint_path, device):
        self.model = create_model(model_name, modality, checkpoint_path, device)
        self.device = device

    def __call__(self, *args, **kwargs):
        new_args = [item.to(self.device) for item in args]
        new_kwargs = {k: v.to(self.device) for k, v in kwargs.items()}
        return self.model(*new_args, **new_kwargs)

@register(output_schema=['vec'])
class Blip(NNOperator):
    """
    BLIP multi-modal embedding operator
    """
    def __init__(self, model_name: str, modality: str, device:str = 'cpu', checkpoint_path: str = None):
        super().__init__()
        self.model_name = model_name
        real_name = self._configs()[model_name]['name']
        self.model = Model(real_name, modality, checkpoint_path, device)
        self.modality = modality
        self.device = device
        self.checkpoint_path = checkpoint_path
        self.processor = AutoProcessor.from_pretrained(real_name)

    def __call__(self, data):
        if not isinstance(data, list):
            data = [data]
        results = []
        for single_data in data:
            result = self.inference_single_data(single_data)
            results.append(result)
        if len(data) == 1:
            return results[0]
        return results

    def _inference_from_text(self, text):
        inputs = self.processor(text=text, padding=True, return_tensors='pt')
        text_feature = self.model(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask)[0]
        return text_feature

    @arg(1, to_image_color('RGB'))
    def _inference_from_image(self, img):
        inputs = self.processor(images=img, return_tensors='pt')
        image_feature = self.model(inputs['pixel_values'])
        return image_feature

    def inference_single_data(self, data):
        if self.modality == 'image':
            vec = self._inference_from_image(data)
        elif self.modality == 'text':
            vec = self._inference_from_text(data)
        else:
            raise ValueError("modality[{}] not implemented.".format(self.modality))
        return vec.detach().cpu().numpy().flatten()

    def _configs(self):
        config = {}
        config['blip_itm_base_coco'] = {'name': 'Salesforce/blip-itm-base-coco'}
        config['blip_itm_base_flickr'] = {'name': 'Salesforce/blip-itm-base-flickr'}
        config['blip_itm_large_coco'] = {'name': 'Salesforce/blip-itm-large-coco'}
        config['blip_itm_large_flickr'] = {'name': 'Salesforce/blip-itm-large-flickr'}
        return config

    @property
    def _model(self):
        return self.model.model

    def train(self, **kwargs):
        # Make the local training script importable.
        path = str(Path(__file__).parent)
        sys.path.append(path)
        from train_blip_with_hf_trainer import train_with_hf_trainer
        data_args = kwargs.pop('data_args', None)
        training_args = kwargs.pop('training_args', None)
        model_args = kwargs.pop('model_args', None)
        model_finetune = self._model.backbone
        # Patch in the contrastive forward and a learnable temperature for fine-tuning.
        model_finetune.forward = MethodType(_forward, model_finetune)
        model_finetune.logit_scale = torch.nn.Parameter(torch.ones([]) * model_finetune.config.logit_scale_init_value)

        train_with_hf_trainer(model_finetune, self.processor.tokenizer, data_args, training_args, model_args)

    @property
    def supported_formats(self):
        onnxes = self.supported_model_names(format='onnx')
        if self.model_name in onnxes:
            return ['onnx']
        else:
            return ['pytorch']

    @staticmethod
    def supported_model_names(format: str = None):
        full_list = ['blip_itm_base']
        if format is None or format in ('pytorch', 'torchscript', 'onnx'):
            model_list = full_list
        else:
            log.error(f'Invalid format "{format}". Currently supported formats: "pytorch", "torchscript", "onnx".')
            raise ValueError(f'Invalid format "{format}".')
        return model_list

    def save_model(self, model_type: str = 'pytorch', output_file: str = 'default'):
        from PIL import Image
        from torch.onnx import export as onnx_export

        if output_file == 'default':
            output_file = str(Path(__file__).parent)
            output_file = os.path.join(output_file, 'saved', model_type)
            os.makedirs(output_file, exist_ok=True)
            name = self.model_name.replace('/', '-')
            output_file = os.path.join(output_file, name)
            if model_type in ['pytorch', 'torchscript']:
                output_file = output_file + '.pt'
            elif model_type == 'onnx':
                output_file = output_file + '.onnx'
            else:
                raise AttributeError('Unsupported model_type.')

        if self.modality == 'image':
            sz = self.processor.image_processor.size
            if isinstance(sz, int):
                h = w = sz
            elif isinstance(sz, dict):
                h = sz['height']
                w = sz['width']
            else:
                raise ValueError(f'Unexpected image size config: {sz}')
            dummy_input = Image.new('RGB', (w, h), color='red')
            inputs = self.processor(images=dummy_input, return_tensors='pt')  # a dictionary of tensors
        elif self.modality == 'text':
            dummy_input = 'dummy'
            inputs = self.processor(text=dummy_input, padding=True, return_tensors='pt')
        else:
            raise ValueError('modality[{}] not implemented.'.format(self.modality))

        if model_type == 'pytorch':
            torch.save(self._model, output_file)
        elif model_type == 'torchscript':
            inputs = list(inputs.values())
            try:
                try:
                    jit_model = torch.jit.script(self._model)
                except Exception:
                    jit_model = torch.jit.trace(self._model, inputs, strict=False)
                torch.jit.save(jit_model, output_file)
            except Exception as e:
                log.error(f'Failed to save as torchscript: {e}.')
                raise RuntimeError(f'Failed to save as torchscript: {e}.')
        elif model_type == 'onnx':
            if self.modality == 'image':
                input_names = ['pixel_values']
                output_names = ['image_embeds']
                dynamic_axes = {'pixel_values': {0: 'batch'}, 'image_embeds': {0: 'batch'}}
            elif self.modality == 'text':
                input_names = ['input_ids', 'attention_mask']
                output_names = ['text_embeds']
                dynamic_axes = {'input_ids': {0: 'batch', 1: 'sequence'},
                                'attention_mask': {0: 'batch', 1: 'sequence'},
                                'text_embeds': {0: 'batch'}}
            else:
                raise ValueError('modality[{}] not implemented.'.format(self.modality))

            onnx_export(self._model,
                        (dict(inputs),),
                        f=Path(output_file),
                        input_names=input_names,
                        output_names=output_names,
                        dynamic_axes=dynamic_axes,
                        do_constant_folding=True,
                        opset_version=14,
                        )
        else:
            raise NotImplementedError('Unsupported model_type: {}'.format(model_type))
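

# Minimal smoke test (a sketch, not part of the operator interface): it assumes network
# access to download the 'Salesforce/blip-itm-base-coco' weights and simply embeds one
# caption on CPU when this module is run directly.
if __name__ == '__main__':
    op = Blip(model_name='blip_itm_base_coco', modality='text', device='cpu')
    emb = op('a photo of a dog chasing a ball')
    print(emb.shape)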