diff --git a/README.md b/README.md
index f48daf0..38157f2 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,106 @@
-# cartoongan
+# CartoonGAN
+
+*author: Shiyu*
+
+
+
+
+
+
+
+## Description
+
+Convert an image into a cartoon-style image using [`CartoonGAN`](https://github.com/Yijunmaverick/CartoonGAN-Test-Pytorch-Torch).
+
+
+
+
+
+
+
+
+## Code Example
+
+Load an image from the path './test.png'.
+
+*Write the pipeline in the simplified style:*
+
+```python
+import towhee
+
+towhee.glob('./test.png') \
+      .image_decode() \
+      .img2img_translation.cartoongan(model_name='Hayao') \
+      .show()
+```
+
+
+
+*Write a pipeline with explicit input and output names:*
+
+```python
+import towhee
+
+towhee.glob['path']('./test.png') \
+      .image_decode['path', 'origin']() \
+      .img2img_translation.cartoongan['origin', 'hayao'](model_name='Hayao') \
+      .img2img_translation.cartoongan['origin', 'hosoda'](model_name='Hosoda') \
+      .img2img_translation.cartoongan['origin', 'paprika'](model_name='Paprika') \
+      .img2img_translation.cartoongan['origin', 'shinkai'](model_name='Shinkai') \
+      .select['origin', 'hayao', 'hosoda', 'paprika', 'shinkai']() \
+      .show()
+```
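+
+To consume the result in code instead of displaying it with `show()`, the pipeline can be collected into a plain Python list. A minimal sketch, assuming this towhee version exposes `DataCollection.to_list()`:
+
+```python
+import towhee
+
+# Sketch: collect the stylized image instead of calling show().
+# Assumption: DataCollection.to_list() is available in this towhee version.
+imgs = (
+    towhee.glob('./test.png')
+          .image_decode()
+          .img2img_translation.cartoongan(model_name='Hayao')
+          .to_list()
+)
+
+styled = imgs[0]  # towhee.types.Image, a subclass of numpy.ndarray
+print(type(styled), styled.shape)
+```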
+
+
+
+
+
+
+
+
+
+## Factory Constructor
+
+Create the operator via the following factory method:
+
+***img2img_translation.cartoongan(model_name)***
+
+Model options:
+
+- Hayao
+- Hosoda
+- Paprika
+- Shinkai
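+
+For reference, a minimal construction sketch, assuming the operator can also be instantiated directly through the `towhee.ops` interface:
+
+```python
+import towhee
+
+# Sketch (assumption: hub operators are resolvable via towhee.ops).
+# Each model_name selects one of the four pretrained CartoonGAN styles.
+op = towhee.ops.img2img_translation.cartoongan(model_name='Shinkai')
+```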
+
+
+
+
+
+
+
+## Interface
+
+Takes in a numpy RGB image in channels-first order and transforms it into a cartoon-style image in numpy form.
+
+
+**Parameters:**
+
+***model_name***: *str*
+
+  Which pretrained style model to use for the transfer: 'Hayao', 'Hosoda', 'Paprika', or 'Shinkai'.
+
+***framework***: *str*
+
+  The ML framework to use; currently only PyTorch is supported.
+
+***device***: *str*
+
+  The device on which to run the model, 'cpu' or 'cuda'; defaults to 'cpu'.
+
+
+
+**Returns**: *towhee.types.Image (a subclass of numpy.ndarray)*
+
+  The stylized image.
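+
+A usage sketch for the interface above, assuming the operator instance obtained from `towhee.ops` is directly callable on a decoded image:
+
+```python
+import numpy
+import towhee
+
+# Sketch: decode an image, stylize it on the CPU, and check the result type.
+# Assumptions: towhee.ops resolves hub operators and the returned objects
+# are directly callable.
+decode = towhee.ops.image_decode()
+cartoon = towhee.ops.img2img_translation.cartoongan(model_name='Hayao', device='cpu')
+
+img = decode('./test.png')   # decoded image as a numpy array
+styled = cartoon(img)        # stylized cartoon image
+assert isinstance(styled, numpy.ndarray)
+```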
+
-2
\ No newline at end of file
diff --git a/cartoongan.py b/cartoongan.py
index 7534f5d..625f710 100644
--- a/cartoongan.py
+++ b/cartoongan.py
@@ -36,10 +36,10 @@ class Cartoongan(NNOperator):
transforms.ToTensor()
])
- @arg(1, to_image_color('RGB'))
+ @arg(1, to_image_color('BGR'))
def __call__(self, image):
- img = self.tfms(image).unsqueeze(0)
- styled_image = self.model(img)
+ image = self.tfms(image).unsqueeze(0)
+ styled_image = self.model(image)
styled_image = numpy.transpose(styled_image, (1, 2, 0))
styled_image = PImage.fromarray((styled_image * 255).astype(numpy.uint8))
diff --git a/pytorch/model.py b/pytorch/model.py
index 20e274b..ff94d35 100644
--- a/pytorch/model.py
+++ b/pytorch/model.py
@@ -2,6 +2,7 @@ import os
import torch
import torch.nn as nn
import torch.nn.functional as F
+from torch import Tensor
from pathlib import Path
class Transformer(nn.Module):
@@ -197,4 +198,6 @@ class Model():
# BGR -> RGB
output_image = output_image[[2, 1, 0], :, :]
output_image = output_image.data.cpu().float() * 0.5 + 0.5
+
return output_image.numpy()
+
\ No newline at end of file
diff --git a/results1.png b/results1.png
new file mode 100644
index 0000000..bdb866f
Binary files /dev/null and b/results1.png differ
diff --git a/results2.png b/results2.png
new file mode 100644
index 0000000..782c4bf
Binary files /dev/null and b/results2.png differ
diff --git a/test.png b/test.png
new file mode 100644
index 0000000..9c13ec0
Binary files /dev/null and b/test.png differ