app.py
ADDED
@@ -0,0 +1,83 @@
import gradio as gr
import numpy as np
from huggingface_hub import hf_hub_url, cached_download
import PIL.Image  # import the submodule explicitly so PIL.Image.fromarray below always resolves
import onnx
import onnxruntime

# Download the ToonClip ONNX model from the Hugging Face Hub and verify it is well formed
config_file_url = hf_hub_url("Jacopo/ToonClip", filename="model.onnx")
model_file = cached_download(config_file_url)

onnx_model = onnx.load(model_file)
onnx.checker.check_model(onnx_model)

# Create the ONNX Runtime inference session
opts = onnxruntime.SessionOptions()
opts.intra_op_num_threads = 16
ort_session = onnxruntime.InferenceSession(model_file, sess_options=opts)

input_name = ort_session.get_inputs()[0].name
output_name = ort_session.get_outputs()[0].name

def normalize(x, mean=(0., 0., 0.), std=(1.0, 1.0, 1.0)):
    # x = (x - mean) / std, applied per channel
    x = np.asarray(x, dtype=np.float32)
    if len(x.shape) == 4:
        for dim in range(3):
            x[:, dim, :, :] = (x[:, dim, :, :] - mean[dim]) / std[dim]
    if len(x.shape) == 3:
        for dim in range(3):
            x[dim, :, :] = (x[dim, :, :] - mean[dim]) / std[dim]
    return x

def denormalize(x, mean=(0., 0., 0.), std=(1.0, 1.0, 1.0)):
    # x = (x * std) + mean, applied per channel
    x = np.asarray(x, dtype=np.float32)
    if len(x.shape) == 4:
        for dim in range(3):
            x[:, dim, :, :] = (x[:, dim, :, :] * std[dim]) + mean[dim]
    if len(x.shape) == 3:
        for dim in range(3):
            x[dim, :, :] = (x[dim, :, :] * std[dim]) + mean[dim]
    return x

def nogan(input_img):
    # Preprocess: HWC uint8 image -> NCHW float32 in [0, 1], then normalize with ImageNet statistics
    i = np.asarray(input_img)
    i = i.astype("float32")
    i = np.transpose(i, (2, 0, 1))
    i = np.expand_dims(i, 0)
    i = i / 255.0
    i = normalize(i, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225))

    # Run the ONNX model and take the first output for the first batch element
    ort_outs = ort_session.run([output_name], {input_name: i})
    output = ort_outs[0][0]

    # Postprocess: denormalize, rescale to [0, 255], back to HWC uint8, wrap as a PIL image
    output = denormalize(output, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    output = output * 255.0
    output = output.astype('uint8')
    output = np.transpose(output, (1, 2, 0))
    output_image = PIL.Image.fromarray(output, 'RGB')

    return output_image

title = "Zoom, Clip, Toon"
description = """Image to Toon Using AI"""
article = """
<p style='text-align: center'>The "ToonClip" model was trained by <a href='https://twitter.com/JacopoMangia' target='_blank'>Jacopo Mangiavacchi</a> and is available in the <a href='https://github.com/jacopomangiavacchi/ComicsHeroMobileUNet' target='_blank'>GitHub repo ComicsHeroMobileUNet</a></p>
<br>
"""

examples = [['1m_hires.jpeg'], ['2m_hires.jpeg'], ['3m_hires.jpeg'], ['1f_hires.jpeg'], ['2f_hires.jpeg'], ['3f_hires.jpeg']]

# Build the Gradio UI around the inference function (legacy gr.inputs/gr.outputs component API)
iface = gr.Interface(
    nogan,
    gr.inputs.Image(type="pil", shape=(1024, 1024)),
    gr.outputs.Image(type="pil"),
    title=title,
    description=description,
    article=article,
    examples=examples)

iface.launch()