demo init
- .gitignore +3 -0
- app.py +70 -0
- example_imgs/ch.jpg +0 -0
- example_imgs/example.jpg +0 -0
- example_imgs/img_12.jpg +0 -0
- packages.txt +3 -0
- requirements.txt +1 -0
.gitignore
ADDED
@@ -0,0 +1,3 @@
+__pycache__
+.vscode
+.DS_Store
app.py
ADDED
@@ -0,0 +1,70 @@
+from paddleocr import PaddleOCR
+import json
+from PIL import Image
+import gradio as gr
+import numpy as np
+import cv2
+
+# Pick a random color for each bounding box
+def get_random_color():
+    c = tuple(np.random.randint(0, 256, 3).tolist())
+    return c
+
+# Draw the OCR detection boxes on the image
+def draw_ocr_bbox(image, boxes, colors):
+    print(colors)
+    box_num = len(boxes)
+    for i in range(box_num):
+        box = np.reshape(np.array(boxes[i]), [-1, 1, 2]).astype(np.int64)
+        image = cv2.polylines(np.array(image), [box], True, colors[i], 2)
+    return image
+
+# torch.hub.download_url_to_file('https://i.imgur.com/aqMBT0i.jpg', 'example.jpg')
+
+def inference(img: Image.Image, lang, confidence):
+    ocr = PaddleOCR(use_angle_cls=True, lang=lang, use_gpu=False)
+    # img_path = img.name
+    img2np = np.array(img)
+    result = ocr.ocr(img2np, cls=True)[0]
+    # rgb
+    image = img.convert('RGB')
+    boxes = [line[0] for line in result]
+    txts = [line[1][0] for line in result]
+    scores = [line[1][1] for line in result]
+
+    # Recognition results
+    final_result = [dict(boxes=box, txt=txt, score=score, _c=get_random_color()) for box, txt, score in zip(boxes, txts, scores)]
+    # Filter out results whose score is below the confidence threshold
+    final_result = [item for item in final_result if item['score'] > confidence]
+
+    im_show = draw_ocr_bbox(image, [item['boxes'] for item in final_result], [item['_c'] for item in final_result])
+    im_show = Image.fromarray(im_show)
+    data = [[json.dumps(item['boxes']), round(item['score'], 3), item['txt']] for item in final_result]
+    return im_show, data
+
+title = 'PaddleOCR'
+description = 'Gradio demo for PaddleOCR.'
+
+examples = [
+    ['example_imgs/example.jpg', 'en', 0.5],
+    ['example_imgs/ch.jpg', 'ch', 0.7],
+    ['example_imgs/img_12.jpg', 'en', 0.7],
+]
+
+css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
+
+demo = gr.Interface(
+    inference,
+    [gr.Image(type='pil', label='Input'),
+     gr.Dropdown(choices=['ch', 'en', 'fr', 'german', 'korean', 'japan'], value='ch', label='language'),
+     gr.Slider(0.1, 1, 0.5, step=0.1, label='confidence_threshold')
+     ],
+    # Outputs
+    [gr.Image(type='pil', label='Output'), gr.Dataframe(headers=['bbox', 'score', 'text'], label='Result')],
+    title=title,
+    description=description,
+    examples=examples,
+    css=css,
+)
+demo.queue(max_size=10)
+demo.launch(debug=True, share=True)
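For context (not part of the committed files): `inference` above unpacks the per-image value of `ocr.ocr(...)[0]` as a list of `[box, (text, score)]` entries. A minimal sketch of that assumed structure, with made-up values, showing how the boxes, texts, and scores are pulled apart and filtered:

# Hypothetical per-image result in the shape inference() expects;
# each entry is [box, (text, score)], where box is four [x, y] corner points.
result = [
    [[[24.0, 36.0], [304.0, 34.0], [304.0, 72.0], [24.0, 74.0]], ('Pure Nitro Cold Brew', 0.97)],
    [[[24.0, 80.0], [172.0, 80.0], [172.0, 110.0], [24.0, 112.0]], ('$3.99', 0.62)],
]

boxes = [line[0] for line in result]      # polygon corners for each detection
txts = [line[1][0] for line in result]    # recognized text strings
scores = [line[1][1] for line in result]  # recognition confidences in [0, 1]
# With confidence=0.7 the second entry (score 0.62) would be dropped by the filter.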
example_imgs/ch.jpg
ADDED
example_imgs/example.jpg
ADDED
example_imgs/img_12.jpg
ADDED
packages.txt
ADDED
@@ -0,0 +1,3 @@
+ffmpeg
+libsm6
+libxext6
requirements.txt
ADDED
@@ -0,0 +1 @@
+paddleocr