Update app.py

app.py CHANGED
@@ -1,52 +1,13 @@
 import gradio as gr
-import cv2
-import numpy as np
-
-# Load the Haar cascade for frontal face detection
-face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
-
-def process_grayscale(image):
-    image = np.array(image)
-    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
-    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-    return cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
-
-def process_canny(image, threshold1, threshold2):
-    image = np.array(image)
-    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
-    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-    edges = cv2.Canny(gray, threshold1, threshold2)
-    return cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
-
-def process_blur(image, kernel_size):
-    image = np.array(image)
-    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
-    kernel_size = int(kernel_size) | 1  # Ensure odd number
-    blurred = cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)
-    return cv2.cvtColor(blurred, cv2.COLOR_BGR2RGB)
-
-def process_face_detection(image):
-    image = np.array(image)
-    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
-    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
-    output = image.copy()
-    for (x, y, w, h) in faces:
-        cv2.rectangle(output, (x, y), (x+w, y+h), (0, 255, 0), 2)
-    return cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
-
-def process_color_space(image, color_space):
-    image = np.array(image)
-    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
-    if color_space == "HSV":
-        output = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
-    elif color_space == "LAB":
-        output = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
-    else:
-        output = image  # Fallback to original
-    return cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
-
-# Custom CSS with Tailwind via CDN
 custom_css = """
 <link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/tailwind.min.css" rel="stylesheet">
 <style>
@@ -65,60 +26,254 @@ custom_css = """
 
 # Gradio interface
 with gr.Blocks(css=custom_css) as demo:
-    gr.HTML("<h1 class='text-center'>OpenCV …
-    gr.Markdown("…
-
-    image_input = gr.Image(label="Upload Image", type="pil")
 
     with gr.Tabs():
-        …  (removed lines below are truncated or collapsed in the diff view)
         with gr.Row():
             with gr.Column():
-                gr.Markdown("…
-                …
             with gr.Column():
-                …
-                …
 
-        …
         with gr.Row():
             with gr.Column():
-                gr.Markdown("…
-                …
-                …
             with gr.Column():
-                …
-                …
 
-        …
         with gr.Row():
             with gr.Column():
-                gr.Markdown("…
-                …
             with gr.Column():
-                …
-                …
 
         with gr.TabItem("Face Detection", elem_classes="tab-button"):
             with gr.Row():
                 with gr.Column():
                     gr.Markdown("Detect faces using Haar Cascade.", elem_classes=["input-label"])
-                    …
                 with gr.Column():
-                    face_output = gr.Image(label="Faces …
-                    face_button.click(fn=…
 
-        …
         with gr.Row():
             with gr.Column():
-                gr.Markdown("…
-                …
-                …
             with gr.Column():
-                …
-                …
 
 if __name__ == "__main__":
     demo.launch()
 import gradio as gr
+from cv_functions.functions import (
+    image_video_io, color_space_conversion, resize_crop, geometric_transform,
+    thresholding, edge_detection, image_filtering, contour_detection,
+    feature_detection, object_detection, face_detection, image_segmentation,
+    optical_flow, camera_calibration, stereo_vision, background_subtraction,
+    image_stitching, kmeans_clustering, deep_learning, drawing_text
+)
 
+# Custom CSS with Tailwind
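
Note: cv_functions/functions.py itself is not part of this commit, so the handler bodies are not shown. From the wiring below (every image input uses type="pil" and most outputs are gr.Image components), each handler presumably accepts a PIL image plus the control values and returns an RGB array. A minimal sketch of what color_space_conversion could look like under that assumption, mirroring the removed process_color_space:

import cv2
import numpy as np

def color_space_conversion(image, color_space):
    # Sketch only; the real cv_functions implementation may differ.
    bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    if color_space == "HSV":
        out = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
    elif color_space == "LAB":
        out = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
    else:
        out = bgr  # "RGB" leaves the image unchanged
    return cv2.cvtColor(out, cv2.COLOR_BGR2RGB)  # gr.Image expects RGB
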
 custom_css = """
 <link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/tailwind.min.css" rel="stylesheet">
 <style>
 
 # Gradio interface
 with gr.Blocks(css=custom_css) as demo:
+    gr.HTML("<h1 class='text-center'>OpenCV Comprehensive Demo</h1>")
+    gr.Markdown("Explore all OpenCV features by uploading images or videos and selecting a tab below.", elem_classes=["markdown-style"])
 
     with gr.Tabs():
+        # 1. Image and Video I/O
+        with gr.TabItem("Image/Video I/O", elem_classes="tab-button"):
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown("Upload an image or video to display.", elem_classes=["input-label"])
+                    io_image = gr.Image(label="Upload Image", type="pil")
+                    io_video = gr.Video(label="Upload Video")
+                    io_button = gr.Button("Display", elem_classes="btn-primary")
+                with gr.Column():
+                    io_output = gr.Gallery(label="Output")
+                    io_button.click(fn=image_video_io, inputs=[io_image, io_video], outputs=io_output)
+
+        # 2. Color Space Conversion
+        with gr.TabItem("Color Space Conversion", elem_classes="tab-button"):
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown("Convert between RGB, HSV, and LAB color spaces.", elem_classes=["input-label"])
+                    cs_image = gr.Image(label="Upload Image", type="pil")
+                    cs_space = gr.Dropdown(choices=["RGB", "HSV", "LAB"], label="Color Space", value="RGB")
+                    cs_button = gr.Button("Apply Conversion", elem_classes="btn-primary")
+                with gr.Column():
+                    cs_output = gr.Image(label="Converted Image")
+                    cs_button.click(fn=color_space_conversion, inputs=[cs_image, cs_space], outputs=cs_output)
+
+        # 3. Image Resizing and Cropping
+        with gr.TabItem("Resizing and Cropping", elem_classes="tab-button"):
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown("Resize or crop the image.", elem_classes=["input-label"])
+                    rc_image = gr.Image(label="Upload Image", type="pil")
+                    rc_scale = gr.Slider(0.1, 2.0, value=1.0, step=0.1, label="Scale Factor")
+                    rc_crop_x = gr.Slider(0, 1, value=0, step=0.1, label="Crop X (relative)")
+                    rc_crop_y = gr.Slider(0, 1, value=0, step=0.1, label="Crop Y (relative)")
+                    rc_crop_w = gr.Slider(0, 1, value=0.5, step=0.1, label="Crop Width (relative)")
+                    rc_crop_h = gr.Slider(0, 1, value=0.5, step=0.1, label="Crop Height (relative)")
+                    rc_button = gr.Button("Apply", elem_classes="btn-primary")
+                with gr.Column():
+                    rc_output = gr.Gallery(label="Resized and Cropped Images")
+                    rc_button.click(fn=resize_crop, inputs=[rc_image, rc_scale, rc_crop_x, rc_crop_y, rc_crop_w, rc_crop_h], outputs=rc_output)
+
+        # 4. Geometric Transformations
+        with gr.TabItem("Geometric Transformations", elem_classes="tab-button"):
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown("Apply rotation and translation.", elem_classes=["input-label"])
+                    gt_image = gr.Image(label="Upload Image", type="pil")
+                    gt_angle = gr.Slider(-180, 180, value=0, step=1, label="Rotation Angle (degrees)")
+                    gt_tx = gr.Slider(-100, 100, value=0, step=1, label="Translation X (pixels)")
+                    gt_ty = gr.Slider(-100, 100, value=0, step=1, label="Translation Y (pixels)")
+                    gt_button = gr.Button("Apply", elem_classes="btn-primary")
+                with gr.Column():
+                    gt_output = gr.Image(label="Transformed Image")
+                    gt_button.click(fn=geometric_transform, inputs=[gt_image, gt_angle, gt_tx, gt_ty], outputs=gt_output)
+
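
The rotation-plus-translation handler is likewise not in the diff. A sketch of what geometric_transform could do with those slider values, assuming the same PIL-in/RGB-out contract:

import cv2
import numpy as np

def geometric_transform(image, angle, tx, ty):
    # Hypothetical sketch of the assumed handler.
    img = np.array(image)  # PIL -> RGB array
    h, w = img.shape[:2]
    # Rotate about the image center, then shift by (tx, ty).
    M = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1.0)
    M[0, 2] += tx
    M[1, 2] += ty
    return cv2.warpAffine(img, M, (w, h))
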
+        # 5. Image Thresholding
+        with gr.TabItem("Thresholding", elem_classes="tab-button"):
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown("Apply global or adaptive thresholding.", elem_classes=["input-label"])
+                    thresh_image = gr.Image(label="Upload Image", type="pil")
+                    thresh_type = gr.Dropdown(choices=["Global", "Adaptive"], label="Threshold Type", value="Global")
+                    thresh_value = gr.Slider(0, 255, value=127, step=1, label="Threshold Value")
+                    thresh_block = gr.Slider(3, 21, value=11, step=2, label="Block Size (Adaptive)")
+                    thresh_C = gr.Slider(-10, 10, value=2, step=1, label="Constant (Adaptive)")
+                    thresh_button = gr.Button("Apply", elem_classes="btn-primary")
+                with gr.Column():
+                    thresh_output = gr.Image(label="Thresholded Image")
+                    thresh_button.click(fn=thresholding, inputs=[thresh_image, thresh_type, thresh_value, thresh_block, thresh_C], outputs=thresh_output)
+
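
The inputs=[...] list pins down the argument order for thresholding. A sketch of a handler honoring it, assuming the global branch uses the value slider and the adaptive branch uses block size and constant:

import cv2
import numpy as np

def thresholding(image, thresh_type, value, block_size, C):
    # Hypothetical sketch; argument order follows the inputs=[...] wiring above.
    gray = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
    if thresh_type == "Adaptive":
        # block_size stays odd because the slider steps by 2 from 3.
        out = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                    cv2.THRESH_BINARY, int(block_size), C)
    else:
        _, out = cv2.threshold(gray, int(value), 255, cv2.THRESH_BINARY)
    return cv2.cvtColor(out, cv2.COLOR_GRAY2RGB)
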
+        # 6. Edge Detection
+        with gr.TabItem("Edge Detection", elem_classes="tab-button"):
             with gr.Row():
                 with gr.Column():
+                    gr.Markdown("Detect edges using Canny, Sobel, or Laplacian.", elem_classes=["input-label"])
+                    edge_image = gr.Image(label="Upload Image", type="pil")
+                    edge_type = gr.Dropdown(choices=["Canny", "Sobel", "Laplacian"], label="Edge Type", value="Canny")
+                    edge_t1 = gr.Slider(0, 500, value=100, step=10, label="Canny Threshold 1")
+                    edge_t2 = gr.Slider(0, 500, value=200, step=10, label="Canny Threshold 2")
+                    edge_button = gr.Button("Apply", elem_classes="btn-primary")
                 with gr.Column():
+                    edge_output = gr.Image(label="Edges")
+                    edge_button.click(fn=edge_detection, inputs=[edge_image, edge_type, edge_t1, edge_t2], outputs=edge_output)
 
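
A sketch of edge_detection consistent with that dropdown, again assuming PIL in and RGB out. The Sobel branch combining x and y gradients is an assumption, not code from the commit:

import cv2
import numpy as np

def edge_detection(image, edge_type, t1, t2):
    # Hypothetical sketch of the assumed handler.
    gray = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
    if edge_type == "Sobel":
        gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
        gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
        edges = cv2.convertScaleAbs(cv2.magnitude(gx, gy))
    elif edge_type == "Laplacian":
        edges = cv2.convertScaleAbs(cv2.Laplacian(gray, cv2.CV_64F))
    else:
        edges = cv2.Canny(gray, t1, t2)  # the Canny thresholds come from the sliders
    return cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
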
+        # 7. Image Filtering
+        with gr.TabItem("Image Filtering", elem_classes="tab-button"):
             with gr.Row():
                 with gr.Column():
+                    gr.Markdown("Apply Gaussian or median blur.", elem_classes=["input-label"])
+                    filter_image = gr.Image(label="Upload Image", type="pil")
+                    filter_type = gr.Dropdown(choices=["Gaussian", "Median"], label="Filter Type", value="Gaussian")
+                    filter_kernel = gr.Slider(3, 21, value=5, step=2, label="Kernel Size")
+                    filter_button = gr.Button("Apply", elem_classes="btn-primary")
                 with gr.Column():
+                    filter_output = gr.Image(label="Filtered Image")
+                    filter_button.click(fn=image_filtering, inputs=[filter_image, filter_type, filter_kernel], outputs=filter_output)
 
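
The removed process_blur forced an odd kernel with int(kernel_size) | 1; here the slider (step 2, starting at 3) already guarantees oddness. A sketch of what image_filtering plausibly does:

import cv2
import numpy as np

def image_filtering(image, filter_type, kernel_size):
    # Hypothetical sketch; both OpenCV blurs require an odd kernel size.
    img = np.array(image)
    k = int(kernel_size) | 1  # belt-and-braces oddness, as in the removed code
    if filter_type == "Median":
        return cv2.medianBlur(img, k)
    return cv2.GaussianBlur(img, (k, k), 0)
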
+        # 8. Contour Detection
+        with gr.TabItem("Contour Detection", elem_classes="tab-button"):
             with gr.Row():
                 with gr.Column():
+                    gr.Markdown("Detect and draw contours.", elem_classes=["input-label"])
+                    contour_image = gr.Image(label="Upload Image", type="pil")
+                    contour_button = gr.Button("Apply", elem_classes="btn-primary")
                 with gr.Column():
+                    contour_output = gr.Image(label="Contours")
+                    contour_button.click(fn=contour_detection, inputs=contour_image, outputs=contour_output)
 
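
contour_detection takes no parameters besides the image, so it presumably binarizes internally. A sketch; the Otsu threshold choice is an assumption:

import cv2
import numpy as np

def contour_detection(image):
    # Hypothetical sketch: Otsu binarization, then draw all external contours.
    img = np.array(image)
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # OpenCV 4.x returns (contours, hierarchy).
    contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    out = img.copy()
    cv2.drawContours(out, contours, -1, (0, 255, 0), 2)
    return out
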
+        # 9. Feature Detection
+        with gr.TabItem("Feature Detection", elem_classes="tab-button"):
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown("Detect ORB keypoints.", elem_classes=["input-label"])
+                    feat_image = gr.Image(label="Upload Image", type="pil")
+                    feat_button = gr.Button("Apply", elem_classes="btn-primary")
+                with gr.Column():
+                    feat_output = gr.Image(label="Keypoints")
+                    feat_button.click(fn=feature_detection, inputs=feat_image, outputs=feat_output)
+
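
ORB keypoint detection needs only a handful of calls. A sketch of the assumed feature_detection:

import cv2
import numpy as np

def feature_detection(image):
    # Hypothetical sketch: detect ORB keypoints and overlay them.
    img = np.array(image)
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    orb = cv2.ORB_create(nfeatures=500)
    keypoints = orb.detect(gray, None)
    return cv2.drawKeypoints(img, keypoints, None, color=(0, 255, 0))
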
+        # 10. Object Detection
+        with gr.TabItem("Object Detection", elem_classes="tab-button"):
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown("Detect cars using Haar Cascade.", elem_classes=["input-label"])
+                    obj_image = gr.Image(label="Upload Image", type="pil")
+                    obj_button = gr.Button("Apply", elem_classes="btn-primary")
+                with gr.Column():
+                    obj_output = gr.Image(label="Detected Objects")
+                    obj_button.click(fn=object_detection, inputs=obj_image, outputs=obj_output)
+
+        # 11. Face Detection
         with gr.TabItem("Face Detection", elem_classes="tab-button"):
             with gr.Row():
                 with gr.Column():
                     gr.Markdown("Detect faces using Haar Cascade.", elem_classes=["input-label"])
+                    face_image = gr.Image(label="Upload Image", type="pil")
+                    face_button = gr.Button("Apply", elem_classes="btn-primary")
                 with gr.Column():
+                    face_output = gr.Image(label="Detected Faces")
+                    face_button.click(fn=face_detection, inputs=face_image, outputs=face_output)
 
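
Of all the handlers, face_detection is the best grounded: the removed process_face_detection above shows exactly this logic, so the new module very likely carries it over in something close to this form:

import cv2
import numpy as np

face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

def face_detection(image):
    # Mirrors the removed process_face_detection; the cv_functions version may differ.
    img = np.array(image)
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
    out = img.copy()
    for (x, y, w, h) in faces:
        cv2.rectangle(out, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return out
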
+        # 12. Image Segmentation
+        with gr.TabItem("Image Segmentation", elem_classes="tab-button"):
             with gr.Row():
                 with gr.Column():
+                    gr.Markdown("Apply GrabCut segmentation.", elem_classes=["input-label"])
+                    seg_image = gr.Image(label="Upload Image", type="pil")
+                    seg_button = gr.Button("Apply", elem_classes="btn-primary")
+                with gr.Column():
+                    seg_output = gr.Image(label="Segmented Image")
+                    seg_button.click(fn=image_segmentation, inputs=seg_image, outputs=seg_output)
+
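
With no user rectangle in the UI, a GrabCut handler has to seed itself. A sketch assuming it initializes from a centered rectangle:

import cv2
import numpy as np

def image_segmentation(image):
    # Hypothetical sketch: GrabCut seeded with a centered rectangle.
    img = np.array(image)
    h, w = img.shape[:2]
    rect = (w // 8, h // 8, 3 * w // 4, 3 * h // 4)  # assumed seed region
    mask = np.zeros((h, w), np.uint8)
    bgd_model = np.zeros((1, 65), np.float64)
    fgd_model = np.zeros((1, 65), np.float64)
    cv2.grabCut(img, mask, rect, bgd_model, fgd_model, 5, cv2.GC_INIT_WITH_RECT)
    # Zero out pixels marked background or probable background.
    fg = np.where((mask == cv2.GC_BGD) | (mask == cv2.GC_PR_BGD), 0, 1).astype(np.uint8)
    return img * fg[:, :, None]
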
+        # 13. Motion Analysis
+        with gr.TabItem("Motion Analysis", elem_classes="tab-button"):
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown("Compute optical flow for video.", elem_classes=["input-label"])
+                    motion_video = gr.Video(label="Upload Video")
+                    motion_button = gr.Button("Apply", elem_classes="btn-primary")
+                with gr.Column():
+                    motion_output = gr.Image(label="Optical Flow")
+                    motion_button.click(fn=optical_flow, inputs=motion_video, outputs=motion_output)
+
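
Since the output is a single gr.Image, optical_flow presumably computes dense flow between two frames and renders it as one picture. A sketch using Farneback flow on the first two frames; gr.Video hands the handler a file path:

import cv2
import numpy as np

def optical_flow(video_path):
    # Hypothetical sketch: dense Farneback flow between the first two frames,
    # visualized with the usual HSV encoding (hue = direction, value = speed).
    cap = cv2.VideoCapture(video_path)
    ok1, frame1 = cap.read()
    ok2, frame2 = cap.read()
    cap.release()
    if not (ok1 and ok2):
        return None
    prev = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    curr = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    flow = cv2.calcOpticalFlowFarneback(prev, curr, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv = np.zeros_like(frame1)
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 1] = 255
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
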
+        # 14. Camera Calibration
+        with gr.TabItem("Camera Calibration", elem_classes="tab-button"):
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown("Detect checkerboard for calibration (upload checkerboard image).", elem_classes=["input-label"])
+                    calib_image = gr.Image(label="Upload Image", type="pil")
+                    calib_button = gr.Button("Apply", elem_classes="btn-primary")
+                with gr.Column():
+                    calib_output = gr.Image(label="Calibration Result")
+                    calib_button.click(fn=camera_calibration, inputs=calib_image, outputs=calib_output)
+
+        # 15. Stereo Vision
+        with gr.TabItem("Stereo Vision", elem_classes="tab-button"):
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown("Compute disparity map (simplified).", elem_classes=["input-label"])
+                    stereo_image = gr.Image(label="Upload Image", type="pil")
+                    stereo_button = gr.Button("Apply", elem_classes="btn-primary")
+                with gr.Column():
+                    stereo_output = gr.Image(label="Disparity Map")
+                    stereo_button.click(fn=stereo_vision, inputs=stereo_image, outputs=stereo_output)
+
+        # 16. Background Subtraction
+        with gr.TabItem("Background Subtraction", elem_classes="tab-button"):
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown("Apply MOG2 for moving object detection.", elem_classes=["input-label"])
+                    bg_video = gr.Video(label="Upload Video")
+                    bg_button = gr.Button("Apply", elem_classes="btn-primary")
+                with gr.Column():
+                    bg_output = gr.Image(label="Foreground Mask")
+                    bg_button.click(fn=background_subtraction, inputs=bg_video, outputs=bg_output)
+
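
MOG2 is stateful (it learns the background over successive frames), so the handler presumably runs the subtractor over the clip and returns one mask. A sketch:

import cv2

def background_subtraction(video_path):
    # Hypothetical sketch: feed every frame to MOG2, return the final mask.
    subtractor = cv2.createBackgroundSubtractorMOG2()
    cap = cv2.VideoCapture(video_path)
    mask = None
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        mask = subtractor.apply(frame)
    cap.release()
    if mask is None:
        return None
    return cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)
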
+        # 17. Image Stitching
+        with gr.TabItem("Image Stitching", elem_classes="tab-button"):
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown("Stitch two images using ORB features.", elem_classes=["input-label"])
+                    stitch_image1 = gr.Image(label="Upload First Image", type="pil")
+                    stitch_image2 = gr.Image(label="Upload Second Image", type="pil")
+                    stitch_button = gr.Button("Apply", elem_classes="btn-primary")
+                with gr.Column():
+                    stitch_output = gr.Image(label="Stitched Image")
+                    stitch_button.click(fn=image_stitching, inputs=[stitch_image1, stitch_image2], outputs=stitch_output)
+
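
The tab text commits to ORB features, which suggests the classic match, homography, warp pipeline rather than cv2.Stitcher. A sketch under that assumption:

import cv2
import numpy as np

def image_stitching(image1, image2):
    # Hypothetical sketch: ORB matches -> RANSAC homography -> warp img2 onto img1.
    img1, img2 = np.array(image1), np.array(image2)
    orb = cv2.ORB_create(nfeatures=2000)
    kp1, des1 = orb.detectAndCompute(cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY), None)
    kp2, des2 = orb.detectAndCompute(cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY), None)
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(matcher.match(des1, des2), key=lambda m: m.distance)[:100]
    src = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    H, _ = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    canvas = cv2.warpPerspective(img2, H, (w1 + w2, max(h1, h2)))
    canvas[:h1, :w1] = img1  # lay the first image over the warped second
    return canvas
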
+        # 18. Machine Learning (K-Means)
+        with gr.TabItem("K-Means Clustering", elem_classes="tab-button"):
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown("Apply k-means clustering for color quantization.", elem_classes=["input-label"])
+                    kmeans_image = gr.Image(label="Upload Image", type="pil")
+                    kmeans_k = gr.Slider(2, 16, value=8, step=1, label="Number of Clusters (K)")
+                    kmeans_button = gr.Button("Apply", elem_classes="btn-primary")
+                with gr.Column():
+                    kmeans_output = gr.Image(label="Clustered Image")
+                    kmeans_button.click(fn=kmeans_clustering, inputs=[kmeans_image, kmeans_k], outputs=kmeans_output)
+
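
Color quantization with cv2.kmeans is a textbook routine: cluster the pixels in RGB space, then repaint each pixel with its cluster center. A sketch of the assumed handler:

import cv2
import numpy as np

def kmeans_clustering(image, k):
    # Hypothetical sketch: quantize colors to k cluster centers.
    img = np.array(image)
    samples = img.reshape(-1, 3).astype(np.float32)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    _, labels, centers = cv2.kmeans(samples, int(k), None, criteria, 10,
                                    cv2.KMEANS_RANDOM_CENTERS)
    centers = centers.astype(np.uint8)
    return centers[labels.flatten()].reshape(img.shape)
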
+        # 19. Deep Learning
+        with gr.TabItem("Deep Learning", elem_classes="tab-button"):
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown("Detect objects using MobileNet SSD (upload prototxt and caffemodel files).", elem_classes=["input-label"])
+                    dl_image = gr.Image(label="Upload Image", type="pil")
+                    dl_prototxt = gr.File(label="Upload Prototxt File")
+                    dl_model = gr.File(label="Upload Caffemodel File")
+                    dl_button = gr.Button("Apply", elem_classes="btn-primary")
+                with gr.Column():
+                    dl_output = gr.Image(label="Detected Objects")
+                    dl_button.click(fn=deep_learning, inputs=[dl_image, dl_prototxt, dl_model], outputs=dl_output)
+
+        # 20. Drawing and Text
+        with gr.TabItem("Drawing and Text", elem_classes="tab-button"):
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown("Draw shapes and add text to the image.", elem_classes=["input-label"])
+                    draw_image = gr.Image(label="Upload Image", type="pil")
+                    draw_shape = gr.Dropdown(choices=["Rectangle", "Circle"], label="Shape", value="Rectangle")
+                    draw_text = gr.Textbox(label="Text to Add", value="OpenCV")
+                    draw_button = gr.Button("Apply", elem_classes="btn-primary")
                 with gr.Column():
+                    draw_output = gr.Image(label="Annotated Image")
+                    draw_button.click(fn=drawing_text, inputs=[draw_image, draw_shape, draw_text], outputs=draw_output)
 
 if __name__ == "__main__":
     demo.launch()
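
Closing the tour, drawing_text needs only the OpenCV drawing primitives. A sketch with assumed placement (centered shape, caption in the top-left corner):

import cv2
import numpy as np

def drawing_text(image, shape, text):
    # Hypothetical sketch: draw one shape plus a caption.
    img = np.array(image).copy()
    h, w = img.shape[:2]
    if shape == "Circle":
        cv2.circle(img, (w // 2, h // 2), min(w, h) // 4, (0, 255, 0), 3)
    else:
        cv2.rectangle(img, (w // 4, h // 4), (3 * w // 4, 3 * h // 4), (0, 255, 0), 3)
    cv2.putText(img, text, (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 2)
    return img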