import gradio as gr

from cv_functions.functions import (
    image_video_io, color_space_conversion, resize_crop, geometric_transform,
    thresholding, edge_detection, image_filtering, contour_detection,
    feature_detection, object_detection, face_detection, image_segmentation,
    optical_flow, camera_calibration, stereo_vision, background_subtraction,
    image_stitching, kmeans_clustering, deep_learning, drawing_text
)
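
# NOTE: the cv_functions handlers are not shown in this file. Each is assumed
# to take the values of the input components listed in its .click() call, in
# order, and to return a displayable image (PIL or NumPy array), or a list of
# images where the output is a gr.Gallery. Hedged sketches of a few
# representative handlers appear inside their tabs below; they illustrate the
# assumed behavior and are not the actual implementations.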
# Custom CSS. gr.Blocks(css=...) expects raw CSS (it is injected into a <style>
# tag), so <link>/<style> wrappers would be injected literally and Tailwind's
# @apply directive only works with a build step, not the CDN stylesheet. The
# intended Tailwind utilities are therefore written out as plain CSS.
custom_css = """
body { background-color: #f3f4f6; font-family: ui-sans-serif, system-ui, sans-serif; }
.gradio-container { max-width: 80rem; margin: 0 auto; padding: 1rem; }
.tab-button {
    padding: 0.5rem 1rem; font-size: 0.875rem; font-weight: 500; color: #374151;
    background-color: #ffffff; border-radius: 0.5rem 0.5rem 0 0;
    border-bottom: 2px solid transparent;
}
.tab-button:hover, .tab-button:focus { outline: none; border-bottom-color: #3b82f6; }
.tab-button-active { border-bottom-color: #3b82f6; color: #2563eb; }
.tab-content {
    background-color: #ffffff; padding: 1.5rem; border-radius: 0 0 0.5rem 0.5rem;
    box-shadow: 0 10px 15px -3px rgba(0, 0, 0, 0.1);
}
.gallery img { border-radius: 0.5rem; box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1); }
.btn-primary {
    background-color: #3b82f6; color: #ffffff; padding: 0.5rem 1rem;
    border-radius: 0.5rem; transition: background-color 0.15s;
}
.btn-primary:hover { background-color: #2563eb; }
h1 { font-size: 1.875rem; font-weight: 700; color: #1f2937; margin-bottom: 1rem; }
.input-label { font-size: 0.875rem; font-weight: 500; color: #4b5563; margin-bottom: 0.5rem; }
.markdown-style { text-align: center; color: #4b5563; margin-bottom: 1rem; }
"""
# Gradio interface
with gr.Blocks(css=custom_css) as demo:
    gr.HTML("<h1 style='text-align: center;'>OpenCV Comprehensive Demo</h1>")
    gr.Markdown("Explore all OpenCV features by uploading images or videos and selecting a tab below.", elem_classes=["markdown-style"])
    with gr.Tabs():
        # 1. Image and Video I/O
        with gr.TabItem("Image/Video I/O", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Upload an image or video to display.", elem_classes=["input-label"])
                    io_image = gr.Image(label="Upload Image", type="pil")
                    io_video = gr.Video(label="Upload Video")
                    io_button = gr.Button("Display", elem_classes="btn-primary")
                with gr.Column():
                    io_output = gr.Gallery(label="Output")
            io_button.click(fn=image_video_io, inputs=[io_image, io_video], outputs=io_output)
        # 2. Color Space Conversion
        with gr.TabItem("Color Space Conversion", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Convert between RGB, HSV, and LAB color spaces.", elem_classes=["input-label"])
                    cs_image = gr.Image(label="Upload Image", type="pil")
                    cs_space = gr.Dropdown(choices=["RGB", "HSV", "LAB"], label="Color Space", value="RGB")
                    cs_button = gr.Button("Apply Conversion", elem_classes="btn-primary")
                with gr.Column():
                    cs_output = gr.Image(label="Converted Image")
            cs_button.click(fn=color_space_conversion, inputs=[cs_image, cs_space], outputs=cs_output)
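
            # Hypothetical sketch of what the imported color_space_conversion
            # handler is assumed to do (a single cv2.cvtColor call):
            def _sketch_color_space_conversion(img, space):
                import cv2
                import numpy as np
                arr = np.array(img)  # PIL (RGB) -> NumPy array
                if space == "HSV":
                    return cv2.cvtColor(arr, cv2.COLOR_RGB2HSV)
                if space == "LAB":
                    return cv2.cvtColor(arr, cv2.COLOR_RGB2LAB)
                return arr  # already RGB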
        # 3. Image Resizing and Cropping
        with gr.TabItem("Resizing and Cropping", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Resize or crop the image.", elem_classes=["input-label"])
                    rc_image = gr.Image(label="Upload Image", type="pil")
                    rc_scale = gr.Slider(0.1, 2.0, value=1.0, step=0.1, label="Scale Factor")
                    rc_crop_x = gr.Slider(0, 1, value=0, step=0.1, label="Crop X (relative)")
                    rc_crop_y = gr.Slider(0, 1, value=0, step=0.1, label="Crop Y (relative)")
                    rc_crop_w = gr.Slider(0, 1, value=0.5, step=0.1, label="Crop Width (relative)")
                    rc_crop_h = gr.Slider(0, 1, value=0.5, step=0.1, label="Crop Height (relative)")
                    rc_button = gr.Button("Apply", elem_classes="btn-primary")
                with gr.Column():
                    rc_output = gr.Gallery(label="Resized and Cropped Images")
            rc_button.click(fn=resize_crop, inputs=[rc_image, rc_scale, rc_crop_x, rc_crop_y, rc_crop_w, rc_crop_h], outputs=rc_output)
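
            # Hypothetical sketch of the assumed resize_crop handler: scale the
            # image, then crop a region given in relative coordinates, returning
            # both results for the gr.Gallery output.
            def _sketch_resize_crop(img, scale, x, y, w, h):
                import cv2
                import numpy as np
                arr = np.array(img)
                resized = cv2.resize(arr, None, fx=scale, fy=scale)
                H, W = arr.shape[:2]
                x0, y0 = int(x * W), int(y * H)
                cropped = arr[y0:y0 + int(h * H), x0:x0 + int(w * W)]
                return [resized, cropped]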
        # 4. Geometric Transformations
        with gr.TabItem("Geometric Transformations", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Apply rotation and translation.", elem_classes=["input-label"])
                    gt_image = gr.Image(label="Upload Image", type="pil")
                    gt_angle = gr.Slider(-180, 180, value=0, step=1, label="Rotation Angle (degrees)")
                    gt_tx = gr.Slider(-100, 100, value=0, step=1, label="Translation X (pixels)")
                    gt_ty = gr.Slider(-100, 100, value=0, step=1, label="Translation Y (pixels)")
                    gt_button = gr.Button("Apply", elem_classes="btn-primary")
                with gr.Column():
                    gt_output = gr.Image(label="Transformed Image")
            gt_button.click(fn=geometric_transform, inputs=[gt_image, gt_angle, gt_tx, gt_ty], outputs=gt_output)
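
            # Hypothetical sketch of the assumed geometric_transform handler:
            # one affine warp combining rotation about the center and translation.
            def _sketch_geometric_transform(img, angle, tx, ty):
                import cv2
                import numpy as np
                arr = np.array(img)
                h, w = arr.shape[:2]
                M = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1.0)
                M[0, 2] += tx  # fold the translation into the affine matrix
                M[1, 2] += ty
                return cv2.warpAffine(arr, M, (w, h))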
        # 5. Image Thresholding
        with gr.TabItem("Thresholding", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Apply global or adaptive thresholding.", elem_classes=["input-label"])
                    thresh_image = gr.Image(label="Upload Image", type="pil")
                    thresh_type = gr.Dropdown(choices=["Global", "Adaptive"], label="Threshold Type", value="Global")
                    thresh_value = gr.Slider(0, 255, value=127, step=1, label="Threshold Value")
                    thresh_block = gr.Slider(3, 21, value=11, step=2, label="Block Size (Adaptive)")
                    thresh_C = gr.Slider(-10, 10, value=2, step=1, label="Constant (Adaptive)")
                    thresh_button = gr.Button("Apply", elem_classes="btn-primary")
                with gr.Column():
                    thresh_output = gr.Image(label="Thresholded Image")
            thresh_button.click(fn=thresholding, inputs=[thresh_image, thresh_type, thresh_value, thresh_block, thresh_C], outputs=thresh_output)
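
            # Hypothetical sketch of the assumed thresholding handler. Global
            # thresholding uses one cutoff; adaptive computes a local cutoff per
            # block_size neighborhood, offset by the constant C (the slider's
            # step of 2 from 3 keeps block_size odd, as OpenCV requires).
            def _sketch_thresholding(img, ttype, value, block_size, C):
                import cv2
                import numpy as np
                gray = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2GRAY)
                if ttype == "Adaptive":
                    return cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                                 cv2.THRESH_BINARY, int(block_size), C)
                _, out = cv2.threshold(gray, value, 255, cv2.THRESH_BINARY)
                return out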
        # 6. Edge Detection
        with gr.TabItem("Edge Detection", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Detect edges using Canny, Sobel, or Laplacian.", elem_classes=["input-label"])
                    edge_image = gr.Image(label="Upload Image", type="pil")
                    edge_type = gr.Dropdown(choices=["Canny", "Sobel", "Laplacian"], label="Edge Type", value="Canny")
                    edge_t1 = gr.Slider(0, 500, value=100, step=10, label="Canny Threshold 1")
                    edge_t2 = gr.Slider(0, 500, value=200, step=10, label="Canny Threshold 2")
                    edge_button = gr.Button("Apply", elem_classes="btn-primary")
                with gr.Column():
                    edge_output = gr.Image(label="Edges")
            edge_button.click(fn=edge_detection, inputs=[edge_image, edge_type, edge_t1, edge_t2], outputs=edge_output)
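
            # Hypothetical sketch of the assumed edge_detection handler. The two
            # threshold sliders only affect Canny's hysteresis step; Sobel and
            # Laplacian outputs are rescaled to uint8 for display.
            def _sketch_edge_detection(img, etype, t1, t2):
                import cv2
                import numpy as np
                gray = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2GRAY)
                if etype == "Canny":
                    return cv2.Canny(gray, t1, t2)
                if etype == "Sobel":
                    return cv2.convertScaleAbs(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3))
                return cv2.convertScaleAbs(cv2.Laplacian(gray, cv2.CV_64F))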
        # 7. Image Filtering
        with gr.TabItem("Image Filtering", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Apply Gaussian or median blur.", elem_classes=["input-label"])
                    filter_image = gr.Image(label="Upload Image", type="pil")
                    filter_type = gr.Dropdown(choices=["Gaussian", "Median"], label="Filter Type", value="Gaussian")
                    filter_kernel = gr.Slider(3, 21, value=5, step=2, label="Kernel Size")
                    filter_button = gr.Button("Apply", elem_classes="btn-primary")
                with gr.Column():
                    filter_output = gr.Image(label="Filtered Image")
            filter_button.click(fn=image_filtering, inputs=[filter_image, filter_type, filter_kernel], outputs=filter_output)
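
            # Hypothetical sketch of the assumed image_filtering handler. Both
            # blurs need an odd kernel size, which the slider guarantees.
            def _sketch_image_filtering(img, ftype, k):
                import cv2
                import numpy as np
                arr = np.array(img)
                k = int(k)
                if ftype == "Median":
                    return cv2.medianBlur(arr, k)
                return cv2.GaussianBlur(arr, (k, k), 0)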
        # 8. Contour Detection
        with gr.TabItem("Contour Detection", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Detect and draw contours.", elem_classes=["input-label"])
                    contour_image = gr.Image(label="Upload Image", type="pil")
                    contour_button = gr.Button("Apply", elem_classes="btn-primary")
                with gr.Column():
                    contour_output = gr.Image(label="Contours")
            contour_button.click(fn=contour_detection, inputs=contour_image, outputs=contour_output)
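
            # Hypothetical sketch of the assumed contour_detection handler:
            # binarize, find external contours, draw them in green.
            def _sketch_contour_detection(img):
                import cv2
                import numpy as np
                arr = np.array(img)
                gray = cv2.cvtColor(arr, cv2.COLOR_RGB2GRAY)
                _, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
                contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                return cv2.drawContours(arr.copy(), contours, -1, (0, 255, 0), 2)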
        # 9. Feature Detection
        with gr.TabItem("Feature Detection", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Detect ORB keypoints.", elem_classes=["input-label"])
                    feat_image = gr.Image(label="Upload Image", type="pil")
                    feat_button = gr.Button("Apply", elem_classes="btn-primary")
                with gr.Column():
                    feat_output = gr.Image(label="Keypoints")
            feat_button.click(fn=feature_detection, inputs=feat_image, outputs=feat_output)
        # 10. Object Detection
        with gr.TabItem("Object Detection", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Detect cars using Haar Cascade.", elem_classes=["input-label"])
                    obj_image = gr.Image(label="Upload Image", type="pil")
                    obj_button = gr.Button("Apply", elem_classes="btn-primary")
                with gr.Column():
                    obj_output = gr.Image(label="Detected Objects")
            obj_button.click(fn=object_detection, inputs=obj_image, outputs=obj_output)
        # 11. Face Detection
        with gr.TabItem("Face Detection", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Detect faces using Haar Cascade.", elem_classes=["input-label"])
                    face_image = gr.Image(label="Upload Image", type="pil")
                    face_button = gr.Button("Apply", elem_classes="btn-primary")
                with gr.Column():
                    face_output = gr.Image(label="Detected Faces")
            face_button.click(fn=face_detection, inputs=face_image, outputs=face_output)
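
            # Hypothetical sketch of the assumed face_detection handler, using
            # the frontal-face Haar cascade bundled with opencv-python.
            def _sketch_face_detection(img):
                import cv2
                import numpy as np
                arr = np.array(img)
                gray = cv2.cvtColor(arr, cv2.COLOR_RGB2GRAY)
                cascade = cv2.CascadeClassifier(
                    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
                for (x, y, w, h) in cascade.detectMultiScale(gray, 1.1, 4):
                    cv2.rectangle(arr, (x, y), (x + w, y + h), (0, 255, 0), 2)
                return arr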
        # 12. Image Segmentation
        with gr.TabItem("Image Segmentation", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Apply GrabCut segmentation.", elem_classes=["input-label"])
                    seg_image = gr.Image(label="Upload Image", type="pil")
                    seg_button = gr.Button("Apply", elem_classes="btn-primary")
                with gr.Column():
                    seg_output = gr.Image(label="Segmented Image")
            seg_button.click(fn=image_segmentation, inputs=seg_image, outputs=seg_output)
        # 13. Motion Analysis
        with gr.TabItem("Motion Analysis", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Compute optical flow for video.", elem_classes=["input-label"])
                    motion_video = gr.Video(label="Upload Video")
                    motion_button = gr.Button("Apply", elem_classes="btn-primary")
                with gr.Column():
                    motion_output = gr.Image(label="Optical Flow")
            motion_button.click(fn=optical_flow, inputs=motion_video, outputs=motion_output)
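
            # Hypothetical sketch of the assumed optical_flow handler: dense
            # Farneback flow between the first two frames, rendered as an HSV
            # image (hue = direction, value = magnitude).
            def _sketch_optical_flow(video_path):
                import cv2
                import numpy as np
                cap = cv2.VideoCapture(video_path)
                ok1, f1 = cap.read()
                ok2, f2 = cap.read()
                cap.release()
                if not (ok1 and ok2):
                    return None
                g1 = cv2.cvtColor(f1, cv2.COLOR_BGR2GRAY)
                g2 = cv2.cvtColor(f2, cv2.COLOR_BGR2GRAY)
                flow = cv2.calcOpticalFlowFarneback(g1, g2, None, 0.5, 3, 15, 3, 5, 1.2, 0)
                mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
                hsv = np.zeros_like(f1)
                hsv[..., 0] = ang * 180 / np.pi / 2
                hsv[..., 1] = 255
                hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
                return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)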
        # 14. Camera Calibration
        with gr.TabItem("Camera Calibration", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Detect checkerboard for calibration (upload checkerboard image).", elem_classes=["input-label"])
                    calib_image = gr.Image(label="Upload Image", type="pil")
                    calib_button = gr.Button("Apply", elem_classes="btn-primary")
                with gr.Column():
                    calib_output = gr.Image(label="Calibration Result")
            calib_button.click(fn=camera_calibration, inputs=calib_image, outputs=calib_output)
        # 15. Stereo Vision
        with gr.TabItem("Stereo Vision", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Compute disparity map (simplified).", elem_classes=["input-label"])
                    stereo_image = gr.Image(label="Upload Image", type="pil")
                    stereo_button = gr.Button("Apply", elem_classes="btn-primary")
                with gr.Column():
                    stereo_output = gr.Image(label="Disparity Map")
            stereo_button.click(fn=stereo_vision, inputs=stereo_image, outputs=stereo_output)
        # 16. Background Subtraction
        with gr.TabItem("Background Subtraction", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Apply MOG2 for moving object detection.", elem_classes=["input-label"])
                    bg_video = gr.Video(label="Upload Video")
                    bg_button = gr.Button("Apply", elem_classes="btn-primary")
                with gr.Column():
                    bg_output = gr.Image(label="Foreground Mask")
            bg_button.click(fn=background_subtraction, inputs=bg_video, outputs=bg_output)
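
            # Hypothetical sketch of the assumed background_subtraction handler:
            # feed every frame through a MOG2 subtractor and return the final
            # foreground mask.
            def _sketch_background_subtraction(video_path):
                import cv2
                subtractor = cv2.createBackgroundSubtractorMOG2()
                cap = cv2.VideoCapture(video_path)
                mask = None
                while True:
                    ok, frame = cap.read()
                    if not ok:
                        break
                    mask = subtractor.apply(frame)
                cap.release()
                return mask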
        # 17. Image Stitching
        with gr.TabItem("Image Stitching", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Stitch two images using ORB features.", elem_classes=["input-label"])
                    stitch_image1 = gr.Image(label="Upload First Image", type="pil")
                    stitch_image2 = gr.Image(label="Upload Second Image", type="pil")
                    stitch_button = gr.Button("Apply", elem_classes="btn-primary")
                with gr.Column():
                    stitch_output = gr.Image(label="Stitched Image")
            stitch_button.click(fn=image_stitching, inputs=[stitch_image1, stitch_image2], outputs=stitch_output)
        # 18. Machine Learning (K-Means)
        with gr.TabItem("K-Means Clustering", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Apply k-means clustering for color quantization.", elem_classes=["input-label"])
                    kmeans_image = gr.Image(label="Upload Image", type="pil")
                    kmeans_k = gr.Slider(2, 16, value=8, step=1, label="Number of Clusters (K)")
                    kmeans_button = gr.Button("Apply", elem_classes="btn-primary")
                with gr.Column():
                    kmeans_output = gr.Image(label="Clustered Image")
            kmeans_button.click(fn=kmeans_clustering, inputs=[kmeans_image, kmeans_k], outputs=kmeans_output)
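
            # Hypothetical sketch of the assumed kmeans_clustering handler:
            # cluster the pixels into k colors and repaint each pixel with its
            # cluster center (color quantization).
            def _sketch_kmeans_clustering(img, k):
                import cv2
                import numpy as np
                arr = np.array(img)
                pixels = arr.reshape(-1, 3).astype(np.float32)
                criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
                _, labels, centers = cv2.kmeans(pixels, int(k), None, criteria, 10,
                                                cv2.KMEANS_RANDOM_CENTERS)
                quantized = centers.astype(np.uint8)[labels.flatten()]
                return quantized.reshape(arr.shape)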
        # 19. Deep Learning
        with gr.TabItem("Deep Learning", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Detect objects using MobileNet SSD (upload prototxt and caffemodel files).", elem_classes=["input-label"])
                    dl_image = gr.Image(label="Upload Image", type="pil")
                    dl_prototxt = gr.File(label="Upload Prototxt File")
                    dl_model = gr.File(label="Upload Caffemodel File")
                    dl_button = gr.Button("Apply", elem_classes="btn-primary")
                with gr.Column():
                    dl_output = gr.Image(label="Detected Objects")
            dl_button.click(fn=deep_learning, inputs=[dl_image, dl_prototxt, dl_model], outputs=dl_output)
        # 20. Drawing and Text
        with gr.TabItem("Drawing and Text", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Draw shapes and add text to the image.", elem_classes=["input-label"])
                    draw_image = gr.Image(label="Upload Image", type="pil")
                    draw_shape = gr.Dropdown(choices=["Rectangle", "Circle"], label="Shape", value="Rectangle")
                    draw_text = gr.Textbox(label="Text to Add", value="OpenCV")
                    draw_button = gr.Button("Apply", elem_classes="btn-primary")
                with gr.Column():
                    draw_output = gr.Image(label="Annotated Image")
            draw_button.click(fn=drawing_text, inputs=[draw_image, draw_shape, draw_text], outputs=draw_output)
if __name__ == "__main__":
    demo.launch()