gloriforge committed on
Commit
3611b6a
·
verified ·
1 Parent(s): 4fea067

Upload folder using huggingface_hub

Browse files
Files changed (2) hide show
  1. __pycache__/miner.cpython-312.pyc +0 -0
  2. miner.py +0 -10
__pycache__/miner.cpython-312.pyc CHANGED
Binary files a/__pycache__/miner.cpython-312.pyc and b/__pycache__/miner.cpython-312.pyc differ
 
miner.py CHANGED
@@ -80,7 +80,6 @@ class Miner:
80
  def __init__(self, path_hf_repo: Path) -> None:
81
  try:
82
  device = "cuda" if torch.cuda.is_available() else "cpu"
83
- print(device)
84
 
85
  providers = [
86
  'CUDAExecutionProvider',
@@ -338,8 +337,6 @@ class Miner:
338
  if len(batch_images) == 0:
339
  return bboxes
340
 
341
- print(f"Processing batch of {len(batch_images)} images")
342
-
343
  # Get original image dimensions for scaling
344
  height, width = batch_images[0].shape[:2]
345
  scale = 640.0
@@ -349,14 +346,12 @@ class Miner:
349
  # Memory optimization: Process smaller batches if needed
350
  max_batch_size = 32 # Reduce batch size further to prevent memory issues
351
  if len(batch_images) > max_batch_size:
352
- print(f"Large batch detected ({len(batch_images)} images), splitting into smaller batches of {max_batch_size}")
353
  # Process in smaller chunks
354
  all_bboxes = {}
355
  for chunk_start in range(0, len(batch_images), max_batch_size):
356
  chunk_end = min(chunk_start + max_batch_size, len(batch_images))
357
  chunk_images = batch_images[chunk_start:chunk_end]
358
  chunk_offset = offset + chunk_start
359
- print(f"Processing chunk {chunk_start//max_batch_size + 1}: images {chunk_start}-{chunk_end-1}")
360
  chunk_bboxes = self._detect_objects_batch(chunk_images, chunk_offset)
361
  all_bboxes.update(chunk_bboxes)
362
  return all_bboxes
@@ -367,7 +362,6 @@ class Miner:
367
 
368
  # Handle batch size mismatch - pad if needed
369
  model_batch_size = self.bbox_model.get_inputs()[0].shape[0]
370
- print(f"Model input shape: {self.bbox_model.get_inputs()[0].shape}, batch_size: {model_batch_size}")
371
 
372
  if model_batch_size is not None:
373
  try:
@@ -378,8 +372,6 @@ class Miner:
378
  model_batch_size = int(model_batch_size)
379
  except (ValueError, TypeError):
380
  model_batch_size = None
381
-
382
- print(f"Processed model_batch_size: {model_batch_size}, actual_batch_size: {actual_batch_size}")
383
 
384
  if model_batch_size and actual_batch_size < model_batch_size:
385
  padding_size = model_batch_size - actual_batch_size
@@ -394,7 +386,6 @@ class Miner:
394
  start_time = time.time()
395
  outputs = self.bbox_model.run(None, {input_name: imgs})[0]
396
  inference_time = time.time() - start_time
397
- print(f"Inference time: {inference_time:.3f}s for {actual_batch_size} images")
398
 
399
  # Remove padded results if we added padding
400
  if model_batch_size and isinstance(model_batch_size, int) and actual_batch_size < model_batch_size:
@@ -516,7 +507,6 @@ class Miner:
516
  else:
517
  frame_keypoints = frame_keypoints[:n_keypoints]
518
  keypoints[offset + frame_number_in_batch] = frame_keypoints
519
- print("✅ Keypoints predicted")
520
  break
521
  # except RuntimeError as e:
522
  # print(self.pitch_batch_size)
 
80
  def __init__(self, path_hf_repo: Path) -> None:
81
  try:
82
  device = "cuda" if torch.cuda.is_available() else "cpu"
 
83
 
84
  providers = [
85
  'CUDAExecutionProvider',
 
337
  if len(batch_images) == 0:
338
  return bboxes
339
 
 
 
340
  # Get original image dimensions for scaling
341
  height, width = batch_images[0].shape[:2]
342
  scale = 640.0
 
346
  # Memory optimization: Process smaller batches if needed
347
  max_batch_size = 32 # Reduce batch size further to prevent memory issues
348
  if len(batch_images) > max_batch_size:
 
349
  # Process in smaller chunks
350
  all_bboxes = {}
351
  for chunk_start in range(0, len(batch_images), max_batch_size):
352
  chunk_end = min(chunk_start + max_batch_size, len(batch_images))
353
  chunk_images = batch_images[chunk_start:chunk_end]
354
  chunk_offset = offset + chunk_start
 
355
  chunk_bboxes = self._detect_objects_batch(chunk_images, chunk_offset)
356
  all_bboxes.update(chunk_bboxes)
357
  return all_bboxes
 
362
 
363
  # Handle batch size mismatch - pad if needed
364
  model_batch_size = self.bbox_model.get_inputs()[0].shape[0]
 
365
 
366
  if model_batch_size is not None:
367
  try:
 
372
  model_batch_size = int(model_batch_size)
373
  except (ValueError, TypeError):
374
  model_batch_size = None
 
 
375
 
376
  if model_batch_size and actual_batch_size < model_batch_size:
377
  padding_size = model_batch_size - actual_batch_size
 
386
  start_time = time.time()
387
  outputs = self.bbox_model.run(None, {input_name: imgs})[0]
388
  inference_time = time.time() - start_time
 
389
 
390
  # Remove padded results if we added padding
391
  if model_batch_size and isinstance(model_batch_size, int) and actual_batch_size < model_batch_size:
 
507
  else:
508
  frame_keypoints = frame_keypoints[:n_keypoints]
509
  keypoints[offset + frame_number_in_batch] = frame_keypoints
 
510
  break
511
  # except RuntimeError as e:
512
  # print(self.pitch_batch_size)