commit 78487918e4 (parent 5dae5b86c0)

Binary file changed (not shown).
app.py (16 lines changed)
@@ -96,13 +96,21 @@ def process_all_cameras():
                 break

             cam_id = result['camera_id']

+            # Check Result Type
+            if result.get('type') == 'success':
                 val = result['value']
                 conf = result.get('confidence')

-            # Result queue now only contains validated (range + confidence checked) values
+                # Update State & Publish
                 camera_manager.results[cam_id] = val
                 publish_detected_number(cam_id, val, conf)

+            elif result.get('type') == 'error':
+                # Log the error (Range or Confidence or Parse)
+                # This ensures the log appears exactly when the result is processed
+                msg = result.get('message', 'Unknown error')
+                logger.warning(f"[{cam_id}] Detection skipped: {msg}")

     # --- Part 2: Feed Frames ---
     camera_manager.load_roi_config()
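The loop above now dispatches on a `type` field carried by every queue message. Pulling the field names together from both files in this commit, the contract looks roughly like this (the `TypedDict` wrapper is illustrative only; the code passes plain dicts):

```python
from typing import List, TypedDict

class DetectionResult(TypedDict, total=False):
    type: str           # 'success' or 'error'
    camera_id: str      # always present
    value: int          # parsed reading (success; also attached to range errors)
    confidence: float   # mean per-digit confidence (success only)
    digits: List[str]   # digit strings collected before a failure, when available
    message: str        # human-readable reason (error only)
```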
@@ -295,15 +303,13 @@ def detect_digits():
         return jsonify({'error': 'Failed to crop ROIs'}), 500

     try:
-        # 4. Run Inference Synchronously (using the new method signature)
-        # Returns list of dicts: {'digit': 'X', 'confidence': 0.XX}
+        # 4. Run Inference Synchronously
         predictions = inference_worker.predict_batch(cropped_images)

         valid_digits_str = []
         confidences = []
         rejected_reasons = []

-        # 5. Validation Logic (Mirroring _worker_loop logic)
         CONFIDENCE_THRESHOLD = inference_worker.CONFIDENCE_THRESHOLD
         MIN_VALUE = inference_worker.MIN_VALUE
         MAX_VALUE = inference_worker.MAX_VALUE
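The endpoint reads its thresholds off the worker so the synchronous path and the background loop cannot drift apart. The validation that presumably follows this hunk mirrors `_worker_loop`, roughly as below (a sketch using the names visible in the diff; the actual control flow of `detect_digits()` is not shown here):

```python
# Sketch only: mirrors the worker's per-digit confidence and range checks.
for i, p in enumerate(predictions):
    if p['confidence'] < CONFIDENCE_THRESHOLD:
        rejected_reasons.append(
            f"Digit {i} conf {p['confidence']:.2f} < {CONFIDENCE_THRESHOLD}")
    else:
        valid_digits_str.append(p['digit'])
        confidences.append(p['confidence'])

if valid_digits_str:
    number = int(''.join(valid_digits_str))
    if not (MIN_VALUE <= number <= MAX_VALUE):
        rejected_reasons.append(
            f"Value {number} out of range ({MIN_VALUE}-{MAX_VALUE})")
```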
inference.py (71 lines changed)
@@ -82,27 +82,41 @@ class InferenceWorker:
             try:
                 # 1. Crop all ROIs
                 crops = self._crop_rois(frame, rois)
-                if not crops: continue
+                if not crops:
+                    # Report failure to queue so main loop knows we tried
+                    self.result_queue.put({
+                        'type': 'error',
+                        'camera_id': cam_id,
+                        'message': 'No ROIs cropped'
+                    })
+                    continue

-                # 2. Batch Predict (Returns dicts with 'digit' and 'confidence')
+                # 2. Batch Predict
                 predictions = self.predict_batch(crops)

                 # 3. Validation Logic
                 valid_digits_str = []
                 confidences = []

-                # Check individual digit confidence
                 all_confident = True
-                for p in predictions:
+                low_conf_details = []
+
+                for i, p in enumerate(predictions):
                     if p['confidence'] < self.CONFIDENCE_THRESHOLD:
-                        logger.warning(f"[{cam_id}] Rejected digit '{p['digit']}' due to low confidence: {p['confidence']:.2f}")
+                        low_conf_details.append(f"Digit {i} conf {p['confidence']:.2f} < {self.CONFIDENCE_THRESHOLD}")
                         all_confident = False
-                        break
                     valid_digits_str.append(p['digit'])
                     confidences.append(p['confidence'])

                 if not all_confident:
-                    continue # Skip this frame entirely if any digit is uncertain
+                    # Send failure result
+                    self.result_queue.put({
+                        'type': 'error',
+                        'camera_id': cam_id,
+                        'message': f"Low confidence: {', '.join(low_conf_details)}",
+                        'digits': valid_digits_str
+                    })
+                    continue

                 if not valid_digits_str:
                     continue
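Worth noting: the old loop stopped at the first weak digit (`break`), so at most one rejection was ever logged per frame. With `enumerate` and `low_conf_details`, every under-threshold digit is recorded and reported in a single queue message, e.g. (values illustrative): `Low confidence: Digit 0 conf 0.41 < 0.7, Digit 3 conf 0.55 < 0.7`.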
@@ -116,20 +130,35 @@ class InferenceWorker:
                 if self.MIN_VALUE <= final_number <= self.MAX_VALUE:
                     avg_conf = float(np.mean(confidences))
                     self.result_queue.put({
+                        'type': 'success',
                         'camera_id': cam_id,
                         'value': final_number,
                         'digits': valid_digits_str,
                         'confidence': avg_conf
                     })
-                    logger.info(f"[{cam_id}] Valid reading: {final_number} (Avg Conf: {avg_conf:.2f})")
                 else:
-                    logger.warning(f"[{cam_id}] Value {final_number} out of range ({self.MIN_VALUE}-{self.MAX_VALUE}). Ignored.")
+                    # Send range error result
+                    self.result_queue.put({
+                        'type': 'error',
+                        'camera_id': cam_id,
+                        'message': f"Value {final_number} out of range ({self.MIN_VALUE}-{self.MAX_VALUE})",
+                        'value': final_number
+                    })

             except ValueError:
-                logger.warning(f"[{cam_id}] Could not parse digits into integer: {valid_digits_str}")
+                self.result_queue.put({
+                    'type': 'error',
+                    'camera_id': cam_id,
+                    'message': f"Parse error: {valid_digits_str}"
+                })

             except Exception as e:
                 logger.error(f"Inference error for {cam_id}: {e}")
+                self.result_queue.put({
+                    'type': 'error',
+                    'camera_id': cam_id,
+                    'message': str(e)
+                })

     def _crop_rois(self, image, roi_list):
         cropped_images = []
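After this hunk, every exit path in the worker loop (no crops, low confidence, out of range, parse failure, unexpected exception) puts exactly one message on the queue. The repeated dict literals could be folded into a small helper; a hypothetical refactor, not part of this commit:

```python
def _report_error(self, cam_id, message, **extra):
    # Hypothetical helper: one place to shape error messages for the queue.
    self.result_queue.put(
        {'type': 'error', 'camera_id': cam_id, 'message': message, **extra})
```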
@@ -154,48 +183,28 @@ class InferenceWorker:
         input_index = self.input_details[0]['index']
         output_index = self.output_details[0]['index']

-        # Preprocess all images into a single batch array
-        # Shape: [N, 32, 20, 3] (assuming model expects 32x20 rgb)
         batch_input = []
-        target_h, target_w = 32, 20 # Based on your previous code logic
+        target_h, target_w = 32, 20

         for img in images:
-            # Resize
             roi_resized = cv2.resize(img, (target_w, target_h))
-            # Color
             roi_rgb = cv2.cvtColor(roi_resized, cv2.COLOR_BGR2RGB)
-            # Normalize
             roi_norm = roi_rgb.astype(np.float32)
             batch_input.append(roi_norm)

-        # Create batch tensor
         input_tensor = np.array(batch_input)

-        # --- DYNAMIC RESIZING ---
-        # TFLite models have a fixed input size (usually batch=1).
-        # We must resize the input tensor to match our current batch size (N).
-
-        # 1. Resize input tensor
         self.interpreter.resize_tensor_input(input_index, [num_images, target_h, target_w, 3])

-        # 2. Re-allocate tensors
         self.interpreter.allocate_tensors()

-        # 3. Run Inference
         self.interpreter.set_tensor(input_index, input_tensor)
         self.interpreter.invoke()

-        # 4. Get Results
         output_data = self.interpreter.get_tensor(output_index)
-        # Result shape is [N, 10] (logits or probabilities for 10 digits)

         results = []
         for i in range(num_images):
-            # Calculate softmax to get probabilities (if model output is logits)
-            # If model output is already softmax, this is redundant but usually harmless if sum is approx 1
             logits = output_data[i]
             probs = np.exp(logits) / np.sum(np.exp(logits))

             digit_class = np.argmax(probs)
             confidence = probs[digit_class]
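The deleted comments described the dynamic-batching pattern that the surviving code still implements. As a self-contained reference (a minimal sketch, assuming a TFLite model with a float32 input and a 10-class output; the model path and batch contents are placeholders):

```python
import numpy as np
import tensorflow as tf  # or: from tflite_runtime.interpreter import Interpreter

interpreter = tf.lite.Interpreter(model_path='digits.tflite')  # placeholder path
input_index = interpreter.get_input_details()[0]['index']
output_index = interpreter.get_output_details()[0]['index']

batch = np.zeros((4, 32, 20, 3), dtype=np.float32)  # N=4 crops of 32x20 RGB

# TFLite graphs are usually exported with batch=1, so the input tensor must be
# resized to the current batch size and tensors re-allocated before invoke().
interpreter.resize_tensor_input(input_index, list(batch.shape))
interpreter.allocate_tensors()
interpreter.set_tensor(input_index, batch)
interpreter.invoke()
logits = interpreter.get_tensor(output_index)  # shape [N, 10]

# Numerically stable softmax: shifting by the row max avoids overflow in exp().
# (The in-repo version skips the shift, which is fine for small logits.)
probs = np.exp(logits - logits.max(axis=1, keepdims=True))
probs /= probs.sum(axis=1, keepdims=True)
```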