import threading
import queue
import time
import logging

import cv2
import numpy as np
import tflite_runtime.interpreter as tflite

from config import Config

logger = logging.getLogger(__name__)


class InferenceWorker:
    """Background worker that runs batched TFLite digit recognition.

    Tasks (a frame plus a list of ROI rectangles) are submitted via
    :meth:`add_task` (non-blocking; dropped when the queue is full) and
    consumed by a daemon thread that crops the ROIs, runs one batched
    inference pass, validates per-digit confidence and the parsed value's
    range, and publishes a result dict on :attr:`result_queue` for the main
    loop to drain via :meth:`get_result`.
    """

    def __init__(self):
        # Bounded so producers drop frames instead of backing up behind a slow model.
        self.input_queue = queue.Queue(maxsize=10)
        # Unbounded; assumed to be drained promptly by the caller of get_result().
        self.result_queue = queue.Queue()
        self.running = False
        self.interpreter = None
        self.input_details = None
        self.output_details = None
        # Serializes interpreter access — TFLite interpreters are not thread-safe.
        self.lock = threading.Lock()

        # Validation thresholds
        self.CONFIDENCE_THRESHOLD = 0.80  # Minimum confidence (0-1) to accept a digit
        self.MIN_VALUE = 5                # Minimum allowed temperature value
        self.MAX_VALUE = 100              # Maximum allowed temperature value

        # Load Model
        self.load_model()

    def load_model(self):
        """Load the TFLite model from ``Config.MODEL_PATH``.

        On failure the interpreter is left as ``None``, which makes
        :meth:`add_task` a silent no-op (callers get no results).
        """
        try:
            logger.info("Loading TFLite model from: %s", Config.MODEL_PATH)
            self.interpreter = tflite.Interpreter(model_path=Config.MODEL_PATH)
            self.interpreter.allocate_tensors()
            self.input_details = self.interpreter.get_input_details()
            self.output_details = self.interpreter.get_output_details()
            # Store original input shape for resizing logic
            self.original_input_shape = self.input_details[0]['shape']
            logger.info("Model loaded. Default input shape: %s", self.original_input_shape)
        except Exception as e:
            logger.critical("Failed to load TFLite model: %s", e)
            self.interpreter = None

    def start(self):
        """Start the worker thread (idempotent)."""
        if self.running:
            return
        self.running = True
        threading.Thread(target=self._worker_loop, daemon=True).start()
        logger.info("Inference worker started.")

    def stop(self):
        """Signal the worker loop to exit after its current iteration."""
        self.running = False

    def add_task(self, camera_id, rois, frame):
        """Add task (non-blocking).

        Dropped silently when the model failed to load or the queue is full
        (best-effort by design: a stale frame is worthless).
        """
        if not self.interpreter:
            return
        try:
            task = {
                'camera_id': camera_id,
                'rois': rois,
                'frame': frame,
                'timestamp': time.time(),
            }
            self.input_queue.put(task, block=False)
        except queue.Full:
            pass

    def get_result(self):
        """Return the next result dict, or ``None`` if nothing is pending."""
        try:
            return self.result_queue.get(block=False)
        except queue.Empty:
            return None

    def _worker_loop(self):
        """Daemon loop: dequeue tasks and process them until stopped."""
        while self.running:
            try:
                task = self.input_queue.get(timeout=1)
            except queue.Empty:
                continue

            cam_id = task['camera_id']
            try:
                self._process_task(cam_id, task['rois'], task['frame'])
            except Exception as e:
                # Last-resort boundary: report the failure instead of killing the thread.
                logger.error("Inference error for %s: %s", cam_id, e)
                self.result_queue.put({
                    'type': 'error',
                    'camera_id': cam_id,
                    'message': str(e),
                })

    def _process_task(self, cam_id, rois, frame):
        """Crop, predict, validate and publish a result for one task."""
        # 1. Crop all ROIs
        crops = self._crop_rois(frame, rois)
        if not crops:
            # Report failure to queue so main loop knows we tried
            self.result_queue.put({
                'type': 'error',
                'camera_id': cam_id,
                'message': 'No ROIs cropped',
            })
            return

        # 2. Batch Predict
        predictions = self.predict_batch(crops)

        # 3. Validation Logic — reject the whole reading if any digit is uncertain.
        valid_digits_str = []
        confidences = []
        all_confident = True
        low_conf_details = []
        for i, p in enumerate(predictions):
            if p['confidence'] < self.CONFIDENCE_THRESHOLD:
                low_conf_details.append(
                    f"Digit {i} conf {p['confidence']:.2f} < {self.CONFIDENCE_THRESHOLD}"
                )
                all_confident = False
            valid_digits_str.append(p['digit'])
            confidences.append(p['confidence'])

        if not all_confident:
            # Send failure result
            self.result_queue.put({
                'type': 'error',
                'camera_id': cam_id,
                'message': f"Low confidence: {', '.join(low_conf_details)}",
                'digits': valid_digits_str,
            })
            return
        if not valid_digits_str:
            return

        self._publish_number(cam_id, valid_digits_str, confidences)

    def _publish_number(self, cam_id, valid_digits_str, confidences):
        """Parse the digit strings into an int, range-check it, and publish."""
        try:
            final_number_str = "".join(valid_digits_str)
            final_number = int(final_number_str)
            # Check Range
            if self.MIN_VALUE <= final_number <= self.MAX_VALUE:
                avg_conf = float(np.mean(confidences))
                self.result_queue.put({
                    'type': 'success',
                    'camera_id': cam_id,
                    'value': final_number,
                    'digits': valid_digits_str,
                    'confidence': avg_conf,
                })
            else:
                # Send range error result
                self.result_queue.put({
                    'type': 'error',
                    'camera_id': cam_id,
                    'message': f"Value {final_number} out of range ({self.MIN_VALUE}-{self.MAX_VALUE})",
                    'value': final_number,
                })
        except ValueError:
            self.result_queue.put({
                'type': 'error',
                'camera_id': cam_id,
                'message': f"Parse error: {valid_digits_str}",
            })

    def _crop_rois(self, image, roi_list):
        """Return non-empty crops for each ROI dict (x/y/width/height keys).

        Malformed or out-of-bounds ROIs are skipped (best-effort), but now
        logged at debug level instead of silently swallowed.
        """
        cropped_images = []
        for roi in roi_list:
            try:
                x, y, w, h = roi['x'], roi['y'], roi['width'], roi['height']
                cropped = image[y:y + h, x:x + w]
                if cropped.size > 0:
                    cropped_images.append(cropped)
            except Exception as e:
                logger.debug("Skipping malformed ROI %r: %s", roi, e)
        return cropped_images

    def predict_batch(self, images):
        """Run inference on a batch of images at once.

        Returns list of dicts: {'digit': str, 'confidence': float}.
        Holds the interpreter lock for the whole call because the input
        tensor is resized to the batch size and reallocated each time.
        """
        with self.lock:
            if not self.interpreter:
                return []
            num_images = len(images)
            if num_images == 0:
                return []

            input_index = self.input_details[0]['index']
            output_index = self.output_details[0]['index']

            # Preprocess: fixed model input size, BGR->RGB, float32.
            # NOTE(review): no /255 scaling here — presumably the model expects
            # raw 0-255 floats; confirm against training preprocessing.
            batch_input = []
            target_h, target_w = 32, 20
            for img in images:
                roi_resized = cv2.resize(img, (target_w, target_h))
                roi_rgb = cv2.cvtColor(roi_resized, cv2.COLOR_BGR2RGB)
                roi_norm = roi_rgb.astype(np.float32)
                batch_input.append(roi_norm)
            input_tensor = np.array(batch_input)

            # Resize the input tensor to the current batch size.
            self.interpreter.resize_tensor_input(
                input_index, [num_images, target_h, target_w, 3]
            )
            self.interpreter.allocate_tensors()
            self.interpreter.set_tensor(input_index, input_tensor)
            self.interpreter.invoke()
            output_data = self.interpreter.get_tensor(output_index)

            results = []
            for i in range(num_images):
                logits = output_data[i]
                # Numerically stable softmax: subtract the max logit before
                # exponentiating so large logits cannot overflow to inf/nan.
                shifted = np.exp(logits - np.max(logits))
                probs = shifted / np.sum(shifted)
                digit_class = int(np.argmax(probs))
                results.append({
                    'digit': str(digit_class),
                    'confidence': float(probs[digit_class]),
                })
            return results