From 78487918e4e5003ed9d4e1c4701d695de3e58d4c Mon Sep 17 00:00:00 2001
From: Bora
Date: Thu, 1 Jan 2026 10:07:37 +0100
Subject: [PATCH] 2

---
 __pycache__/inference.cpython-311.pyc | Bin 8561 -> 10192 bytes
 app.py                                |  22 +++++---
 inference.py                          |  71 +++++++++++++++-----------
 3 files changed, 54 insertions(+), 39 deletions(-)

diff --git a/__pycache__/inference.cpython-311.pyc b/__pycache__/inference.cpython-311.pyc
index 6aaa3636b9d41bd814dedf4278cda466bbf1eca6..54f000b0434718c462bd140a5ae013526f3bab81 100644
Binary files a/__pycache__/inference.cpython-311.pyc and b/__pycache__/inference.cpython-311.pyc differ
diff --git a/app.py b/app.py
index cbc3745..522308c 100644
--- a/app.py
+++ b/app.py
@@ -96,12 +96,20 @@ def process_all_cameras():
                 break
 
             cam_id = result['camera_id']
-            val = result['value']
-            conf = result.get('confidence')
-            # Result queue now only contains validated (range + confidence checked) values
-            camera_manager.results[cam_id] = val
-            publish_detected_number(cam_id, val, conf)
+            # Check Result Type
+            if result.get('type') == 'success':
+                val = result['value']
+                conf = result.get('confidence')
+                # Update State & Publish
+                camera_manager.results[cam_id] = val
+                publish_detected_number(cam_id, val, conf)
+
+            elif result.get('type') == 'error':
+                # Log the error (Range or Confidence or Parse)
+                # This ensures the log appears exactly when the result is processed
+                msg = result.get('message', 'Unknown error')
+                logger.warning(f"[{cam_id}] Detection skipped: {msg}")
 
     # --- Part 2: Feed Frames ---
     camera_manager.load_roi_config()
 
@@ -295,15 +303,13 @@ def detect_digits():
         return jsonify({'error': 'Failed to crop ROIs'}), 500
 
     try:
-        # 4. Run Inference Synchronously (using the new method signature)
-        # Returns list of dicts: {'digit': 'X', 'confidence': 0.XX}
+        # 4. Run Inference Synchronously
         predictions = inference_worker.predict_batch(cropped_images)
 
         valid_digits_str = []
         confidences = []
         rejected_reasons = []
 
-        # 5. Validation Logic (Mirroring _worker_loop logic)
         CONFIDENCE_THRESHOLD = inference_worker.CONFIDENCE_THRESHOLD
         MIN_VALUE = inference_worker.MIN_VALUE
         MAX_VALUE = inference_worker.MAX_VALUE
diff --git a/inference.py b/inference.py
index faa0519..94d33f0 100644
--- a/inference.py
+++ b/inference.py
@@ -82,27 +82,41 @@ class InferenceWorker:
             try:
                 # 1. Crop all ROIs
                 crops = self._crop_rois(frame, rois)
-                if not crops: continue
+                if not crops:
+                    # Report failure to queue so main loop knows we tried
+                    self.result_queue.put({
+                        'type': 'error',
+                        'camera_id': cam_id,
+                        'message': 'No ROIs cropped'
+                    })
+                    continue
 
-                # 2. Batch Predict (Returns dicts with 'digit' and 'confidence')
+                # 2. Batch Predict
                 predictions = self.predict_batch(crops)
 
                 # 3. Validation Logic
                 valid_digits_str = []
                 confidences = []
 
-                # Check individual digit confidence
                 all_confident = True
-                for p in predictions:
+                low_conf_details = []
+
+                for i, p in enumerate(predictions):
                     if p['confidence'] < self.CONFIDENCE_THRESHOLD:
-                        logger.warning(f"[{cam_id}] Rejected digit '{p['digit']}' due to low confidence: {p['confidence']:.2f}")
+                        low_conf_details.append(f"Digit {i} conf {p['confidence']:.2f} < {self.CONFIDENCE_THRESHOLD}")
                         all_confident = False
-                        break
 
                     valid_digits_str.append(p['digit'])
                     confidences.append(p['confidence'])
 
                 if not all_confident:
-                    continue # Skip this frame entirely if any digit is uncertain
+                    # Send failure result
+                    self.result_queue.put({
+                        'type': 'error',
+                        'camera_id': cam_id,
+                        'message': f"Low confidence: {', '.join(low_conf_details)}",
+                        'digits': valid_digits_str
+                    })
+                    continue
 
                 if not valid_digits_str: continue
 
@@ -116,20 +130,35 @@ class InferenceWorker:
                 if self.MIN_VALUE <= final_number <= self.MAX_VALUE:
                     avg_conf = float(np.mean(confidences))
                     self.result_queue.put({
+                        'type': 'success',
                         'camera_id': cam_id,
                         'value': final_number,
                         'digits': valid_digits_str,
                         'confidence': avg_conf
                     })
-                    logger.info(f"[{cam_id}] Valid reading: {final_number} (Avg Conf: {avg_conf:.2f})")
                 else:
Ignored.") + # Send range error result + self.result_queue.put({ + 'type': 'error', + 'camera_id': cam_id, + 'message': f"Value {final_number} out of range ({self.MIN_VALUE}-{self.MAX_VALUE})", + 'value': final_number + }) except ValueError: - logger.warning(f"[{cam_id}] Could not parse digits into integer: {valid_digits_str}") + self.result_queue.put({ + 'type': 'error', + 'camera_id': cam_id, + 'message': f"Parse error: {valid_digits_str}" + }) except Exception as e: logger.error(f"Inference error for {cam_id}: {e}") + self.result_queue.put({ + 'type': 'error', + 'camera_id': cam_id, + 'message': str(e) + }) def _crop_rois(self, image, roi_list): cropped_images = [] @@ -154,48 +183,28 @@ class InferenceWorker: input_index = self.input_details[0]['index'] output_index = self.output_details[0]['index'] - # Preprocess all images into a single batch array - # Shape: [N, 32, 20, 3] (assuming model expects 32x20 rgb) batch_input = [] - target_h, target_w = 32, 20 # Based on your previous code logic + target_h, target_w = 32, 20 for img in images: - # Resize roi_resized = cv2.resize(img, (target_w, target_h)) - # Color roi_rgb = cv2.cvtColor(roi_resized, cv2.COLOR_BGR2RGB) - # Normalize roi_norm = roi_rgb.astype(np.float32) batch_input.append(roi_norm) - # Create batch tensor input_tensor = np.array(batch_input) - # --- DYNAMIC RESIZING --- - # TFLite models have a fixed input size (usually batch=1). - # We must resize the input tensor to match our current batch size (N). - - # 1. Resize input tensor self.interpreter.resize_tensor_input(input_index, [num_images, target_h, target_w, 3]) - - # 2. Re-allocate tensors self.interpreter.allocate_tensors() - - # 3. Run Inference self.interpreter.set_tensor(input_index, input_tensor) self.interpreter.invoke() - # 4. Get Results output_data = self.interpreter.get_tensor(output_index) - # Result shape is [N, 10] (logits or probabilities for 10 digits) results = [] for i in range(num_images): - # Calculate softmax to get probabilities (if model output is logits) - # If model output is already softmax, this is redundant but usually harmless if sum is approx 1 logits = output_data[i] probs = np.exp(logits) / np.sum(np.exp(logits)) - digit_class = np.argmax(probs) confidence = probs[digit_class]