From 8d1b45ce738345cd06f62113770f58abdc9892e0 Mon Sep 17 00:00:00 2001
From: Bora
Date: Thu, 1 Jan 2026 11:20:05 +0100
Subject: [PATCH] 3

---
 app.py       | 218 +++++++++++++++-------
 inference.py | 253 +++++++++++++++++---------
 2 files changed, 323 insertions(+), 148 deletions(-)
z<&#$-;=GH$Sa%l0@yt{ts^}DwhZ)f>}?U&)h z!E~ru4(?fbT@LoHTIAs2#j|kJE{A$n-j_pttDSP_$l|$zl?XLLLrZ6lZdyE@x%R{l zp035&jX}ovLsQH0^on=o=2}4N8O${e$xTBU#}C2AvZReWA)t}Jt)(I z5#Nfh6GSCK7?u74$zOb8(cXBk&(C(YV^eqk-^+FOTk4_>%|> zA<&Ni@_2b9P+t=G5=@mab(AixNI?^iss}ENA{7!^n#s*|cz0|kCP02ZY)z3eKM)mFz z(`OkK(ZwOA*-zGE>IR(Pe+PBQ#}fVx0663%nI}9_>CF@L9sJ7^jyw35CtP>%FHhL- z;9s7ol}c}(*e&VbJW(&{-zT== every_s: + _last_log[key] = now + logger.log(level, msg) + # --- Main Processing Loop (Refactored) --- last_processed_time = {} def process_all_cameras(): - """ - Revised Loop with Rate Limiting - """ - DETECTION_INTERVAL = 10 # Configurable interval (seconds) + """Revised loop with rate limiting + debug instrumentation.""" + DETECTION_INTERVAL = int(_cfg("DETECTION_INTERVAL", default=10)) + hb_last = 0.0 while True: try: + # Heartbeat (proves loop is alive even when no publishes happen) + now = time.time() + if now - hb_last >= 5.0: + hb_last = now + in_q = getattr(inference_worker, "input_queue", None) + out_q = getattr(inference_worker, "result_queue", None) + logger.info( + "HB mainloop alive; in_q=%s out_q=%s dropped=%s processed=%s last_invoke_s=%s", + (in_q.qsize() if in_q else "n/a"), + (out_q.qsize() if out_q else "n/a"), + getattr(inference_worker, "dropped_tasks", "n/a"), + getattr(inference_worker, "processed_tasks", "n/a"), + getattr(inference_worker, "last_invoke_secs", "n/a"), + ) + # --- Part 1: Process Results --- while True: result = inference_worker.get_result() if not result: break - cam_id = result['camera_id'] + cam_id = result.get('camera_id') + + # End-to-end latency tracing + task_ts = result.get("task_ts") + if task_ts is not None: + try: + age = time.time() - float(task_ts) + logger.info( + "Result cam=%s type=%s task_id=%s age_s=%.3f timing=%s", + cam_id, + result.get("type"), + result.get("task_id"), + age, + result.get("timing_s"), + ) + except Exception: + pass - # Check Result Type if result.get('type') == 'success': val = result['value'] conf = result.get('confidence') - # Update State & Publish camera_manager.results[cam_id] = val publish_detected_number(cam_id, val, conf) - elif result.get('type') == 'error': - # Log the error (Range or Confidence or Parse) - # This ensures the log appears exactly when the result is processed msg = result.get('message', 'Unknown error') - logger.warning(f"[{cam_id}] Detection skipped: {msg}") + logger.warning("[%s] Detection skipped: %s", cam_id, msg) # --- Part 2: Feed Frames --- camera_manager.load_roi_config() @@ -118,54 +173,73 @@ def process_all_cameras(): if not camera_data.get("active", True): continue - # RATE LIMIT CHECK current_time = time.time() - last_time = last_processed_time.get(camera_id, 0) + last_time = last_processed_time.get(camera_id, 0.0) if current_time - last_time < DETECTION_INTERVAL: - continue # Skip this camera, it's too soon + log_rl( + logging.DEBUG, + f"{camera_id}:rate", + f"[{camera_id}] skip: rate limit ({current_time - last_time:.2f}s<{DETECTION_INTERVAL}s)", + every_s=30, + ) + continue stream = camera_data.get("stream") - if not stream: continue + if not stream: + log_rl(logging.WARNING, f"{camera_id}:nostream", f"[{camera_id}] skip: no stream", every_s=10) + continue - # Warmup Check - if (current_time - stream.start_time) < 5: + # Warmup check + start_time = getattr(stream, "start_time", getattr(stream, "starttime", None)) + if start_time is not None and (current_time - start_time) < 5: + log_rl(logging.DEBUG, f"{camera_id}:warmup", f"[{camera_id}] skip: warmup", every_s=10) continue frame = stream.read() if frame is 
-                if np.std(frame) < 10:
+                frame_std = float(np.std(frame))
+                if frame_std < 5:
+                    log_rl(
+                        logging.INFO,
+                        f"{camera_id}:lowstd",
+                        f"[{camera_id}] skip: low frame std={frame_std:.2f} (<5) (disturbed/blank/frozen?)",
+                        every_s=5,
+                    )
+                    mqtt_client.publish(f"{Config.MQTT_TOPIC}/{camera_id}/status", "disturbed")
                     continue
 
                 roi_list = camera_manager.rois.get(camera_id, [])
                 if not roi_list:
+                    log_rl(logging.WARNING, f"{camera_id}:norois", f"[{camera_id}] skip: no ROIs", every_s=30)
                     continue
 
-                # SEND TO WORKER
-                inference_worker.add_task(camera_id, roi_list, frame)
-
-                # Update last processed time
+                inference_worker.add_task(camera_id, roi_list, frame, frame_std=frame_std)
                 last_processed_time[camera_id] = current_time
 
-            # Sleep briefly to prevent CPU spinning, but keep it responsive for results
             time.sleep(0.1)
 
         except Exception as e:
-            logger.error(f"Global process loop error: {e}")
+            logger.error("Global process loop error: %s", e)
            traceback.print_exc()
             time.sleep(5)
 
+
 # --- Flask Routes ---
 @app.route('/')
 def index():
     return render_template('index.html')
 
+
 @app.route('/cameras', methods=['GET'])
 def get_cameras():
     return jsonify(camera_manager.get_camera_list())
 
+
 @app.route('/video/<camera_id>')
 def video_feed(camera_id):
     def generate():
@@ -174,11 +248,16 @@ def video_feed(camera_id):
             if frame is not None:
                 ret, jpeg = cv2.imencode('.jpg', frame)
                 if ret:
-                    yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n\r\n')
+                    yield (
+                        b'--frame\r\n'
+                        b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n\r\n'
+                    )
             else:
                 time.sleep(0.1)
+
     return Response(generate(), mimetype='multipart/x-mixed-replace; boundary=frame')
 
+
 @app.route('/snapshot/<camera_id>')
 def snapshot(camera_id):
     frame = camera_manager.get_frame(camera_id)
@@ -188,6 +267,7 @@ def snapshot(camera_id):
         return Response(jpeg.tobytes(), mimetype='image/jpeg')
     return 'No frame available', 404
 
+
 @app.route('/rois/<camera_id>', methods=['GET'])
 def get_rois(camera_id):
     try:
@@ -218,28 +298,34 @@ def get_rois(camera_id):
                 "y": int(round(roi["y"] * scaleY)),
                 "width": int(round(roi["width"] * scaleX)),
                 "height": int(round(roi["height"] * scaleY)),
-                "angle": roi["angle"]
+                "angle": roi.get("angle", 0),
             })
+
         return jsonify(scaled_rois)
+
     except Exception as e:
         return jsonify({"error": str(e)}), 500
 
+
 @app.route("/save_rois", methods=["POST"])
 def save_rois_api():
     data = request.json
+
     camera_id = data.get("camera_id")
     new_rois = data.get("rois")
     img_width = data.get("img_width")
     img_height = data.get("img_height")
 
-    if not camera_id or new_rois is None: return jsonify({"success": False})
+    if not camera_id or new_rois is None:
+        return jsonify({"success": False})
 
     cam = camera_manager.cameras.get(camera_id)
-    if not cam: return jsonify({"success": False})
+    if not cam:
+        return jsonify({"success": False})
 
     stream = cam.get("stream")
-    real_w = stream.width if stream and stream.width else cam["width"]
-    real_h = stream.height if stream and stream.height else cam["height"]
+    real_w = stream.width if stream and getattr(stream, "width", None) else cam["width"]
+    real_h = stream.height if stream and getattr(stream, "height", None) else cam["height"]
 
     scaleX = real_w / img_width if img_width else 1
     scaleY = real_h / img_height if img_height else 1
@@ -252,21 +338,24 @@ def save_rois_api():
             "y": int(round(roi["y"] * scaleY)),
             "width": int(round(roi["width"] * scaleX)),
             "height": int(round(roi["height"] * scaleY)),
-            "angle": roi["angle"]
+ "angle": roi.get("angle", 0), }) camera_manager.rois[camera_id] = scaled_rois return jsonify(camera_manager.save_roi_config()) + @app.route('/crop', methods=['POST']) def crop(): data = request.json + camera_id = data.get('camera_id') scaleX = data.get('scaleX', 1) scaleY = data.get('scaleY', 1) frame = camera_manager.get_frame(camera_id) - if frame is None: return jsonify({'error': 'No frame'}), 500 + if frame is None: + return jsonify({'error': 'No frame'}), 500 roi_list = camera_manager.rois.get(camera_id, []) cropped_images = crop_image_for_ui(frame, roi_list, scaleX, scaleY) @@ -279,31 +368,29 @@ def crop(): return jsonify({'cropped_images': cropped_base64_list}) + @app.route('/detect_digits', methods=['POST']) def detect_digits(): """Manual trigger: Runs inference immediately and returns result with validation.""" data = request.json + camera_id = data.get('camera_id') if not camera_id: return jsonify({'error': 'Invalid camera ID'}), 400 - # 1. Get Frame frame = camera_manager.get_frame(camera_id) if frame is None: return jsonify({'error': 'Failed to capture image'}), 500 - # 2. Get ROIs roi_list = camera_manager.rois.get(camera_id, []) if not roi_list: return jsonify({'error': 'No ROIs defined'}), 400 - # 3. Crop cropped_images = crop_image_for_ui(frame, roi_list, scaleX=1, scaleY=1) if not cropped_images: return jsonify({'error': 'Failed to crop ROIs'}), 500 try: - # 4. Run Inference Synchronously predictions = inference_worker.predict_batch(cropped_images) valid_digits_str = [] @@ -318,30 +405,24 @@ def detect_digits(): if p['confidence'] < CONFIDENCE_THRESHOLD: msg = f"Digit {i} ('{p['digit']}') rejected: conf {p['confidence']:.2f} < {CONFIDENCE_THRESHOLD}" rejected_reasons.append(msg) - logger.warning(f"[Manual] {msg}") + logger.warning("[Manual] %s", msg) else: valid_digits_str.append(p['digit']) confidences.append(p['confidence']) if len(valid_digits_str) != len(predictions): - return jsonify({ - 'error': 'Low confidence detection', - 'details': rejected_reasons, - 'raw': predictions - }), 400 + return jsonify({'error': 'Low confidence detection', 'details': rejected_reasons, 'raw': predictions}), 400 final_number_str = "".join(valid_digits_str) try: final_number = int(final_number_str) - # Range Check if not (MIN_VALUE <= final_number <= MAX_VALUE): msg = f"Value {final_number} out of range ({MIN_VALUE}-{MAX_VALUE})" - logger.warning(f"[Manual] {msg}") + logger.warning("[Manual] %s", msg) return jsonify({'error': 'Value out of range', 'value': final_number}), 400 - # Valid result - avg_conf = float(np.mean(confidences)) + avg_conf = float(np.mean(confidences)) if confidences else None publish_detected_number(camera_id, final_number, avg_conf) camera_manager.results[camera_id] = final_number @@ -349,25 +430,28 @@ def detect_digits(): 'detected_digits': valid_digits_str, 'final_number': final_number, 'confidences': confidences, - 'avg_confidence': avg_conf + 'avg_confidence': avg_conf, }) except ValueError: - return jsonify({'error': 'Could not parse digits', 'raw': valid_digits_str}), 500 + return jsonify({'error': 'Could not parse digits', 'raw': valid_digits_str}), 500 except Exception as e: - logger.error(f"Error during manual detection: {e}") + logger.error("Error during manual detection: %s", e) return jsonify({'error': str(e)}), 500 + @app.route('/update_camera_config', methods=['POST']) def update_camera_config(): data = request.json success = camera_manager.update_camera_flip(data.get("camera_id"), data.get("flip_type")) return jsonify({"success": success}) + # --- 
diff --git a/inference.py b/inference.py
index 94d33f0..1b31e69 100644
--- a/inference.py
+++ b/inference.py
@@ -1,66 +1,98 @@
-import threading
-import queue
-import time
 import logging
+import queue
+import threading
+import time
+
 import cv2
 import numpy as np
 import tflite_runtime.interpreter as tflite
+
 from config import Config
 
 logger = logging.getLogger(__name__)
 
+
+def _cfg(*names, default=None):
+    for n in names:
+        if hasattr(Config, n):
+            return getattr(Config, n)
+    return default
+
+
 class InferenceWorker:
     def __init__(self):
         self.input_queue = queue.Queue(maxsize=10)
         self.result_queue = queue.Queue()
         self.running = False
+
         self.interpreter = None
         self.input_details = None
         self.output_details = None
         self.lock = threading.Lock()
 
-        # Validation thresholds
-        self.CONFIDENCE_THRESHOLD = 0.80  # Minimum confidence (0-1) to accept a digit
-        self.MIN_VALUE = 5    # Minimum allowed temperature value
-        self.MAX_VALUE = 100  # Maximum allowed temperature value
+        # Debug counters / telemetry
+        self.task_seq = 0
+        self.dropped_tasks = 0
+        self.processed_tasks = 0
+        self.last_invoke_secs = None
+
+        # Validation thresholds
+        self.CONFIDENCE_THRESHOLD = 0.10
+        self.MIN_VALUE = 5
+        self.MAX_VALUE = 100
 
-        # Load Model
         self.load_model()
 
     def load_model(self):
         try:
-            logger.info(f"Loading TFLite model from: {Config.MODEL_PATH}")
-            self.interpreter = tflite.Interpreter(model_path=Config.MODEL_PATH)
+            model_path = _cfg("MODEL_PATH", "MODELPATH", default=None)
+            logger.info("Loading TFLite model from: %s", model_path)
+
+            self.interpreter = tflite.Interpreter(model_path=model_path)
             self.interpreter.allocate_tensors()
+
             self.input_details = self.interpreter.get_input_details()
             self.output_details = self.interpreter.get_output_details()
 
-            # Store original input shape for resizing logic
             self.original_input_shape = self.input_details[0]['shape']
-            logger.info(f"Model loaded. Default input shape: {self.original_input_shape}")
+            logger.info("Model loaded. Default input shape: %s", self.original_input_shape)
+
         except Exception as e:
-            logger.critical(f"Failed to load TFLite model: {e}")
+            logger.critical("Failed to load TFLite model: %s", e)
             self.interpreter = None
     def start(self):
-        if self.running: return
+        if self.running:
+            return
         self.running = True
         threading.Thread(target=self._worker_loop, daemon=True).start()
         logger.info("Inference worker started.")
 
-    def add_task(self, camera_id, rois, frame):
+    def add_task(self, camera_id, rois, frame, frame_std=None):
         """Add task (non-blocking)."""
-        if not self.interpreter: return
+        if not self.interpreter:
+            return
+
+        self.task_seq += 1
+        task = {
+            'camera_id': camera_id,
+            'rois': rois,
+            'frame': frame,
+            'timestamp': time.time(),
+            'task_id': self.task_seq,
+            'frame_std': frame_std,
+        }
+
         try:
-            task = {
-                'camera_id': camera_id,
-                'rois': rois,
-                'frame': frame,
-                'timestamp': time.time()
-            }
             self.input_queue.put(task, block=False)
         except queue.Full:
-            pass
+            self.dropped_tasks += 1
+            logger.warning(
+                "add_task drop cam=%s qsize=%d dropped=%d",
+                camera_id,
+                self.input_queue.qsize(),
+                self.dropped_tasks,
+            )
 
     def get_result(self):
         try:
@@ -68,6 +100,14 @@ class InferenceWorker:
         except queue.Empty:
             return None
 
+    def _put_result(self, d):
+        """Best-effort put so failures never go silent."""
+        try:
+            self.result_queue.put(d, block=False)
+        except Exception:
+            # Should be extremely rare; log + drop
+            logger.exception("Failed to enqueue result")
+
     def _worker_loop(self):
         while self.running:
             try:
@@ -78,86 +118,122 @@
             cam_id = task['camera_id']
             rois = task['rois']
             frame = task['frame']
+            task_id = task.get('task_id')
+            task_ts = task.get('timestamp')
 
             try:
-                # 1. Crop all ROIs
+                age_s = (time.time() - task_ts) if task_ts else None
+                logger.info(
+                    "Worker got task cam=%s task_id=%s age_s=%s frame_std=%s rois=%d in_q=%d",
+                    cam_id,
+                    task_id,
+                    (f"{age_s:.3f}" if age_s is not None else "n/a"),
+                    task.get('frame_std'),
+                    len(rois) if rois else 0,
+                    self.input_queue.qsize(),
+                )
+
+                t0 = time.time()
                 crops = self._crop_rois(frame, rois)
+                t_crop = time.time()
+
                 if not crops:
-                    # Report failure to queue so main loop knows we tried
-                    self.result_queue.put({
+                    self._put_result({
                         'type': 'error',
                         'camera_id': cam_id,
-                        'message': 'No ROIs cropped'
+                        'message': 'No ROIs cropped',
+                        'task_id': task_id,
+                        'task_ts': task_ts,
+                        'timing_s': {'crop': t_crop - t0, 'total': t_crop - t0},
                     })
                     continue
 
-                # 2. Batch Predict
                 predictions = self.predict_batch(crops)
+                t_pred = time.time()
 
-                # 3. Validation Logic
                 valid_digits_str = []
                 confidences = []
-
-                all_confident = True
                 low_conf_details = []
 
                 for i, p in enumerate(predictions):
                     if p['confidence'] < self.CONFIDENCE_THRESHOLD:
-                        low_conf_details.append(f"Digit {i} conf {p['confidence']:.2f} < {self.CONFIDENCE_THRESHOLD}")
-                        all_confident = False
+                        low_conf_details.append(
+                            f"Digit {i} conf {p['confidence']:.2f} < {self.CONFIDENCE_THRESHOLD}"
+                        )
                     valid_digits_str.append(p['digit'])
                     confidences.append(p['confidence'])
 
-                if not all_confident:
-                    # Send failure result
-                    self.result_queue.put({
+                if low_conf_details:
+                    self._put_result({
                         'type': 'error',
                         'camera_id': cam_id,
                         'message': f"Low confidence: {', '.join(low_conf_details)}",
-                        'digits': valid_digits_str
+                        'digits': valid_digits_str,
+                        'task_id': task_id,
+                        'task_ts': task_ts,
+                        'timing_s': {'crop': t_crop - t0, 'predict': t_pred - t_crop, 'total': t_pred - t0},
                     })
                     continue
 
                 if not valid_digits_str:
-                    continue
-
-                # Parse number
-                try:
-                    final_number_str = "".join(valid_digits_str)
-                    final_number = int(final_number_str)
-
-                    # Check Range
-                    if self.MIN_VALUE <= final_number <= self.MAX_VALUE:
-                        avg_conf = float(np.mean(confidences))
-                        self.result_queue.put({
-                            'type': 'success',
-                            'camera_id': cam_id,
-                            'value': final_number,
-                            'digits': valid_digits_str,
-                            'confidence': avg_conf
-                        })
-                    else:
-                        # Send range error result
-                        self.result_queue.put({
-                            'type': 'error',
-                            'camera_id': cam_id,
-                            'message': f"Value {final_number} out of range ({self.MIN_VALUE}-{self.MAX_VALUE})",
-                            'value': final_number
-                        })
-
-                except ValueError:
-                    self.result_queue.put({
+                    self._put_result({
                         'type': 'error',
                         'camera_id': cam_id,
-                        'message': f"Parse error: {valid_digits_str}"
+                        'message': 'No digits produced',
+                        'task_id': task_id,
+                        'task_ts': task_ts,
+                        'timing_s': {'crop': t_crop - t0, 'predict': t_pred - t_crop, 'total': t_pred - t0},
+                    })
+                    continue
+
+                final_number_str = "".join(valid_digits_str)
+
+                try:
+                    final_number = int(final_number_str)
+                except ValueError:
+                    self._put_result({
+                        'type': 'error',
+                        'camera_id': cam_id,
+                        'message': f"Parse error: {valid_digits_str}",
+                        'task_id': task_id,
+                        'task_ts': task_ts,
+                        'timing_s': {'crop': t_crop - t0, 'predict': t_pred - t_crop, 'total': t_pred - t0},
+                    })
+                    continue
+
+                if self.MIN_VALUE <= final_number <= self.MAX_VALUE:
+                    avg_conf = float(np.mean(confidences)) if confidences else None
+                    self._put_result({
+                        'type': 'success',
+                        'camera_id': cam_id,
+                        'value': final_number,
+                        'digits': valid_digits_str,
+                        'confidence': avg_conf,
+                        'task_id': task_id,
+                        'task_ts': task_ts,
+                        'timing_s': {'crop': t_crop - t0, 'predict': t_pred - t_crop, 'total': t_pred - t0},
+                    })
+                else:
+                    self._put_result({
+                        'type': 'error',
+                        'camera_id': cam_id,
+                        'message': f"Value {final_number} out of range ({self.MIN_VALUE}-{self.MAX_VALUE})",
+                        'value': final_number,
+                        'task_id': task_id,
+                        'task_ts': task_ts,
+                        'timing_s': {'crop': t_crop - t0, 'predict': t_pred - t_crop, 'total': t_pred - t0},
                     })
 
-            except Exception as e:
-                logger.error(f"Inference error for {cam_id}: {e}")
-                self.result_queue.put({
+                self.processed_tasks += 1
+
+            except Exception:
+                logger.exception("Inference error cam=%s task_id=%s", cam_id, task_id)
+                self._put_result({
                     'type': 'error',
                     'camera_id': cam_id,
-                    'message': str(e)
+                    'message': 'Exception during inference; see logs',
+                    'task_id': task_id,
+                    'task_ts': task_ts,
                 })
 
     def _crop_rois(self, image, roi_list):
@@ -165,7 +241,7 @@ class InferenceWorker:
         for roi in roi_list:
             try:
                 x, y, w, h = roi['x'], roi['y'], roi['width'], roi['height']
-                cropped = image[y:y+h, x:x+w]
+                cropped = image[y:y + h, x:x + w]
                 if cropped.size > 0:
                     cropped_images.append(cropped)
             except Exception:
@@ -173,12 +249,17 @@ class InferenceWorker:
         return cropped_images
 
     def predict_batch(self, images):
-        """Run inference on a batch of images at once. Returns list of dicts: {'digit': str, 'confidence': float}"""
+        """Run inference on a batch of images.
+
+        Returns list of dicts: {'digit': str, 'confidence': float}
+        """
         with self.lock:
-            if not self.interpreter: return []
+            if not self.interpreter:
+                return []
 
             num_images = len(images)
-            if num_images == 0: return []
+            if num_images == 0:
+                return []
 
             input_index = self.input_details[0]['index']
             output_index = self.output_details[0]['index']
@@ -194,23 +275,33 @@ class InferenceWorker:
 
             input_tensor = np.array(batch_input)
 
+            # NOTE: Keeping original behavior (resize + allocate) but timing it.
             self.interpreter.resize_tensor_input(input_index, [num_images, target_h, target_w, 3])
             self.interpreter.allocate_tensors()
+
             self.interpreter.set_tensor(input_index, input_tensor)
+
+            t0 = time.time()
             self.interpreter.invoke()
+            self.last_invoke_secs = time.time() - t0
+            if self.last_invoke_secs > 1.0:
+                logger.warning("Slow invoke: %.3fs (batch=%d)", self.last_invoke_secs, num_images)
 
             output_data = self.interpreter.get_tensor(output_index)
 
             results = []
             for i in range(num_images):
                 logits = output_data[i]
-                probs = np.exp(logits) / np.sum(np.exp(logits))
-                digit_class = np.argmax(probs)
-                confidence = probs[digit_class]
-                results.append({
-                    'digit': str(digit_class),
-                    'confidence': float(confidence)
-                })
+                # More stable softmax
+                logits = logits - np.max(logits)
+                ex = np.exp(logits)
+                denom = np.sum(ex)
+                probs = (ex / denom) if denom != 0 else np.zeros_like(ex)
+
+                digit_class = int(np.argmax(probs))
+                confidence = float(probs[digit_class]) if probs.size else 0.0
+
+                results.append({'digit': str(digit_class), 'confidence': confidence})
 
             return results
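The softmax rewrite in predict_batch is the standard max-shift identity: since exp(x - m) / sum(exp(x - m)) = exp(x) / sum(exp(x)) for any constant m, subtracting the row maximum changes no probability, but it keeps every exponent at or below zero so exp() cannot overflow. A standalone check of both properties (pure NumPy; not code from this patch):

    import numpy as np

    def softmax_naive(logits):
        ex = np.exp(logits)
        return ex / ex.sum()

    def softmax_stable(logits):
        # Shift by the max: the largest exponent becomes 0, so exp() stays in (0, 1]
        ex = np.exp(logits - np.max(logits))
        return ex / ex.sum()

    small = np.array([2.0, 1.0, 0.1])
    assert np.allclose(softmax_naive(small), softmax_stable(small))  # same distribution

    big = np.array([1000.0, 1001.0, 1002.0])
    print(softmax_naive(big))   # [nan nan nan] after an overflow warning
    print(softmax_stable(big))  # ~[0.090 0.245 0.665], well-defined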