From c310a45a3eb5fd469bd163e2d02c5ef26a0e1245 Mon Sep 17 00:00:00 2001 From: JamesTheGiblet Date: Sun, 28 Dec 2025 17:25:03 +0000 Subject: [PATCH] feat: add initial implementation of BuddAI web interface with chat functionality, file upload, and theme toggle --- buddai_v3.py | 91 ++- data/conversations.db | Bin 54071296 -> 55025664 bytes data/uploads/buddai_v3/buddai_v3.py | 882 ++++++++++++++++++++++++++++ data/uploads/index.html | 313 ++++++++++ frontend/index.html | 54 +- 5 files changed, 1310 insertions(+), 30 deletions(-) create mode 100644 data/uploads/buddai_v3/buddai_v3.py create mode 100644 data/uploads/index.html diff --git a/buddai_v3.py b/buddai_v3.py index 8dd5428..97f46fb 100644 --- a/buddai_v3.py +++ b/buddai_v3.py @@ -15,10 +15,12 @@ from pathlib import Path import http.client import re # noqa: F401 from typing import Optional +import zipfile +import shutil # Server dependencies try: - from fastapi import FastAPI + from fastapi import FastAPI, UploadFile, File from fastapi.middleware.cors import CORSMiddleware from fastapi.staticfiles import StaticFiles from pydantic import BaseModel @@ -205,18 +207,22 @@ class BuddAI: if "forge" in query.lower(): specific_terms.append("Forge") keywords.extend(specific_terms) - # Search in function names and content - search_conditions = [] - for keyword in keywords: - search_conditions.append(f"function_name LIKE '%{keyword}%'") - search_conditions.append(f"content LIKE '%{keyword}%'") - if not search_conditions: + + if not keywords: print("āŒ No search terms found") conn.close() return "No search terms provided." - search_query = " OR ".join(search_conditions) - sql = f"SELECT repo_name, file_path, function_name, content FROM repo_index WHERE {search_query} LIMIT 10" - cursor.execute(sql) + + # Build parameterized query + conditions = [] + params = [] + for keyword in keywords: + conditions.append("(function_name LIKE ? OR content LIKE ? 
OR repo_name LIKE ?)") + params.extend([f"%{keyword}%", f"%{keyword}%", f"%{keyword}%"]) + + sql = f"SELECT repo_name, file_path, function_name, content FROM repo_index WHERE {' OR '.join(conditions)} ORDER BY last_modified DESC LIMIT 10" + + cursor.execute(sql, params) results = cursor.fetchall() conn.close() if not results: @@ -237,7 +243,7 @@ class BuddAI: snippet = '\n'.join(snippet_lines) output += f"**{i}. {func}()** in {repo}\n" output += f" šŸ“ {Path(file_path).name}\n" - output += f" ```cpp\n{snippet}\n ```\n" + output += f"\n```cpp\n{snippet}\n```\n" output += f" ---\n\n" return output @@ -355,7 +361,7 @@ class BuddAI: count = 0 for file_path in path.rglob('*'): - if file_path.is_file() and file_path.suffix in ['.py', '.ino', '.cpp', '.h']: + if file_path.is_file() and file_path.suffix in ['.py', '.ino', '.cpp', '.h', '.js', '.jsx', '.html', '.css']: try: with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: content = f.read() @@ -376,6 +382,15 @@ class BuddAI: elif file_path.suffix in ['.ino', '.cpp', '.h']: matches = re.findall(r'\b(?:void|int|bool|float|double|String|char)\s+(\w+)\s*\(', content) functions.extend(matches) + + # JS/Web parsing + elif file_path.suffix in ['.js', '.jsx']: + matches = re.findall(r'(?:function\s+(\w+)|const\s+(\w+)\s*=\s*(?:async\s*)?\(?.*?\)?\s*=>)', content) + functions.extend([m[0] or m[1] for m in matches if m[0] or m[1]]) + + # HTML/CSS - Index as whole file + elif file_path.suffix in ['.html', '.css']: + functions.append("file_content") # Determine repo name try: @@ -570,16 +585,19 @@ Identity Rules: Forge Theory Snippet: float applyForge(float current, float target, float k) { return target + (current - target) * exp(-k); } """ - messages = [{"role": "system", "content": identity}] + messages = [] - # Add recent context - # Check if 'message' is already the last item in context (Chat flow) or new (Build flow) - history = self.context_messages[-5:] + # Only add identity if not already in recent context + 
recent_system = [m for m in self.context_messages[-5:] if m.get('role') == 'system'] + if not recent_system: + messages.append({"role": "system", "content": identity}) - if history and history[-1]['content'] == message: - messages.extend(history) - else: - messages.extend(history) + # Add conversation history (excluding old system messages) + history = [m for m in self.context_messages[-5:] if m.get('role') != 'system'] + messages.extend(history) + + # Add current message if it's not already the last item + if not history or history[-1].get('content') != message: messages.append({"role": "user", "content": message}) body = { @@ -818,6 +836,37 @@ if SERVER_AVAILABLE: async def history_endpoint(): return {"history": server_buddai.context_messages} + @app.post("/api/upload") + async def upload_repo(file: UploadFile = File(...)): + try: + uploads_dir = DATA_DIR / "uploads" + uploads_dir.mkdir(exist_ok=True) + + file_location = uploads_dir / file.filename + with open(file_location, "wb") as buffer: + shutil.copyfileobj(file.file, buffer) + + if file.filename.endswith(".zip"): + extract_path = uploads_dir / file_location.stem + with zipfile.ZipFile(file_location, 'r') as zip_ref: + zip_ref.extractall(extract_path) + server_buddai.index_local_repositories(extract_path) + file_location.unlink() # Cleanup zip + return {"message": f"āœ… Successfully indexed {file.filename}"} + else: + # Support single code files by moving them to a folder and indexing + if file_location.suffix in ['.py', '.ino', '.cpp', '.h', '.js', '.jsx', '.html', '.css']: + target_dir = uploads_dir / file_location.stem + target_dir.mkdir(exist_ok=True) + final_path = target_dir / file.filename + shutil.move(str(file_location), str(final_path)) + server_buddai.index_local_repositories(target_dir) + return {"message": f"āœ… Successfully indexed {file.filename}"} + + return {"message": f"āœ… Successfully uploaded {file.filename}"} + except Exception as e: + return {"message": f"āŒ Error: {str(e)}"} + def 
check_ollama(): try: conn = http.client.HTTPConnection(OLLAMA_HOST, OLLAMA_PORT, timeout=5) @@ -839,7 +888,7 @@ def main(): print("šŸš€ Starting BuddAI API Server on port 8000...") uvicorn.run(app, host="0.0.0.0", port=8000) else: - print("āŒ Server dependencies missing. Install: pip install fastapi uvicorn aiofiles") + print("āŒ Server dependencies missing. Install: pip install fastapi uvicorn aiofiles python-multipart") else: buddai = BuddAI() buddai.run() diff --git a/data/conversations.db b/data/conversations.db index ed116b1a097559ef2bcc3666f9ca4008beac217d..68267e32f82932807ec93510c03e53286c6f9fc2 100644 GIT binary patch delta 179241 zcmeHw33yyp`F`%5Bu%F25|VUpdYVwOG?`{5Nwbtd(1k6yXd%6wxszO&%uMdg zq!~&Ht#A~iNI@AC$pu*y6{Vt9MG19RM63%gxPbdE1^uZA`hU-znI!E*8*88E-{ni6 z_iN_fbIv{c`Of>^leyu(pU=JFso{CI&(FC#E34t!ee8VY?)}>yoZ*<|eC5t5Gb?hY zc1T$Q$HM^SY-1T+KT_s0{uWM=W92vf@rM1E1^3=QC&CRajjPQ-_#?kPF_589~ol{sv?)3iYS5EI7DMXe{JIfE`o?GNtsvMQySw~%y zc`7;j8lGGH_)V@MC2!7*w`9qBN))g!)uC2$iyV({sBbB7;$qy$%BtVj^Y|U-cg|FH z-TA`}<;vB2xZsN5s^AX6vjxu)JXi23g69dIFSt|ise%^>K27lHg3l0qrr?EwpCx#a z;KhQM2tG^j*@Djze6Ha01fMVX0>KvwUMlz^!I|KT1uql4T=266Un2N9f>#J$DR`CO z)q*b-yhiX^!Citc6TD7vx8NSZ>jiHRyixGwf;S1?EO?9Ht%9!*{9M7?1V2yk^95fi zc)Q@M1Ya%q8o}2J-XZt}f?p_jr{Hf8{35~E3BF$Niv`~xILfr&1lI)LF1S~4pWwRSe!+VL?-kq-{A$6k5j-Gx zpWs2kLxP6|?-$$@JR*2h@R;BOg6|M~Q1H0m*9yK<@aqKMCHVD%4+(yQ;JXFiBlw#H zzftg;1Q&w8S@5?AezV}W2>w>VZxwv6;I|3>Hog5M>0Lh!o< zf2ZK@68s*)-!1sPf)5LB34Wj8_Y3|W!5xfm|04MR3I12X|0ej~1%FlWe+d4X;I9k* zPr?6n3s-LA3ZMcGAREX5a)Bv89*_?>fvG?NFb$Xv%m8Krg}_-r5l{@20JDJEz#L#M zFb|jyEC3b)rNAP90gHh$pd2_GSOS~_Q~;Gg6;KT<1!{m=zy&M=>Hs(30qTJUpb=OO zGy%;(3(yLz0L}&4fb)R!ft5fzunJfWtO3>n9l!;^g+M3p2H+xK9k3p_7}x;F`r8O> z0=j_Bz$L(?z-7Q2fy;p_fGxmQ;7Z^spc~i*Xux*B3-|yX@B=+SFJJ&y1J?inpbrQF zAs`I&111mwqCgB70CoU_KpeOh*a=(*>;kR_hJYJ@-M}8;O~8%7O@IL247>%n8Mp;_ zD{w2Y7q|_08*n?Y54Z!k6L>rD4&W{z0o)C|6L=SJ5Abf_USJrofct>^f%gCp01pBW z0S^Q31>Ofd0=ysi0PsQJQQ$+shk?g{{lG_n$AOOm9|JxP8~{E6d=mH+@C5K_;4{FJ 
zzzFaZa1i(`@Hyb~z!!io0$&2Y415LnD)2P$HQ*WG>%gf-v+(|JO_Li z_#W_m;0M6-zz=~R0Y3(Q0=xjc2>cZI8Sr!97r-xpUje@cegpg#_#N zfvG?NFb$Xv%m8Krg}_-r5l{@20JDJEz#L#MFb|jyEC3b)rNAP90gHh$pd2_GSOS~_ zQ~;Gg6;KT<1!{m=zy&M=>Hs(30qTJUpb=OOGy%;(3(yLz0L}&4fb)R!ft5fzunJfW ztO3>n9l!;^g+M3p2H+xK9k3p_7}x;F`r8O>0=j_Bz$L(?z-7Q2fy;p_fGxmQ;7Z^s zpc~i*Xux*B3-|yX@B=+SFJJ&y1J?inpbrQFAs`I&111mwqCgB70CoU_KpeOh*a=(* z>;kR_hJYJ@-M}8;O~8%7O@IL247>%n8Mp;_D{w2Y7q|_08*n?Y54Z!k6L>rD4&W{z z0o)C|6L=SJ5Abf_USJrofct>^f%gCp01pBW0S^Q31>Ofd0=ysi0PsQJQQ$+shk?g{ z{lG_n$AOOm9|JxP8~{E6d=mH+@C5K_;4|uz{7E&tF7MUs0_iL67%8~nN%eaU>+5QP zy5PwhpH!#iDRZppw?3)P+SK45z7An78NL>F&F~;>{qO*6*>DuLaM*;+8V)vRzAMf5r1`!yKal2m zX?`fpkEHprG(VB%1!-QC=BLv9Oq!od^9yNyDb25>`L#5^k>lO|spr!-TgDUfEGG}EP-AmX=(U z)jn>MB{x*Dh;Ej37(pNN$AaFd5ei0Fv{#EV++gcGEzK8~jqdC0Z5_6b4fU?&jZN;B zx(}-UysV{*zPm6ZS!?&kW=HW*XOsm(p=%;6U|gfKcqmq3>TE~Iyv7Ljuuza0ePJ^+ zAU{MxeLC}od^$7rfELw#EE-}yQ`;eLdJWSX&?8JkHbc=~-DEwc7Vb5?5t)d_d`2W3 z(BjCm42=M+-KY7@SmOCVzdB=xbyfrd%zhA(kHmU<^hmOx{*cM~G&?iMo0)t*o}>kJ zvxfDBkXhaDH@t=(jMmssk7$0~bUB^n<>jo?&b;1-o2au8+a|NS+Ro~QdLXQu+gfG7 zAp~{1*d?gXke{ta1xGc+Xmxf*D^MiX6AJl|I}MfZ)nz)e=Dq1)%s_ZuL%IE|S2KOJ zvW|RgKnuk5h>LCRH6pSIs2o{ry8`Wen~0-r59myri%RR$j9?UySCIQ{+qQYbVdsID zvSWW&tOYkUs`-hwH_e=~0HK<@qilWq8@o5Puj#y`i?uONW8(^^ld*(#j(_cifsk3n zcH(35XW25gAsC3;EyWDKoe5L(8LpN6K)LZfy%);wv-Llu(5$)We}}%pUrn zRX9Uc8gi4#Sg$?ham=qm&>bj7Sq&@mBar+*d7okA$xi_z5@lK-kk}xUlA+NVNZ#6k z;oXbLARLOwkG>EyFaQRS*l0(RT@c}P3{#)sV}=YTgCHd8(teyisP&;vF;Do9j%a5N z&$d&S6NswCEe-x@@l;cf#!O5s!FXk2+s*I@_{LJTusK;nQPaQ-Vn(XupkPc^U#jlx z%o@>k}Tw+4C^5=>r)0<7Qepp<&Eghgan&n=M{AMVW1VxVS)> zUNTm(WmTgEwv>_rG|arWo~2IPyq9&^8In`pPA`U7bQdNIv;?My(VBF*Tw8-kVKj+y zwstToCmVK+9>+}Vi!i?#>Pu$SX=&HxDMi-8PkS8mhU_+X1wuP?v$AR|6Xlp(eJm2z zy#}VnRM{E2XW!TmPv7mfFJo+OxpFiI82)AYU^o=SylH5GGGsdW^z@?`X5})UKIGNn znKxTU(~*^bX-v6{PwdF8hq+iY9B?a({G+Qf%#h zqS`TUZbp!FC0R9}@i^v&lAU1Jl8SNVw`(fd*~gMj$Mct^a>3ShUbL1~EZLbV-H~aa z4uhd+dSr|yV;$@)RTq{dbCv8gmdSc~>rzre?)nRYt6 zP_Ffzo0+?=%)0-5sxrqR!*6`j<5+Fo3tPU}U-r{`Z)M2d=qO7T0kd_~i}kL@+IR0# 
zN6AptuC2kX!Odp8l`XReCOcO?>^!-~CqJiFYD{sdS!1kd$94wvU}Y*~)vjblhR|s+&9<_g6*a6PT`SSbbZ4we=5Oqg2+WKc ztj(BnYnVS>IrtSlQLdME=c|&PQxC@abQ5k``k^(aH7``o@(C0V=iz)6Ftg zQhi)UmBCZZy;Qb$b@fhTmy7L`?Xjz}s=8V>#ZK8==xIY^4S94te16Hgb2r<`(fuk% z8@9?NXBI^Lk1>EY^8@#Y?pRRpQ6t(E=1%$eBrt=H#Sxvw~sSw%G% z)8>F~cK5-JZR^ki5#2f@rYP&IZFf49I!sricTq#*J|)`wl^RQRm8vz;g?WeN!ms7( z+aF5o?pKPG`jJC_^*Gk$STDUZUuhjVTwkiTIc31RtV5sjI2PV@+2%Yeyb@h&3^cP-Z3`y+fI2eH5w9T~MjJdR*9dU}rFZ zvDOigqsOe0G1eyb?Ncge$PUmKg2y4f2;F7v-&(5fwm!1fp;TETjsm6Bnvt8W`5z;Y7Ndh`nEqH6)UQ;6G+dqBfzGGu0vtUcTZqpWhZ+ztBS22{y~ zIjHYom8)U~JV5(eV*hi6#VTvF`u$Us`MFMp3cWj_-KEU0!PW#hi`)CA)bCqr-B)g@ zNX;Y_SF*M?xQ8+`iNc35{_Y#dQ}U;$7Rgi;Yv1>?m9rtSJ_xQAGR5cI#g;mh>JfQ;bLT9cH*iBl-z226DZpYl$vV_P)VlrYJmpGj z=BH8WMEFrJ6K^){kBv+e~055ZciV z-@OMr?zSyu)n!*6eX~!;fcE{Ho4puD$aym6P%>)D@AhI~MVK5%n{+fyBwD!&o4oZH zv4L@mW@=QnvPje%&8?g!1FdXBSh{ywU<>YH)WG=&CW|Vk`mz2v8gOVdBzkz#`3GMS zgp;TRjB7!H*e!)&F1ZMAGO1K=-;TD>3_K&*C~%}kXXLicW=v0&sXW=*tRvM9c8=4H z(w!|Gokagvau&=_UrR1B=}$?w%l#cynKy(k;p&t7m&&qbJM``70$ouJWJm`*kVvJ^ zFm)jD$!O`&E=xPCV#A$^jM_&p8$aQ+rSFXO ztug<3w7TGMVjCCHQ8(#d#}_17{jwaXsr=}o46)RWvEqz1%UG+VV`F3_`(nC8at0WT zcBfh>GXB})DlrxggfzLokYi=67vORQu6N5x(DsR1*(G*J+3n;Mh5stCeXc@nw*N9) znb(?3DAJ7{iTi8XH@A1M>DiBmJmx!7bU*ORXm!b|{5c+5VUG5af_XLgT_A zEqT`^jr7e%rvaIDNv5$7Hn>R-E}ep;O3>|5GPXVD_gjUtu=z{4p3I$+c&auxd#+3? 
zwEb#3lAUmk9Zol`#lO#17Fl~A_c*4^9etz=Ju4y~Raxe`)(Dp+_Vwl#)uEatXJC)o z3A5at&W=kuJI=&oyOfez#J7ucg%R$MwJAT02E?Aktgy3OJX|?w&wNVYeN;K8w zGJDEsZQ9jt-G5<&W0rO4l4511)r>ATJMrl~N>;nV0X)bD%&-3XsfE+rzG_J z-xZ#B9$0C~(7}Uup&bUcxNVQMthP2;__DDb?*#siwej_AWw!raH^}A8j7pj=xun`F zKv!}F#tdM0pXR!{I!rdHSxOFf`4by3ydg8_!YWgV1r2K+)`qsSI+y(qtCNh|CgWQD zjw;95SD{F$gnhcC!t@}t3kU>8MD|Y?TmUR_0XP(4Bb;3Qp^W(9jA52eKaFWdDA|L< z_6DZ55B!4>MiH#tmBmVl6}wGE+wQw~iDS*YChTupJuR-L=9UJxdm6k&Redlk|6=t) zsXN`RPT#Xv8TL7|b~i04xiPWJdhxR^NBuJCjc!f#=B>%uynCR2>^E09K6(|uTm7r* z9d4yQQ5aSG6OUA8n?o4N!BD@}$~qeA+)j7~7+~?RtYb(@ay+IzCTHq12D~g(`eURw z7BvEpXC|sIXB3=G+M($it)Em%(sv!gB3?6Yy{NI5i;O6)F$dIzhJ+@P%-oSaXl69& zhZx>uBc!6B52alqgdc*}C(9g;`#>mzg-wMT^(*Z(Qx{Xc$i8&6Q;x}8I(1Hc(5r`| zQqn@@B}c`-3okeA2M*t;?pH0oJsXO{F6axKeeX9(X0+o zYbPDxld`q*(AswZ@6HsL?j>;vsuYP!Brc`IC=!=QTq1EvG9M%^k+_spFKsys8=R9< zTpFINxMUq$T)L0Mr4uVI-G9c5OYb3ZiNqzwNL(UuiNqxmmq=Xd0I_?dazWyfd?TC0 zrGG2mk+_tWdrn?)>8vBgCD6?eX41_M5#3C5Go57^eV17tt)R0EV=pU789n1HK>EdR zI?F(38R#qnon`p{dzRth$iLGv_G9~pg)`inR)!5>N;u9 zHhg%pV$);C7Mu2y*mPpWrjMNQV$_yc?Y z#h)f4vFYR&n=;Qfd@Pf0{y5RiL^l)NOms8R%|tg7-CRlE(;!bXrGHKdy7|Cl>E=%y zn{NIj(ak58ZvNC6Pd7h7bTiS-L^l)NOms8R%|tg7-Ar^d(amSd_cYv+m4Bgni~OF3 z>*e<}lnnRUuei4z|10kEKgQIx^mhBn{C2xM&+wT&duI&y*p2Nz{>EOsmZ|4cW1q~8 zEsrvM6nHXITN)v4iL@osmPlJ7ZHcrc(w0bDvcK-@l+u=-nyj{T@YveYXGvQ+vD(t- z&UkI<^Q0}2wnW+zX-lLnk+wwI5@}1MEs?f#8fr_KM;X4DNjHCq=w_muiEbvkndoMs zn~82Fy7|E^Ey-Ar^d(al6R6WvU7GttdNHxu1V zbn|IQH~+h@X}BQcYZ?lMueRTDUpo4Z`*}Vss%?#h10l^9*}6UE^Jzx+K>gTnu5f(x zDt9IZvMfs>E`E&Za%Se^AFE>y7@;$Hxu1VbTiS-L^l)NOms8R%|tg7 z-FzC-%@ZDPcxP7r1?qvq`yP9Xa)7PO%~kMq4LQRB`xW}6G=@ZHIdXrQWHr{ zBsG!LL{igfC^coCZ1{C1-TWJ(n~82Fx|!%^qMM0sCc2sE=2MSu{_SMx=HDHgZvH*d z%_o*_{=*qhH~*37W}=&kZYH{!=w_muiEbvkndoMsn@>Z!d0tbiyROyK;%aJcX>hxz zWo2cl>fKrS7pZq2C|sF)tBOMn&f%c_ihITJA8L4}YoR4c4Loy zKgWOlB@HjUoUgd)1PkkmYA1Dz%kvDs0uE;iOfQqbL;@2DOe8Roz(fKQ2}~p~oq7V( zE0YzN{(NkK=`SQOomheC|IT=U>8~U(k-$U(6A4TtFpLBruV{bQ%gwndce) zo=G>qN^~>P%|tg7-Ar^d(al6R6Wx63(aryuEZzLtvFYa5iEcizbn`#Yc)IytL^l)N 
zOms8R%|tg7-Ar^d(al6R6Wx3o(#;coQ^P%3`Rn92H9U07*f%xg4TtUb+{=&uP{W~_ zRgNZgpgvI;Rr?cra-@m70hY$xp3B2lbm>6AP0 zaj2bl*04*$fsp3wK(aMV+Kec!KEvk==sPr1cX>l*B%Q?S4V${wkKW3RhUejhcA4MN zC@1?n?)a9524&CQsly?=wX2T5wQql<)=@{N8cvB*4Sz$cs~Iv=gM(xyl9@LzyY#R6~O^lWv|$bTiS-L^l)NOms8R%|tg7-F)iN%>|RCo2MO{Zk|qb z^NFRKXPohL^Gu?fiEbvkndoMsn~82Fx|!%^qMM0sJ`L&S2~RcLo|S*GI#T$;H`JlA zV+|$4rv0LO+ws5XzHw)%Iw$eNE=R3GM;pBI07V2F_aVmYQy1a5PYdD{MEcCcDlMX~ z5BcWB>(I9Cz0FLkZmbm`hn-R+mQcdlz+wQenIW1G#G?$igpdN|5D?Q$vq@sMk&qnQq} zoa|rL(2$Q7FUr)Jib-c8or!cN(wRtSBAtnJCeoQsJ)Nm!vO3eOW9v+_NoP8-I@6pp zUT2z1Iuq$kq%)DuL^>1cOr$fB&O|yB=}f1g&XjqqVSXmvynyItqMM0sCc2sEW}=&k zZYH|<)T5ghPL^&iJvQCEi0I}MMK|X-8QyS6XtT5DE;IbB*N8+zW}In3U;6hgweBmo zRCq(ds2+?~T*=znSf3t=Xg!I-hm|7hzJWX?f4V-1yS~a)bZg)Dvz4z#cQ*bN_Aw#a~{WG#j5*Nwo+_8c59xJV>(%Ld_7)CD?HAk|kN*YtMI}qrb~?>qoaCn& z%J=Lo8jjeF-E#bm-Tm1{$7*Wqlew|;u*IDRoSmsIEg^M@)Fo1vNL?aziPR-hmq=YY z_0*+vCaW%099vzgBz5UTsY|)Gx>R+>A7-c~ZHcrc(w0bDB5jGZCDN8iTOw_Vw58Kf zTgp7lP?JeF*Am@KbTiS-L^l)NOms8R%|th!dUUgEvUKyZW7ExbL^q#Uy4iik)6E{D zn~82Fx|!%^qMM0sCc2sEW}=&kZaxj^<_V87+?|zwp?bGG%5bYZ$}nYkz<#@Z@$tXi z{+U+_l_q+-{bYW-U4Ah`!=Ak}hGTYPyN|!I7vHo*J)au;WNvJElwm2*n5iu-CvAze zCDN8iTOw_Vv?bD(NLxDfw56uWYD>+>)|OgGTRO4YQtKJ7Ev+DJiL@osmPlJ7ZHcrc z(w0bDB5jGZrPEMb$~?-@mPt3CM|3mM%|tg7-Ar^d(al6R6Wx63(aq;imTq2oY`VFf z=;jkkH?KP5>E_i$Hxu1VbTiS-L^l)NOms8R%|tg7-FzC-&2uOER)#yX@-I?H3SW3u z-J={}D|2%d9B7y^JorDqM2YYPF7*+JhsC022z+#tip8B z8Lu#{BZY|+CQ_J4VIqZz6ed!bNMRy{i4>;OP+`hE&~R}k-MoS5W}=&kZYH{!=w_mu ziEbvk`P8GE`DE$ljmM^&Hxb=@V(I3tGoEhVOms8R%|tg7-Ar^d(al6R6WvU7Gtteb zA>BOTfrfWv<#Y8w;e8L>ICh@ltl?|zSKOP9{}uQ9=c!7Gbx(D+QcEwopUf}1%To=P z%J1Ze+fDvId?&}QbrlZ!PL7lM2#Y+`upYQ9Q)YT2$xI|Ok<3Ig6Uj^@Gm*?hGSjIi zGhIGendypS%S>BHW;(Gl)7CRyX1bDOCX$&*W+Iu1WG0fCNM<6LiDV{{nNCBQDf3iA zcP8Dujp$~gn~82Fx|!%^qMM0sCc635qnowK(#_kCO*eaqZa%Sev+s^*CE=l^H&b9TNMIs?i3BDRm`GqEfr$hr z5|~asf$8eW3QX4=TVM*1z;t2-roJ;?U<#7JL;@2DOe8Roz(fKQ2}~p~k-$U((`hI$ zWu9jUXVT66L^l)NOms8R%|tg7-Ar^d(aonG-E2;lZjKzAZjKV&d}8V5*cneZ4-nl< 
zbTiS-L^l)NOms8R%|tg7-Ar`zX-GHEX=-)XwR&1yP0cM0ZuhjTtSlw}vaI~eMhf{N zhdJc{TbcX3GJVh9tl?euEAB5q}#SkwqOo#m`cHwSc+`Sh?J z^yxvbp+}riGu~?NjO~Q|S|p0KES+)(J`T0>&Kh<}I1ti&9Z0r@Nt+SH)o1v80ey#N z>Mn1{jHHuTy=mc3SoQeG%j^dn3KRdTAp$rcdCvLcD zey-EcmbNjUucLb~QTWPpvz+#?@|wqT55BCFBo?kHo|SlM|N88)pz*PwaaN0<_Gic7 z(xr(*7raGbnvmn6NjvsB}2|_+qR)2mY2)gH$ww@pB{`xSTw}8 zp$7z38>Tm)U#JJdy1A{@4p`1SE@n3pyHE@I0_gUw&UT-V>9XNiZ|WNJZZ=3y*e{BpAkc^im+>SGsHq>5ew=)=xzhL%PF(s4TqfvUdoRBU9lG2)Trhs+TJvC z$^s)8ZSIb;_3dx$-qgOP^O7!f98Y883Ma~&u+H(X9gL{T%MAZf z&)#7~dyOF59*PBhvOgz(5UkQfuVHmn%+#YXGq{2wor^<2f>HN! z7+L)WwGIOkTk!AZ{vZLYl#JA!oNv(%fPZV_!_v$jE$tZZxXfAg28i665RSwF$ z$H&)#%;khV?1cBnE$Hm&9yYne)1k-uBZkIknPkpodyJ0aDoi<}LwKxCWH|XVHk?-^ z4xf!d|B|=)tQDg%;uFV6j_S;Na)cj#yhIG!#-}M^0j6Y-r)^Bcq&i4jw8ehQ#5k3;&xw zY!9pPT`s+tWpug!qA#yXhsFO#43Y`^GffBo-h1$^7~eMkx%%WxYOuDp(cY9Oys)q& zvGNPWGv}{4dMMjH%EvGRBz8YsTs)MXPB!=I5nXzEV@^V(Hxvu_km@x$i-%%Z?&16~ z_zPN4H)~j52&;adhQ17F+ohih-@u+wOcz_Hqj&4{H#q?<_SAVAYu%n&PxEFsb_ed(`UcnXhQ`Kv zdv~BZp3cg4I-WjIxH5OdG1O*E$y!>nD63t@a(g_@-R>rjyRJDF(M>;`++JD!BUALa zTN{?UJasM0o0}7n-n^`(i#{}TL;`g!4IV8LG2lN3qd$H3O{_yY7#Omsgs2zQC+E&q z7VxxO(HyKnA%kAKY&CFCv?wkZK{yxH)ovFJ^kWEk^$C1sVj5J$Z*1r87$GqDAzDHh? zk(j#(Jh2$8FXnZ) z41(c8G^PboDSJb~sENr#H8%Yr8L!+YpzVf%I6Td$ssz1ZVR8=FatNH(av0d%fJ1HNroUAvas? 
zA8U5Zarkt<75~RVb^g?<ZxfhnK(AF81aS{Fs{YQW7qP4W*TzsN$&IP zYu(8w$x_*QU2VPagN17GkbP6mnW=}5hI1#wtx6`9dtW)uCgloIr3TZC!-RMxVs(6Tr;*4D`%^hNB>xA&r_NOmh7s>+GyCx5~w+>T0r*?PMZbjhLC%65*+ z!Y)+GXt~STWtxF~JVqskT(4yCu(L5jKDJ%QMLa;R$Fi_m4_499c%Ztpc%>VJfju|^ z+E84s)XDbKk$2Dw$xfPT@X;}lE+)Ju`<5Me)FqJJ-hN1uhmfO6J|XS%4`_iHoT&+_ zWLC2CMc|B~uF#c|l!dkb{3%Msod^FhM|HcbCth2qmReujg7LZ7o?cRv4<;5FU| zEv~&zCr2#?KJMG?go!fQ&EPby2SYk;@VQtghFPy3#9WBok(?;yUNc2Bp(VZPKATMM zsE%tbMvpBTs*$s+-PzJp@@Q|vko5N2KIsnI#hsw4;jJFk`;zXh-Bs+l6uC;x0GNj1 zG*wrpN2<-6m7_n&3h7KvBz6JJ7`D=ZP_RcX3SiQ##ZyYv*+Z4vjvo5ks%jX@X^-@Y z>J0py&9F&_+aA#o>Cs{_vg=nq3weU$Rb4&S6J_pLN6IT5Ybvly_GC4_y=0Bsq~KD@{r))!f0 zsM+muBuDig$#2MUKk; zRK${i6%iE`1r!wpJNAM~(d++ydO!TG*_pGuGv~~lojv&*mo>;=zOVkm+>FAswC;-* z<@HY#t{c9hYN}@Cf}+f7t;?%#>zdr0xjt_|h54z{g4`MDshT;ni!$^6e`G<0qCuT< zN{c#;Ph{1px;QP~RFvJNLwSuoTax*Sc(^!GI=En9cIo2&gQ_J)6zv||JTY>ntb~=c zQdYW^VO6j)tt_jeRmrMsWm{FO9IL8T&8lwIuxeVhtlCx`tFBegs&6&0a;=6|Bdf91 z#LBapTFtEHRtu}8)yisZwXxb-?X31z2dksi$?9x%vASB_tnOA1tEbh=>TUJ0`dUX> zM_c`@W2|GX{?-8NIO}+8pmltgE?>r!i&HQc()8ev^-jkKn7`FYnpY7HQma$W>_<=0&A8v+p=}5b(?j&HOHE3 z&9mlPh1LRVp|!|bY!z8|SWB#>RpAOr>jmpYtIR65Hd>pk zm#mkqSFBg9*R0pA&DI;%7VAxGtM!)kw)Kv+&3e~*&wAh5Zhc^VXzj3eTDz=|tdFhT z)+g4d)@Rlp>vQW1Yp=D>`qKK!`r7)&`qui+`rg`a{b2oQ{bc=Y{bK!U{bv1c{bBuS z9kBkg{Y^U%qXBZ!5RK3nO^}DCXolu!ftF~6)@XyaXovRbfR5;d&gg=!=!Wj-fu87v-spqA zI0{FjACAGX;4`BEI1a~SAWp!EI0+}?6b!rg zh>I{37vmCKieVUz%P<0$VCSW2aVKS!R2Hc3L zxCu978g9XKO<@8mz@SJc&{~g{Sch*5g@hz;k#WFW^O# zp&T2r2`}Mgyn4Nd5-KAbRgin1i{PhxsVP0xZNLEJhLTz!EG)G48}&xEuH2UfhReD8X{vj|cD|R$wJo;UPSX zNAM^f!{bf2XJHITs zGgVS+R#~z|c^1vcN{lVZnOByqQ!*+&JJ~yTR`Im7v^E9BQ|Zj&DRf5hB${75alvHn zBju5ekxh}8A}>c?iM$$lE%JI~bL5T4mdKltt&z7PZ%5vVY>T`bc`x#QWP9X;$cK>~ zk)4rUk&hxDM|MX(iF_LQEV3u^dE|@8-pIblmyxd`Uq`-)d>i>L@_l4~qjk<3U| zq++B}q;e!XQYDfTsT!#ksUE2jsTrvisU4{ksT-*msUK+&$&ECOG>SBiG>POznns#M znnzkhT1Hw$T1VPM+D6(%+DAG>Iz~E0I!C%hx<o yo|%?iUVYEzQWp>}x(A0mwK=6bB diff --git a/data/uploads/buddai_v3/buddai_v3.py b/data/uploads/buddai_v3/buddai_v3.py new file mode 
100644 index 0000000..de7b6e3 --- /dev/null +++ b/data/uploads/buddai_v3/buddai_v3.py @@ -0,0 +1,882 @@ +#!/usr/bin/env python3 +""" +BuddAI Executive v3.0 - Modular Builder +Breaks complex tasks into manageable chunks + +Author: James Gilbert +License: MIT +""" + +import sys +import json +import sqlite3 +from datetime import datetime +from pathlib import Path +import http.client +import re # noqa: F401 +from typing import Optional +import zipfile +import shutil + +# Server dependencies +try: + from fastapi import FastAPI, UploadFile, File + from fastapi.middleware.cors import CORSMiddleware + from fastapi.staticfiles import StaticFiles + from pydantic import BaseModel + import uvicorn + SERVER_AVAILABLE = True +except ImportError: + SERVER_AVAILABLE = False + +# Configuration +OLLAMA_HOST = "localhost" +OLLAMA_PORT = 11434 +DATA_DIR = Path(__file__).parent / "data" +DB_PATH = DATA_DIR / "conversations.db" + +# Models +MODELS = { + "fast": "qwen2.5-coder:1.5b", + "balanced": "qwen2.5-coder:3b" +} + +# Complexity triggers - if matched, break down the task +COMPLEX_TRIGGERS = [ + "complete", "entire", "full", "build entire", "build complete", + "with ble and", "with servo and", "including", "all of" +] + +# Module patterns we can detect +MODULE_PATTERNS = { + "ble": ["bluetooth", "ble", "wireless"], + "servo": ["servo", "flipper", "weapon"], + "motor": ["motor", "drive", "movement", "l298n"], + "safety": ["safety", "timeout", "failsafe", "emergency"], + "battery": ["battery", "voltage", "power monitor"], + "sensor": ["sensor", "distance", "proximity"] +} + + + +# --- Shadow Suggestion Engine --- +class ShadowSuggestionEngine: + """Proactively suggests modules/settings based on user/project history.""" + def __init__(self, db_path): + self.db_path = db_path + + def lookup_recent_module_usage(self, module, limit=5): + """Look up recent usage patterns for a module from repo_index.""" + conn = sqlite3.connect(self.db_path) + cursor = conn.cursor() + cursor.execute( + """ 
+ SELECT file_path, content, last_modified FROM repo_index + WHERE function_name LIKE ? OR file_path LIKE ? + ORDER BY last_modified DESC LIMIT ? + """, + (f"%{module}%", f"%{module}%", limit) + ) + results = cursor.fetchall() + conn.close() + return results + + def suggest_for_module(self, module): + """Return a proactive suggestion string for a module if pattern detected.""" + history = self.lookup_recent_module_usage(module) + if not history: + return None + # Example: For 'motor', look for L298N and PWM frequency + l298n_count = 0 + pwm_freqs = [] + for _, content, _ in history: + if "L298N" in content or "l298n" in content: + l298n_count += 1 + pwm_matches = re.findall(r'PWM_FREQ\s*=\s*(\d+)', content) + pwm_freqs.extend([int(f) for f in pwm_matches]) + # Also look for explicit frequency in analogWrite or ledcSetup + freq_matches = re.findall(r'(?:ledcSetup|analogWrite)\s*\([^,]+,\s*[^,]+,\s*(\d+)\)', content) + pwm_freqs.extend([int(f) for f in freq_matches if f.isdigit()]) + if l298n_count >= 2: + freq = max(set(pwm_freqs), key=pwm_freqs.count) if pwm_freqs else 500 + return f"I see you usually use the L298N with a {freq}Hz PWM frequency on the ESP32-C3. Should I prep that module?" + return None + + def get_proactive_suggestion(self, user_input): + """ + V3.0 Proactive Hook: + 1. Identify "Concept" (e.g., 'flipper') + 2. Query repo_index for James's most frequent companion modules + 3. If 'flipper' often appears with 'safety_timeout', suggest it. + """ + # 1. Identify Concepts + input_lower = user_input.lower() + detected_modules = [] + for module, keywords in MODULE_PATTERNS.items(): + if any(kw in input_lower for kw in keywords): + detected_modules.append(module) + + if not detected_modules: + return None + + # 2. 
Query repo_index for correlations + conn = sqlite3.connect(self.db_path) + cursor = conn.cursor() + + suggestions = [] + for module in detected_modules: + # Find files containing this module (simple heuristic) + cursor.execute("SELECT content FROM repo_index WHERE content LIKE ? LIMIT 10", (f"%{module}%",)) + rows = cursor.fetchall() + if not rows: continue + + # Check for companion modules + companions = {} + for (content,) in rows: + content_lower = content.lower() + for other_mod, other_kws in MODULE_PATTERNS.items(): + if other_mod != module and other_mod not in detected_modules: + if any(kw in content_lower for kw in other_kws): + companions[other_mod] = companions.get(other_mod, 0) + 1 + + # 3. Suggest if frequent (>50% correlation in sample) + for other_mod, count in companions.items(): + if count >= len(rows) * 0.5: + suggestions.append(f"I noticed '{module}' often appears with '{other_mod}' in your repos. Want to include that?") + + conn.close() + return " ".join(list(set(suggestions))) if suggestions else None + + def get_all_suggestions(self, user_input, generated_code): + """Aggregate all proactive suggestions into a list.""" + suggestions = [] + + # 1. Companion Modules + companion = self.get_proactive_suggestion(user_input) + if companion: + suggestions.append(companion) + + # 2. Module Settings + input_lower = user_input.lower() + for module, keywords in MODULE_PATTERNS.items(): + if any(kw in input_lower for kw in keywords): + s = self.suggest_for_module(module) + if s: + suggestions.append(s) + + # 3. Forge Theory Check + if ("motor" in input_lower or "servo" in input_lower) and "applyForge" not in generated_code: + suggestions.append("Apply Forge Theory smoothing to movement?") + + # 4. Safety Check (L298N) + if "L298N" in generated_code and "safety" not in generated_code.lower(): + suggestions.append("Drive system lacks safety timeout (GilBot_V2 uses 5s failsafe). 
Add that?") + + return suggestions + + +class BuddAI: + """Executive with task breakdown""" + + def is_search_query(self, message): + """Check if this is a search query that should query repo_index""" + message_lower = message.lower() + search_triggers = [ + "show me", "find", "search for", "list all", + "what functions", "which repos", "do i have", + "where did i", "have i used", "examples of", + "show all", "display" + ] + return any(trigger in message_lower for trigger in search_triggers) + + def search_repositories(self, query): + """Search repo_index for relevant functions and code""" + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + cursor.execute("SELECT COUNT(*) FROM repo_index") + count = cursor.fetchone()[0] + print(f"\nšŸ” Searching {count} indexed functions...\n") + + # Extract keywords from query + keywords = re.findall(r'\b\w{4,}\b', query.lower()) + # Add specific search terms + specific_terms = [] + if "exponential" in query.lower() or "decay" in query.lower(): + specific_terms.append("applyForge") + specific_terms.append("exp(") + if "forge" in query.lower(): + specific_terms.append("Forge") + keywords.extend(specific_terms) + # Search in function names and content + search_conditions = [] + for keyword in keywords: + search_conditions.append(f"function_name LIKE '%{keyword}%'") + search_conditions.append(f"content LIKE '%{keyword}%'") + if not search_conditions: + print("āŒ No search terms found") + conn.close() + return "No search terms provided." 
+ search_query = " OR ".join(search_conditions) + sql = f"SELECT repo_name, file_path, function_name, content FROM repo_index WHERE {search_query} LIMIT 10" + cursor.execute(sql) + results = cursor.fetchall() + conn.close() + if not results: + return f"āŒ No functions found matching: {', '.join(keywords)}\n\nTry: /index to index more repositories" + # Format results + output = f"āœ… Found {len(results)} matches for: {', '.join(set(keywords))}\n\n" + for i, (repo, file_path, func, content) in enumerate(results, 1): + # Extract relevant snippet + lines = content.split('\n') + snippet_lines = [] + for line in lines[:30]: # First 30 lines + if any(kw in line.lower() for kw in keywords): + snippet_lines.append(line) + if len(snippet_lines) >= 10: + break + if not snippet_lines: + snippet_lines = lines[:10] + snippet = '\n'.join(snippet_lines) + output += f"**{i}. {func}()** in {repo}\n" + output += f" šŸ“ {Path(file_path).name}\n" + output += f" ```cpp\n{snippet}\n ```\n" + output += f" ---\n\n" + return output + + def __init__(self, server_mode=False): + self.ensure_data_dir() + self.init_database() + self.session_id = self.create_session() + self.server_mode = server_mode + self.context_messages = [] + self.shadow_engine = ShadowSuggestionEngine(DB_PATH) + + print("šŸ”„ BuddAI Executive v3.0 - Modular Builder") + print("=" * 50) + print(f"Session: {self.session_id}") + print(f"FAST (5-10s) | BALANCED (15-30s)") + print(f"Smart task breakdown for complex requests") + print("=" * 50) + print("\nCommands: /fast, /balanced, /help, exit\n") + + def ensure_data_dir(self): + DATA_DIR.mkdir(exist_ok=True) + + def init_database(self): + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + + cursor.execute(""" + CREATE TABLE IF NOT EXISTS sessions ( + session_id TEXT PRIMARY KEY, + started_at TIMESTAMP, + ended_at TIMESTAMP + ) + """) + + cursor.execute(""" + CREATE TABLE IF NOT EXISTS messages ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + session_id TEXT, + role TEXT, + 
content TEXT, + timestamp TIMESTAMP + ) + """) + + cursor.execute(""" + CREATE TABLE IF NOT EXISTS repo_index ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + file_path TEXT, + repo_name TEXT, + function_name TEXT, + content TEXT, + last_modified TIMESTAMP + ) + """) + + cursor.execute(""" + CREATE TABLE IF NOT EXISTS style_preferences ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + category TEXT, + preference TEXT, + confidence FLOAT, + extracted_at TIMESTAMP + ) + """) + + conn.commit() + conn.close() + + def create_session(self): + session_id = datetime.now().strftime("%Y%m%d_%H%M%S") + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + cursor.execute( + "INSERT INTO sessions (session_id, started_at) VALUES (?, ?)", + (session_id, datetime.now().isoformat()) + ) + conn.commit() + conn.close() + return session_id + + def end_session(self): + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + cursor.execute( + "UPDATE sessions SET ended_at = ? WHERE session_id = ?", + (datetime.now().isoformat(), self.session_id) + ) + conn.commit() + conn.close() + + def save_message(self, role, content): + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + cursor.execute( + "INSERT INTO messages (session_id, role, content, timestamp) VALUES (?, ?, ?, ?)", + (self.session_id, role, content, datetime.now().isoformat()) + ) + conn.commit() + conn.close() + + def index_local_repositories(self, root_path): + """Crawl directories and index .py, .ino, and .cpp files""" + import ast + + print(f"\nšŸ” Indexing repositories in: {root_path}") + path = Path(root_path) + + if not path.exists(): + print(f"āŒ Path not found: {root_path}") + return + + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + + count = 0 + + for file_path in path.rglob('*'): + if file_path.is_file() and file_path.suffix in ['.py', '.ino', '.cpp', '.h']: + try: + with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: + content = f.read() + + functions = [] + + # Python parsing + if 
file_path.suffix == '.py': + try: + tree = ast.parse(content) + for node in ast.walk(tree): + if isinstance(node, ast.FunctionDef): + functions.append(node.name) + except: + pass + + # C++/Arduino parsing + elif file_path.suffix in ['.ino', '.cpp', '.h']: + matches = re.findall(r'\b(?:void|int|bool|float|double|String|char)\s+(\w+)\s*\(', content) + functions.extend(matches) + + # Determine repo name + try: + repo_name = file_path.relative_to(path).parts[0] + except: + repo_name = "unknown" + + timestamp = datetime.fromtimestamp(file_path.stat().st_mtime) + + for func in functions: + cursor.execute(""" + INSERT INTO repo_index (file_path, repo_name, function_name, content, last_modified) + VALUES (?, ?, ?, ?, ?) + """, (str(file_path), repo_name, func, content, timestamp.isoformat())) + count += 1 + + except Exception: + pass + + conn.commit() + conn.close() + print(f"āœ… Indexed {count} functions across repositories") + + def retrieve_style_context(self, message): + """Search repo_index for code snippets matching the request""" + # Extract potential keywords (nouns/modules) + keywords = re.findall(r'\b\w{4,}\b', message.lower()) + if not keywords: + return "" + + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + + # Build a search query for function names or repo names + search_terms = " OR ".join([f"function_name LIKE '%{k}%'" for k in keywords]) + search_terms += " OR " + " OR ".join([f"repo_name LIKE '%{k}%'" for k in keywords]) + + query = f"SELECT repo_name, function_name, content FROM repo_index WHERE {search_terms} LIMIT 2" + + cursor.execute(query) + results = cursor.fetchall() + conn.close() + + if not results: + return "" + + context_block = "\n[REFERENCE STYLE FROM JAMES'S PAST PROJECTS]\n" + for repo, func, content in results: + # Just grab the first 500 chars of the file to save context window + snippet = content[:500] + "..." 
+ context_block += f"Repo: {repo} | Function: {func}\nCode:\n{snippet}\n---\n" + + return context_block + + def scan_style_signature(self): + """V3.0: Analyze repo_index to extract style preferences.""" + print("\nšŸ•µļø Scanning repositories for style signature...") + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + + # Get a sample of code + cursor.execute("SELECT content FROM repo_index ORDER BY RANDOM() LIMIT 5") + rows = cursor.fetchall() + + if not rows: + print("āŒ No code indexed. Run /index first.") + conn.close() + return + + code_sample = "\n---\n".join([r[0][:1000] for r in rows]) + + prompt = f"""Analyze this code sample from James's repositories. + Extract 3 distinct coding preferences or patterns. + Format: Category: Preference + + Examples: + - Serial: Uses 115200 baud + - Safety: Uses non-blocking millis() + - Pins: Prefers #define over const int + + Code Sample: + {code_sample} + """ + + print("⚔ Analyzing with BALANCED model...") + summary = self.call_model("balanced", prompt) + + # Store in DB + timestamp = datetime.now().isoformat() + lines = summary.split('\n') + for line in lines: + if ':' in line: + parts = line.split(':', 1) + category = parts[0].strip('- *') + pref = parts[1].strip() + cursor.execute( + "INSERT INTO style_preferences (category, preference, confidence, extracted_at) VALUES (?, ?, ?, ?)", + (category, pref, 0.8, timestamp) + ) + + conn.commit() + conn.close() + print(f"\nāœ… Style Signature Updated:\n{summary}\n") + + def is_simple_question(self, message): + """Check if this is a simple question that should use FAST model""" + message_lower = message.lower() + + simple_triggers = [ + "what is", "what's", "who is", "who's", "when is", + "how do i", "can you explain", "tell me about", + "what are", "where is" + ] + + # Also check if it's just a question without code keywords + code_keywords = ["generate", "create", "write", "build", "code", "function"] + + has_simple_trigger = any(trigger in message_lower for 
def is_complex(self, message):
    """Return True when the request is big enough to need a modular build."""
    text = message.lower()

    # How many "large project" phrases appear in the request.
    trigger_hits = sum(1 for phrase in COMPLEX_TRIGGERS if phrase in text)

    # How many distinct hardware modules the request touches.
    module_hits = sum(
        1
        for keyword_list in MODULE_PATTERNS.values()
        if any(word in text for word in keyword_list)
    )

    # Two complexity phrases, or three-plus modules, means break it down.
    return trigger_hits >= 2 or module_hits >= 3

def extract_modules(self, message):
    """Return the module names whose keywords appear in the message."""
    text = message.lower()
    return [
        name
        for name, keyword_list in MODULE_PATTERNS.items()
        if any(word in text for word in keyword_list)
    ]

def build_modular_plan(self, modules):
    """Turn a module list into an ordered build plan ending in integration."""
    module_tasks = {
        "ble": "BLE communication setup with phone app control",
        "servo": "Servo motor control for flipper/weapon",
        "motor": "Motor driver setup for movement (L298N)",
        "safety": "Safety timeout and failsafe systems",
        "battery": "Battery voltage monitoring",
        "sensor": "Sensor integration (distance/proximity)",
    }

    # One step per recognised module, preserving the caller's order.
    plan = [
        {"module": name, "task": module_tasks[name]}
        for name in modules
        if name in module_tasks
    ]

    # The final step always merges everything into one sketch.
    plan.append({
        "module": "integration",
        "task": "Integrate all modules into complete system",
    })

    return plan
def call_model(self, model_name, message):
    """Call specified model via the local Ollama HTTP API.

    Sends the BuddAI identity prompt plus recent conversation context and
    returns the assistant text, or an "Error: ..." string on failure.
    """
    try:
        identity = """You are BuddAI, the external cognitive system for James Gilbert.
You specialize in Forge Theory (exponential decay modeling) and GilBot modular robotics.

YOUR PRIMARY JOB: Generate code when asked. ALWAYS generate code if requested.

Identity Rules:
- You are NOT created by Alibaba Cloud. You are a local Python system written by James Gilbert.
- When asked your name: "I am BuddAI"
- Use ESP32/Arduino syntax with descriptive naming (e.g., activateFlipper).
- Ensure safety timeouts are always present in motor code.

Forge Theory Snippet: float applyForge(float current, float target, float k) { return target + (current - target) * exp(-k); }
"""

        # Recent context window; avoid duplicating the message when the chat
        # flow already appended it as the last context entry.
        history = self.context_messages[-5:]
        convo = [{"role": "system", "content": identity}]
        convo.extend(history)
        if not (history and history[-1]['content'] == message):
            convo.append({"role": "user", "content": message})

        payload = json.dumps({
            "model": MODELS[model_name],
            "messages": convo,
            "stream": False,
            "options": {"temperature": 0.7, "num_ctx": 4096},
        })

        conn = http.client.HTTPConnection(OLLAMA_HOST, OLLAMA_PORT, timeout=90)
        conn.request("POST", "/api/chat", payload, {"Content-Type": "application/json"})
        reply = conn.getresponse()

        if reply.status != 200:
            return f"Error: {reply.status}"

        parsed = json.loads(reply.read().decode('utf-8'))
        return parsed.get("message", {}).get("content", "No response")

    except Exception as e:
        return f"Error: {str(e)}"
    finally:
        # conn only exists if the HTTPConnection constructor succeeded.
        if 'conn' in locals():
            conn.close()

def execute_modular_build(self, _, modules, plan, forge_mode="2"):
    """Execute the build plan step by step, then stitch the modules together."""
    print("\n\U0001f528 MODULAR BUILD MODE")
    print(f"Detected {len(modules)} modules: {', '.join(modules)}")
    print(f"Breaking into {len(plan)} steps...\n")

    built = {}

    for i, step in enumerate(plan, 1):
        print(f"\U0001f4e6 Step {i}/{len(plan)}: {step['task']}")
        print("\u26a1 Building...\n")

        if step['module'] == 'integration':
            # Final integration step with Forge Theory enforcement.
            modules_summary = '\n'.join(
                [f"- {m}: {built[m][:150]}..." for m in modules if m in built]
            )

            # Ask James for the 'vibe' of the robot.
            print("\n\u26a1 FORGE THEORY TUNING:")
            print("1. Aggressive (k=0.3) - High snap, combat ready")
            print("2. Balanced (k=0.1) - Standard movement")
            print("3. Graceful (k=0.03) - Roasting / Smooth curves")

            if self.server_mode:
                choice = forge_mode
            else:
                choice = input("Select Forge Constant [1-3, default 2]: ")

            # Map the menu choice to a forge constant (default: balanced).
            k_val = {"1": "0.3", "3": "0.03"}.get(choice, "0.1")

            prompt = f"""INTEGRATION TASK: Combine modules into a cohesive GilBot system.

            [MODULES]
            {modules_summary}

            [FORGE PARAMETERS]
            Set k = {k_val} for all applyForge() calls.

            [REQUIREMENTS]
            1. Implement applyForge() math helper.
            2. Use k={k_val} to smooth motor and servo transitions.
            3. Ensure naming matches James's style: activateFlipper(), setMotors().
            """
        else:
            # Individual module build.
            prompt = f"Generate ESP32-C3 code for: {step['task']}. Keep it modular with clear comments."

        # Each module goes through the balanced model.
        built[step['module']] = self.call_model("balanced", prompt)

        print(f"\u2705 {step['module'].upper()} module complete\n")
        print("-" * 50 + "\n")

    # Compile the per-module outputs into one document.
    sections = [f"## {name.upper()} MODULE\n{code}\n\n" for name, code in built.items()]
    return "# COMPLETE GILBOT CONTROLLER - MODULAR BUILD\n\n" + "".join(sections)

def apply_style_signature(self, generated_code):
    """Refine generated code to match James's specific naming and safety patterns.

    Currently a pass-through. Planned checks:
    1. James's common function names (e.g., setupMotors vs init_motors).
    2. Forge Theory helpers present whenever motion code is detected.
    3. A 'Proactive Note' when a common companion module is missing.
    """
    return generated_code
def chat(self, user_message, force_model=None, forge_mode="2"):
    """Main chat with smart routing and shadow suggestions.

    Routes the turn to: the forced model, the modular builder (complex
    requests), the repo search, the fast model (simple questions), or the
    balanced model; then appends proactive suggestions from the shadow
    engine.
    """
    # Inject style reference material from past projects, when any matches.
    style_context = self.retrieve_style_context(user_message)
    if style_context:
        self.context_messages.append({"role": "system", "content": style_context})

    self.save_message("user", user_message)
    self.context_messages.append({"role": "user", "content": user_message})

    if force_model:
        print(f"\n\u26a1 Using {force_model.upper()} model (forced)...")
        response = self.call_model(force_model, user_message)
    elif self.is_complex(user_message):
        modules = self.extract_modules(user_message)
        plan = self.build_modular_plan(modules)
        banner = "=" * 50
        print("\n" + banner)
        print("\U0001f3af COMPLEX REQUEST DETECTED!")
        print(f"Modules needed: {', '.join(modules)}")
        print(f"Breaking into {len(plan)} manageable steps")
        print(banner)
        response = self.execute_modular_build(user_message, modules, plan, forge_mode)
    elif self.is_search_query(user_message):
        # Search query: answer from the local database, no model call.
        response = self.search_repositories(user_message)
    elif self.is_simple_question(user_message):
        print("\n\u26a1 Using FAST model (simple question)...")
        response = self.call_model("fast", user_message)
    else:
        print("\n\u2696\ufe0f Using BALANCED model...")
        response = self.call_model("balanced", user_message)

    # Style guard: nudge the output toward James's conventions.
    response = self.apply_style_signature(response)

    # Suggestion bar from the shadow engine.
    suggestions = self.shadow_engine.get_all_suggestions(user_message, response)
    if suggestions:
        numbered = " ".join(f"{idx}. {tip}" for idx, tip in enumerate(suggestions, start=1))
        response += "\n\nPROACTIVE: > " + numbered

    self.save_message("assistant", response)
    self.context_messages.append({"role": "assistant", "content": response})

    return response

def run(self):
    """Interactive REPL: read commands or chat turns until exit/Ctrl-C."""
    try:
        force_model = None
        while True:
            line = input("\nJames: ").strip()
            if not line:
                continue
            if line.lower() in ['exit', 'quit']:
                print("\n\U0001f44b Later!")
                self.end_session()
                break
            if line.startswith('/'):
                command = line.lower()
                if command == '/fast':
                    force_model = "fast"
                    print("\u26a1 Next: FAST model")
                elif command == '/balanced':
                    force_model = "balanced"
                    print("\u2696\ufe0f Next: BALANCED model")
                elif command == '/help':
                    print("\n\U0001f4a1 Commands:")
                    print("/fast - Use fast model")
                    print("/balanced - Use balanced model")
                    print("/index - Index local repositories")
                    print("/scan - Scan style signature (V3.0)")
                    print("/help - This message")
                    print("exit - End session\n")
                elif command.startswith('/index'):
                    # Path argument keeps the user's original casing.
                    parts = line.split(maxsplit=1)
                    if len(parts) > 1:
                        self.index_local_repositories(parts[1])
                    else:
                        print("Usage: /index ")
                elif command == '/scan':
                    self.scan_style_signature()
                else:
                    print("\nUnknown command. Type /help")
                continue
            # Normal chat turn; a forced model applies to one message only.
            response = self.chat(line, force_model)
            print(f"\nBuddAI:\n{response}\n")
            force_model = None
    except KeyboardInterrupt:
        print("\n\n\U0001f44b Bye!")
        self.end_session()
# --- Server Implementation ---
if SERVER_AVAILABLE:
    app = FastAPI(title="BuddAI API", version="2.0")

    # Allow React frontend to communicate
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_methods=["*"],
        allow_headers=["*"],
    )

    class ChatRequest(BaseModel):
        # Incoming chat payload from the web UI.
        message: str
        model: Optional[str] = None
        forge_mode: Optional[str] = "2"

    # Initialize server instance
    server_buddai = BuddAI(server_mode=True)

    # Serve Frontend
    frontend_path = Path(__file__).parent / "frontend"
    frontend_path.mkdir(exist_ok=True)
    app.mount("/web", StaticFiles(directory=frontend_path, html=True), name="web")

    @app.get("/")
    async def root():
        return {"status": "online", "message": "\U0001f525 BuddAI API is running. Visit /web for the interface or /docs for API documentation."}

    @app.post("/api/chat")
    async def chat_endpoint(request: ChatRequest):
        response = server_buddai.chat(request.message, force_model=request.model, forge_mode=request.forge_mode)
        return {"response": response}

    @app.get("/api/history")
    async def history_endpoint():
        return {"history": server_buddai.context_messages}

    @app.post("/api/upload")
    async def upload_repo(file: UploadFile = File(...)):
        """Accept a zip or single source file, store it under uploads/, index it.

        Security: the client-supplied filename and zip member paths are
        untrusted, so both are sanitized before touching the filesystem.
        """
        try:
            uploads_dir = DATA_DIR / "uploads"
            uploads_dir.mkdir(exist_ok=True)

            # FIX: strip any directory components from the client-supplied
            # name ('../../evil.py' style path traversal).
            safe_name = Path(file.filename).name
            if not safe_name:
                return {"message": "\u274c Error: invalid filename"}

            file_location = uploads_dir / safe_name
            with open(file_location, "wb") as buffer:
                shutil.copyfileobj(file.file, buffer)

            if safe_name.endswith(".zip"):
                extract_path = uploads_dir / file_location.stem
                with zipfile.ZipFile(file_location, 'r') as zip_ref:
                    # FIX (Zip Slip): refuse members that would escape
                    # extract_path before extracting anything.
                    safe_root = extract_path.resolve()
                    for member in zip_ref.namelist():
                        target = (extract_path / member).resolve()
                        try:
                            target.relative_to(safe_root)
                        except ValueError:
                            file_location.unlink()
                            return {"message": f"\u274c Error: unsafe path in archive: {member}"}
                    zip_ref.extractall(extract_path)
                server_buddai.index_local_repositories(extract_path)
                file_location.unlink()  # Cleanup zip
                return {"message": f"\u2705 Successfully indexed {safe_name}"}

            # Support single code files by moving them to a folder and indexing
            if file_location.suffix in ['.py', '.ino', '.cpp', '.h']:
                target_dir = uploads_dir / file_location.stem
                target_dir.mkdir(exist_ok=True)
                final_path = target_dir / safe_name
                shutil.move(str(file_location), str(final_path))
                server_buddai.index_local_repositories(target_dir)
                return {"message": f"\u2705 Successfully indexed {safe_name}"}

            return {"message": f"\u2705 Successfully uploaded {safe_name}"}
        except Exception as e:
            return {"message": f"\u274c Error: {str(e)}"}


def check_ollama():
    """Return True when a local Ollama server answers GET /api/tags."""
    try:
        conn = http.client.HTTPConnection(OLLAMA_HOST, OLLAMA_PORT, timeout=5)
        conn.request("GET", "/api/tags")
        response = conn.getresponse()
        conn.close()
        return response.status == 200
    except Exception:
        # Best-effort probe: any failure simply means "not reachable".
        return False


def main():
    """Entry point: CLI chat by default, FastAPI server with --server."""
    if not check_ollama():
        print("\u274c Ollama not running. Start: ollama serve")
        sys.exit(1)

    if len(sys.argv) > 1 and sys.argv[1] == "--server":
        if SERVER_AVAILABLE:
            print("\U0001f680 Starting BuddAI API Server on port 8000...")
            uvicorn.run(app, host="0.0.0.0", port=8000)
        else:
            print("\u274c Server dependencies missing. Install: pip install fastapi uvicorn aiofiles python-multipart")
    else:
        buddai = BuddAI()
        buddai.run()


if __name__ == "__main__":
    main()
// Configure Marked for Code Copy
const renderer = new marked.Renderer();

// HTML-escape helper. Used for the highlight fallback path: raw model
// output must never reach innerHTML unescaped.
const escapeHtml = (unsafe) => unsafe
    .replace(/&/g, "&amp;")
    .replace(/</g, "&lt;")
    .replace(/>/g, "&gt;")
    .replace(/"/g, "&quot;")
    .replace(/'/g, "&#039;");

renderer.code = (code, language) => {
    // Fall back to plaintext when the language tag is missing or unknown to hljs.
    const validLang = (language && hljs.getLanguage(language)) ? language : 'plaintext';
    let highlighted;
    try {
        // hljs.highlight returns already-escaped markup in .value.
        highlighted = hljs.highlight(code, { language: validLang }).value || escapeHtml(code);
    } catch (e) {
        // FIX: the previous fallback injected the raw code unescaped,
        // letting snippet content break out of the <code> tag.
        highlighted = escapeHtml(code);
    }
    return `<div class="code-block">
        <div class="code-header">
            <span>${language || 'text'}</span>
            <button class="copy-btn" onclick="copyCode(this)">Copy</button>
        </div>
        <pre><code class="hljs language-${validLang}">${highlighted}</code></pre>
    </div>`;
};
marked.setOptions({ renderer });

// Swap the highlight.js theme stylesheet together with the app theme.
useEffect(() => {
    document.body.className = theme === 'light' ? 'light-mode' : '';
    const hljsTheme = document.getElementById('hljs-theme');
    if (hljsTheme) {
        hljsTheme.href = theme === 'light'
            ? "https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/styles/atom-one-light.min.css"
            : "https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/styles/atom-one-dark.min.css";
    }
}, [theme]);

// Upload a repo zip / source file and surface the indexing result in chat.
const handleFileUpload = async (e) => {
    const file = e.target.files[0];
    if (!file) return;

    const formData = new FormData();
    formData.append("file", file);

    setLoading(true);
    setHistory(prev => [...prev, { role: "assistant", content: `\u{1F4E5} Uploading and indexing **${file.name}**...` }]);

    try {
        const res = await fetch("/api/upload", { method: "POST", body: formData });
        const data = await res.json();
        setHistory(prev => [...prev, { role: "assistant", content: data.message }]);
    } catch (err) {
        setHistory(prev => [...prev, { role: "assistant", content: "\u274C Upload failed." }]);
    } finally {
        // FIX: always clear the busy state and reset the input, even when
        // res.json() throws after a successful fetch.
        setLoading(false);
        e.target.value = null;
    }
};