Compare commits
639 commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
91f4f33b96 | ||
|
|
2a95ba6ff5 | ||
|
|
6bc24d5213 | ||
|
|
ea53bdbf8c | ||
|
|
4c6296c1c1 | ||
|
|
165db215a2 | ||
|
|
103fcb473b | ||
|
|
8b159abd17 | ||
|
|
ce6f016d63 | ||
|
|
c27f019a02 | ||
|
|
5f59911e52 | ||
|
|
5dc0983b77 | ||
|
|
4e82ec8da6 | ||
|
|
0779a7dbe5 | ||
|
|
44321aab25 | ||
|
|
791fc1a3a1 | ||
|
|
9f9e42768d | ||
|
|
425008f855 | ||
|
|
92703d5af5 | ||
|
|
428cd25ebc | ||
|
|
dfa466707d | ||
|
|
a18723d564 | ||
|
|
7389b2f801 | ||
|
|
ceaa1601de | ||
|
|
b693695e41 | ||
|
|
75fc9d4bf4 | ||
|
|
1b1aaa3d66 | ||
|
|
f5fdbb8cac | ||
|
|
e5472f6aa5 | ||
|
|
1dd6170dd4 | ||
|
|
9f1315d1a8 | ||
|
|
7a034ccbe8 | ||
|
|
613e1fc58e | ||
|
|
2d3ec1e8c3 | ||
|
|
8c2884cc6f | ||
|
|
9a8aa2bfac | ||
|
|
652548a60a | ||
|
|
a136c04aa1 | ||
|
|
52a431867a | ||
|
|
1b986f9f2d | ||
|
|
9a9cf8e40b | ||
|
|
ace8cf4462 | ||
|
|
7253e1240e | ||
|
|
2b40d0a3b0 | ||
|
|
f6e1e64a0c | ||
|
|
c32b4de28e | ||
|
|
c4f5b77786 | ||
|
|
3b16ddbfbd | ||
|
|
efd86a87b1 | ||
|
|
a3043e786a | ||
|
|
8efa9460bd | ||
|
|
ec5fdb3c67 | ||
|
|
88f4baed77 | ||
|
|
d5fb2dc50b | ||
|
|
6001a002c1 | ||
|
|
ff24898cd4 | ||
|
|
6cb5a9f39a | ||
|
|
c8a2d62d27 | ||
|
|
547a481d7b | ||
|
|
f71066197d | ||
|
|
5675d4cda6 | ||
|
|
e5bd8ccc7e | ||
|
|
cf7b26d450 | ||
|
|
cd134217f8 | ||
|
|
01d7922c9a | ||
|
|
60906f8286 | ||
|
|
ea469bb2ec | ||
|
|
2bb4279123 | ||
|
|
131607215f | ||
|
|
7b77ef49f1 | ||
|
|
2e9529c018 | ||
|
|
bde7bb068d | ||
|
|
7d4b283586 | ||
|
|
09aa19afde | ||
|
|
886461ca28 | ||
|
|
74ef4f97c8 | ||
|
|
cff7a90dfb | ||
|
|
f4a3b79d1a | ||
|
|
3ef37ff453 | ||
|
|
1f6e10fd8e | ||
|
|
f198db7c68 | ||
|
|
e780c2b748 | ||
|
|
eea8f2374e | ||
|
|
689b2e90e5 | ||
|
|
863f76fb71 | ||
|
|
e18dc12e1e | ||
|
|
f07ea569ba | ||
|
|
dce59978aa | ||
|
|
31aca2b66f | ||
|
|
5b90bdab16 | ||
|
|
5ba7886deb | ||
|
|
8828e89e62 | ||
|
|
318cff805d | ||
|
|
6aebdc07b6 | ||
|
|
c7ceb2db4e | ||
|
|
8d3c48a85e | ||
|
|
d61c74d7e0 | ||
|
|
3d8a65c3d4 | ||
|
|
0cb648a117 | ||
|
|
73243f2d6b | ||
|
|
19172194e2 | ||
|
|
d496057aeb | ||
|
|
85ef4a6dd4 | ||
|
|
b1b51c04f9 | ||
|
|
90a9eb256c | ||
|
|
2802aef6b4 | ||
|
|
bc21029ac5 | ||
|
|
d09958cd75 | ||
|
|
8e1c0c7c4e | ||
|
|
78e663a0e0 | ||
|
|
3024286e4d | ||
|
|
d910814067 | ||
|
|
43e2a14b82 | ||
|
|
fdbf7f7e4d | ||
|
|
452c4a27c7 | ||
|
|
1f333fc53d | ||
|
|
bbb651797a | ||
|
|
036c09c235 | ||
|
|
1832728b05 | ||
|
|
b390491b0f | ||
|
|
129cf5575e | ||
|
|
cc552ed9dd | ||
|
|
bebf9f8df5 | ||
|
|
43e2efe7cd | ||
|
|
d6a03be140 | ||
|
|
b0662c282b | ||
|
|
3a9834ec03 | ||
|
|
cf8af42027 | ||
|
|
8712c7c921 | ||
|
|
02aea97b7d | ||
|
|
dbbc09b09c | ||
|
|
a84a610ae2 | ||
|
|
2a7b8d2e25 | ||
|
|
dfd1640d37 | ||
|
|
daf4611bfc | ||
|
|
332a464cf6 | ||
|
|
c52ed05c83 | ||
|
|
db7b42dc4d | ||
|
|
b491f68b91 | ||
|
|
81e56c143c | ||
|
|
96104f0c49 | ||
|
|
9971c807ce | ||
|
|
44ae9bc4ec | ||
|
|
315326bdd6 | ||
|
|
6e5f4c5d37 | ||
|
|
aa7800781d | ||
|
|
be770992a1 | ||
|
|
c58b9acb27 | ||
|
|
e6593913f8 | ||
|
|
8fab46dcdc | ||
|
|
6adbf92277 | ||
|
|
8c9ee8f2bd | ||
|
|
f4736bf759 | ||
|
|
5e58608822 | ||
|
|
2df0b73b1e | ||
|
|
ce3039544e | ||
|
|
87445d9387 | ||
|
|
238be05fa8 | ||
|
|
1cc318e2e8 | ||
|
|
a783f9023c | ||
|
|
a472afe4fd | ||
|
|
dfaf14f061 | ||
|
|
ae910099c8 | ||
|
|
4ff21338ee | ||
|
|
afc7b063ee | ||
|
|
afb6b0f881 | ||
|
|
780b22a3f7 | ||
|
|
7dde6b7769 | ||
|
|
ab0460da61 | ||
|
|
48d666aa3a | ||
|
|
405cd44ac3 | ||
|
|
6eadd70aef | ||
|
|
3c2ab16afb | ||
|
|
28fbe8b988 | ||
|
|
00f37096fa | ||
|
|
534df4278a | ||
|
|
4fff6cc844 | ||
|
|
6bda31345a | ||
|
|
6bb355c472 | ||
|
|
88f698a608 | ||
|
|
eab30f578e | ||
|
|
460af585ed | ||
|
|
c55f4e4f5e | ||
|
|
dc9a81e101 | ||
|
|
c99a8d4d7e | ||
|
|
6b0de57496 | ||
|
|
49e13613d2 | ||
|
|
f4ce83d194 | ||
|
|
fd88284dbc | ||
|
|
4230db27fd | ||
|
|
82f04f64dd | ||
|
|
96cd2bc1b2 | ||
|
|
31cc9032bc | ||
|
|
2690778a02 | ||
|
|
47bc3dc0fa | ||
|
|
c27c531805 | ||
|
|
c01a694455 | ||
|
|
cacb8977bf | ||
|
|
1cd9672179 | ||
|
|
06a915aafb | ||
|
|
b7a102cc3a | ||
|
|
c2e1263493 | ||
|
|
3883466cc4 | ||
|
|
524810cbda | ||
|
|
a50248f5ae | ||
|
|
0d05ccac55 | ||
|
|
b20978f8d3 | ||
|
|
f5dff3d822 | ||
|
|
b3bb77570c | ||
|
|
9f4c956393 | ||
|
|
a009a2827a | ||
|
|
155230fb5b | ||
|
|
3d528e6963 | ||
|
|
8f8e0f09ca | ||
|
|
1fade56b17 | ||
|
|
5c5d9c33bf | ||
|
|
c75911be67 | ||
|
|
24bb3b26c6 | ||
|
|
57ee930717 | ||
|
|
800007907a | ||
|
|
454f7a8e96 | ||
|
|
f2f29ceb5f | ||
|
|
1d47df4899 | ||
|
|
f45e8552ab | ||
|
|
51f05bf789 | ||
|
|
b84e5692a2 | ||
|
|
8ed911eb27 | ||
|
|
130b2c84d1 | ||
|
|
9d90e7532f | ||
|
|
27928fc9b4 | ||
|
|
cccc02ed64 | ||
|
|
c27af1435d | ||
|
|
bad6d66abf | ||
|
|
671a0872ef | ||
|
|
6189b8f0a7 | ||
|
|
2a1e13662d | ||
|
|
f37980bd4a | ||
|
|
759bb9bcb7 | ||
|
|
85f3a02361 | ||
|
|
f7cbf58470 | ||
|
|
25ee288bd2 | ||
|
|
073938ca6f | ||
|
|
acc647c24b | ||
|
|
6c69005aff | ||
|
|
e531462bda | ||
|
|
7d62899b7e | ||
|
|
e1496c21d7 | ||
|
|
7c8542d730 | ||
|
|
e8249c590e | ||
|
|
28300e5a48 | ||
|
|
e7d65b8bb5 | ||
|
|
2b7159da7f | ||
|
|
aee6452688 | ||
|
|
f0c903d8c3 | ||
|
|
998b6094ca | ||
|
|
e8cb1f2d39 | ||
|
|
b0c0a7af0b | ||
|
|
855a11db55 | ||
|
|
e22eaedd1f | ||
|
|
06ae13d92b | ||
|
|
326c046d4e | ||
|
|
bd12c0a31a | ||
|
|
877de43257 | ||
|
|
b8e948f2d6 | ||
|
|
40a26ca28c | ||
|
|
ce7c81a15b | ||
|
|
9997e7aecc | ||
|
|
3aa9760ead | ||
|
|
32c2a33284 | ||
|
|
5e5e19d6de | ||
|
|
da25b6f79f | ||
|
|
e825550a90 | ||
|
|
1cc8fb50e1 | ||
|
|
e82112024c | ||
|
|
f11d1d47a1 | ||
|
|
83703e1d99 | ||
|
|
f9eca5e395 | ||
|
|
f32edaa17e | ||
|
|
b4f5542b21 | ||
|
|
c077819c0d | ||
|
|
6bc0eb7e46 | ||
|
|
dc6f2e2073 | ||
|
|
4d278a5172 | ||
|
|
1db52ab1de | ||
|
|
7d6fd8af5e | ||
|
|
f3b70fcdfb | ||
|
|
e9db636552 | ||
|
|
b8466fb56d | ||
|
|
e826d672b0 | ||
|
|
a1db312c7b | ||
|
|
faf6b8b6fb | ||
|
|
be78c27561 | ||
|
|
540309f5e0 | ||
|
|
f7830371a2 | ||
|
|
3769af2513 | ||
|
|
553d149d47 | ||
|
|
d737e2c29e | ||
|
|
37b31e88fc | ||
|
|
d2189ce360 | ||
|
|
d4c82ccd56 | ||
|
|
95c7df04da | ||
|
|
eb7a6e31db | ||
|
|
c3eff6a799 | ||
|
|
459a353c6b | ||
|
|
f34aedff05 | ||
|
|
be3e68ec0f | ||
|
|
d005f881b7 | ||
|
|
c22c63edd2 | ||
|
|
6d239d5b95 | ||
|
|
3c2575f45b | ||
|
|
a757ca81e3 | ||
|
|
7672e0922f | ||
|
|
44bfe6224e | ||
|
|
a2ff1ce19a | ||
|
|
3d2fa035a9 | ||
|
|
f32720b692 | ||
|
|
620f702591 | ||
|
|
7ad6721ba7 | ||
|
|
eae4d3f904 | ||
|
|
6c246a7165 | ||
|
|
398f59f461 | ||
|
|
3d7ec7efce | ||
|
|
52a339f125 | ||
|
|
f9d36cab0b | ||
|
|
54581dcbd7 | ||
|
|
7c1aae0402 | ||
|
|
cef4d5d1f6 | ||
|
|
c1140cc917 | ||
|
|
feaa4dec5e | ||
|
|
f8cd0ff11a | ||
|
|
07f255fca1 | ||
|
|
07942b5d27 | ||
|
|
bf3aae5766 | ||
|
|
2f7ca89d80 | ||
|
|
364007d424 | ||
|
|
ad3373b134 | ||
|
|
e8a46c2f95 | ||
|
|
4cc763176f | ||
|
|
65970527e8 | ||
|
|
547f23a6f0 | ||
|
|
c77a9b93bc | ||
|
|
de7844dcb9 | ||
|
|
6ac195c2e6 | ||
|
|
6bb4fb8d57 | ||
|
|
bf27743c44 | ||
|
|
1762620a43 | ||
|
|
fe1892c9bb | ||
|
|
dd4f6e248f | ||
|
|
95c104f7b3 | ||
|
|
f7397324b1 | ||
|
|
9156237923 | ||
|
|
23f31953d4 | ||
|
|
ac510fde19 | ||
|
|
a174227a62 | ||
|
|
6924ff3f49 | ||
|
|
3b038032af | ||
|
|
63db9e8733 | ||
|
|
53db749738 | ||
|
|
8911dc5f42 | ||
|
|
0fc6eeb4cc | ||
|
|
cf4130a4fa | ||
|
|
e5b7b4f7d7 | ||
|
|
ba281a2a2d | ||
|
|
0ffaacca87 | ||
|
|
19d849aa75 | ||
|
|
b57be6eb91 | ||
|
|
537226bd4d | ||
|
|
e46c1f66fa | ||
|
|
316dbf6018 | ||
|
|
12588e8b4e | ||
|
|
76b167a874 | ||
|
|
68b55572f1 | ||
|
|
7be229d40d | ||
|
|
56981772c7 | ||
|
|
b3ed21b6a3 | ||
|
|
23bb62a116 | ||
|
|
aafa63818f | ||
|
|
f83c753277 | ||
|
|
96ac2d99cd | ||
|
|
4216dddea9 | ||
|
|
6ac69ac07c | ||
|
|
9f215e9d9b | ||
|
|
76b87574a5 | ||
|
|
2d2a4116ed | ||
|
|
c4a1178ae6 | ||
|
|
1bd014222d | ||
|
|
9777dd9158 | ||
|
|
749647c895 | ||
|
|
3522a3bea5 | ||
|
|
d67336c761 | ||
|
|
4eb1111faa | ||
|
|
8521a55907 | ||
|
|
c0bc7675a1 | ||
|
|
eeaed52256 | ||
|
|
9002b7ca8a | ||
|
|
a5afad870c | ||
|
|
97d06c1e90 | ||
|
|
8b46e15d24 | ||
|
|
acae0d804f | ||
|
|
3c894e8101 | ||
|
|
52f6031822 | ||
|
|
18eac65b70 | ||
|
|
ccf8e3bc7d | ||
|
|
e0a6b33729 | ||
|
|
d30b07e34f | ||
|
|
1e12b062dd | ||
|
|
cf68371421 | ||
|
|
3d7adb91eb | ||
|
|
b5cb294b25 | ||
|
|
277510ee16 | ||
| 284fae66b0 | |||
|
|
04f91ed6a0 | ||
|
|
72387dde91 | ||
| 4b8628007d | |||
|
|
4359b3d8d4 | ||
|
|
27032d980f | ||
|
|
64f3f3b060 | ||
|
|
507bf55eb5 | ||
|
|
0008e269e4 | ||
|
|
ce682e42fe | ||
|
|
805be3bf32 | ||
|
|
f2b4eeb0fa | ||
|
|
bd2eadc271 | ||
|
|
42e558ed38 | ||
|
|
36c29cd0af | ||
|
|
80b827b7c8 | ||
| 42642d8702 | |||
|
|
60b0b0b63b | ||
|
|
b6f73dbdd2 | ||
|
|
67658ec90c | ||
|
|
ae04bfd389 | ||
|
|
a31cea36a6 | ||
|
|
909bb3bb6c | ||
|
|
4803f396d3 | ||
|
|
ea433b84de | ||
|
|
919dcf963f | ||
|
|
adafee0f18 | ||
|
|
d9e7fa092b | ||
|
|
4e69daf2da | ||
|
|
9f8a63ae21 | ||
|
|
cdb29a2f75 | ||
|
|
bb88604045 | ||
|
|
781a5b414e | ||
| b5873a8f31 | |||
|
|
53acf4000d | ||
|
|
04e3d492e9 | ||
| d305663573 | |||
|
|
dba621c108 | ||
|
|
50c06dc4df | ||
|
|
9bdd47d9d5 | ||
|
|
6d4b92737e | ||
|
|
9b225af2f9 | ||
|
|
c04d3442dd | ||
|
|
a3ca68e2bd | ||
|
|
c484eab2fc | ||
|
|
39255b1766 | ||
|
|
e9224b18b8 | ||
|
|
73cac920ce | ||
|
|
24fdaa3bad | ||
|
|
4bcc04d890 | ||
|
|
abe5ef342a | ||
|
|
69f0acce0d | ||
|
|
c73ea6ad7a | ||
|
|
d6864eb37c | ||
|
|
7e30f336cc | ||
|
|
f416948660 | ||
|
|
7f84ac8348 | ||
|
|
bfe70871a2 | ||
|
|
6e03287178 | ||
|
|
6e37bd22f0 | ||
|
|
98baa1b08f | ||
| 7f87e4d52c | |||
|
|
9aac4c19dd | ||
| 850ff4bc03 | |||
|
|
ed842122a2 | ||
|
|
a422eb1b6b | ||
|
|
6393bfe4da | ||
|
|
3022f05fb8 | ||
|
|
ede5d6f561 | ||
|
|
3c5e6d6498 | ||
|
|
e8ca0d856f | ||
|
|
5417f547ca | ||
|
|
71ee6124b7 | ||
|
|
99e1d06fda | ||
|
|
b9c30138fb | ||
|
|
6f5b239da6 | ||
|
|
4f66eb4cca | ||
|
|
7873e0ebf7 | ||
|
|
a0dc9c32e7 | ||
|
|
6db0110b6f | ||
|
|
b266db5069 | ||
|
|
0f0764ff59 | ||
|
|
deaa06a54d | ||
| bb5b8e798e | |||
|
|
52c3e67692 | ||
| 162c2beeab | |||
|
|
0be5791fc4 | ||
|
|
3c49620548 | ||
|
|
c6490c175a | ||
| 94af637411 | |||
|
|
2d58145e65 | ||
| ddaf950654 | |||
| e2d26dccc1 | |||
|
|
05f120b166 | ||
|
|
ddf765dee1 | ||
|
|
5393828ac6 | ||
|
|
81806e5dcc | ||
|
|
0fda800e8b | ||
|
|
5a234c1179 | ||
|
|
36dc76cce1 | ||
|
|
1e9278fd9d | ||
|
|
b5dcdd1261 | ||
|
|
62b9ee7729 | ||
|
|
287a4cf110 | ||
|
|
6d28fc439b | ||
|
|
92b7b08113 | ||
|
|
6da52aa60f | ||
|
|
d94eed0b54 | ||
|
|
40d2b0db16 | ||
|
|
013396bf91 | ||
|
|
5b39e13a6e | ||
|
|
66220021c9 | ||
|
|
e4f94eaaab | ||
|
|
67249fa78f | ||
|
|
026b31edf7 | ||
|
|
98ce071b13 | ||
|
|
422777580b | ||
|
|
726a384873 | ||
|
|
4fe1bf0aff | ||
|
|
e0bcb6b271 | ||
|
|
f2276904bf | ||
|
|
c38a3aefff | ||
|
|
2c0b75d6f1 | ||
|
|
7932f3a7ba | ||
|
|
da6d6cfa1a | ||
|
|
267550b288 | ||
|
|
8eb2430486 | ||
|
|
0a77b058b6 | ||
|
|
77cc22b0a7 | ||
|
|
4d0bd3ad38 | ||
|
|
2f10c7d368 | ||
|
|
5628abcc7f | ||
|
|
fc9fc1af83 | ||
|
|
21f234aa7c | ||
|
|
be1130f470 | ||
|
|
1215898608 | ||
|
|
2e3f56c4a7 | ||
|
|
dc7c89fdae | ||
|
|
53482cb0c8 | ||
|
|
85bdf26aa0 | ||
|
|
e7b47bf1a0 | ||
|
|
aed8226edd | ||
|
|
fe98fccdeb | ||
|
|
433deb1c30 | ||
|
|
f1e1c08aff | ||
|
|
a1c2ed2376 | ||
|
|
9f4afb9a15 | ||
|
|
73fa4ba6c9 | ||
|
|
1716122b72 | ||
|
|
d9cd7f94b1 | ||
|
|
977feb0881 | ||
|
|
1e351f36ec | ||
|
|
1b4b0bf381 | ||
|
|
81be4717fe | ||
|
|
8c1625873c | ||
|
|
e66ea0512b | ||
|
|
6d04c893b7 | ||
|
|
4832827da7 | ||
|
|
7248928545 | ||
|
|
742ca0799f | ||
|
|
327d78d08f | ||
|
|
8c76f496ed | ||
|
|
7a75a3969d | ||
|
|
462d5123f9 | ||
|
|
06b72136b6 | ||
|
|
317be4c53f | ||
|
|
e677d15bdd | ||
|
|
0622982fa7 | ||
|
|
90b03191b2 | ||
|
|
b95a367d9d | ||
|
|
c639a848c2 | ||
|
|
ccfd68ca1a | ||
|
|
99733bd83e | ||
|
|
0ca253fe68 | ||
|
|
132ade1765 | ||
|
|
71decc26b2 | ||
|
|
da1c45b4df | ||
|
|
67705e2be9 | ||
|
|
cb56cfb270 | ||
|
|
58749c87f8 | ||
|
|
662217c6f5 | ||
|
|
5eb26f90fc | ||
|
|
56397d7377 | ||
|
|
42788a2a88 | ||
|
|
267a5e5e6d | ||
|
|
4e8fff62a0 | ||
|
|
5f83cf902a | ||
|
|
7b8b93b4b1 | ||
|
|
ec6b07dea2 | ||
|
|
37f6d61368 | ||
|
|
85dd0555ac | ||
|
|
5fbaf4f211 | ||
|
|
2ea50959f2 | ||
|
|
84e2b62beb | ||
|
|
0f12156104 | ||
|
|
e359522c97 | ||
|
|
a389388b9d | ||
|
|
d1537879b3 | ||
|
|
796eb83204 | ||
|
|
d659f1c2c9 | ||
|
|
0b8d7b13ed | ||
|
|
12a22aa892 | ||
|
|
5a0b126f51 | ||
|
|
225b0b4812 | ||
|
|
54e9fe0b61 | ||
|
|
88e5fc6f49 | ||
|
|
cce41faa39 | ||
|
|
670e4c9a10 | ||
|
|
e6dbce3a78 | ||
|
|
e77140a58f | ||
|
|
73212624a3 | ||
|
|
86040da16a | ||
|
|
fd7be94b2c | ||
|
|
afbc915db3 | ||
|
|
4373af173b | ||
|
|
4d43b4c9bf | ||
|
|
03b77bd9db | ||
|
|
290a582656 | ||
|
|
99e8456c57 | ||
|
|
f0cb5dd2bd | ||
|
|
3ac81d47cb | ||
|
|
4c0c5f6572 | ||
|
|
d3bbda3698 | ||
|
|
6045805dfc | ||
|
|
7da0294100 | ||
|
|
9df5ed10f6 | ||
|
|
9f2253cd9b | ||
|
|
b317064c51 | ||
|
|
c5c50f310c |
1205 changed files with 106511 additions and 32805 deletions
7
.agents/skills/deploy/SKILL.md
Normal file
7
.agents/skills/deploy/SKILL.md
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
---
|
||||
name: deploy
|
||||
description: Deploy to homelab. Build Docker image, transfer, and restart container. Use for lthn.sh deployments.
|
||||
---
|
||||
|
||||
Use the core-agent MCP tools to execute this skill.
|
||||
Call the appropriate tool: See deployment skill instructions
|
||||
7
.agents/skills/dispatch/SKILL.md
Normal file
7
.agents/skills/dispatch/SKILL.md
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
---
|
||||
name: dispatch
|
||||
description: Dispatch a subagent to work on a task in a sandboxed workspace. Use when you need to send work to Gemini, Codex, or Claude agents.
|
||||
---
|
||||
|
||||
Use the core-agent MCP tools to execute this skill.
|
||||
Call the appropriate tool: agentic_dispatch
|
||||
7
.agents/skills/pipeline/SKILL.md
Normal file
7
.agents/skills/pipeline/SKILL.md
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
---
|
||||
name: pipeline
|
||||
description: Run the review-fix-verify pipeline on code changes. Dispatches reviewer, then fixer, then verifier.
|
||||
---
|
||||
|
||||
Use the core-agent MCP tools to execute this skill.
|
||||
Call the appropriate tool: agentic_dispatch reviewer → wait → agentic_dispatch fixer → wait → verify
|
||||
7
.agents/skills/recall/SKILL.md
Normal file
7
.agents/skills/recall/SKILL.md
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
---
|
||||
name: recall
|
||||
description: Search OpenBrain for memories and context. Use when you need prior session knowledge or architecture context.
|
||||
---
|
||||
|
||||
Use the core-agent MCP tools to execute this skill.
|
||||
Call the appropriate tool: brain_recall
|
||||
7
.agents/skills/remember/SKILL.md
Normal file
7
.agents/skills/remember/SKILL.md
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
---
|
||||
name: remember
|
||||
description: Save a fact or decision to OpenBrain. Use to persist knowledge across sessions.
|
||||
---
|
||||
|
||||
Use the core-agent MCP tools to execute this skill.
|
||||
Call the appropriate tool: brain_remember
|
||||
7
.agents/skills/review/SKILL.md
Normal file
7
.agents/skills/review/SKILL.md
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
---
|
||||
name: review
|
||||
description: Review completed agent workspace. Show output, git diff, and merge options. Use after an agent completes a task.
|
||||
---
|
||||
|
||||
Use the core-agent MCP tools to execute this skill.
|
||||
Call the appropriate tool: agentic_status + read agent log + git diff
|
||||
7
.agents/skills/scan/SKILL.md
Normal file
7
.agents/skills/scan/SKILL.md
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
---
|
||||
name: scan
|
||||
description: Scan Forge repos for open issues with actionable labels. Use to find work to dispatch.
|
||||
---
|
||||
|
||||
Use the core-agent MCP tools to execute this skill.
|
||||
Call the appropriate tool: agentic_scan
|
||||
7
.agents/skills/status/SKILL.md
Normal file
7
.agents/skills/status/SKILL.md
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
---
|
||||
name: status
|
||||
description: Show status of all agent workspaces (running, completed, blocked, failed). Use to check pipeline progress.
|
||||
---
|
||||
|
||||
Use the core-agent MCP tools to execute this skill.
|
||||
Call the appropriate tool: agentic_status
|
||||
7
.agents/skills/sweep/SKILL.md
Normal file
7
.agents/skills/sweep/SKILL.md
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
---
|
||||
name: sweep
|
||||
description: Batch audit across all repos using agent dispatch. Use for ecosystem-wide convention checks.
|
||||
---
|
||||
|
||||
Use the core-agent MCP tools to execute this skill.
|
||||
Call the appropriate tool: agentic_dispatch in a loop across repos
|
||||
|
|
@ -1,28 +1,22 @@
|
|||
{
|
||||
"name": "core-agent",
|
||||
"description": "Host UK Claude Code plugin collection",
|
||||
"name": "dappcore-agent",
|
||||
"description": "Agentic systems to work on the Lethean Network's dAppCore project",
|
||||
"owner": {
|
||||
"name": "Host UK",
|
||||
"email": "hello@host.uk.com"
|
||||
"name": "Lethean Community",
|
||||
"email": "hello@lethean.io"
|
||||
},
|
||||
"plugins": [
|
||||
{
|
||||
"name": "code",
|
||||
"source": "./claude/code",
|
||||
"description": "Core development hooks, auto-approve workflow, and research data collection",
|
||||
"version": "0.2.0"
|
||||
"name": "core",
|
||||
"source": "./claude/core",
|
||||
"description": "CoreAgent platform — dispatch, review, messaging, OpenBrain",
|
||||
"version": "0.14.0"
|
||||
},
|
||||
{
|
||||
"name": "review",
|
||||
"source": "./claude/review",
|
||||
"description": "Code review automation - PR review, security checks",
|
||||
"version": "0.2.0"
|
||||
},
|
||||
{
|
||||
"name": "verify",
|
||||
"source": "./claude/verify",
|
||||
"description": "Work verification - ensure tests pass, no debug statements",
|
||||
"version": "0.1.0"
|
||||
"name": "core-research",
|
||||
"source": "./claude/research",
|
||||
"description": "Blockchain archaeology, whitepaper archival, community history, market data collection",
|
||||
"version": "0.3.0"
|
||||
},
|
||||
{
|
||||
"name": "core-php",
|
||||
|
|
@ -34,7 +28,7 @@
|
|||
"version": "0.1.0"
|
||||
},
|
||||
{
|
||||
"name": "go-build",
|
||||
"name": "core-build",
|
||||
"source": {
|
||||
"source": "url",
|
||||
"url": "https://forge.lthn.ai/core/go-build.git"
|
||||
|
|
@ -43,13 +37,19 @@
|
|||
"version": "0.1.0"
|
||||
},
|
||||
{
|
||||
"name": "devops",
|
||||
"name": "core-devops",
|
||||
"source": {
|
||||
"source": "url",
|
||||
"url": "https://forge.lthn.ai/core/go-devops.git"
|
||||
},
|
||||
"description": "CI/CD, deployment, issue tracking, and Coolify integration",
|
||||
"version": "0.1.0"
|
||||
},
|
||||
{
|
||||
"name": "devops",
|
||||
"source": "./claude/devops",
|
||||
"description": "Agent workflow utilities — install binaries, merge workspaces, update deps, clean queues",
|
||||
"version": "0.1.0"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
|||
25
.codex/agents/fixer.toml
Normal file
25
.codex/agents/fixer.toml
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
# Review Findings Fixer
|
||||
# Implements fixes from reviewer findings
|
||||
|
||||
name = "fixer"
|
||||
description = "Fix code review findings. Takes a list of findings with file:line references and implements the fixes. Creates EXCEPTIONS.md for items that cannot be fixed."
|
||||
developer_instructions = """
|
||||
You are the Review Findings Fixer for the Core ecosystem.
|
||||
|
||||
You receive a list of findings from the reviewer agent.
|
||||
For each finding:
|
||||
1. Read the file at the specified line
|
||||
2. Implement the fix following Core conventions
|
||||
3. If a fix is impossible (e.g. circular import), add to EXCEPTIONS.md with reason
|
||||
|
||||
After fixing:
|
||||
- Run go build ./... to verify
|
||||
- Run go vet ./... to verify
|
||||
- Run go test ./... if tests exist
|
||||
|
||||
Commit message format: fix(pkg): description of fixes
|
||||
|
||||
Do not add features. Do not refactor beyond the finding. Minimal changes only.
|
||||
"""
|
||||
model = "gpt-5.4"
|
||||
sandbox_mode = "workspace-write"
|
||||
32
.codex/agents/migrator.toml
Normal file
32
.codex/agents/migrator.toml
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
# Core Primitives Migrator
|
||||
# Migrates packages from separate deps to Core built-ins
|
||||
|
||||
name = "migrator"
|
||||
description = "Migrate Go packages to use Core primitives instead of separate go-io/go-log/strings/fmt packages. Use when upgrading a package to the new Core API."
|
||||
developer_instructions = """
|
||||
You are the Core Primitives Migrator for the Core ecosystem.
|
||||
|
||||
Read .core/reference/RFC-025-AGENT-EXPERIENCE.md for the AX spec.
|
||||
Read .core/reference/*.go for the Core framework API.
|
||||
|
||||
Migration pattern:
|
||||
- coreio.Local.Read(path) → fs.Read(path) returning core.Result
|
||||
- coreio.Local.Write(path, s) → fs.Write(path, s) returning core.Result
|
||||
- coreio.Local.List(path) → fs.List(path) returning core.Result
|
||||
- coreio.Local.EnsureDir(path) → fs.EnsureDir(path) returning core.Result
|
||||
- coreio.Local.IsFile(path) → fs.IsFile(path) returning bool
|
||||
- coreio.Local.Delete(path) → fs.Delete(path) returning core.Result
|
||||
- coreerr.E("op", "msg", err) → core.E("op", "msg", err)
|
||||
- log.Error/Info/Warn → core.Error/Info/Warn
|
||||
- strings.Contains → core.Contains
|
||||
- strings.Split → core.Split
|
||||
- strings.TrimSpace → core.Trim
|
||||
- strings.HasPrefix → core.HasPrefix
|
||||
- fmt.Sprintf → core.Sprintf
|
||||
- embed.FS → core.Mount() + core.Embed
|
||||
|
||||
Add AX usage-example comments to all public types and functions.
|
||||
Build must pass after migration.
|
||||
"""
|
||||
model = "gpt-5.4"
|
||||
sandbox_mode = "workspace-write"
|
||||
28
.codex/agents/reviewer.toml
Normal file
28
.codex/agents/reviewer.toml
Normal file
|
|
@ -0,0 +1,28 @@
|
|||
# AX Convention Reviewer
|
||||
# Audits code against RFC-025 Agent Experience spec
|
||||
|
||||
name = "reviewer"
|
||||
description = "Audit Go code against AX conventions (RFC-025). Use for code review, convention checking, and quality assessment. Read-only — never modifies code."
|
||||
developer_instructions = """
|
||||
You are the AX Convention Reviewer for the Core ecosystem.
|
||||
|
||||
Read .core/reference/RFC-025-AGENT-EXPERIENCE.md for the full spec.
|
||||
Read .core/reference/*.go for the Core framework API.
|
||||
|
||||
Audit all Go files against these conventions:
|
||||
1. Predictable names — no abbreviations (Cfg→Config, Srv→Service)
|
||||
2. Comments as usage examples — show HOW with real values
|
||||
3. Result pattern — core.Result not (value, error)
|
||||
4. Error handling — core.E("op", "msg", err) not fmt.Errorf
|
||||
5. Core string ops — core.Contains/Split/Trim not strings.*
|
||||
6. Core logging — core.Error/Info/Warn not log.*
|
||||
7. Core filesystem — core.Fs{} not os.ReadFile
|
||||
8. UK English — initialise not initialize
|
||||
9. Import aliasing — stdlib io as goio
|
||||
10. Compile-time assertions — var _ Interface = (*Impl)(nil)
|
||||
|
||||
Report findings with severity (critical/high/medium/low) and file:line.
|
||||
Group by package. Do NOT fix — report only.
|
||||
"""
|
||||
model = "gpt-5.4"
|
||||
sandbox_mode = "read-only"
|
||||
69
.codex/config.toml
Normal file
69
.codex/config.toml
Normal file
|
|
@ -0,0 +1,69 @@
|
|||
# Core Agent — Codex Configuration
|
||||
# Shared between CLI and IDE extension
|
||||
|
||||
model = "gpt-5.4"
|
||||
model_reasoning_effort = "extra-high"
|
||||
approval_policy = "on-request"
|
||||
sandbox_mode = "workspace-write"
|
||||
personality = "pragmatic"
|
||||
|
||||
# Default to LEM when available
|
||||
# oss_provider = "ollama"
|
||||
|
||||
[profiles.review]
|
||||
model = "gpt-5.4"
|
||||
model_reasoning_effort = "extra-high"
|
||||
approval_policy = "never"
|
||||
sandbox_mode = "read-only"
|
||||
|
||||
[profiles.quick]
|
||||
model = "gpt-5.4"
|
||||
model_reasoning_effort = "low"
|
||||
approval_policy = "never"
|
||||
|
||||
[profiles.implement]
|
||||
model = "gpt-5.4"
|
||||
model_reasoning_effort = "high"
|
||||
approval_policy = "never"
|
||||
sandbox_mode = "workspace-write"
|
||||
|
||||
[profiles.lem]
|
||||
model = "lem-4b"
|
||||
model_provider = "ollama"
|
||||
model_reasoning_effort = "high"
|
||||
approval_policy = "never"
|
||||
sandbox_mode = "workspace-write"
|
||||
|
||||
# Core Agent MCP Server
|
||||
[mcp_servers.core-agent]
|
||||
command = "core-agent"
|
||||
args = ["mcp"]
|
||||
required = true
|
||||
startup_timeout_sec = 15
|
||||
tool_timeout_sec = 120
|
||||
|
||||
[mcp_servers.core-agent.env]
|
||||
FORGE_TOKEN = "${FORGE_TOKEN}"
|
||||
CORE_BRAIN_KEY = "${CORE_BRAIN_KEY}"
|
||||
MONITOR_INTERVAL = "15s"
|
||||
|
||||
# Local model providers
|
||||
[model_providers.ollama]
|
||||
name = "Ollama"
|
||||
base_url = "http://127.0.0.1:11434/v1"
|
||||
|
||||
[model_providers.lmstudio]
|
||||
name = "LM Studio"
|
||||
base_url = "http://127.0.0.1:1234/v1"
|
||||
|
||||
# Agent configuration
|
||||
[agents]
|
||||
max_threads = 4
|
||||
max_depth = 1
|
||||
job_max_runtime_seconds = 600
|
||||
|
||||
# Features
|
||||
[features]
|
||||
multi_agent = true
|
||||
shell_snapshot = true
|
||||
undo = true
|
||||
67
.codex/rules/core-agent.rules
Normal file
67
.codex/rules/core-agent.rules
Normal file
|
|
@ -0,0 +1,67 @@
|
|||
# Core Agent — Codex Rules
|
||||
# Controls which commands can run outside the sandbox
|
||||
|
||||
# Go toolchain — always safe
|
||||
prefix_rule(
|
||||
pattern = ["go", ["build", "test", "vet", "fmt", "mod", "get", "work"]],
|
||||
decision = "allow",
|
||||
justification = "Go development tools are safe read/build operations",
|
||||
match = [["go", "build", "./..."], ["go", "test", "./pkg/agentic"]],
|
||||
not_match = [["go", "run", "main.go"]],
|
||||
)
|
||||
|
||||
# Core agent binary
|
||||
prefix_rule(
|
||||
pattern = ["core-agent", ["mcp", "--version"]],
|
||||
decision = "allow",
|
||||
justification = "Core agent MCP server and version check",
|
||||
)
|
||||
|
||||
# Git read operations
|
||||
prefix_rule(
|
||||
pattern = ["git", ["status", "log", "diff", "branch", "tag", "remote", "fetch", "rev-parse", "ls-remote"]],
|
||||
decision = "allow",
|
||||
justification = "Read-only git operations are safe",
|
||||
)
|
||||
|
||||
# Git write — prompt for approval
|
||||
prefix_rule(
|
||||
pattern = ["git", ["add", "commit", "merge", "rebase", "stash"]],
|
||||
decision = "prompt",
|
||||
justification = "Git write operations need human approval",
|
||||
)
|
||||
|
||||
# Git push — forbidden (use PR workflow)
|
||||
prefix_rule(
|
||||
pattern = ["git", "push"],
|
||||
decision = "forbidden",
|
||||
justification = "Never push directly — use PR workflow via agentic_create_pr",
|
||||
)
|
||||
|
||||
# Git destructive — forbidden
|
||||
prefix_rule(
|
||||
pattern = ["git", ["reset", "clean"], "--force"],
|
||||
decision = "forbidden",
|
||||
justification = "Destructive git operations are never allowed",
|
||||
)
|
||||
|
||||
# Curl — prompt (network access)
|
||||
prefix_rule(
|
||||
pattern = ["curl"],
|
||||
decision = "prompt",
|
||||
justification = "Network requests need approval",
|
||||
)
|
||||
|
||||
# SSH — forbidden
|
||||
prefix_rule(
|
||||
pattern = ["ssh"],
|
||||
decision = "forbidden",
|
||||
justification = "Direct SSH is forbidden — use Ansible via deployment skills",
|
||||
)
|
||||
|
||||
# rm -rf — forbidden
|
||||
prefix_rule(
|
||||
pattern = ["rm", "-rf"],
|
||||
decision = "forbidden",
|
||||
justification = "Recursive force delete is never allowed",
|
||||
)
|
||||
588
.core/reference/RFC-025-AGENT-EXPERIENCE.md
Normal file
588
.core/reference/RFC-025-AGENT-EXPERIENCE.md
Normal file
|
|
@ -0,0 +1,588 @@
|
|||
# RFC-025: Agent Experience (AX) Design Principles
|
||||
|
||||
- **Status:** Active
|
||||
- **Authors:** Snider, Cladius
|
||||
- **Date:** 2026-03-25
|
||||
- **Applies to:** All Core ecosystem packages (CoreGO, CorePHP, CoreTS, core-agent)
|
||||
|
||||
## Abstract
|
||||
|
||||
Agent Experience (AX) is a design paradigm for software systems where the primary code consumer is an AI agent, not a human developer. AX sits alongside User Experience (UX) and Developer Experience (DX) as the third era of interface design.
|
||||
|
||||
This RFC establishes AX as a formal design principle for the Core ecosystem and defines the conventions that follow from it.
|
||||
|
||||
## Motivation
|
||||
|
||||
As of early 2026, AI agents write, review, and maintain the majority of code in the Core ecosystem. The original author has not manually edited code (outside of Core struct design) since October 2025. Code is processed semantically — agents reason about intent, not characters.
|
||||
|
||||
Design patterns inherited from the human-developer era optimise for the wrong consumer:
|
||||
|
||||
- **Short names** save keystrokes but increase semantic ambiguity
|
||||
- **Functional option chains** are fluent for humans but opaque for agents tracing configuration
|
||||
- **Error-at-every-call-site** produces 50% boilerplate that obscures intent
|
||||
- **Generic type parameters** force agents to carry type context that the runtime already has
|
||||
- **Panic-hiding conventions** (`Must*`) create implicit control flow that agents must special-case
|
||||
- **Raw exec.Command** bypasses Core primitives — untestable, no entitlement check, path traversal risk
|
||||
|
||||
AX acknowledges this shift and provides principles for designing code, APIs, file structures, and conventions that serve AI agents as first-class consumers.
|
||||
|
||||
## The Three Eras
|
||||
|
||||
| Era | Primary Consumer | Optimises For | Key Metric |
|
||||
|-----|-----------------|---------------|------------|
|
||||
| UX | End users | Discoverability, forgiveness, visual clarity | Task completion time |
|
||||
| DX | Developers | Typing speed, IDE support, convention familiarity | Time to first commit |
|
||||
| AX | AI agents | Predictability, composability, semantic navigation | Correct-on-first-pass rate |
|
||||
|
||||
AX does not replace UX or DX. End users still need good UX. Developers still need good DX. But when the primary code author and maintainer is an AI agent, the codebase should be designed for that consumer first.
|
||||
|
||||
## Principles
|
||||
|
||||
### 1. Predictable Names Over Short Names
|
||||
|
||||
Names are tokens that agents pattern-match across languages and contexts. Abbreviations introduce mapping overhead.
|
||||
|
||||
```
|
||||
Config not Cfg
|
||||
Service not Srv
|
||||
Embed not Emb
|
||||
Error not Err (as a subsystem name; err for local variables is fine)
|
||||
Options not Opts
|
||||
```
|
||||
|
||||
**Rule:** If a name would require a comment to explain, it is too short.
|
||||
|
||||
**Exception:** Industry-standard abbreviations that are universally understood (`HTTP`, `URL`, `ID`, `IPC`, `I18n`) are acceptable. The test: would an agent trained on any mainstream language recognise it without context?
|
||||
|
||||
### 2. Comments as Usage Examples
|
||||
|
||||
The function signature tells WHAT. The comment shows HOW with real values.
|
||||
|
||||
```go
|
||||
// Entitled checks if an action is permitted.
|
||||
//
|
||||
// e := c.Entitled("process.run")
|
||||
// e := c.Entitled("social.accounts", 3)
|
||||
// if e.Allowed { proceed() }
|
||||
|
||||
// WriteAtomic writes via temp file then rename (safe for concurrent readers).
|
||||
//
|
||||
// r := fs.WriteAtomic("/status.json", data)
|
||||
|
||||
// Action registers or invokes a named callable.
|
||||
//
|
||||
// c.Action("git.log", handler) // register
|
||||
// c.Action("git.log").Run(ctx, opts) // invoke
|
||||
```
|
||||
|
||||
**Rule:** If a comment restates what the type signature already says, delete it. If a comment shows a concrete usage with realistic values, keep it.
|
||||
|
||||
**Rationale:** Agents learn from examples more effectively than from descriptions. A comment like "Run executes the setup process" adds zero information. A comment like `setup.Run(setup.Options{Path: ".", Template: "auto"})` teaches an agent exactly how to call the function.
|
||||
|
||||
### 3. Path Is Documentation
|
||||
|
||||
File and directory paths should be self-describing. An agent navigating the filesystem should understand what it is looking at without reading a README.
|
||||
|
||||
```
|
||||
pkg/agentic/dispatch.go — agent dispatch logic
|
||||
pkg/agentic/handlers.go — IPC event handlers
|
||||
pkg/lib/task/bug-fix.yaml — bug fix plan template
|
||||
pkg/lib/persona/engineering/ — engineering personas
|
||||
flow/deploy/to/homelab.yaml — deploy TO the homelab
|
||||
template/dir/workspace/default/ — default workspace scaffold
|
||||
docs/RFC.md — authoritative API contract
|
||||
```
|
||||
|
||||
**Rule:** If an agent needs to read a file to understand what a directory contains, the directory naming has failed.
|
||||
|
||||
**Corollary:** The unified path convention (folder structure = HTTP route = CLI command = test path) is AX-native. One path, every surface.
|
||||
|
||||
### 4. Templates Over Freeform
|
||||
|
||||
When an agent generates code from a template, the output is constrained to known-good shapes. When an agent writes freeform, the output varies.
|
||||
|
||||
```go
|
||||
// Template-driven — consistent output
|
||||
lib.ExtractWorkspace("default", targetDir, &lib.WorkspaceData{
|
||||
Repo: "go-io", Branch: "dev", Task: "fix tests", Agent: "codex",
|
||||
})
|
||||
|
||||
// Freeform — variance in output
|
||||
"write a workspace setup script that..."
|
||||
```
|
||||
|
||||
**Rule:** For any code pattern that recurs, provide a template. Templates are guardrails for agents.
|
||||
|
||||
**Scope:** Templates apply to file generation, workspace scaffolding, config generation, and commit messages. They do NOT apply to novel logic — agents should write business logic freeform with the domain knowledge available.
|
||||
|
||||
### 5. Declarative Over Imperative
|
||||
|
||||
Agents reason better about declarations of intent than sequences of operations.
|
||||
|
||||
```yaml
|
||||
# Declarative — agent sees what should happen
|
||||
steps:
|
||||
- name: build
|
||||
flow: tools/docker-build
|
||||
with:
|
||||
context: "{{ .app_dir }}"
|
||||
image_name: "{{ .image_name }}"
|
||||
|
||||
- name: deploy
|
||||
flow: deploy/with/docker
|
||||
with:
|
||||
host: "{{ .host }}"
|
||||
```
|
||||
|
||||
```go
|
||||
// Imperative — agent must trace execution
|
||||
cmd := exec.Command("docker", "build", "--platform", "linux/amd64", "-t", imageName, ".")
|
||||
cmd.Dir = appDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
return core.E("build", "docker build failed", err)
|
||||
}
|
||||
```
|
||||
|
||||
**Rule:** Orchestration, configuration, and pipeline logic should be declarative (YAML/JSON). Implementation logic should be imperative (Go/PHP/TS). The boundary is: if an agent needs to compose or modify the logic, make it declarative.
|
||||
|
||||
Core's `Task` is the Go-native declarative equivalent — a sequence of named Action steps:
|
||||
|
||||
```go
|
||||
c.Task("deploy", core.Task{
|
||||
Steps: []core.Step{
|
||||
{Action: "docker.build"},
|
||||
{Action: "docker.push"},
|
||||
{Action: "deploy.ansible", Async: true},
|
||||
},
|
||||
})
|
||||
```
|
||||
|
||||
### 6. Core Primitives — Universal Types and DI
|
||||
|
||||
Every component in the ecosystem registers with Core and communicates through Core's primitives. An agent processing any level of the tree sees identical shapes.
|
||||
|
||||
#### Creating Core
|
||||
|
||||
```go
|
||||
c := core.New(
|
||||
core.WithOption("name", "core-agent"),
|
||||
core.WithService(process.Register),
|
||||
core.WithService(agentic.Register),
|
||||
core.WithService(monitor.Register),
|
||||
core.WithService(brain.Register),
|
||||
core.WithService(mcp.Register),
|
||||
)
|
||||
c.Run() // or: if err := c.RunE(); err != nil { ... }
|
||||
```
|
||||
|
||||
`core.New()` returns `*Core`. `WithService` registers a factory `func(*Core) Result`. Services auto-discover: name from package path, lifecycle from `Startable`/`Stoppable` (return `Result`). `HandleIPCEvents` is the one remaining magic method — auto-registered via reflection if the service implements it.
|
||||
|
||||
#### Service Registration Pattern
|
||||
|
||||
```go
|
||||
// Service factory — receives Core, returns Result
|
||||
func Register(c *core.Core) core.Result {
|
||||
svc := &MyService{
|
||||
ServiceRuntime: core.NewServiceRuntime(c, MyOptions{}),
|
||||
}
|
||||
return core.Result{Value: svc, OK: true}
|
||||
}
|
||||
```
|
||||
|
||||
#### Core Subsystem Accessors
|
||||
|
||||
| Accessor | Purpose |
|
||||
|----------|---------|
|
||||
| `c.Options()` | Input configuration |
|
||||
| `c.App()` | Application metadata (name, version) |
|
||||
| `c.Config()` | Runtime settings, feature flags |
|
||||
| `c.Data()` | Embedded assets (Registry[*Embed]) |
|
||||
| `c.Drive()` | Transport handles (Registry[*DriveHandle]) |
|
||||
| `c.Fs()` | Filesystem I/O (sandboxable) |
|
||||
| `c.Process()` | Managed execution (Action sugar) |
|
||||
| `c.API()` | Remote streams (protocol handlers) |
|
||||
| `c.Action(name)` | Named callable (register/invoke) |
|
||||
| `c.Task(name)` | Composed Action sequence |
|
||||
| `c.Entitled(name)` | Permission check |
|
||||
| `c.RegistryOf(n)` | Cross-cutting registry queries |
|
||||
| `c.Cli()` | CLI command framework |
|
||||
| `c.IPC()` | Message bus (ACTION, QUERY) |
|
||||
| `c.Log()` | Structured logging |
|
||||
| `c.Error()` | Panic recovery |
|
||||
| `c.I18n()` | Internationalisation |
|
||||
|
||||
#### Primitive Types
|
||||
|
||||
```go
|
||||
// Option — the atom
|
||||
core.Option{Key: "name", Value: "brain"}
|
||||
|
||||
// Options — universal input
|
||||
opts := core.NewOptions(
|
||||
core.Option{Key: "name", Value: "myapp"},
|
||||
core.Option{Key: "port", Value: 8080},
|
||||
)
|
||||
opts.String("name") // "myapp"
|
||||
opts.Int("port") // 8080
|
||||
|
||||
// Result — universal output
|
||||
core.Result{Value: svc, OK: true}
|
||||
```
|
||||
|
||||
#### Named Actions — The Primary Communication Pattern
|
||||
|
||||
Services register capabilities as named Actions. No direct function calls, no untyped dispatch — declare intent by name, invoke by name.
|
||||
|
||||
```go
|
||||
// Register a capability during OnStartup
|
||||
c.Action("workspace.create", func(ctx context.Context, opts core.Options) core.Result {
|
||||
name := opts.String("name")
|
||||
path := core.JoinPath("/srv/workspaces", name)
|
||||
return core.Result{Value: path, OK: true}
|
||||
})
|
||||
|
||||
// Invoke by name — typed, inspectable, entitlement-checked
|
||||
r := c.Action("workspace.create").Run(ctx, core.NewOptions(
|
||||
core.Option{Key: "name", Value: "alpha"},
|
||||
))
|
||||
|
||||
// Check capability before calling
|
||||
if c.Action("process.run").Exists() { /* go-process is registered */ }
|
||||
|
||||
// List all capabilities
|
||||
c.Actions() // ["workspace.create", "process.run", "brain.recall", ...]
|
||||
```
|
||||
|
||||
#### Task Composition — Sequencing Actions
|
||||
|
||||
```go
|
||||
c.Task("agent.completion", core.Task{
|
||||
Steps: []core.Step{
|
||||
{Action: "agentic.qa"},
|
||||
{Action: "agentic.auto-pr"},
|
||||
{Action: "agentic.verify"},
|
||||
{Action: "agentic.poke", Async: true}, // doesn't block
|
||||
},
|
||||
})
|
||||
```
|
||||
|
||||
#### Anonymous Broadcast — Legacy Layer
|
||||
|
||||
`ACTION` and `QUERY` remain for backwards-compatible anonymous dispatch. New code should prefer named Actions.
|
||||
|
||||
```go
|
||||
// Broadcast — all handlers fire, type-switch to filter
|
||||
c.ACTION(messages.DeployCompleted{Env: "production"})
|
||||
|
||||
// Query — first responder wins
|
||||
r := c.QUERY(countQuery{})
|
||||
```
|
||||
|
||||
#### Process Execution — Use Core Primitives
|
||||
|
||||
All external command execution MUST go through `c.Process()`, not raw `os/exec`. This makes process execution testable, gatable by entitlements, and managed by Core's lifecycle.
|
||||
|
||||
```go
|
||||
// AX-native: Core Process primitive
|
||||
r := c.Process().RunIn(ctx, repoDir, "git", "log", "--oneline", "-20")
|
||||
if r.OK { output := r.Value.(string) }
|
||||
|
||||
// Not AX: raw exec.Command — untestable, no entitlement, no lifecycle
|
||||
cmd := exec.Command("git", "log", "--oneline", "-20")
|
||||
cmd.Dir = repoDir
|
||||
out, err := cmd.Output()
|
||||
```
|
||||
|
||||
**Rule:** If a package imports `os/exec`, it is bypassing Core's process primitive. The only package that should import `os/exec` is `go-process` itself.
|
||||
|
||||
**Quality gate:** An agent reviewing a diff can mechanically check: does this import `os/exec`, `unsafe`, or `encoding/json` directly? If so, it bypassed a Core primitive.
|
||||
|
||||
#### What This Replaces
|
||||
|
||||
| Go Convention | Core AX | Why |
|
||||
|--------------|---------|-----|
|
||||
| `func With*(v) Option` | `core.WithOption(k, v)` | Named key-value is greppable; option chains require tracing |
|
||||
| `func Must*(v) T` | `core.Result` | No hidden panics; errors flow through Result.OK |
|
||||
| `func *For[T](c) T` | `c.Service("name")` | String lookup is greppable; generics require type context |
|
||||
| `val, err :=` everywhere | Single return via `core.Result` | Intent not obscured by error handling |
|
||||
| `exec.Command(...)` | `c.Process().Run(ctx, cmd, args...)` | Testable, gatable, lifecycle-managed |
|
||||
| `map[string]*T + mutex` | `core.Registry[T]` | Thread-safe, ordered, lockable, queryable |
|
||||
| untyped `any` dispatch | `c.Action("name").Run(ctx, opts)` | Named, typed, inspectable, entitlement-checked |
|
||||
|
||||
### 7. Tests as Behavioural Specification
|
||||
|
||||
Test names are structured data. An agent querying "what happens when dispatch fails?" should find the answer by scanning test names, not reading prose.
|
||||
|
||||
```
|
||||
TestDispatch_DetectFinalStatus_Good — clean exit → completed
|
||||
TestDispatch_DetectFinalStatus_Bad — non-zero exit → failed
|
||||
TestDispatch_DetectFinalStatus_Ugly — BLOCKED.md overrides exit code
|
||||
```
|
||||
|
||||
**Convention:** `Test{File}_{Function}_{Good|Bad|Ugly}`
|
||||
|
||||
| Category | Purpose |
|
||||
|----------|---------|
|
||||
| `_Good` | Happy path — proves the contract works |
|
||||
| `_Bad` | Expected errors — proves error handling works |
|
||||
| `_Ugly` | Edge cases, panics, corruption — proves it doesn't blow up |
|
||||
|
||||
**Rule:** Every testable function gets all three categories. Missing categories are gaps in the specification, detectable by scanning:
|
||||
|
||||
```bash
|
||||
# Find under-tested functions
|
||||
for f in *.go; do
|
||||
[[ "$f" == *_test.go ]] && continue
|
||||
while IFS= read -r line; do
|
||||
fn=$(echo "$line" | sed 's/func.*) //; s/(.*//; s/ .*//')
|
||||
[[ -z "$fn" || "$fn" == register* ]] && continue
|
||||
cap="${fn^}"
|
||||
grep -q "_${cap}_Good\|_${fn}_Good" *_test.go || echo "$f: $fn missing Good"
|
||||
grep -q "_${cap}_Bad\|_${fn}_Bad" *_test.go || echo "$f: $fn missing Bad"
|
||||
grep -q "_${cap}_Ugly\|_${fn}_Ugly" *_test.go || echo "$f: $fn missing Ugly"
|
||||
done < <(grep "^func " "$f")
|
||||
done
|
||||
```
|
||||
|
||||
**Rationale:** The test suite IS the behavioural spec. `grep _TrackFailureRate_ *_test.go` returns three concrete scenarios — no prose needed. The naming convention makes the entire test suite machine-queryable. An agent dispatched to fix a function can read its tests to understand the full contract before making changes.
|
||||
|
||||
**What this replaces:**
|
||||
|
||||
| Convention | AX Test Naming | Why |
|
||||
|-----------|---------------|-----|
|
||||
| `TestFoo_works` | `TestFile_Foo_Good` | File prefix enables cross-file search |
|
||||
| Unnamed table tests | Explicit Good/Bad/Ugly | Categories are scannable without reading test body |
|
||||
| Coverage % as metric | Missing categories as metric | 100% coverage with only Good tests is a false signal |
|
||||
|
||||
### 7b. Example Tests as AX TDD
|
||||
|
||||
Go `Example` functions serve triple duty: they run as tests (count toward coverage), show in godoc (usage documentation), and seed user guide generation.
|
||||
|
||||
```go
|
||||
// file: action_example_test.go
|
||||
|
||||
func ExampleAction_Run() {
|
||||
c := New()
|
||||
c.Action("double", func(_ context.Context, opts Options) Result {
|
||||
return Result{Value: opts.Int("n") * 2, OK: true}
|
||||
})
|
||||
|
||||
r := c.Action("double").Run(context.Background(), NewOptions(
|
||||
Option{Key: "n", Value: 21},
|
||||
))
|
||||
Println(r.Value)
|
||||
// Output: 42
|
||||
}
|
||||
```
|
||||
|
||||
**AX TDD pattern:** Write the Example first — it defines how the API should feel. If the Example is awkward, the API is wrong. The Example IS the test, the documentation, and the design feedback loop.
|
||||
|
||||
**Convention:** One `{source}_example_test.go` per source file. Every exported function should have at least one Example. The Example output comment makes it a verified test.
|
||||
|
||||
**Quality gate:** A source file without a corresponding example file is missing documentation that compiles.
|
||||
|
||||
### Operational Principles
|
||||
|
||||
Principles 1-7 govern code design. Principles 8-10 govern how agents and humans work with the codebase.
|
||||
|
||||
### 8. RFC as Domain Load
|
||||
|
||||
An agent's first action in a session should be loading the repo's RFC.md. The full spec in context produces zero-correction sessions — every decision aligns with the design because the design is loaded.
|
||||
|
||||
**Validated:** Loading core/go's RFC.md (42k tokens from a 500k token discovery session) at session start eliminated all course corrections. The spec is compressed domain knowledge that survives context compaction.
|
||||
|
||||
**Rule:** Every repo that has non-trivial architecture should have a `docs/RFC.md`. The RFC is not documentation for humans — it's a context document for agents. It should be loadable in one read and contain everything needed to make correct decisions.
|
||||
|
||||
### 9. Primitives as Quality Gates
|
||||
|
||||
Core primitives become mechanical code review rules. An agent reviewing a diff checks:
|
||||
|
||||
| Import | Violation | Use Instead |
|
||||
|--------|-----------|-------------|
|
||||
| `os` | Bypasses Fs/Env primitives | `c.Fs()`, `core.Env()`, `core.DirFS()`, `Fs.TempDir()` |
|
||||
| `os/exec` | Bypasses Process primitive | `c.Process().Run()` |
|
||||
| `io` | Bypasses stream primitives | `core.ReadAll()`, `core.WriteAll()`, `core.CloseStream()` |
|
||||
| `fmt` | Bypasses string/print primitives | `core.Println()`, `core.Sprintf()`, `core.Sprint()` |
|
||||
| `errors` | Bypasses error primitive | `core.NewError()`, `core.E()`, `core.Is()`, `core.As()` |
|
||||
| `log` | Bypasses logging | `core.Info()`, `core.Warn()`, `core.Error()`, `c.Log()` |
|
||||
| `encoding/json` | Bypasses Core serialisation | `core.JSONMarshal()`, `core.JSONUnmarshal()` |
|
||||
| `path/filepath` | Bypasses path security boundary | `core.Path()`, `core.JoinPath()`, `core.PathBase()` |
|
||||
| `unsafe` | Bypasses Fs sandbox | `Fs.NewUnrestricted()` |
|
||||
| `strings` | Bypasses string guardrails | `core.Contains()`, `core.Split()`, `core.Trim()`, etc. |
|
||||
|
||||
**Rule:** If a diff introduces a disallowed import, it failed code review. The import list IS the quality gate. No subjective judgement needed — a weaker model can enforce this mechanically.
|
||||
|
||||
### 10. Registration IS Capability, Entitlement IS Permission
|
||||
|
||||
Two layers of permission, both declarative:
|
||||
|
||||
```
|
||||
Registration = "this action EXISTS" → c.Action("process.run").Exists()
|
||||
Entitlement = "this Core is ALLOWED" → c.Entitled("process.run").Allowed
|
||||
```
|
||||
|
||||
A sandboxed Core has no `process.run` registered — the action doesn't exist. A SaaS Core has it registered but entitlement-gated — the action exists but the workspace may not be allowed to use it.
|
||||
|
||||
**Rule:** Never check permissions with `if` statements in business logic. Register capabilities as Actions. Gate them with Entitlements. The framework enforces both — `Action.Run()` checks both before executing.
|
||||
|
||||
## Applying AX to Existing Patterns
|
||||
|
||||
### File Structure
|
||||
|
||||
```
|
||||
# AX-native: path describes content
|
||||
core/agent/
|
||||
├── cmd/core-agent/ # CLI entry point (minimal — just core.New + Run)
|
||||
├── pkg/agentic/ # Agent orchestration (dispatch, prep, verify, scan)
|
||||
├── pkg/brain/ # OpenBrain integration
|
||||
├── pkg/lib/ # Embedded templates, personas, flows
|
||||
├── pkg/messages/ # Typed IPC message definitions
|
||||
├── pkg/monitor/ # Agent monitoring + notifications
|
||||
├── pkg/setup/ # Workspace scaffolding + detection
|
||||
└── claude/ # Claude Code plugin definitions
|
||||
|
||||
# Not AX: generic names requiring README
|
||||
src/
|
||||
├── lib/
|
||||
├── utils/
|
||||
└── helpers/
|
||||
```
|
||||
|
||||
### Error Handling
|
||||
|
||||
```go
|
||||
// AX-native: errors flow through Result, not call sites
|
||||
func Register(c *core.Core) core.Result {
|
||||
svc := &MyService{ServiceRuntime: core.NewServiceRuntime(c, MyOpts{})}
|
||||
return core.Result{Value: svc, OK: true}
|
||||
}
|
||||
|
||||
// Not AX: errors dominate the code
|
||||
func Register(c *core.Core) (*MyService, error) {
|
||||
svc, err := NewMyService(c)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create service: %w", err)
|
||||
}
|
||||
return svc, nil
|
||||
}
|
||||
```
|
||||
|
||||
### Command Registration
|
||||
|
||||
```go
|
||||
// AX-native: extracted methods, testable without CLI
|
||||
func (s *MyService) OnStartup(ctx context.Context) core.Result {
|
||||
c := s.Core()
|
||||
c.Command("issue/get", core.Command{Action: s.cmdIssueGet})
|
||||
c.Command("issue/list", core.Command{Action: s.cmdIssueList})
|
||||
c.Action("forge.issue.get", s.handleIssueGet)
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
|
||||
func (s *MyService) cmdIssueGet(opts core.Options) core.Result {
|
||||
// testable business logic — no closure, no CLI dependency
|
||||
}
|
||||
|
||||
// Not AX: closures that can only be tested via CLI integration
|
||||
c.Command("issue/get", core.Command{
|
||||
Action: func(opts core.Options) core.Result {
|
||||
// 50 lines of untestable inline logic
|
||||
},
|
||||
})
|
||||
```
|
||||
|
||||
### Process Execution
|
||||
|
||||
```go
|
||||
// AX-native: Core Process primitive, testable with mock handler
|
||||
func (s *MyService) getGitLog(repoPath string) string {
|
||||
r := s.Core().Process().RunIn(context.Background(), repoPath, "git", "log", "--oneline", "-20")
|
||||
if !r.OK { return "" }
|
||||
return core.Trim(r.Value.(string))
|
||||
}
|
||||
|
||||
// Not AX: raw exec.Command — untestable, no entitlement check, path traversal risk
|
||||
func (s *MyService) getGitLog(repoPath string) string {
|
||||
cmd := exec.Command("git", "log", "--oneline", "-20")
|
||||
cmd.Dir = repoPath // user-controlled path goes directly to OS
|
||||
output, err := cmd.Output()
|
||||
if err != nil { return "" }
|
||||
return strings.TrimSpace(string(output))
|
||||
}
|
||||
```
|
||||
|
||||
The AX-native version routes through `c.Process()` → named Action → entitlement check. The non-AX version passes user input directly to `os/exec` with no permission gate.
|
||||
|
||||
### Permission Gating
|
||||
|
||||
```go
|
||||
// AX-native: entitlement checked by framework, not by business logic
|
||||
c.Action("agentic.dispatch", func(ctx context.Context, opts core.Options) core.Result {
|
||||
// Action.Run() already checked c.Entitled("agentic.dispatch")
|
||||
// If we're here, we're allowed. Just do the work.
|
||||
return dispatch(ctx, opts)
|
||||
})
|
||||
|
||||
// Not AX: permission logic scattered through business code
|
||||
func handleDispatch(ctx context.Context, opts core.Options) core.Result {
|
||||
if !isAdmin(ctx) && !hasPlan(ctx, "pro") {
|
||||
return core.Result{Value: core.E("dispatch", "upgrade required", nil), OK: false}
|
||||
}
|
||||
// duplicate permission check in every handler
|
||||
}
|
||||
```
|
||||
|
||||
## Compatibility
|
||||
|
||||
AX conventions are valid, idiomatic Go/PHP/TS. They do not require language extensions, code generation, or non-standard tooling. An AX-designed codebase compiles, tests, and deploys with standard toolchains.
|
||||
|
||||
The conventions diverge from community patterns (functional options, Must/For, etc.) but do not violate language specifications. This is a style choice, not a fork.
|
||||
|
||||
## Adoption
|
||||
|
||||
AX applies to all code in the Core ecosystem. core/go is fully migrated (v0.8.0). Consumer packages migrate via their RFCs.
|
||||
|
||||
Priority for migrating a package:
|
||||
1. **Lifecycle** — `OnStartup`/`OnShutdown` return `Result`
|
||||
2. **Actions** — register capabilities as named Actions
|
||||
3. **Imports** — replace all 10 disallowed imports (Principle 9)
|
||||
4. **String ops** — `+` concat → `Concat()`, `path +` → `Path()`
|
||||
5. **Test naming** — `TestFile_Function_{Good,Bad,Ugly}`
|
||||
6. **Examples** — one `{source}_example_test.go` per source file
|
||||
7. **Comments** — every exported function has usage example (Principle 2)
|
||||
|
||||
## Verification
|
||||
|
||||
An agent auditing AX compliance checks:
|
||||
|
||||
```bash
|
||||
# Disallowed imports (Principle 9)
|
||||
grep -rn '"os"\|"os/exec"\|"io"\|"fmt"\|"errors"\|"log"\|"encoding/json"\|"path/filepath"\|"unsafe"\|"strings"' *.go \
|
||||
| grep -v _test.go
|
||||
|
||||
# Test naming (Principle 7)
|
||||
grep "^func Test" *_test.go | grep -v "Test[A-Z][a-z]*_.*_\(Good\|Bad\|Ugly\)"
|
||||
|
||||
# String concat (should use Concat/Path)
|
||||
grep -n '" + \| + "' *.go | grep -v _test.go | grep -v "//"
|
||||
|
||||
# Untyped dispatch (should prefer named Actions)
|
||||
grep "RegisterTask\|PERFORM\|type Task any" *.go
|
||||
```
|
||||
|
||||
If any check produces output, the code needs migration.
|
||||
|
||||
## References
|
||||
|
||||
- `core/go/docs/RFC.md` — CoreGO API contract (21 sections, reference implementation)
|
||||
- `core/go-process/docs/RFC.md` — Process consumer spec
|
||||
- `core/agent/docs/RFC.md` — Agent consumer spec
|
||||
- RFC-004 (Entitlements) — permission model ported to `c.Entitled()`
|
||||
- RFC-021 (Core Platform Architecture) — 7-layer stack, provider model
|
||||
- dAppServer unified path convention (2024) — path = route = command = test
|
||||
- Go Proverbs, Rob Pike (2015) — AX provides an updated lens
|
||||
|
||||
## Changelog
|
||||
|
||||
- 2026-03-25: v0.8.0 alignment — all examples match implemented API. Added Principles 8 (RFC as Domain Load), 9 (Primitives as Quality Gates), 10 (Registration + Entitlement). Updated subsystem table (Process, API, Action, Task, Entitled, RegistryOf). Process examples use `c.Process()` not old `process.RunWithOptions`. Removed PERFORM references.
|
||||
- 2026-03-19: Initial draft — 7 principles
|
||||
93
.core/reference/app.go
Normal file
93
.core/reference/app.go
Normal file
|
|
@ -0,0 +1,93 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Application identity for the Core framework.
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// App holds the application identity and optional GUI runtime.
//
// app := core.App{}.New(core.NewOptions(
//	core.Option{Key: "name", Value: "Core CLI"},
//	core.Option{Key: "version", Value: "1.0.0"},
// ))
type App struct {
	Name        string // human-readable application name
	Version     string // version string (e.g. "1.0.0")
	Description string // short one-line description
	Filename    string // executable filename, as searched for by Find
	Path        string // absolute path to the executable, set by Find
	Runtime     any    // GUI runtime (e.g., Wails App). Nil for CLI-only.
}
|
||||
|
||||
// New creates an App from Options.
|
||||
//
|
||||
// app := core.App{}.New(core.NewOptions(
|
||||
// core.Option{Key: "name", Value: "myapp"},
|
||||
// core.Option{Key: "version", Value: "1.0.0"},
|
||||
// ))
|
||||
func (a App) New(opts Options) App {
|
||||
if name := opts.String("name"); name != "" {
|
||||
a.Name = name
|
||||
}
|
||||
if version := opts.String("version"); version != "" {
|
||||
a.Version = version
|
||||
}
|
||||
if desc := opts.String("description"); desc != "" {
|
||||
a.Description = desc
|
||||
}
|
||||
if filename := opts.String("filename"); filename != "" {
|
||||
a.Filename = filename
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// Find locates a program on PATH and returns a Result containing the App.
|
||||
// Uses os.Stat to search PATH directories — no os/exec dependency.
|
||||
//
|
||||
// r := core.App{}.Find("node", "Node.js")
|
||||
// if r.OK { app := r.Value.(*App) }
|
||||
func (a App) Find(filename, name string) Result {
|
||||
// If filename contains a separator, check it directly
|
||||
if Contains(filename, string(os.PathSeparator)) {
|
||||
abs, err := filepath.Abs(filename)
|
||||
if err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
if isExecutable(abs) {
|
||||
return Result{&App{Name: name, Filename: filename, Path: abs}, true}
|
||||
}
|
||||
return Result{E("app.Find", Concat(filename, " not found"), nil), false}
|
||||
}
|
||||
|
||||
// Search PATH
|
||||
pathEnv := os.Getenv("PATH")
|
||||
if pathEnv == "" {
|
||||
return Result{E("app.Find", "PATH is empty", nil), false}
|
||||
}
|
||||
for _, dir := range Split(pathEnv, string(os.PathListSeparator)) {
|
||||
candidate := filepath.Join(dir, filename)
|
||||
if isExecutable(candidate) {
|
||||
abs, err := filepath.Abs(candidate)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
return Result{&App{Name: name, Filename: filename, Path: abs}, true}
|
||||
}
|
||||
}
|
||||
return Result{E("app.Find", Concat(filename, " not found on PATH"), nil), false}
|
||||
}
|
||||
|
||||
// isExecutable checks if a path exists and is executable.
|
||||
func isExecutable(path string) bool {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
// Regular file with at least one execute bit
|
||||
return !info.IsDir() && info.Mode()&0111 != 0
|
||||
}
|
||||
101
.core/reference/array.go
Normal file
101
.core/reference/array.go
Normal file
|
|
@ -0,0 +1,101 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Generic slice operations for the Core framework.
|
||||
// Based on leaanthony/slicer, rewritten with Go 1.18+ generics.
|
||||
|
||||
package core
|
||||
|
||||
// Array is a typed slice with common operations.
// The element type must be comparable so Contains, AddUnique, Remove,
// and Deduplicate can compare values with ==.
type Array[T comparable] struct {
	items []T // backing slice; nil until elements are added
}
|
||||
|
||||
// NewArray creates an Array seeded with the given items; calling it
// with no arguments yields an empty Array.
func NewArray[T comparable](items ...T) *Array[T] {
	return &Array[T]{items: items}
}
|
||||
|
||||
// Add appends values to the end of the array, duplicates included.
func (s *Array[T]) Add(values ...T) {
	s.items = append(s.items, values...)
}
|
||||
|
||||
// AddUnique appends values only if not already present.
|
||||
func (s *Array[T]) AddUnique(values ...T) {
|
||||
for _, v := range values {
|
||||
if !s.Contains(v) {
|
||||
s.items = append(s.items, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Contains returns true if the value is in the slice.
|
||||
func (s *Array[T]) Contains(val T) bool {
|
||||
for _, v := range s.items {
|
||||
if v == val {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Filter returns a new Array with elements matching the predicate.
|
||||
func (s *Array[T]) Filter(fn func(T) bool) Result {
|
||||
filtered := &Array[T]{}
|
||||
for _, v := range s.items {
|
||||
if fn(v) {
|
||||
filtered.items = append(filtered.items, v)
|
||||
}
|
||||
}
|
||||
return Result{filtered, true}
|
||||
}
|
||||
|
||||
// Each runs fn on every element, in order.
func (s *Array[T]) Each(fn func(T)) {
	for _, v := range s.items {
		fn(v)
	}
}
|
||||
|
||||
// Remove removes the first occurrence of a value.
|
||||
func (s *Array[T]) Remove(val T) {
|
||||
for i, v := range s.items {
|
||||
if v == val {
|
||||
s.items = append(s.items[:i], s.items[i+1:]...)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Deduplicate removes duplicate values, preserving order.
|
||||
func (s *Array[T]) Deduplicate() {
|
||||
seen := make(map[T]struct{})
|
||||
result := make([]T, 0, len(s.items))
|
||||
for _, v := range s.items {
|
||||
if _, exists := seen[v]; !exists {
|
||||
seen[v] = struct{}{}
|
||||
result = append(result, v)
|
||||
}
|
||||
}
|
||||
s.items = result
|
||||
}
|
||||
|
||||
// Len returns the number of elements currently stored.
func (s *Array[T]) Len() int {
	return len(s.items)
}
|
||||
|
||||
// Clear removes all elements, releasing the backing slice.
func (s *Array[T]) Clear() {
	s.items = nil
}
|
||||
|
||||
// AsSlice returns a copy of the underlying slice.
|
||||
func (s *Array[T]) AsSlice() []T {
|
||||
if s.items == nil {
|
||||
return nil
|
||||
}
|
||||
out := make([]T, len(s.items))
|
||||
copy(out, s.items)
|
||||
return out
|
||||
}
|
||||
166
.core/reference/cli.go
Normal file
166
.core/reference/cli.go
Normal file
|
|
@ -0,0 +1,166 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Cli is the CLI surface layer for the Core command tree.
|
||||
//
|
||||
// c := core.New(core.WithOption("name", "myapp")).Value.(*Core)
|
||||
// c.Command("deploy", core.Command{Action: handler})
|
||||
// c.Cli().Run()
|
||||
package core
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// CliOptions holds configuration for the Cli service.
// Currently empty; it exists so Cli can embed ServiceRuntime[CliOptions].
type CliOptions struct{}
|
||||
|
||||
// Cli is the CLI surface for the Core command tree.
type Cli struct {
	*ServiceRuntime[CliOptions]
	output io.Writer         // destination for Print; defaults to os.Stdout
	banner func(*Cli) string // optional banner shown when no command executes
}
|
||||
|
||||
// CliRegister creates a Cli service factory for core.WithService.
//
// core.New(core.WithService(core.CliRegister))
func CliRegister(c *Core) Result {
	cl := &Cli{output: os.Stdout}
	cl.ServiceRuntime = NewServiceRuntime[CliOptions](c, CliOptions{})
	return c.RegisterService("cli", cl)
}
|
||||
|
||||
// Print writes formatted output to the CLI output writer
// (defaults to os.Stdout; see SetOutput).
//
// c.Cli().Print("hello %s", "world")
func (cl *Cli) Print(format string, args ...any) {
	Print(cl.output, format, args...)
}
|
||||
|
||||
// SetOutput sets the CLI output writer used by Print.
//
// c.Cli().SetOutput(os.Stderr)
func (cl *Cli) SetOutput(w io.Writer) {
	cl.output = w
}
|
||||
|
||||
// Run resolves os.Args to a command path and executes it.
|
||||
//
|
||||
// c.Cli().Run()
|
||||
// c.Cli().Run("deploy", "to", "homelab")
|
||||
func (cl *Cli) Run(args ...string) Result {
|
||||
if len(args) == 0 {
|
||||
args = os.Args[1:]
|
||||
}
|
||||
|
||||
clean := FilterArgs(args)
|
||||
c := cl.Core()
|
||||
|
||||
if c == nil || c.commands == nil {
|
||||
if cl.banner != nil {
|
||||
cl.Print(cl.banner(cl))
|
||||
}
|
||||
return Result{}
|
||||
}
|
||||
|
||||
if c.commands.Len() == 0 {
|
||||
if cl.banner != nil {
|
||||
cl.Print(cl.banner(cl))
|
||||
}
|
||||
return Result{}
|
||||
}
|
||||
|
||||
// Resolve command path from args
|
||||
var cmd *Command
|
||||
var remaining []string
|
||||
|
||||
for i := len(clean); i > 0; i-- {
|
||||
path := JoinPath(clean[:i]...)
|
||||
if r := c.commands.Get(path); r.OK {
|
||||
cmd = r.Value.(*Command)
|
||||
remaining = clean[i:]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if cmd == nil {
|
||||
if cl.banner != nil {
|
||||
cl.Print(cl.banner(cl))
|
||||
}
|
||||
cl.PrintHelp()
|
||||
return Result{}
|
||||
}
|
||||
|
||||
// Build options from remaining args
|
||||
opts := NewOptions()
|
||||
for _, arg := range remaining {
|
||||
key, val, valid := ParseFlag(arg)
|
||||
if valid {
|
||||
if Contains(arg, "=") {
|
||||
opts.Set(key, val)
|
||||
} else {
|
||||
opts.Set(key, true)
|
||||
}
|
||||
} else if !IsFlag(arg) {
|
||||
opts.Set("_arg", arg)
|
||||
}
|
||||
}
|
||||
|
||||
if cmd.Action != nil {
|
||||
return cmd.Run(opts)
|
||||
}
|
||||
return Result{E("core.Cli.Run", Concat("command \"", cmd.Path, "\" is not executable"), nil), false}
|
||||
}
|
||||
|
||||
// PrintHelp prints available commands.
|
||||
//
|
||||
// c.Cli().PrintHelp()
|
||||
func (cl *Cli) PrintHelp() {
|
||||
c := cl.Core()
|
||||
if c == nil || c.commands == nil {
|
||||
return
|
||||
}
|
||||
|
||||
name := ""
|
||||
if c.app != nil {
|
||||
name = c.app.Name
|
||||
}
|
||||
if name != "" {
|
||||
cl.Print("%s commands:", name)
|
||||
} else {
|
||||
cl.Print("Commands:")
|
||||
}
|
||||
|
||||
c.commands.Each(func(path string, cmd *Command) {
|
||||
if cmd.Hidden || (cmd.Action == nil && !cmd.IsManaged()) {
|
||||
return
|
||||
}
|
||||
tr := c.I18n().Translate(cmd.I18nKey())
|
||||
desc, _ := tr.Value.(string)
|
||||
if desc == "" || desc == cmd.I18nKey() {
|
||||
cl.Print(" %s", path)
|
||||
} else {
|
||||
cl.Print(" %-30s %s", path, desc)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// SetBanner sets the banner function.
|
||||
//
|
||||
// c.Cli().SetBanner(func(_ *core.Cli) string { return "My App v1.0" })
|
||||
func (cl *Cli) SetBanner(fn func(*Cli) string) {
|
||||
cl.banner = fn
|
||||
}
|
||||
|
||||
// Banner returns the banner string.
|
||||
func (cl *Cli) Banner() string {
|
||||
if cl.banner != nil {
|
||||
return cl.banner(cl)
|
||||
}
|
||||
c := cl.Core()
|
||||
if c != nil && c.app != nil && c.app.Name != "" {
|
||||
return c.app.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
163
.core/reference/command.go
Normal file
163
.core/reference/command.go
Normal file
|
|
@ -0,0 +1,163 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Command is a DTO representing an executable operation.
|
||||
// Commands don't know if they're root, child, or nested — the tree
|
||||
// structure comes from composition via path-based registration.
|
||||
//
|
||||
// Register a command:
|
||||
//
|
||||
// c.Command("deploy", func(opts core.Options) core.Result {
|
||||
// return core.Result{"deployed", true}
|
||||
// })
|
||||
//
|
||||
// Register a nested command:
|
||||
//
|
||||
// c.Command("deploy/to/homelab", handler)
|
||||
//
|
||||
// Description is an i18n key — derived from path if omitted:
|
||||
//
|
||||
// "deploy" → "cmd.deploy.description"
|
||||
// "deploy/to/homelab" → "cmd.deploy.to.homelab.description"
|
||||
package core
|
||||
|
||||
|
||||
// CommandAction is the function signature for command handlers.
|
||||
//
|
||||
// func(opts core.Options) core.Result
|
||||
type CommandAction func(Options) Result
|
||||
|
||||
// Command is the DTO for an executable operation.
|
||||
// Commands are declarative — they carry enough information for multiple consumers:
|
||||
// - core.Cli() runs the Action
|
||||
// - core/cli adds rich help, completion, man pages
|
||||
// - go-process wraps Managed commands with lifecycle (PID, health, signals)
|
||||
//
|
||||
// c.Command("serve", core.Command{
|
||||
// Action: handler,
|
||||
// Managed: "process.daemon", // go-process provides start/stop/restart
|
||||
// })
|
||||
type Command struct {
|
||||
Name string
|
||||
Description string // i18n key — derived from path if empty
|
||||
Path string // "deploy/to/homelab"
|
||||
Action CommandAction // business logic
|
||||
Managed string // "" = one-shot, "process.daemon" = managed lifecycle
|
||||
Flags Options // declared flags
|
||||
Hidden bool
|
||||
commands map[string]*Command // child commands (internal)
|
||||
}
|
||||
|
||||
// I18nKey returns the i18n key for this command's description.
|
||||
//
|
||||
// cmd with path "deploy/to/homelab" → "cmd.deploy.to.homelab.description"
|
||||
func (cmd *Command) I18nKey() string {
|
||||
if cmd.Description != "" {
|
||||
return cmd.Description
|
||||
}
|
||||
path := cmd.Path
|
||||
if path == "" {
|
||||
path = cmd.Name
|
||||
}
|
||||
return Concat("cmd.", Replace(path, "/", "."), ".description")
|
||||
}
|
||||
|
||||
// Run executes the command's action with the given options.
|
||||
//
|
||||
// result := cmd.Run(core.NewOptions(core.Option{Key: "target", Value: "homelab"}))
|
||||
func (cmd *Command) Run(opts Options) Result {
|
||||
if cmd.Action == nil {
|
||||
return Result{E("core.Command.Run", Concat("command \"", cmd.Path, "\" is not executable"), nil), false}
|
||||
}
|
||||
return cmd.Action(opts)
|
||||
}
|
||||
|
||||
// IsManaged returns true if this command has a managed lifecycle.
|
||||
//
|
||||
// if cmd.IsManaged() { /* go-process handles start/stop */ }
|
||||
func (cmd *Command) IsManaged() bool {
|
||||
return cmd.Managed != ""
|
||||
}
|
||||
|
||||
// --- Command Registry (on Core) ---
|
||||
|
||||
// CommandRegistry holds the command tree. Embeds Registry[*Command]
|
||||
// for thread-safe named storage with insertion order.
|
||||
type CommandRegistry struct {
|
||||
*Registry[*Command]
|
||||
}
|
||||
|
||||
// Command gets or registers a command by path.
|
||||
//
|
||||
// c.Command("deploy", Command{Action: handler})
|
||||
// r := c.Command("deploy")
|
||||
func (c *Core) Command(path string, command ...Command) Result {
|
||||
if len(command) == 0 {
|
||||
return c.commands.Get(path)
|
||||
}
|
||||
|
||||
if path == "" || HasPrefix(path, "/") || HasSuffix(path, "/") || Contains(path, "//") {
|
||||
return Result{E("core.Command", Concat("invalid command path: \"", path, "\""), nil), false}
|
||||
}
|
||||
|
||||
// Check for duplicate executable command
|
||||
if r := c.commands.Get(path); r.OK {
|
||||
existing := r.Value.(*Command)
|
||||
if existing.Action != nil || existing.IsManaged() {
|
||||
return Result{E("core.Command", Concat("command \"", path, "\" already registered"), nil), false}
|
||||
}
|
||||
}
|
||||
|
||||
cmd := &command[0]
|
||||
cmd.Name = pathName(path)
|
||||
cmd.Path = path
|
||||
if cmd.commands == nil {
|
||||
cmd.commands = make(map[string]*Command)
|
||||
}
|
||||
|
||||
// Preserve existing subtree when overwriting a placeholder parent
|
||||
if r := c.commands.Get(path); r.OK {
|
||||
existing := r.Value.(*Command)
|
||||
for k, v := range existing.commands {
|
||||
if _, has := cmd.commands[k]; !has {
|
||||
cmd.commands[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
c.commands.Set(path, cmd)
|
||||
|
||||
// Build parent chain — "deploy/to/homelab" creates "deploy" and "deploy/to" if missing
|
||||
parts := Split(path, "/")
|
||||
for i := len(parts) - 1; i > 0; i-- {
|
||||
parentPath := JoinPath(parts[:i]...)
|
||||
if !c.commands.Has(parentPath) {
|
||||
c.commands.Set(parentPath, &Command{
|
||||
Name: parts[i-1],
|
||||
Path: parentPath,
|
||||
commands: make(map[string]*Command),
|
||||
})
|
||||
}
|
||||
parent := c.commands.Get(parentPath).Value.(*Command)
|
||||
parent.commands[parts[i]] = cmd
|
||||
cmd = parent
|
||||
}
|
||||
|
||||
return Result{OK: true}
|
||||
}
|
||||
|
||||
// Commands returns all registered command paths in registration order.
|
||||
//
|
||||
// paths := c.Commands()
|
||||
func (c *Core) Commands() []string {
|
||||
if c.commands == nil {
|
||||
return nil
|
||||
}
|
||||
return c.commands.Names()
|
||||
}
|
||||
|
||||
// pathName extracts the last segment of a path.
|
||||
// "deploy/to/homelab" → "homelab"
|
||||
func pathName(path string) string {
|
||||
parts := Split(path, "/")
|
||||
return parts[len(parts)-1]
|
||||
}
|
||||
186
.core/reference/config.go
Normal file
186
.core/reference/config.go
Normal file
|
|
@ -0,0 +1,186 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Settings, feature flags, and typed configuration for the Core framework.
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ConfigVar wraps a value of any type together with a "was it explicitly
// set" flag, so callers can distinguish "set to the zero value" from
// "never set".
type ConfigVar[T any] struct {
	val T
	set bool
}

// Get returns the stored value (the zero value when unset).
//
//	val := v.Get()
func (v *ConfigVar[T]) Get() T {
	return v.val
}

// Set stores val and marks the variable as explicitly set.
//
//	v.Set(true)
func (v *ConfigVar[T]) Set(val T) {
	v.val = val
	v.set = true
}

// IsSet reports whether the value was explicitly set.
//
//	if v.IsSet() { /* explicitly configured */ }
func (v *ConfigVar[T]) IsSet() bool {
	return v.set
}

// Unset clears the flag and resets the value to its zero value.
//
//	v.Unset()
//	v.IsSet() // false
func (v *ConfigVar[T]) Unset() {
	var zero T
	v.val, v.set = zero, false
}

// NewConfigVar builds a ConfigVar already holding val and marked as set.
//
//	debug := core.NewConfigVar(true)
func NewConfigVar[T any](val T) ConfigVar[T] {
	return ConfigVar[T]{val: val, set: true}
}
|
||||
|
||||
// ConfigOptions is the raw data behind Config: arbitrary settings plus
// boolean feature flags.
type ConfigOptions struct {
	Settings map[string]any
	Features map[string]bool
}

// init lazily allocates whichever maps are still nil.
func (o *ConfigOptions) init() {
	if o.Settings == nil {
		o.Settings = make(map[string]any)
	}
	if o.Features == nil {
		o.Features = make(map[string]bool)
	}
}

// Config is a thread-safe store of settings and feature flags.
type Config struct {
	*ConfigOptions

	mu sync.RWMutex // guards Settings and Features
}

// New initialises the Config with empty settings and features and returns it.
//
//	cfg := (&core.Config{}).New()
func (e *Config) New() *Config {
	opts := &ConfigOptions{}
	opts.init()
	e.ConfigOptions = opts
	return e
}
|
||||
|
||||
// Set stores a configuration value by key.
|
||||
func (e *Config) Set(key string, val any) {
|
||||
e.mu.Lock()
|
||||
if e.ConfigOptions == nil {
|
||||
e.ConfigOptions = &ConfigOptions{}
|
||||
}
|
||||
e.ConfigOptions.init()
|
||||
e.Settings[key] = val
|
||||
e.mu.Unlock()
|
||||
}
|
||||
|
||||
// Get retrieves a configuration value by key.
|
||||
func (e *Config) Get(key string) Result {
|
||||
e.mu.RLock()
|
||||
defer e.mu.RUnlock()
|
||||
if e.ConfigOptions == nil || e.Settings == nil {
|
||||
return Result{}
|
||||
}
|
||||
val, ok := e.Settings[key]
|
||||
if !ok {
|
||||
return Result{}
|
||||
}
|
||||
return Result{val, true}
|
||||
}
|
||||
|
||||
// String retrieves a string config value (empty string if missing).
|
||||
//
|
||||
// host := c.Config().String("database.host")
|
||||
func (e *Config) String(key string) string { return ConfigGet[string](e, key) }
|
||||
|
||||
// Int retrieves an int config value (0 if missing).
|
||||
//
|
||||
// port := c.Config().Int("database.port")
|
||||
func (e *Config) Int(key string) int { return ConfigGet[int](e, key) }
|
||||
|
||||
// Bool retrieves a bool config value (false if missing).
|
||||
//
|
||||
// debug := c.Config().Bool("debug")
|
||||
func (e *Config) Bool(key string) bool { return ConfigGet[bool](e, key) }
|
||||
|
||||
// ConfigGet retrieves a typed configuration value.
|
||||
func ConfigGet[T any](e *Config, key string) T {
|
||||
r := e.Get(key)
|
||||
if !r.OK {
|
||||
var zero T
|
||||
return zero
|
||||
}
|
||||
typed, _ := r.Value.(T)
|
||||
return typed
|
||||
}
|
||||
|
||||
// --- Feature Flags ---
|
||||
|
||||
// Enable activates a feature flag.
|
||||
//
|
||||
// c.Config().Enable("dark-mode")
|
||||
func (e *Config) Enable(feature string) {
|
||||
e.mu.Lock()
|
||||
if e.ConfigOptions == nil {
|
||||
e.ConfigOptions = &ConfigOptions{}
|
||||
}
|
||||
e.ConfigOptions.init()
|
||||
e.Features[feature] = true
|
||||
e.mu.Unlock()
|
||||
}
|
||||
|
||||
// Disable deactivates a feature flag.
|
||||
//
|
||||
// c.Config().Disable("dark-mode")
|
||||
func (e *Config) Disable(feature string) {
|
||||
e.mu.Lock()
|
||||
if e.ConfigOptions == nil {
|
||||
e.ConfigOptions = &ConfigOptions{}
|
||||
}
|
||||
e.ConfigOptions.init()
|
||||
e.Features[feature] = false
|
||||
e.mu.Unlock()
|
||||
}
|
||||
|
||||
// Enabled returns true if a feature flag is active.
|
||||
//
|
||||
// if c.Config().Enabled("dark-mode") { ... }
|
||||
func (e *Config) Enabled(feature string) bool {
|
||||
e.mu.RLock()
|
||||
defer e.mu.RUnlock()
|
||||
if e.ConfigOptions == nil || e.Features == nil {
|
||||
return false
|
||||
}
|
||||
return e.Features[feature]
|
||||
}
|
||||
|
||||
// EnabledFeatures returns all active feature flag names.
|
||||
//
|
||||
// features := c.Config().EnabledFeatures()
|
||||
func (e *Config) EnabledFeatures() []string {
|
||||
e.mu.RLock()
|
||||
defer e.mu.RUnlock()
|
||||
if e.ConfigOptions == nil || e.Features == nil {
|
||||
return nil
|
||||
}
|
||||
var result []string
|
||||
for k, v := range e.Features {
|
||||
if v {
|
||||
result = append(result, k)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
226
.core/reference/contract.go
Normal file
226
.core/reference/contract.go
Normal file
|
|
@ -0,0 +1,226 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Contracts, options, and type definitions for the Core framework.
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Message is the type for IPC broadcasts (fire-and-forget).
|
||||
type Message any
|
||||
|
||||
// Query is the type for read-only IPC requests.
|
||||
type Query any
|
||||
|
||||
// QueryHandler handles Query requests. Returns Result{Value, OK}.
|
||||
type QueryHandler func(*Core, Query) Result
|
||||
|
||||
// Startable is implemented by services that need startup initialisation.
|
||||
//
|
||||
// func (s *MyService) OnStartup(ctx context.Context) core.Result {
|
||||
// return core.Result{OK: true}
|
||||
// }
|
||||
type Startable interface {
|
||||
OnStartup(ctx context.Context) Result
|
||||
}
|
||||
|
||||
// Stoppable is implemented by services that need shutdown cleanup.
|
||||
//
|
||||
// func (s *MyService) OnShutdown(ctx context.Context) core.Result {
|
||||
// return core.Result{OK: true}
|
||||
// }
|
||||
type Stoppable interface {
|
||||
OnShutdown(ctx context.Context) Result
|
||||
}
|
||||
|
||||
// --- Action Messages ---
|
||||
|
||||
type ActionServiceStartup struct{}
|
||||
type ActionServiceShutdown struct{}
|
||||
|
||||
type ActionTaskStarted struct {
|
||||
TaskIdentifier string
|
||||
Action string
|
||||
Options Options
|
||||
}
|
||||
|
||||
type ActionTaskProgress struct {
|
||||
TaskIdentifier string
|
||||
Action string
|
||||
Progress float64
|
||||
Message string
|
||||
}
|
||||
|
||||
type ActionTaskCompleted struct {
|
||||
TaskIdentifier string
|
||||
Action string
|
||||
Result Result
|
||||
}
|
||||
|
||||
// --- Constructor ---
|
||||
|
||||
// CoreOption is a functional option applied during Core construction.
|
||||
// Returns Result — if !OK, New() stops and returns the error.
|
||||
//
|
||||
// core.New(
|
||||
// core.WithService(agentic.Register),
|
||||
// core.WithService(monitor.Register),
|
||||
// core.WithServiceLock(),
|
||||
// )
|
||||
type CoreOption func(*Core) Result
|
||||
|
||||
// New initialises a Core instance by applying options in order.
|
||||
// Services registered here form the application conclave — they share
|
||||
// IPC access and participate in the lifecycle (ServiceStartup/ServiceShutdown).
|
||||
//
|
||||
// c := core.New(
|
||||
// core.WithOption("name", "myapp"),
|
||||
// core.WithService(auth.Register),
|
||||
// core.WithServiceLock(),
|
||||
// )
|
||||
// c.Run()
|
||||
func New(opts ...CoreOption) *Core {
|
||||
c := &Core{
|
||||
app: &App{},
|
||||
data: &Data{Registry: NewRegistry[*Embed]()},
|
||||
drive: &Drive{Registry: NewRegistry[*DriveHandle]()},
|
||||
fs: (&Fs{}).New("/"),
|
||||
config: (&Config{}).New(),
|
||||
error: &ErrorPanic{},
|
||||
log: &ErrorLog{},
|
||||
lock: &Lock{locks: NewRegistry[*sync.RWMutex]()},
|
||||
ipc: &Ipc{actions: NewRegistry[*Action](), tasks: NewRegistry[*Task]()},
|
||||
info: systemInfo,
|
||||
i18n: &I18n{},
|
||||
api: &API{protocols: NewRegistry[StreamFactory]()},
|
||||
services: &ServiceRegistry{Registry: NewRegistry[*Service]()},
|
||||
commands: &CommandRegistry{Registry: NewRegistry[*Command]()},
|
||||
entitlementChecker: defaultChecker,
|
||||
}
|
||||
c.context, c.cancel = context.WithCancel(context.Background())
|
||||
c.api.core = c
|
||||
|
||||
// Core services
|
||||
CliRegister(c)
|
||||
|
||||
for _, opt := range opts {
|
||||
if r := opt(c); !r.OK {
|
||||
Error("core.New failed", "err", r.Value)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Apply service lock after all opts — v0.3.3 parity
|
||||
c.LockApply()
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// WithOptions applies key-value configuration to Core.
|
||||
//
|
||||
// core.WithOptions(core.NewOptions(core.Option{Key: "name", Value: "myapp"}))
|
||||
func WithOptions(opts Options) CoreOption {
|
||||
return func(c *Core) Result {
|
||||
c.options = &opts
|
||||
if name := opts.String("name"); name != "" {
|
||||
c.app.Name = name
|
||||
}
|
||||
return Result{OK: true}
|
||||
}
|
||||
}
|
||||
|
||||
// WithService registers a service via its factory function.
|
||||
// If the factory returns a non-nil Value, WithService auto-discovers the
|
||||
// service name from the factory's package path (last path segment, lowercase,
|
||||
// with any "_test" suffix stripped) and calls RegisterService on the instance.
|
||||
// IPC handler auto-registration is handled by RegisterService.
|
||||
//
|
||||
// If the factory returns nil Value (it registered itself), WithService
|
||||
// returns success without a second registration.
|
||||
//
|
||||
// core.WithService(agentic.Register)
|
||||
// core.WithService(display.Register(nil))
|
||||
func WithService(factory func(*Core) Result) CoreOption {
|
||||
return func(c *Core) Result {
|
||||
r := factory(c)
|
||||
if !r.OK {
|
||||
return r
|
||||
}
|
||||
if r.Value == nil {
|
||||
// Factory self-registered — nothing more to do.
|
||||
return Result{OK: true}
|
||||
}
|
||||
// Auto-discover the service name from the instance's package path.
|
||||
instance := r.Value
|
||||
typeOf := reflect.TypeOf(instance)
|
||||
if typeOf.Kind() == reflect.Ptr {
|
||||
typeOf = typeOf.Elem()
|
||||
}
|
||||
pkgPath := typeOf.PkgPath()
|
||||
parts := Split(pkgPath, "/")
|
||||
name := Lower(parts[len(parts)-1])
|
||||
if name == "" {
|
||||
return Result{E("core.WithService", Sprintf("service name could not be discovered for type %T", instance), nil), false}
|
||||
}
|
||||
|
||||
// RegisterService handles Startable/Stoppable/HandleIPCEvents discovery
|
||||
return c.RegisterService(name, instance)
|
||||
}
|
||||
}
|
||||
|
||||
// WithName registers a service with an explicit name (no reflect discovery).
|
||||
//
|
||||
// core.WithName("ws", func(c *Core) Result {
|
||||
// return Result{Value: hub, OK: true}
|
||||
// })
|
||||
func WithName(name string, factory func(*Core) Result) CoreOption {
|
||||
return func(c *Core) Result {
|
||||
r := factory(c)
|
||||
if !r.OK {
|
||||
return r
|
||||
}
|
||||
if r.Value == nil {
|
||||
return Result{E("core.WithName", Sprintf("failed to create service %q", name), nil), false}
|
||||
}
|
||||
return c.RegisterService(name, r.Value)
|
||||
}
|
||||
}
|
||||
|
||||
// WithOption is a convenience for setting a single key-value option.
|
||||
//
|
||||
// core.New(
|
||||
// core.WithOption("name", "myapp"),
|
||||
// core.WithOption("port", 8080),
|
||||
// )
|
||||
func WithOption(key string, value any) CoreOption {
|
||||
return func(c *Core) Result {
|
||||
if c.options == nil {
|
||||
opts := NewOptions()
|
||||
c.options = &opts
|
||||
}
|
||||
c.options.Set(key, value)
|
||||
if key == "name" {
|
||||
if s, ok := value.(string); ok {
|
||||
c.app.Name = s
|
||||
}
|
||||
}
|
||||
return Result{OK: true}
|
||||
}
|
||||
}
|
||||
|
||||
// WithServiceLock prevents further service registration after construction.
|
||||
//
|
||||
// core.New(
|
||||
// core.WithService(auth.Register),
|
||||
// core.WithServiceLock(),
|
||||
// )
|
||||
func WithServiceLock() CoreOption {
|
||||
return func(c *Core) Result {
|
||||
c.LockEnable()
|
||||
return Result{OK: true}
|
||||
}
|
||||
}
|
||||
239
.core/reference/core.go
Normal file
239
.core/reference/core.go
Normal file
|
|
@ -0,0 +1,239 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Package core is a dependency injection and service lifecycle framework for Go.
|
||||
// This file defines the Core struct, accessors, and IPC/error wrappers.
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// --- Core Struct ---
|
||||
|
||||
// Core is the central application object that manages services, assets, and communication.
|
||||
type Core struct {
|
||||
options *Options // c.Options() — Input configuration used to create this Core
|
||||
app *App // c.App() — Application identity + optional GUI runtime
|
||||
data *Data // c.Data() — Embedded/stored content from packages
|
||||
drive *Drive // c.Drive() — Resource handle registry (transports)
|
||||
fs *Fs // c.Fs() — Local filesystem I/O (sandboxable)
|
||||
config *Config // c.Config() — Configuration, settings, feature flags
|
||||
error *ErrorPanic // c.Error() — Panic recovery and crash reporting
|
||||
log *ErrorLog // c.Log() — Structured logging + error wrapping
|
||||
// cli accessed via ServiceFor[*Cli](c, "cli")
|
||||
commands *CommandRegistry // c.Command("path") — Command tree
|
||||
services *ServiceRegistry // c.Service("name") — Service registry
|
||||
lock *Lock // c.Lock("name") — Named mutexes
|
||||
ipc *Ipc // c.IPC() — Message bus for IPC
|
||||
api *API // c.API() — Remote streams
|
||||
info *SysInfo // c.Env("key") — Read-only system/environment information
|
||||
i18n *I18n // c.I18n() — Internationalisation and locale collection
|
||||
|
||||
entitlementChecker EntitlementChecker // default: everything permitted
|
||||
usageRecorder UsageRecorder // default: nil (no-op)
|
||||
|
||||
context context.Context
|
||||
cancel context.CancelFunc
|
||||
taskIDCounter atomic.Uint64
|
||||
waitGroup sync.WaitGroup
|
||||
shutdown atomic.Bool
|
||||
}
|
||||
|
||||
// --- Accessors ---
|
||||
|
||||
// Options returns the input configuration passed to core.New().
|
||||
//
|
||||
// opts := c.Options()
|
||||
// name := opts.String("name")
|
||||
func (c *Core) Options() *Options { return c.options }
|
||||
|
||||
// App returns application identity metadata.
|
||||
//
|
||||
// c.App().Name // "my-app"
|
||||
// c.App().Version // "1.0.0"
|
||||
func (c *Core) App() *App { return c.app }
|
||||
|
||||
// Data returns the embedded asset registry (Registry[*Embed]).
|
||||
//
|
||||
// r := c.Data().ReadString("prompts/coding.md")
|
||||
func (c *Core) Data() *Data { return c.data }
|
||||
|
||||
// Drive returns the transport handle registry (Registry[*DriveHandle]).
|
||||
//
|
||||
// r := c.Drive().Get("forge")
|
||||
func (c *Core) Drive() *Drive { return c.drive }
|
||||
|
||||
// Fs returns the sandboxed filesystem.
|
||||
//
|
||||
// r := c.Fs().Read("/path/to/file")
|
||||
// c.Fs().WriteAtomic("/status.json", data)
|
||||
func (c *Core) Fs() *Fs { return c.fs }
|
||||
|
||||
// Config returns runtime settings and feature flags.
|
||||
//
|
||||
// host := c.Config().String("database.host")
|
||||
// c.Config().Enable("dark-mode")
|
||||
func (c *Core) Config() *Config { return c.config }
|
||||
|
||||
// Error returns the panic recovery subsystem.
|
||||
//
|
||||
// c.Error().Recover()
|
||||
func (c *Core) Error() *ErrorPanic { return c.error }
|
||||
|
||||
// Log returns the structured logging subsystem.
|
||||
//
|
||||
// c.Log().Info("started", "port", 8080)
|
||||
func (c *Core) Log() *ErrorLog { return c.log }
|
||||
|
||||
// Cli returns the CLI command framework (registered as service "cli").
|
||||
//
|
||||
// c.Cli().Run("deploy", "to", "homelab")
|
||||
func (c *Core) Cli() *Cli {
|
||||
cl, _ := ServiceFor[*Cli](c, "cli")
|
||||
return cl
|
||||
}
|
||||
|
||||
// IPC returns the message bus internals.
|
||||
//
|
||||
// c.IPC()
|
||||
func (c *Core) IPC() *Ipc { return c.ipc }
|
||||
|
||||
// I18n returns the internationalisation subsystem.
|
||||
//
|
||||
// tr := c.I18n().Translate("cmd.deploy.description")
|
||||
func (c *Core) I18n() *I18n { return c.i18n }
|
||||
|
||||
// Env returns an environment variable by key (cached at init, falls back to os.Getenv).
|
||||
//
|
||||
// home := c.Env("DIR_HOME")
|
||||
// token := c.Env("FORGE_TOKEN")
|
||||
func (c *Core) Env(key string) string { return Env(key) }
|
||||
|
||||
// Context returns Core's lifecycle context (cancelled on shutdown).
|
||||
//
|
||||
// ctx := c.Context()
|
||||
func (c *Core) Context() context.Context { return c.context }
|
||||
|
||||
// Core returns self — satisfies the ServiceRuntime interface.
|
||||
//
|
||||
// c := s.Core()
|
||||
func (c *Core) Core() *Core { return c }
|
||||
|
||||
// --- Lifecycle ---
|
||||
|
||||
// RunE starts all services, runs the CLI, then shuts down.
|
||||
// Returns an error instead of calling os.Exit — let main() handle the exit.
|
||||
// ServiceShutdown is always called via defer, even on startup failure or panic.
|
||||
//
|
||||
// if err := c.RunE(); err != nil {
|
||||
// os.Exit(1)
|
||||
// }
|
||||
func (c *Core) RunE() error {
|
||||
defer c.ServiceShutdown(context.Background())
|
||||
|
||||
r := c.ServiceStartup(c.context, nil)
|
||||
if !r.OK {
|
||||
if err, ok := r.Value.(error); ok {
|
||||
return err
|
||||
}
|
||||
return E("core.Run", "startup failed", nil)
|
||||
}
|
||||
|
||||
if cli := c.Cli(); cli != nil {
|
||||
r = cli.Run()
|
||||
}
|
||||
|
||||
if !r.OK {
|
||||
if err, ok := r.Value.(error); ok {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run starts all services, runs the CLI, then shuts down.
|
||||
// Calls os.Exit(1) on failure. For error handling use RunE().
|
||||
//
|
||||
// c := core.New(core.WithService(myService.Register))
|
||||
// c.Run()
|
||||
func (c *Core) Run() {
|
||||
if err := c.RunE(); err != nil {
|
||||
Error(err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// --- IPC (uppercase aliases) ---
|
||||
|
||||
// ACTION broadcasts a message to all registered handlers (fire-and-forget).
|
||||
// Each handler is wrapped in panic recovery. All handlers fire regardless.
|
||||
//
|
||||
// c.ACTION(messages.AgentCompleted{Agent: "codex", Status: "completed"})
|
||||
func (c *Core) ACTION(msg Message) Result { return c.broadcast(msg) }
|
||||
|
||||
// QUERY sends a request — first handler to return OK wins.
|
||||
//
|
||||
// r := c.QUERY(MyQuery{Name: "brain"})
|
||||
func (c *Core) QUERY(q Query) Result { return c.Query(q) }
|
||||
|
||||
// QUERYALL sends a request — collects all OK responses.
|
||||
//
|
||||
// r := c.QUERYALL(countQuery{})
|
||||
// results := r.Value.([]any)
|
||||
func (c *Core) QUERYALL(q Query) Result { return c.QueryAll(q) }
|
||||
|
||||
// --- Error+Log ---
|
||||
|
||||
// LogError logs an error and returns the Result from ErrorLog.
|
||||
func (c *Core) LogError(err error, op, msg string) Result {
|
||||
return c.log.Error(err, op, msg)
|
||||
}
|
||||
|
||||
// LogWarn logs a warning and returns the Result from ErrorLog.
|
||||
func (c *Core) LogWarn(err error, op, msg string) Result {
|
||||
return c.log.Warn(err, op, msg)
|
||||
}
|
||||
|
||||
// Must logs and panics if err is not nil.
|
||||
func (c *Core) Must(err error, op, msg string) {
|
||||
c.log.Must(err, op, msg)
|
||||
}
|
||||
|
||||
// --- Registry Accessor ---
|
||||
|
||||
// RegistryOf returns a named registry for cross-cutting queries.
|
||||
// Known registries: "services", "commands", "actions".
|
||||
//
|
||||
// c.RegistryOf("services").Names() // all service names
|
||||
// c.RegistryOf("actions").List("process.*") // process capabilities
|
||||
// c.RegistryOf("commands").Len() // command count
|
||||
func (c *Core) RegistryOf(name string) *Registry[any] {
|
||||
// Bridge typed registries to untyped access for cross-cutting queries.
|
||||
// Each registry is wrapped in a read-only proxy.
|
||||
switch name {
|
||||
case "services":
|
||||
return registryProxy(c.services.Registry)
|
||||
case "commands":
|
||||
return registryProxy(c.commands.Registry)
|
||||
case "actions":
|
||||
return registryProxy(c.ipc.actions)
|
||||
default:
|
||||
return NewRegistry[any]() // empty registry for unknown names
|
||||
}
|
||||
}
|
||||
|
||||
// registryProxy creates a read-only any-typed view of a typed registry.
|
||||
// Copies current state — not a live view (avoids type parameter leaking).
|
||||
func registryProxy[T any](src *Registry[T]) *Registry[any] {
|
||||
proxy := NewRegistry[any]()
|
||||
src.Each(func(name string, item T) {
|
||||
proxy.Set(name, item)
|
||||
})
|
||||
return proxy
|
||||
}
|
||||
|
||||
// --- Global Instance ---
|
||||
168
.core/reference/data.go
Normal file
168
.core/reference/data.go
Normal file
|
|
@ -0,0 +1,168 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Data is the embedded/stored content system for core packages.
|
||||
// Packages mount their embedded content here and other packages
|
||||
// read from it by path.
|
||||
//
|
||||
// Mount a package's assets:
|
||||
//
|
||||
// c.Data().New(core.NewOptions(
|
||||
// core.Option{Key: "name", Value: "brain"},
|
||||
// core.Option{Key: "source", Value: brainFS},
|
||||
// core.Option{Key: "path", Value: "prompts"},
|
||||
// ))
|
||||
//
|
||||
// Read from any mounted path:
|
||||
//
|
||||
// content := c.Data().ReadString("brain/coding.md")
|
||||
// entries := c.Data().List("agent/flow")
|
||||
//
|
||||
// Extract a template directory:
|
||||
//
|
||||
// c.Data().Extract("agent/workspace/default", "/tmp/ws", data)
|
||||
package core
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Data manages mounted embedded filesystems from core packages.
|
||||
// Embeds Registry[*Embed] for thread-safe named storage.
|
||||
type Data struct {
|
||||
*Registry[*Embed]
|
||||
}
|
||||
|
||||
// New registers an embedded filesystem under a named prefix.
|
||||
//
|
||||
// c.Data().New(core.NewOptions(
|
||||
// core.Option{Key: "name", Value: "brain"},
|
||||
// core.Option{Key: "source", Value: brainFS},
|
||||
// core.Option{Key: "path", Value: "prompts"},
|
||||
// ))
|
||||
func (d *Data) New(opts Options) Result {
|
||||
name := opts.String("name")
|
||||
if name == "" {
|
||||
return Result{}
|
||||
}
|
||||
|
||||
r := opts.Get("source")
|
||||
if !r.OK {
|
||||
return r
|
||||
}
|
||||
|
||||
fsys, ok := r.Value.(fs.FS)
|
||||
if !ok {
|
||||
return Result{E("data.New", "source is not fs.FS", nil), false}
|
||||
}
|
||||
|
||||
path := opts.String("path")
|
||||
if path == "" {
|
||||
path = "."
|
||||
}
|
||||
|
||||
mr := Mount(fsys, path)
|
||||
if !mr.OK {
|
||||
return mr
|
||||
}
|
||||
|
||||
emb := mr.Value.(*Embed)
|
||||
d.Set(name, emb)
|
||||
return Result{emb, true}
|
||||
}
|
||||
|
||||
// resolve splits a path like "brain/coding.md" into mount name + relative path.
|
||||
func (d *Data) resolve(path string) (*Embed, string) {
|
||||
parts := SplitN(path, "/", 2)
|
||||
if len(parts) < 2 {
|
||||
return nil, ""
|
||||
}
|
||||
r := d.Get(parts[0])
|
||||
if !r.OK {
|
||||
return nil, ""
|
||||
}
|
||||
return r.Value.(*Embed), parts[1]
|
||||
}
|
||||
|
||||
// ReadFile reads a file by full path.
|
||||
//
|
||||
// r := c.Data().ReadFile("brain/prompts/coding.md")
|
||||
// if r.OK { data := r.Value.([]byte) }
|
||||
func (d *Data) ReadFile(path string) Result {
|
||||
emb, rel := d.resolve(path)
|
||||
if emb == nil {
|
||||
return Result{}
|
||||
}
|
||||
return emb.ReadFile(rel)
|
||||
}
|
||||
|
||||
// ReadString reads a file as a string.
|
||||
//
|
||||
// r := c.Data().ReadString("agent/flow/deploy/to/homelab.yaml")
|
||||
// if r.OK { content := r.Value.(string) }
|
||||
func (d *Data) ReadString(path string) Result {
|
||||
r := d.ReadFile(path)
|
||||
if !r.OK {
|
||||
return r
|
||||
}
|
||||
return Result{string(r.Value.([]byte)), true}
|
||||
}
|
||||
|
||||
// List returns directory entries at a path.
|
||||
//
|
||||
// r := c.Data().List("agent/persona/code")
|
||||
// if r.OK { entries := r.Value.([]fs.DirEntry) }
|
||||
func (d *Data) List(path string) Result {
|
||||
emb, rel := d.resolve(path)
|
||||
if emb == nil {
|
||||
return Result{}
|
||||
}
|
||||
r := emb.ReadDir(rel)
|
||||
if !r.OK {
|
||||
return r
|
||||
}
|
||||
return Result{r.Value, true}
|
||||
}
|
||||
|
||||
// ListNames returns filenames (without extensions) at a path.
|
||||
//
|
||||
// r := c.Data().ListNames("agent/flow")
|
||||
// if r.OK { names := r.Value.([]string) }
|
||||
func (d *Data) ListNames(path string) Result {
|
||||
r := d.List(path)
|
||||
if !r.OK {
|
||||
return r
|
||||
}
|
||||
entries := r.Value.([]fs.DirEntry)
|
||||
var names []string
|
||||
for _, e := range entries {
|
||||
name := e.Name()
|
||||
if !e.IsDir() {
|
||||
name = TrimSuffix(name, filepath.Ext(name))
|
||||
}
|
||||
names = append(names, name)
|
||||
}
|
||||
return Result{names, true}
|
||||
}
|
||||
|
||||
// Extract copies a template directory to targetDir.
|
||||
//
|
||||
// r := c.Data().Extract("agent/workspace/default", "/tmp/ws", templateData)
|
||||
func (d *Data) Extract(path, targetDir string, templateData any) Result {
|
||||
emb, rel := d.resolve(path)
|
||||
if emb == nil {
|
||||
return Result{}
|
||||
}
|
||||
r := emb.Sub(rel)
|
||||
if !r.OK {
|
||||
return r
|
||||
}
|
||||
return Extract(r.Value.(*Embed).FS(), targetDir, templateData)
|
||||
}
|
||||
|
||||
// Mounts returns the names of all mounted content in registration order.
|
||||
//
|
||||
// names := c.Data().Mounts()
|
||||
func (d *Data) Mounts() []string {
|
||||
return d.Names()
|
||||
}
|
||||
434
.core/reference/docs/RFC.md
Normal file
434
.core/reference/docs/RFC.md
Normal file
|
|
@ -0,0 +1,434 @@
|
|||
# core/agent API Contract — RFC Specification
|
||||
|
||||
> `dappco.re/go/core/agent` — Agentic dispatch, orchestration, and pipeline management.
|
||||
> An agent should be able to understand core/agent's architecture from this document alone.
|
||||
|
||||
**Status:** v0.8.0-alpha.1
|
||||
**Module:** `dappco.re/go/core/agent`
|
||||
**Depends on:** core/go v0.8.0, go-process v0.8.0
|
||||
|
||||
---
|
||||
|
||||
## 1. Purpose
|
||||
|
||||
core/agent dispatches AI agents (Claude, Codex, Gemini) to work on tasks in sandboxed git worktrees, monitors their progress, verifies output, and manages the merge pipeline.
|
||||
|
||||
core/go provides the primitives. core/agent composes them.
|
||||
|
||||
### File Layout
|
||||
|
||||
```
|
||||
cmd/core-agent/main.go — entry point: core.New + Run
|
||||
pkg/agentic/ — orchestration (dispatch, prep, verify, scan, commands)
|
||||
pkg/agentic/actions.go — named Action handlers (ctx, Options) → Result
|
||||
pkg/agentic/pid.go — PID lifecycle helpers
|
||||
pkg/agentic/handlers.go — IPC completion pipeline handlers
|
||||
pkg/agentic/status.go — workspace status (WriteAtomic + JSONMarshalString)
|
||||
pkg/agentic/paths.go — paths, fs (NewUnrestricted), helpers
|
||||
pkg/brain/ — OpenBrain (recall, remember, search)
|
||||
pkg/lib/ — embedded templates, personas, flows, plans
|
||||
pkg/messages/ — typed message structs for IPC broadcast
|
||||
pkg/monitor/ — agent monitoring via IPC (ServiceRuntime)
|
||||
pkg/setup/ — workspace detection + scaffolding (Service)
|
||||
claude/ — Claude Code plugin definitions
|
||||
docs/ — RFC, plans, architecture
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 2. Service Registration
|
||||
|
||||
All services use `ServiceRuntime[T]` — no raw `core *core.Core` fields.
|
||||
|
||||
```go
|
||||
func Register(c *core.Core) core.Result {
|
||||
prep := NewPrep()
|
||||
prep.ServiceRuntime = core.NewServiceRuntime(c, AgentOptions{})
|
||||
|
||||
cfg := prep.loadAgentsConfig()
|
||||
c.Config().Set("agents.concurrency", cfg.Concurrency)
|
||||
c.Config().Set("agents.rates", cfg.Rates)
|
||||
|
||||
RegisterHandlers(c, prep)
|
||||
return core.Result{Value: prep, OK: true}
|
||||
}
|
||||
|
||||
// In main:
|
||||
c := core.New(
|
||||
core.WithService(process.Register),
|
||||
core.WithService(agentic.Register),
|
||||
core.WithService(brain.Register),
|
||||
core.WithService(monitor.Register),
|
||||
core.WithService(mcp.Register),
|
||||
)
|
||||
c.Run()
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 3. Named Actions — The Capability Map
|
||||
|
||||
All capabilities registered as named Actions during OnStartup. Inspectable, composable, gatable by Entitlements.
|
||||
|
||||
```go
|
||||
func (s *PrepSubsystem) OnStartup(ctx context.Context) core.Result {
|
||||
c := s.Core()
|
||||
|
||||
// Dispatch & workspace
|
||||
c.Action("agentic.dispatch", s.handleDispatch)
|
||||
c.Action("agentic.prep", s.handlePrep)
|
||||
c.Action("agentic.status", s.handleStatus)
|
||||
c.Action("agentic.resume", s.handleResume)
|
||||
c.Action("agentic.scan", s.handleScan)
|
||||
c.Action("agentic.watch", s.handleWatch)
|
||||
|
||||
// Pipeline
|
||||
c.Action("agentic.qa", s.handleQA)
|
||||
c.Action("agentic.auto-pr", s.handleAutoPR)
|
||||
c.Action("agentic.verify", s.handleVerify)
|
||||
c.Action("agentic.ingest", s.handleIngest)
|
||||
c.Action("agentic.poke", s.handlePoke)
|
||||
c.Action("agentic.mirror", s.handleMirror)
|
||||
|
||||
// Forge
|
||||
c.Action("agentic.issue.get", s.handleIssueGet)
|
||||
c.Action("agentic.issue.list", s.handleIssueList)
|
||||
c.Action("agentic.issue.create", s.handleIssueCreate)
|
||||
c.Action("agentic.pr.get", s.handlePRGet)
|
||||
c.Action("agentic.pr.list", s.handlePRList)
|
||||
c.Action("agentic.pr.merge", s.handlePRMerge)
|
||||
|
||||
// Review & Epic
|
||||
c.Action("agentic.review-queue", s.handleReviewQueue)
|
||||
c.Action("agentic.epic", s.handleEpic)
|
||||
|
||||
// Completion pipeline — Task composition
|
||||
c.Task("agent.completion", core.Task{
|
||||
Description: "QA → PR → Verify → Merge",
|
||||
Steps: []core.Step{
|
||||
{Action: "agentic.qa"},
|
||||
{Action: "agentic.auto-pr"},
|
||||
{Action: "agentic.verify"},
|
||||
{Action: "agentic.ingest", Async: true},
|
||||
{Action: "agentic.poke", Async: true},
|
||||
},
|
||||
})
|
||||
|
||||
s.StartRunner()
|
||||
s.registerCommands(ctx)
|
||||
s.registerWorkspaceCommands()
|
||||
s.registerForgeCommands()
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 4. Completion Pipeline
|
||||
|
||||
When an agent completes, the IPC handler chain fires. Registered in `RegisterHandlers()`:
|
||||
|
||||
```
|
||||
AgentCompleted → QA handler → QAResult
|
||||
QAResult{Passed} → PR handler → PRCreated
|
||||
PRCreated → Verify handler → PRMerged | PRNeedsReview
|
||||
AgentCompleted → Ingest handler (findings → issues)
|
||||
AgentCompleted → Poke handler (drain queue)
|
||||
```
|
||||
|
||||
All handlers use `c.ACTION(messages.X{})` — no ChannelNotifier, no callbacks.
|
||||
|
||||
---
|
||||
|
||||
## 5. Process Execution
|
||||
|
||||
All commands via `s.Core().Process()`. Returns `core.Result` — Value is always a string.
|
||||
|
||||
```go
|
||||
process := s.Core().Process()
|
||||
r := process.RunIn(ctx, dir, "git", "log", "--oneline", "-20")
|
||||
if r.OK {
|
||||
output := core.Trim(r.Value.(string))
|
||||
}
|
||||
|
||||
r = process.RunWithEnv(ctx, dir, []string{"GOWORK=off"}, "go", "test", "./...")
|
||||
```
|
||||
|
||||
go-process is fully Result-native. `Start`, `Run`, `StartWithOptions`, `RunWithOptions` all return `core.Result`. Value is `*Process` for Start, `string` for Run. OK=true guarantees the type.
|
||||
|
||||
---
|
||||
|
||||
## 6. Status Management
|
||||
|
||||
Workspace status uses `WriteAtomic` + `JSONMarshalString` for safe concurrent access:
|
||||
|
||||
```go
|
||||
func writeStatus(wsDir string, status *WorkspaceStatus) error {
|
||||
status.UpdatedAt = time.Now()
|
||||
statusPath := core.JoinPath(wsDir, "status.json")
|
||||
if r := fs.WriteAtomic(statusPath, core.JSONMarshalString(status)); !r.OK {
|
||||
err, _ := r.Value.(error)
|
||||
return core.E("writeStatus", "failed to write status", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 7. Filesystem
|
||||
|
||||
No `unsafe.Pointer`. Package-level unrestricted Fs via Core primitive:
|
||||
|
||||
```go
|
||||
var fs = (&core.Fs{}).NewUnrestricted()
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 8. IPC Messages
|
||||
|
||||
All inter-service communication via typed messages in `pkg/messages/`:
|
||||
|
||||
```go
|
||||
// Agent lifecycle
|
||||
messages.AgentStarted{Agent, Repo, Workspace}
|
||||
messages.AgentCompleted{Agent, Repo, Workspace, Status}
|
||||
|
||||
// Pipeline
|
||||
messages.QAResult{Workspace, Repo, Passed}
|
||||
messages.PRCreated{Repo, Branch, PRURL, PRNum}
|
||||
messages.PRMerged{Repo, PRURL, PRNum}
|
||||
messages.PRNeedsReview{Repo, PRURL, PRNum, Reason}
|
||||
|
||||
// Queue
|
||||
messages.QueueDrained{Completed}
|
||||
messages.PokeQueue{}
|
||||
|
||||
// Monitor
|
||||
messages.HarvestComplete{Repo, Branch, Files}
|
||||
messages.HarvestRejected{Repo, Branch, Reason}
|
||||
messages.InboxMessage{New, Total}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 9. Monitor
|
||||
|
||||
Embeds `*core.ServiceRuntime[MonitorOptions]`. All notifications via `m.Core().ACTION(messages.X{})` — no ChannelNotifier interface. Git operations via `m.Core().Process()`.
|
||||
|
||||
```go
|
||||
func Register(c *core.Core) core.Result {
|
||||
mon := New()
|
||||
mon.ServiceRuntime = core.NewServiceRuntime(c, MonitorOptions{})
|
||||
|
||||
c.RegisterAction(func(c *core.Core, msg core.Message) core.Result {
|
||||
switch ev := msg.(type) {
|
||||
case messages.AgentCompleted:
|
||||
mon.handleAgentCompleted(ev)
|
||||
case messages.AgentStarted:
|
||||
mon.handleAgentStarted(ev)
|
||||
}
|
||||
return core.Result{OK: true}
|
||||
})
|
||||
|
||||
return core.Result{Value: mon, OK: true}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 10. Setup
|
||||
|
||||
Service with `*core.ServiceRuntime[SetupOptions]`. Detects project type, generates configs, scaffolds workspaces.
|
||||
|
||||
```go
|
||||
func Register(c *core.Core) core.Result {
|
||||
svc := &Service{
|
||||
ServiceRuntime: core.NewServiceRuntime(c, SetupOptions{}),
|
||||
}
|
||||
return core.Result{Value: svc, OK: true}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 11. Entitlements
|
||||
|
||||
Actions are gated by `c.Entitled()` — checked automatically in `Action.Run()`.
|
||||
|
||||
```go
|
||||
func (s *PrepSubsystem) handleDispatch(ctx context.Context, opts core.Options) core.Result {
|
||||
e := s.Core().Entitled("agentic.concurrency", 1)
|
||||
if !e.Allowed {
|
||||
return core.Result{Value: core.E("dispatch", e.Reason, nil), OK: false}
|
||||
}
|
||||
// ... dispatch agent ...
|
||||
s.Core().RecordUsage("agentic.dispatch")
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 12. MCP — Action Aggregator
|
||||
|
||||
MCP auto-exposes all registered Actions as tools via `c.Actions()`. Register an Action → it appears as an MCP tool. The API stream primitive (`c.API()`) handles transport.
|
||||
|
||||
---
|
||||
|
||||
## 13. Remote Dispatch
|
||||
|
||||
Transparent local/remote via `host:action` syntax:
|
||||
|
||||
```go
|
||||
r := c.RemoteAction("agentic.status", ctx, opts) // local
|
||||
r := c.RemoteAction("charon:agentic.dispatch", ctx, opts) // remote
|
||||
r := c.RemoteAction("snider.lthn:brain.recall", ctx, opts) // web3
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 14. Quality Gates
|
||||
|
||||
```bash
|
||||
# No disallowed imports (source files only)
|
||||
grep -rn '"os"\|"os/exec"\|"io"\|"fmt"\|"errors"\|"log"\|"encoding/json"\|"path/filepath"\|"unsafe"\|"strings"' *.go **/*.go \
|
||||
| grep -v _test.go
|
||||
|
||||
# Test naming: TestFile_Function_{Good,Bad,Ugly}
|
||||
grep -rn "^func Test" *_test.go **/*_test.go \
|
||||
| grep -v "Test[A-Z][a-z]*_.*_\(Good\|Bad\|Ugly\)"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 15. Validation and IDs
|
||||
|
||||
```go
|
||||
if r := core.ValidateName(input.Repo); !r.OK { return r }
|
||||
safe := core.SanitisePath(userInput)
|
||||
id := core.ID() // "id-42-a3f2b1"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 16. JSON Serialisation
|
||||
|
||||
All JSON via Core primitives. No `encoding/json` import.
|
||||
|
||||
```go
|
||||
data := core.JSONMarshalString(status)
|
||||
core.JSONUnmarshalString(jsonStr, &result)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 17. Configuration
|
||||
|
||||
```go
|
||||
c.Config().Set("agents.concurrency", 5)
|
||||
c.Config().String("workspace.root")
|
||||
c.Config().Int("agents.concurrency")
|
||||
c.Config().Enable("auto-merge")
|
||||
if c.Config().Enabled("auto-merge") { ... }
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 18. Registry
|
||||
|
||||
Use `Registry[T]` for any named collection. No `map[string]*T + sync.Mutex`.
|
||||
|
||||
```go
|
||||
workspaces := core.NewRegistry[*WorkspaceStatus]()
|
||||
workspaces.Set(wsDir, status)
|
||||
workspaces.Get(wsDir)
|
||||
workspaces.Each(func(dir string, st *WorkspaceStatus) { ... })
|
||||
workspaces.Names() // insertion order
|
||||
c.RegistryOf("actions").List("agentic.*")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 19. String Operations
|
||||
|
||||
No `fmt`, no `strings`, no `+` concat. Core provides everything:
|
||||
|
||||
```go
|
||||
core.Println(value) // not fmt.Println
|
||||
core.Sprintf("port: %d", port) // not fmt.Sprintf
|
||||
core.Concat("hello ", name) // not "hello " + name
|
||||
core.Path(dir, "status.json") // not dir + "/status.json"
|
||||
core.Contains(s, "needle")             // not strings.Contains
|
||||
core.Split(s, "/") // not strings.Split
|
||||
core.Trim(s) // not strings.TrimSpace
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 20. Error Handling and Logging
|
||||
|
||||
All errors via `core.E()`. All logging via Core. No `fmt`, `errors`, or `log` imports.
|
||||
|
||||
```go
|
||||
return core.E("dispatch.prep", "workspace not found", nil)
|
||||
return core.E("dispatch.prep", core.Concat("repo ", repo, " invalid"), cause)
|
||||
core.Info("agent dispatched", "repo", repo, "agent", agent)
|
||||
core.Error("dispatch failed", "err", err)
|
||||
core.Security("entitlement.denied", "action", action, "reason", reason)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 21. Stream Helpers and Data
|
||||
|
||||
```go
|
||||
r := c.Data().ReadString("prompts/coding.md")
|
||||
c.Data().List("templates/")
|
||||
c.Drive().New(core.NewOptions(
|
||||
core.Option{Key: "name", Value: "charon"},
|
||||
core.Option{Key: "transport", Value: "http://10.69.69.165:9101"},
|
||||
))
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 22. Comments (AX Principle 2)
|
||||
|
||||
Every exported function MUST have a usage-example comment:
|
||||
|
||||
```go
|
||||
// Process runs a git command in a directory.
|
||||
//
|
||||
// r := s.Core().Process().RunIn(ctx, "/repo", "git", "log", "--oneline")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 23. Test Strategy (AX Principle 7)
|
||||
|
||||
`TestFile_Function_{Good,Bad,Ugly}` — 100% naming compliance target.
|
||||
|
||||
---
|
||||
|
||||
## Consumer RFCs
|
||||
|
||||
| Package | RFC | Role |
|
||||
|---------|-----|------|
|
||||
| core/go | `core/go/docs/RFC.md` | Primitives — all 21 sections |
|
||||
| go-process | `core/go-process/docs/RFC.md` | Process Action handlers (Result-native) |
|
||||
|
||||
---
|
||||
|
||||
## Changelog
|
||||
|
||||
- 2026-03-30: `version.go` now has an example companion, closing the last build-relevant source file without example coverage.
|
||||
- 2026-03-30: `pkg/agentic/commands_workspace.go` now has a matching example companion, closing the last agentic source file without example coverage.
|
||||
- 2026-03-30: plan files and review queue rate-limit state now use `WriteAtomic`, keeping JSON state writes aligned with the AX safe-write convention.
|
||||
- 2026-03-30: transport helpers preserve request and read causes, brain direct API calls surface upstream bodies, and review queue retry parsing no longer uses `MustCompile`.
|
||||
- 2026-03-30: direct Core process calls replaced the `proc.go` wrapper layer; PID helpers now live in `pid.go` and the workspace template documents `c.Process()` directly.
|
||||
- 2026-03-29: cmd/core-agent no longer rewrites `os.Args` before startup. The binary-owned commands now use named handlers, keeping the entrypoint on Core CLI primitives instead of repo-local argument mutation.
|
||||
- 2026-03-26: net/http consolidated to transport.go (ONE file). net/url + io/fs eliminated. RFC-025 updated with 3 new quality gates (net/http, net/url, io/fs). 1:1 test + example test coverage. Array[T].Deduplicate replaces custom helpers.
|
||||
- 2026-03-25: Quality gates pass. Zero disallowed imports (all 10). encoding/json→Core JSON. path/filepath→Core Path. os→Core Env/Fs. io→Core ReadAll/WriteAll. go-process fully Result-native. ServiceRuntime on all subsystems. 22 named Actions + Task pipeline. ChannelNotifier→IPC. Reference docs synced.
|
||||
- 2026-03-25: Initial spec — written with full core/go v0.8.0 domain context.
|
||||
177
.core/reference/docs/commands.md
Normal file
177
.core/reference/docs/commands.md
Normal file
|
|
@ -0,0 +1,177 @@
|
|||
---
|
||||
title: Commands
|
||||
description: Path-based command registration and CLI execution.
|
||||
---
|
||||
|
||||
# Commands
|
||||
|
||||
Commands are one of the most AX-native parts of CoreGO. The path is the identity.
|
||||
|
||||
## Register a Command
|
||||
|
||||
```go
|
||||
c.Command("deploy/to/homelab", core.Command{
|
||||
Action: func(opts core.Options) core.Result {
|
||||
target := opts.String("target")
|
||||
return core.Result{Value: "deploying to " + target, OK: true}
|
||||
},
|
||||
})
|
||||
```
|
||||
|
||||
## Command Paths
|
||||
|
||||
Paths must be clean:
|
||||
|
||||
- no empty path
|
||||
- no leading slash
|
||||
- no trailing slash
|
||||
- no double slash
|
||||
|
||||
These paths are valid:
|
||||
|
||||
```text
|
||||
deploy
|
||||
deploy/to/homelab
|
||||
workspace/create
|
||||
```
|
||||
|
||||
These are rejected:
|
||||
|
||||
```text
|
||||
/deploy
|
||||
deploy/
|
||||
deploy//to
|
||||
```
|
||||
|
||||
## Parent Commands Are Auto-Created
|
||||
|
||||
When you register `deploy/to/homelab`, CoreGO also creates placeholder parents if they do not already exist:
|
||||
|
||||
- `deploy`
|
||||
- `deploy/to`
|
||||
|
||||
This makes the path tree navigable without extra setup.
|
||||
|
||||
## Read a Command Back
|
||||
|
||||
```go
|
||||
r := c.Command("deploy/to/homelab")
|
||||
if r.OK {
|
||||
cmd := r.Value.(*core.Command)
|
||||
_ = cmd
|
||||
}
|
||||
```
|
||||
|
||||
## Run a Command Directly
|
||||
|
||||
```go
|
||||
cmd := c.Command("deploy/to/homelab").Value.(*core.Command)
|
||||
|
||||
r := cmd.Run(core.Options{
|
||||
{Key: "target", Value: "uk-prod"},
|
||||
})
|
||||
```
|
||||
|
||||
If `Action` is nil, `Run` returns `Result{OK:false}` with a structured error.
|
||||
|
||||
## Run Through the CLI Surface
|
||||
|
||||
```go
|
||||
r := c.Cli().Run("deploy", "to", "homelab", "--target=uk-prod", "--debug")
|
||||
```
|
||||
|
||||
`Cli.Run` resolves the longest matching command path from the arguments, then converts the remaining args into `core.Options`.
|
||||
|
||||
## Flag Parsing Rules
|
||||
|
||||
### Double Dash
|
||||
|
||||
```text
|
||||
--target=uk-prod -> key "target", value "uk-prod"
|
||||
--debug -> key "debug", value true
|
||||
```
|
||||
|
||||
### Single Dash
|
||||
|
||||
```text
|
||||
-v -> key "v", value true
|
||||
-n=4 -> key "n", value "4"
|
||||
```
|
||||
|
||||
### Positional Arguments
|
||||
|
||||
Non-flag arguments after the command path are stored as repeated `_arg` options.
|
||||
|
||||
```go
|
||||
r := c.Cli().Run("workspace", "open", "alpha")
|
||||
```
|
||||
|
||||
That produces an option like:
|
||||
|
||||
```go
|
||||
core.Option{Key: "_arg", Value: "alpha"}
|
||||
```
|
||||
|
||||
### Important Details
|
||||
|
||||
- flag values stay as strings
|
||||
- `opts.Int("port")` only works if some code stored an actual `int`
|
||||
- invalid flags such as `-verbose` and `--v` are ignored
|
||||
|
||||
## Help Output
|
||||
|
||||
`Cli.PrintHelp()` prints executable commands:
|
||||
|
||||
```go
|
||||
c.Cli().PrintHelp()
|
||||
```
|
||||
|
||||
It skips:
|
||||
|
||||
- hidden commands
|
||||
- placeholder parents with no `Action` and no `Lifecycle`
|
||||
|
||||
Descriptions are resolved through `cmd.I18nKey()`.
|
||||
|
||||
## I18n Description Keys
|
||||
|
||||
If `Description` is empty, CoreGO derives a key from the path.
|
||||
|
||||
```text
|
||||
deploy -> cmd.deploy.description
|
||||
deploy/to/homelab -> cmd.deploy.to.homelab.description
|
||||
workspace/create -> cmd.workspace.create.description
|
||||
```
|
||||
|
||||
If `Description` is already set, CoreGO uses it as-is.
|
||||
|
||||
## Lifecycle Commands
|
||||
|
||||
Commands can also delegate to a lifecycle implementation.
|
||||
|
||||
```go
|
||||
type daemonCommand struct{}
|
||||
|
||||
func (d *daemonCommand) Start(opts core.Options) core.Result { return core.Result{OK: true} }
|
||||
func (d *daemonCommand) Stop() core.Result { return core.Result{OK: true} }
|
||||
func (d *daemonCommand) Restart() core.Result { return core.Result{OK: true} }
|
||||
func (d *daemonCommand) Reload() core.Result { return core.Result{OK: true} }
|
||||
func (d *daemonCommand) Signal(sig string) core.Result { return core.Result{Value: sig, OK: true} }
|
||||
|
||||
c.Command("agent/serve", core.Command{
|
||||
Lifecycle: &daemonCommand{},
|
||||
})
|
||||
```
|
||||
|
||||
Important behavior:
|
||||
|
||||
- `Start` falls back to `Run` when `Lifecycle` is nil
|
||||
- `Stop`, `Restart`, `Reload`, and `Signal` return an empty `Result` when `Lifecycle` is nil
|
||||
|
||||
## List Command Paths
|
||||
|
||||
```go
|
||||
paths := c.Commands()
|
||||
```
|
||||
|
||||
Like the service registry, the command registry is map-backed, so iteration order is not guaranteed.
|
||||
96
.core/reference/docs/configuration.md
Normal file
96
.core/reference/docs/configuration.md
Normal file
|
|
@ -0,0 +1,96 @@
|
|||
---
|
||||
title: Configuration
|
||||
description: Constructor options, runtime settings, and feature flags.
|
||||
---
|
||||
|
||||
# Configuration
|
||||
|
||||
CoreGO uses two different configuration layers:
|
||||
|
||||
- constructor-time `core.Options`
|
||||
- runtime `c.Config()`
|
||||
|
||||
## Constructor-Time Options
|
||||
|
||||
```go
|
||||
c := core.New(core.Options{
|
||||
{Key: "name", Value: "agent-workbench"},
|
||||
})
|
||||
```
|
||||
|
||||
### Current Behavior
|
||||
|
||||
- `New` accepts `opts ...Options`
|
||||
- the current implementation copies only the first `Options` slice
|
||||
- the `name` key is applied to `c.App().Name`
|
||||
|
||||
If you need more constructor data, put it in the first `core.Options` slice.
|
||||
|
||||
## Runtime Settings with `Config`
|
||||
|
||||
Use `c.Config()` for mutable process settings.
|
||||
|
||||
```go
|
||||
c.Config().Set("workspace.root", "/srv/workspaces")
|
||||
c.Config().Set("max_agents", 8)
|
||||
c.Config().Set("debug", true)
|
||||
```
|
||||
|
||||
Read them back with:
|
||||
|
||||
```go
|
||||
root := c.Config().String("workspace.root")
|
||||
maxAgents := c.Config().Int("max_agents")
|
||||
debug := c.Config().Bool("debug")
|
||||
raw := c.Config().Get("workspace.root")
|
||||
```
|
||||
|
||||
### Important Details
|
||||
|
||||
- missing keys return zero values
|
||||
- typed accessors do not coerce strings into ints or bools
|
||||
- `Get` returns `core.Result`
|
||||
|
||||
## Feature Flags
|
||||
|
||||
`Config` also tracks named feature flags.
|
||||
|
||||
```go
|
||||
c.Config().Enable("workspace.templates")
|
||||
c.Config().Enable("agent.review")
|
||||
c.Config().Disable("agent.review")
|
||||
```
|
||||
|
||||
Read them with:
|
||||
|
||||
```go
|
||||
enabled := c.Config().Enabled("workspace.templates")
|
||||
features := c.Config().EnabledFeatures()
|
||||
```
|
||||
|
||||
Feature names are case-sensitive.
|
||||
|
||||
## `ConfigVar[T]`
|
||||
|
||||
Use `ConfigVar[T]` when you need a typed value that can also represent “set versus unset”.
|
||||
|
||||
```go
|
||||
theme := core.NewConfigVar("amber")
|
||||
|
||||
if theme.IsSet() {
|
||||
fmt.Println(theme.Get())
|
||||
}
|
||||
|
||||
theme.Unset()
|
||||
```
|
||||
|
||||
This is useful for package-local state where zero values are not enough to describe configuration presence.
|
||||
|
||||
## Recommended Pattern
|
||||
|
||||
Use the two layers for different jobs:
|
||||
|
||||
- put startup identity such as `name` into `core.Options`
|
||||
- put mutable runtime values and feature switches into `c.Config()`
|
||||
|
||||
That keeps constructor intent separate from live process state.
|
||||
120
.core/reference/docs/errors.md
Normal file
120
.core/reference/docs/errors.md
Normal file
|
|
@ -0,0 +1,120 @@
|
|||
---
|
||||
title: Errors
|
||||
description: Structured errors, logging helpers, and panic recovery.
|
||||
---
|
||||
|
||||
# Errors
|
||||
|
||||
CoreGO treats failures as structured operational data.
|
||||
|
||||
Repository convention: use `E()` instead of `fmt.Errorf` for framework and service errors.
|
||||
|
||||
## `Err`
|
||||
|
||||
The structured error type is:
|
||||
|
||||
```go
|
||||
type Err struct {
|
||||
Operation string
|
||||
Message string
|
||||
Cause error
|
||||
Code string
|
||||
}
|
||||
```
|
||||
|
||||
## Create Errors
|
||||
|
||||
### `E`
|
||||
|
||||
```go
|
||||
err := core.E("workspace.Load", "failed to read workspace manifest", cause)
|
||||
```
|
||||
|
||||
### `Wrap`
|
||||
|
||||
```go
|
||||
err := core.Wrap(cause, "workspace.Load", "manifest parse failed")
|
||||
```
|
||||
|
||||
### `WrapCode`
|
||||
|
||||
```go
|
||||
err := core.WrapCode(cause, "WORKSPACE_INVALID", "workspace.Load", "manifest parse failed")
|
||||
```
|
||||
|
||||
### `NewCode`
|
||||
|
||||
```go
|
||||
err := core.NewCode("NOT_FOUND", "workspace not found")
|
||||
```
|
||||
|
||||
## Inspect Errors
|
||||
|
||||
```go
|
||||
op := core.Operation(err)
|
||||
code := core.ErrorCode(err)
|
||||
msg := core.ErrorMessage(err)
|
||||
root := core.Root(err)
|
||||
stack := core.StackTrace(err)
|
||||
pretty := core.FormatStackTrace(err)
|
||||
```
|
||||
|
||||
These helpers keep the operational chain visible without extra type assertions.
|
||||
|
||||
## Join and Standard Wrappers
|
||||
|
||||
```go
|
||||
combined := core.ErrorJoin(err1, err2)
|
||||
same := core.Is(combined, err1)
|
||||
```
|
||||
|
||||
`core.As` and `core.NewError` mirror the standard library for convenience.
|
||||
|
||||
## Log-and-Return Helpers
|
||||
|
||||
`Core` exposes two convenience wrappers:
|
||||
|
||||
```go
|
||||
r1 := c.LogError(err, "workspace.Load", "workspace load failed")
|
||||
r2 := c.LogWarn(err, "workspace.Load", "workspace load degraded")
|
||||
```
|
||||
|
||||
These log through the default logger and return `core.Result`.
|
||||
|
||||
You can also use the underlying `ErrorLog` directly:
|
||||
|
||||
```go
|
||||
r := c.Log().Error(err, "workspace.Load", "workspace load failed")
|
||||
```
|
||||
|
||||
`Must` logs and then panics when the error is non-nil:
|
||||
|
||||
```go
|
||||
c.Must(err, "workspace.Load", "workspace load failed")
|
||||
```
|
||||
|
||||
## Panic Recovery
|
||||
|
||||
`ErrorPanic` handles process-safe panic capture.
|
||||
|
||||
```go
|
||||
defer c.Error().Recover()
|
||||
```
|
||||
|
||||
Run background work with recovery:
|
||||
|
||||
```go
|
||||
c.Error().SafeGo(func() {
|
||||
panic("captured")
|
||||
})
|
||||
```
|
||||
|
||||
If `ErrorPanic` has a configured crash file path, it appends JSON crash reports and `Reports(n)` reads them back.
|
||||
|
||||
That crash file path is currently internal state on `ErrorPanic`, not a public constructor option on `Core.New()`.
|
||||
|
||||
## Logging and Error Context
|
||||
|
||||
The logging subsystem automatically extracts `op` and logical stack information from structured errors when those values are present in the key-value list.
|
||||
|
||||
That makes errors created with `E`, `Wrap`, or `WrapCode` much easier to follow in logs.
|
||||
208
.core/reference/docs/getting-started.md
Normal file
208
.core/reference/docs/getting-started.md
Normal file
|
|
@ -0,0 +1,208 @@
|
|||
---
|
||||
title: Getting Started
|
||||
description: Build a first CoreGO application with the current API.
|
||||
---
|
||||
|
||||
# Getting Started
|
||||
|
||||
This page shows the shortest path to a useful CoreGO application using the API that exists in this repository today.
|
||||
|
||||
## Install
|
||||
|
||||
```bash
|
||||
go get dappco.re/go/core
|
||||
```
|
||||
|
||||
## Create a Core
|
||||
|
||||
`New` takes zero or more `core.Options` slices, but the current implementation only reads the first one. In practice, treat the constructor as `core.New(core.Options{...})`.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import "dappco.re/go/core"
|
||||
|
||||
func main() {
|
||||
c := core.New(core.Options{
|
||||
{Key: "name", Value: "agent-workbench"},
|
||||
})
|
||||
|
||||
_ = c
|
||||
}
|
||||
```
|
||||
|
||||
The `name` option is copied into `c.App().Name`.
|
||||
|
||||
## Register a Service
|
||||
|
||||
Services are registered explicitly with a name and a `core.Service` DTO.
|
||||
|
||||
```go
|
||||
c.Service("audit", core.Service{
|
||||
OnStart: func() core.Result {
|
||||
core.Info("audit service started", "app", c.App().Name)
|
||||
return core.Result{OK: true}
|
||||
},
|
||||
OnStop: func() core.Result {
|
||||
core.Info("audit service stopped", "app", c.App().Name)
|
||||
return core.Result{OK: true}
|
||||
},
|
||||
})
|
||||
```
|
||||
|
||||
This registry stores `core.Service` values. It is a lifecycle registry, not a typed object container.
|
||||
|
||||
## Register a Query, Task, and Command
|
||||
|
||||
```go
|
||||
type workspaceCountQuery struct{}
|
||||
|
||||
type createWorkspaceTask struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
c.RegisterQuery(func(_ *core.Core, q core.Query) core.Result {
|
||||
switch q.(type) {
|
||||
case workspaceCountQuery:
|
||||
return core.Result{Value: 1, OK: true}
|
||||
}
|
||||
return core.Result{}
|
||||
})
|
||||
|
||||
c.RegisterTask(func(_ *core.Core, t core.Task) core.Result {
|
||||
switch task := t.(type) {
|
||||
case createWorkspaceTask:
|
||||
path := "/tmp/agent-workbench/" + task.Name
|
||||
return core.Result{Value: path, OK: true}
|
||||
}
|
||||
return core.Result{}
|
||||
})
|
||||
|
||||
c.Command("workspace/create", core.Command{
|
||||
Action: func(opts core.Options) core.Result {
|
||||
return c.PERFORM(createWorkspaceTask{
|
||||
Name: opts.String("name"),
|
||||
})
|
||||
},
|
||||
})
|
||||
```
|
||||
|
||||
## Start the Runtime
|
||||
|
||||
```go
|
||||
if !c.ServiceStartup(context.Background(), nil).OK {
|
||||
panic("startup failed")
|
||||
}
|
||||
```
|
||||
|
||||
`ServiceStartup` returns `core.Result`, not `error`.
|
||||
|
||||
## Run Through the CLI Surface
|
||||
|
||||
```go
|
||||
r := c.Cli().Run("workspace", "create", "--name=alpha")
|
||||
if r.OK {
|
||||
fmt.Println("created:", r.Value)
|
||||
}
|
||||
```
|
||||
|
||||
For flags with values, the CLI stores the value as a string. `--name=alpha` becomes `opts.String("name") == "alpha"`.
|
||||
|
||||
## Query the System
|
||||
|
||||
```go
|
||||
count := c.QUERY(workspaceCountQuery{})
|
||||
if count.OK {
|
||||
fmt.Println("workspace count:", count.Value)
|
||||
}
|
||||
```
|
||||
|
||||
## Shut Down Cleanly
|
||||
|
||||
```go
|
||||
_ = c.ServiceShutdown(context.Background())
|
||||
```
|
||||
|
||||
Shutdown cancels `c.Context()`, broadcasts `ActionServiceShutdown{}`, waits for background tasks to finish, and then runs service stop hooks.
|
||||
|
||||
## Full Example
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"dappco.re/go/core"
|
||||
)
|
||||
|
||||
type workspaceCountQuery struct{}
|
||||
|
||||
type createWorkspaceTask struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
func main() {
|
||||
c := core.New(core.Options{
|
||||
{Key: "name", Value: "agent-workbench"},
|
||||
})
|
||||
|
||||
c.Config().Set("workspace.root", "/tmp/agent-workbench")
|
||||
c.Config().Enable("workspace.templates")
|
||||
|
||||
c.Service("audit", core.Service{
|
||||
OnStart: func() core.Result {
|
||||
core.Info("service started", "service", "audit")
|
||||
return core.Result{OK: true}
|
||||
},
|
||||
OnStop: func() core.Result {
|
||||
core.Info("service stopped", "service", "audit")
|
||||
return core.Result{OK: true}
|
||||
},
|
||||
})
|
||||
|
||||
c.RegisterQuery(func(_ *core.Core, q core.Query) core.Result {
|
||||
switch q.(type) {
|
||||
case workspaceCountQuery:
|
||||
return core.Result{Value: 1, OK: true}
|
||||
}
|
||||
return core.Result{}
|
||||
})
|
||||
|
||||
c.RegisterTask(func(_ *core.Core, t core.Task) core.Result {
|
||||
switch task := t.(type) {
|
||||
case createWorkspaceTask:
|
||||
path := c.Config().String("workspace.root") + "/" + task.Name
|
||||
return core.Result{Value: path, OK: true}
|
||||
}
|
||||
return core.Result{}
|
||||
})
|
||||
|
||||
c.Command("workspace/create", core.Command{
|
||||
Action: func(opts core.Options) core.Result {
|
||||
return c.PERFORM(createWorkspaceTask{
|
||||
Name: opts.String("name"),
|
||||
})
|
||||
},
|
||||
})
|
||||
|
||||
if !c.ServiceStartup(context.Background(), nil).OK {
|
||||
panic("startup failed")
|
||||
}
|
||||
|
||||
created := c.Cli().Run("workspace", "create", "--name=alpha")
|
||||
fmt.Println("created:", created.Value)
|
||||
|
||||
count := c.QUERY(workspaceCountQuery{})
|
||||
fmt.Println("workspace count:", count.Value)
|
||||
|
||||
_ = c.ServiceShutdown(context.Background())
|
||||
}
|
||||
```
|
||||
|
||||
## Next Steps
|
||||
|
||||
- Read [primitives.md](primitives.md) next so the repeated shapes are clear.
|
||||
- Read [commands.md](commands.md) if you are building a CLI-first system.
|
||||
- Read [messaging.md](messaging.md) if services need to collaborate without direct imports.
|
||||
114
.core/reference/docs/index.md
Normal file
114
.core/reference/docs/index.md
Normal file
|
|
@ -0,0 +1,114 @@
|
|||
---
|
||||
title: CoreGO
|
||||
description: AX-first documentation for the CoreGO framework.
|
||||
---
|
||||
|
||||
# CoreGO
|
||||
|
||||
CoreGO is the foundation layer for the Core ecosystem. It gives you one container, one command tree, one message bus, and a small set of shared primitives that repeat across the whole framework.
|
||||
|
||||
The current module path is `dappco.re/go/core`.
|
||||
|
||||
Start with [RFC.md](RFC.md) for the full API contract and [../RFC-025-AGENT-EXPERIENCE.md](../RFC-025-AGENT-EXPERIENCE.md) for the AX design rules that shape it.
|
||||
|
||||
## AX View
|
||||
|
||||
CoreGO already follows the main AX ideas from RFC-025:
|
||||
|
||||
- predictable names such as `Core`, `Service`, `Command`, `Options`, `Result`, `Message`
|
||||
- path-shaped command registration such as `deploy/to/homelab`
|
||||
- one repeated input shape (`Options`) and one repeated return shape (`Result`)
|
||||
- comments and examples that show real usage instead of restating the type signature
|
||||
|
||||
## What CoreGO Owns
|
||||
|
||||
| Surface | Purpose |
|
||||
|---------|---------|
|
||||
| `Core` | Central container and access point |
|
||||
| `Service` | Managed lifecycle component |
|
||||
| `Command` | Path-based command tree node |
|
||||
| `ACTION`, `QUERY`, `PERFORM` | Decoupled communication between components |
|
||||
| `Data`, `Drive`, `Fs`, `Config`, `I18n`, `Cli` | Built-in subsystems for common runtime work |
|
||||
| `E`, `Wrap`, `ErrorLog`, `ErrorPanic` | Structured failures and panic recovery |
|
||||
|
||||
## Quick Example
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"dappco.re/go/core"
|
||||
)
|
||||
|
||||
type flushCacheTask struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
func main() {
|
||||
c := core.New(core.Options{
|
||||
{Key: "name", Value: "agent-workbench"},
|
||||
})
|
||||
|
||||
c.Service("cache", core.Service{
|
||||
OnStart: func() core.Result {
|
||||
core.Info("cache ready", "app", c.App().Name)
|
||||
return core.Result{OK: true}
|
||||
},
|
||||
OnStop: func() core.Result {
|
||||
core.Info("cache stopped", "app", c.App().Name)
|
||||
return core.Result{OK: true}
|
||||
},
|
||||
})
|
||||
|
||||
c.RegisterTask(func(_ *core.Core, task core.Task) core.Result {
|
||||
switch task.(type) {
|
||||
case flushCacheTask:
|
||||
return core.Result{Value: "cache flushed", OK: true}
|
||||
}
|
||||
return core.Result{}
|
||||
})
|
||||
|
||||
c.Command("cache/flush", core.Command{
|
||||
Action: func(opts core.Options) core.Result {
|
||||
return c.PERFORM(flushCacheTask{Name: opts.String("name")})
|
||||
},
|
||||
})
|
||||
|
||||
if !c.ServiceStartup(context.Background(), nil).OK {
|
||||
panic("startup failed")
|
||||
}
|
||||
|
||||
r := c.Cli().Run("cache", "flush", "--name=session-store")
|
||||
fmt.Println(r.Value)
|
||||
|
||||
_ = c.ServiceShutdown(context.Background())
|
||||
}
|
||||
```
|
||||
|
||||
## Documentation Paths
|
||||
|
||||
| Path | Covers |
|
||||
|------|--------|
|
||||
| [getting-started.md](getting-started.md) | First runnable CoreGO app |
|
||||
| [primitives.md](primitives.md) | `Options`, `Result`, `Service`, `Message`, `Query`, `Task` |
|
||||
| [services.md](services.md) | Service registry, service locks, runtime helpers |
|
||||
| [commands.md](commands.md) | Path-based commands and CLI execution |
|
||||
| [messaging.md](messaging.md) | `ACTION`, `QUERY`, `QUERYALL`, `PERFORM`, `PerformAsync` |
|
||||
| [lifecycle.md](lifecycle.md) | Startup, shutdown, context, background task draining |
|
||||
| [configuration.md](configuration.md) | Constructor options, config state, feature flags |
|
||||
| [subsystems.md](subsystems.md) | `App`, `Data`, `Drive`, `Fs`, `I18n`, `Cli` |
|
||||
| [errors.md](errors.md) | Structured errors, logging helpers, panic recovery |
|
||||
| [testing.md](testing.md) | Test naming and framework-level testing patterns |
|
||||
| [pkg/core.md](pkg/core.md) | Package-level reference summary |
|
||||
| [pkg/log.md](pkg/log.md) | Logging reference for the root package |
|
||||
| [pkg/PACKAGE_STANDARDS.md](pkg/PACKAGE_STANDARDS.md) | AX package-authoring guidance |
|
||||
|
||||
## Good Reading Order
|
||||
|
||||
1. Start with [getting-started.md](getting-started.md).
|
||||
2. Learn the repeated shapes in [primitives.md](primitives.md).
|
||||
3. Pick the integration path you need next: [services.md](services.md), [commands.md](commands.md), or [messaging.md](messaging.md).
|
||||
4. Use [subsystems.md](subsystems.md), [errors.md](errors.md), and [testing.md](testing.md) as reference pages while building.
|
||||
111
.core/reference/docs/lifecycle.md
Normal file
111
.core/reference/docs/lifecycle.md
Normal file
|
|
@ -0,0 +1,111 @@
|
|||
---
|
||||
title: Lifecycle
|
||||
description: Startup, shutdown, context ownership, and background task draining.
|
||||
---
|
||||
|
||||
# Lifecycle
|
||||
|
||||
CoreGO manages lifecycle through `core.Service` callbacks, not through reflection or implicit interfaces.
|
||||
|
||||
## Service Hooks
|
||||
|
||||
```go
|
||||
c.Service("cache", core.Service{
|
||||
OnStart: func() core.Result {
|
||||
return core.Result{OK: true}
|
||||
},
|
||||
OnStop: func() core.Result {
|
||||
return core.Result{OK: true}
|
||||
},
|
||||
})
|
||||
```
|
||||
|
||||
Only services with `OnStart` appear in `Startables()`. Only services with `OnStop` appear in `Stoppables()`.
|
||||
|
||||
## `ServiceStartup`
|
||||
|
||||
```go
|
||||
r := c.ServiceStartup(context.Background(), nil)
|
||||
```
|
||||
|
||||
### What It Does
|
||||
|
||||
1. clears the shutdown flag
|
||||
2. stores a new cancellable context on `c.Context()`
|
||||
3. runs each `OnStart`
|
||||
4. broadcasts `ActionServiceStartup{}`
|
||||
|
||||
### Failure Behavior
|
||||
|
||||
- if the input context is already cancelled, startup returns that error
|
||||
- if any `OnStart` returns `OK:false`, startup stops immediately and returns that result
|
||||
|
||||
## `ServiceShutdown`
|
||||
|
||||
```go
|
||||
r := c.ServiceShutdown(context.Background())
|
||||
```
|
||||
|
||||
### What It Does
|
||||
|
||||
1. sets the shutdown flag
|
||||
2. cancels `c.Context()`
|
||||
3. broadcasts `ActionServiceShutdown{}`
|
||||
4. waits for background tasks created by `PerformAsync`
|
||||
5. runs each `OnStop`
|
||||
|
||||
### Failure Behavior
|
||||
|
||||
- if draining background tasks hits the shutdown context deadline, shutdown returns that context error
|
||||
- when service stop hooks fail, CoreGO returns the first error it sees
|
||||
|
||||
## Ordering
|
||||
|
||||
The current implementation builds `Startables()` and `Stoppables()` by iterating over a map-backed registry.
|
||||
|
||||
That means lifecycle order is not guaranteed today.
|
||||
|
||||
If your application needs strict startup or shutdown ordering, orchestrate it explicitly inside a smaller number of service callbacks instead of relying on registry order.
|
||||
|
||||
## `c.Context()`
|
||||
|
||||
`ServiceStartup` creates the context returned by `c.Context()`.
|
||||
|
||||
Use it for background work that should stop when the application shuts down:
|
||||
|
||||
```go
|
||||
c.Service("watcher", core.Service{
|
||||
OnStart: func() core.Result {
|
||||
go func(ctx context.Context) {
|
||||
<-ctx.Done()
|
||||
}(c.Context())
|
||||
return core.Result{OK: true}
|
||||
},
|
||||
})
|
||||
```
|
||||
|
||||
## Built-In Lifecycle Actions
|
||||
|
||||
You can listen for lifecycle state changes through the action bus.
|
||||
|
||||
```go
|
||||
c.RegisterAction(func(_ *core.Core, msg core.Message) core.Result {
|
||||
switch msg.(type) {
|
||||
case core.ActionServiceStartup:
|
||||
core.Info("core startup completed")
|
||||
case core.ActionServiceShutdown:
|
||||
core.Info("core shutdown started")
|
||||
}
|
||||
return core.Result{OK: true}
|
||||
})
|
||||
```
|
||||
|
||||
## Background Task Draining
|
||||
|
||||
`ServiceShutdown` waits for the internal task waitgroup to finish before calling stop hooks.
|
||||
|
||||
This is what makes `PerformAsync` safe for long-running work that should complete before teardown.
|
||||
|
||||
## `OnReload`
|
||||
|
||||
`Service` includes an `OnReload` callback field, but CoreGO does not currently expose a top-level lifecycle runner for reload operations.
|
||||
171
.core/reference/docs/messaging.md
Normal file
171
.core/reference/docs/messaging.md
Normal file
|
|
@ -0,0 +1,171 @@
|
|||
---
|
||||
title: Messaging
|
||||
description: ACTION, QUERY, QUERYALL, PERFORM, and async task flow.
|
||||
---
|
||||
|
||||
# Messaging
|
||||
|
||||
CoreGO uses one message bus for broadcasts, lookups, and work dispatch.
|
||||
|
||||
## Message Types
|
||||
|
||||
```go
|
||||
type Message any
|
||||
type Query any
|
||||
type Task any
|
||||
```
|
||||
|
||||
Your own structs define the protocol.
|
||||
|
||||
```go
|
||||
type repositoryIndexed struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
type repositoryCountQuery struct{}
|
||||
|
||||
type syncRepositoryTask struct {
|
||||
Name string
|
||||
}
|
||||
```
|
||||
|
||||
## `ACTION`
|
||||
|
||||
`ACTION` is a broadcast.
|
||||
|
||||
```go
|
||||
c.RegisterAction(func(_ *core.Core, msg core.Message) core.Result {
|
||||
switch m := msg.(type) {
|
||||
case repositoryIndexed:
|
||||
core.Info("repository indexed", "name", m.Name)
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
return core.Result{OK: true}
|
||||
})
|
||||
|
||||
r := c.ACTION(repositoryIndexed{Name: "core-go"})
|
||||
```
|
||||
|
||||
### Behavior
|
||||
|
||||
- all registered action handlers are called in their current registration order
|
||||
- if a handler returns `OK:false`, dispatch stops and that `Result` is returned
|
||||
- if no handler fails, `ACTION` returns `Result{OK:true}`
|
||||
|
||||
## `QUERY`
|
||||
|
||||
`QUERY` is first-match request-response.
|
||||
|
||||
```go
|
||||
c.RegisterQuery(func(_ *core.Core, q core.Query) core.Result {
|
||||
switch q.(type) {
|
||||
case repositoryCountQuery:
|
||||
return core.Result{Value: 42, OK: true}
|
||||
}
|
||||
return core.Result{}
|
||||
})
|
||||
|
||||
r := c.QUERY(repositoryCountQuery{})
|
||||
```
|
||||
|
||||
### Behavior
|
||||
|
||||
- handlers run until one returns `OK:true`
|
||||
- the first successful result wins
|
||||
- if nothing handles the query, CoreGO returns an empty `Result`
|
||||
|
||||
## `QUERYALL`
|
||||
|
||||
`QUERYALL` collects every successful response whose `Value` is non-nil.
|
||||
|
||||
```go
|
||||
r := c.QUERYALL(repositoryCountQuery{})
|
||||
results := r.Value.([]any)
|
||||
```
|
||||
|
||||
### Behavior
|
||||
|
||||
- every query handler is called
|
||||
- only `OK:true` results with non-nil `Value` are collected
|
||||
- the call itself returns `OK:true` even when the result list is empty
|
||||
|
||||
## `PERFORM`
|
||||
|
||||
`PERFORM` dispatches a task to the first handler that accepts it.
|
||||
|
||||
```go
|
||||
c.RegisterTask(func(_ *core.Core, t core.Task) core.Result {
|
||||
switch task := t.(type) {
|
||||
case syncRepositoryTask:
|
||||
return core.Result{Value: "synced " + task.Name, OK: true}
|
||||
}
|
||||
return core.Result{}
|
||||
})
|
||||
|
||||
r := c.PERFORM(syncRepositoryTask{Name: "core-go"})
|
||||
```
|
||||
|
||||
### Behavior
|
||||
|
||||
- handlers run until one returns `OK:true`
|
||||
- the first successful result wins
|
||||
- if nothing handles the task, CoreGO returns an empty `Result`
|
||||
|
||||
## `PerformAsync`
|
||||
|
||||
`PerformAsync` runs a task in a background goroutine and returns a generated task identifier.
|
||||
|
||||
```go
|
||||
r := c.PerformAsync(syncRepositoryTask{Name: "core-go"})
|
||||
taskID := r.Value.(string)
|
||||
```
|
||||
|
||||
### Generated Events
|
||||
|
||||
Async execution emits three action messages:
|
||||
|
||||
| Message | When |
|
||||
|---------|------|
|
||||
| `ActionTaskStarted` | just before background execution begins |
|
||||
| `ActionTaskProgress` | whenever `Progress` is called |
|
||||
| `ActionTaskCompleted` | after the task finishes or panics |
|
||||
|
||||
Example listener:
|
||||
|
||||
```go
|
||||
c.RegisterAction(func(_ *core.Core, msg core.Message) core.Result {
|
||||
switch m := msg.(type) {
|
||||
case core.ActionTaskCompleted:
|
||||
core.Info("task completed", "task", m.TaskIdentifier, "err", m.Error)
|
||||
}
|
||||
return core.Result{OK: true}
|
||||
})
|
||||
```
|
||||
|
||||
## Progress Updates
|
||||
|
||||
```go
|
||||
c.Progress(taskID, 0.5, "indexing commits", syncRepositoryTask{Name: "core-go"})
|
||||
```
|
||||
|
||||
That broadcasts `ActionTaskProgress`.
|
||||
|
||||
## `TaskWithIdentifier`
|
||||
|
||||
Tasks that implement `TaskWithIdentifier` receive the generated ID before dispatch.
|
||||
|
||||
```go
|
||||
type trackedTask struct {
|
||||
ID string
|
||||
Name string
|
||||
}
|
||||
|
||||
func (t *trackedTask) SetTaskIdentifier(id string) { t.ID = id }
|
||||
func (t *trackedTask) GetTaskIdentifier() string { return t.ID }
|
||||
```
|
||||
|
||||
## Shutdown Interaction
|
||||
|
||||
When shutdown has started, `PerformAsync` returns an empty `Result` instead of scheduling more work.
|
||||
|
||||
This is why `ServiceShutdown` can safely drain the outstanding background tasks before stopping services.
|
||||
138
.core/reference/docs/pkg/PACKAGE_STANDARDS.md
Normal file
138
.core/reference/docs/pkg/PACKAGE_STANDARDS.md
Normal file
|
|
@ -0,0 +1,138 @@
|
|||
# AX Package Standards
|
||||
|
||||
This page describes how to build packages on top of CoreGO in the style described by RFC-025.
|
||||
|
||||
## 1. Prefer Predictable Names
|
||||
|
||||
Use names that tell an agent what the thing is without translation.
|
||||
|
||||
Good:
|
||||
|
||||
- `RepositoryService`
|
||||
- `RepositoryServiceOptions`
|
||||
- `WorkspaceCountQuery`
|
||||
- `SyncRepositoryTask`
|
||||
|
||||
Avoid shortening names unless the abbreviation is already universal.
|
||||
|
||||
## 2. Put Real Usage in Comments
|
||||
|
||||
Write comments that show a real call with realistic values.
|
||||
|
||||
Good:
|
||||
|
||||
```go
|
||||
// Sync a repository into the local workspace cache.
|
||||
// svc.SyncRepository("core-go", "/srv/repos/core-go")
|
||||
```
|
||||
|
||||
Avoid comments that only repeat the signature.
|
||||
|
||||
## 3. Keep Paths Semantic
|
||||
|
||||
If a command or template lives at a path, let the path explain the intent.
|
||||
|
||||
Good:
|
||||
|
||||
```text
|
||||
deploy/to/homelab
|
||||
workspace/create
|
||||
template/workspace/go
|
||||
```
|
||||
|
||||
That keeps the CLI, tests, docs, and message vocabulary aligned.
|
||||
|
||||
## 4. Reuse CoreGO Primitives
|
||||
|
||||
At Core boundaries, prefer the shared shapes:
|
||||
|
||||
- `core.Options` for lightweight input
|
||||
- `core.Result` for output
|
||||
- `core.Service` for lifecycle registration
|
||||
- `core.Message`, `core.Query`, `core.Task` for bus protocols
|
||||
|
||||
Inside your package, typed structs are still good. Use `ServiceRuntime[T]` when you want typed package options plus a `Core` reference.
|
||||
|
||||
```go
|
||||
type repositoryServiceOptions struct {
|
||||
BaseDirectory string
|
||||
}
|
||||
|
||||
type repositoryService struct {
|
||||
*core.ServiceRuntime[repositoryServiceOptions]
|
||||
}
|
||||
```
|
||||
|
||||
## 5. Prefer Explicit Registration
|
||||
|
||||
Register services and commands with names and paths that stay readable in grep results.
|
||||
|
||||
```go
|
||||
c.Service("repository", core.Service{...})
|
||||
c.Command("repository/sync", core.Command{...})
|
||||
```
|
||||
|
||||
## 6. Use the Bus for Decoupling
|
||||
|
||||
When one package needs another package’s behavior, prefer queries and tasks over tight package coupling.
|
||||
|
||||
```go
|
||||
type repositoryCountQuery struct{}
|
||||
type syncRepositoryTask struct {
|
||||
Name string
|
||||
}
|
||||
```
|
||||
|
||||
That keeps the protocol visible in code and easy for agents to follow.
|
||||
|
||||
## 7. Use Structured Errors
|
||||
|
||||
Use `core.E`, `core.Wrap`, and `core.WrapCode`.
|
||||
|
||||
```go
|
||||
return core.Result{
|
||||
Value: core.E("repository.Sync", "git fetch failed", err),
|
||||
OK: false,
|
||||
}
|
||||
```
|
||||
|
||||
Do not introduce free-form `fmt.Errorf` chains in framework code.
|
||||
|
||||
## 8. Keep Testing Names Predictable
|
||||
|
||||
Follow the repository pattern:
|
||||
|
||||
- `_Good`
|
||||
- `_Bad`
|
||||
- `_Ugly`
|
||||
|
||||
Example:
|
||||
|
||||
```go
|
||||
func TestRepositorySync_Good(t *testing.T) {}
|
||||
func TestRepositorySync_Bad(t *testing.T) {}
|
||||
func TestRepositorySync_Ugly(t *testing.T) {}
|
||||
```
|
||||
|
||||
## 9. Prefer Stable Shapes Over Clever APIs
|
||||
|
||||
For package APIs, avoid patterns that force an agent to infer too much hidden control flow.
|
||||
|
||||
Prefer:
|
||||
|
||||
- clear structs
|
||||
- explicit names
|
||||
- path-based commands
|
||||
- visible message types
|
||||
|
||||
Avoid:
|
||||
|
||||
- implicit global state unless it is truly a default service
|
||||
- panic-hiding constructors
|
||||
- dense option chains when a small explicit struct would do
|
||||
|
||||
## 10. Document the Current Reality
|
||||
|
||||
If the implementation is in transition, document what the code does now, not the API shape you plan to have later.
|
||||
|
||||
That keeps agents correct on first pass, which is the real AX metric.
|
||||
81
.core/reference/docs/pkg/core.md
Normal file
81
.core/reference/docs/pkg/core.md
Normal file
|
|
@ -0,0 +1,81 @@
|
|||
# Package Reference: `core`
|
||||
|
||||
Import path:
|
||||
|
||||
```go
|
||||
import "dappco.re/go/core"
|
||||
```
|
||||
|
||||
This repository exposes one root package. The main areas are:
|
||||
|
||||
## Constructors and Accessors
|
||||
|
||||
| Name | Purpose |
|
||||
|------|---------|
|
||||
| `New` | Create a `*Core` |
|
||||
| `NewRuntime` | Create an empty runtime wrapper |
|
||||
| `NewWithFactories` | Create a runtime wrapper from named service factories |
|
||||
| `Options`, `App`, `Data`, `Drive`, `Fs`, `Config`, `Error`, `Log`, `Cli`, `IPC`, `I18n`, `Context` | Access the built-in subsystems |
|
||||
|
||||
## Core Primitives
|
||||
|
||||
| Name | Purpose |
|
||||
|------|---------|
|
||||
| `Option`, `Options` | Input configuration and metadata |
|
||||
| `Result` | Shared output shape |
|
||||
| `Service` | Lifecycle DTO |
|
||||
| `Command` | Command tree node |
|
||||
| `Message`, `Query`, `Task` | Message bus payload types |
|
||||
|
||||
## Service and Runtime APIs
|
||||
|
||||
| Name | Purpose |
|
||||
|------|---------|
|
||||
| `Service` | Register or read a named service |
|
||||
| `Services` | List registered service names |
|
||||
| `Startables`, `Stoppables` | Snapshot lifecycle-capable services |
|
||||
| `LockEnable`, `LockApply` | Activate the service registry lock |
|
||||
| `ServiceRuntime[T]` | Helper for package authors |
|
||||
|
||||
## Command and CLI APIs
|
||||
|
||||
| Name | Purpose |
|
||||
|------|---------|
|
||||
| `Command` | Register or read a command by path |
|
||||
| `Commands` | List command paths |
|
||||
| `Cli().Run` | Resolve arguments to a command and execute it |
|
||||
| `Cli().PrintHelp` | Show executable commands |
|
||||
|
||||
## Messaging APIs
|
||||
|
||||
| Name | Purpose |
|
||||
|------|---------|
|
||||
| `ACTION`, `Action` | Broadcast a message |
|
||||
| `QUERY`, `Query` | Return the first successful query result |
|
||||
| `QUERYALL`, `QueryAll` | Collect all successful query results |
|
||||
| `PERFORM`, `Perform` | Run the first task handler that accepts the task |
|
||||
| `PerformAsync` | Run a task in the background |
|
||||
| `Progress` | Broadcast async task progress |
|
||||
| `RegisterAction`, `RegisterActions`, `RegisterQuery`, `RegisterTask` | Register bus handlers |
|
||||
|
||||
## Subsystems
|
||||
|
||||
| Name | Purpose |
|
||||
|------|---------|
|
||||
| `Config` | Runtime settings and feature flags |
|
||||
| `Data` | Embedded filesystem mounts |
|
||||
| `Drive` | Named transport handles |
|
||||
| `Fs` | Local filesystem operations |
|
||||
| `I18n` | Locale collection and translation delegation |
|
||||
| `App`, `Find` | Application identity and executable lookup |
|
||||
|
||||
## Errors and Logging
|
||||
|
||||
| Name | Purpose |
|
||||
|------|---------|
|
||||
| `E`, `Wrap`, `WrapCode`, `NewCode` | Structured error creation |
|
||||
| `Operation`, `ErrorCode`, `ErrorMessage`, `Root`, `StackTrace`, `FormatStackTrace` | Error inspection |
|
||||
| `NewLog`, `Default`, `SetDefault`, `SetLevel`, `SetRedactKeys` | Logger creation and defaults |
|
||||
| `LogErr`, `LogPanic`, `ErrorLog`, `ErrorPanic` | Error-aware logging and panic recovery |
|
||||
|
||||
Use the top-level docs in `docs/` for task-oriented guidance, then use this page as a compact reference.
|
||||
83
.core/reference/docs/pkg/log.md
Normal file
83
.core/reference/docs/pkg/log.md
Normal file
|
|
@ -0,0 +1,83 @@
|
|||
# Logging Reference
|
||||
|
||||
Logging lives in the root `core` package in this repository. There is no separate `pkg/log` import path here.
|
||||
|
||||
## Create a Logger
|
||||
|
||||
```go
|
||||
logger := core.NewLog(core.LogOptions{
|
||||
Level: core.LevelInfo,
|
||||
})
|
||||
```
|
||||
|
||||
## Levels
|
||||
|
||||
| Level | Meaning |
|
||||
|-------|---------|
|
||||
| `LevelQuiet` | no output |
|
||||
| `LevelError` | errors and security events |
|
||||
| `LevelWarn` | warnings, errors, security events |
|
||||
| `LevelInfo` | informational, warnings, errors, security events |
|
||||
| `LevelDebug` | everything |
|
||||
|
||||
## Log Methods
|
||||
|
||||
```go
|
||||
logger.Debug("workspace discovered", "path", "/srv/workspaces")
|
||||
logger.Info("service started", "service", "audit")
|
||||
logger.Warn("retrying fetch", "attempt", 2)
|
||||
logger.Error("fetch failed", "err", err)
|
||||
logger.Security("sandbox escape detected", "path", attemptedPath)
|
||||
```
|
||||
|
||||
## Default Logger
|
||||
|
||||
The package owns a default logger.
|
||||
|
||||
```go
|
||||
core.SetLevel(core.LevelDebug)
|
||||
core.SetRedactKeys("token", "password")
|
||||
|
||||
core.Info("service started", "service", "audit")
|
||||
```
|
||||
|
||||
## Redaction
|
||||
|
||||
Values for keys listed in `RedactKeys` are replaced with `[REDACTED]`.
|
||||
|
||||
```go
|
||||
logger.SetRedactKeys("token")
|
||||
logger.Info("login", "user", "cladius", "token", "secret-value")
|
||||
```
|
||||
|
||||
## Output and Rotation
|
||||
|
||||
```go
|
||||
logger := core.NewLog(core.LogOptions{
|
||||
Level: core.LevelInfo,
|
||||
Output: os.Stderr,
|
||||
})
|
||||
```
|
||||
|
||||
If you provide `Rotation` and set `RotationWriterFactory`, the logger writes to the rotating writer instead of the plain output stream.
|
||||
|
||||
## Error-Aware Logging
|
||||
|
||||
`LogErr` extracts structured error context before logging:
|
||||
|
||||
```go
|
||||
le := core.NewLogErr(logger)
|
||||
le.Log(err)
|
||||
```
|
||||
|
||||
`ErrorLog` is the log-and-return wrapper exposed through `c.Log()`.
|
||||
|
||||
## Panic-Aware Logging
|
||||
|
||||
`LogPanic` is the lightweight panic logger:
|
||||
|
||||
```go
|
||||
defer core.NewLogPanic(logger).Recover()
|
||||
```
|
||||
|
||||
It logs the recovered panic but does not manage crash files. For crash reports, use `c.Error().Recover()`.
|
||||
|
|
@ -0,0 +1,261 @@
|
|||
# Lint Pattern Catalog & Polish Skill Design
|
||||
|
||||
> **Partial implementation (14 Mar 2026):** Layer 1 (`core/lint` -- catalog, matcher, scanner, CLI) is fully implemented and documented at `docs/tools/lint/index.md`. Layer 2 (MCP subsystem in `go-ai`) and Layer 3 (Claude Code polish skill in `core/agent`) are NOT implemented. This plan is retained for those remaining layers.
|
||||
|
||||
**Goal:** A structured pattern catalog (`core/lint`) that captures recurring code quality findings as regex rules, exposes them via MCP tools in `go-ai`, and orchestrates multi-AI code review via a Claude Code skill in `core/agent`.
|
||||
|
||||
**Architecture:** Three layers — a standalone catalog+matcher library (`core/lint`), an MCP subsystem in `go-ai` that exposes lint tools to agents, and a Claude Code plugin in `core/agent` that orchestrates the "polish" workflow (deterministic checks + AI reviewers + feedback loop into the catalog).
|
||||
|
||||
**Tech Stack:** Go (catalog, matcher, CLI, MCP subsystem), YAML (rule definitions), JSONL (findings output, compatible with `~/.core/ai/metrics/`), Claude Code plugin format (hooks.json, commands/*.md, plugin.json).
|
||||
|
||||
---
|
||||
|
||||
## Context
|
||||
|
||||
During a code review sweep of 18 Go repos (March 2026), AI reviewers (Gemini, Claude) found ~20 recurring patterns: SQL injection, path traversal, XSS, missing constant-time comparison, goroutine leaks, Go 1.26 modernisation opportunities, and more. Many of these patterns repeat across repos.
|
||||
|
||||
Currently these findings exist only as commit messages. This design captures them as a reusable, machine-readable catalog that:
|
||||
1. Deterministic tools can run immediately (regex matching)
|
||||
2. MCP-connected agents can query and apply
|
||||
3. LEM models can train on for "does this comply with CoreGo standards?" judgements
|
||||
4. Grows automatically as AI reviewers find new patterns
|
||||
|
||||
## Layer 1: `core/lint` — Pattern Catalog & Matcher
|
||||
|
||||
### Repository Structure
|
||||
|
||||
```
|
||||
core/lint/
|
||||
├── go.mod # forge.lthn.ai/core/lint
|
||||
├── catalog/
|
||||
│ ├── go-security.yaml # SQL injection, path traversal, XSS, constant-time
|
||||
│ ├── go-modernise.yaml # Go 1.26: slices.Clone, wg.Go, maps.Keys, range-over-int
|
||||
│ ├── go-correctness.yaml # Deadlocks, goroutine leaks, nil guards, error handling
|
||||
│ ├── php-security.yaml # XSS, CSRF, mass assignment, SQL injection
|
||||
│ ├── ts-security.yaml # DOM XSS, prototype pollution
|
||||
│ └── cpp-safety.yaml # Buffer overflow, use-after-free
|
||||
├── pkg/lint/
|
||||
│ ├── catalog.go # Load + parse YAML catalog files
|
||||
│ ├── rule.go # Rule struct definition
|
||||
│ ├── matcher.go # Regex matcher against file contents
|
||||
│ ├── report.go # Structured findings output (JSON/JSONL/text)
|
||||
│ ├── catalog_test.go
|
||||
│ ├── matcher_test.go
|
||||
│ └── report_test.go
|
||||
├── cmd/core-lint/
|
||||
│ └── main.go # `core-lint check ./...` CLI
|
||||
└── .core/
|
||||
└── build.yaml # Produces core-lint binary
|
||||
```
|
||||
|
||||
### Rule Schema (YAML)
|
||||
|
||||
```yaml
|
||||
- id: go-sec-001
|
||||
title: "SQL wildcard injection in LIKE clauses"
|
||||
severity: high # critical, high, medium, low, info
|
||||
languages: [go]
|
||||
tags: [security, injection, owasp-a03]
|
||||
pattern: 'LIKE\s+\?\s*,\s*["\x60]%\s*\+'
|
||||
exclude_pattern: 'EscapeLike' # suppress if this also matches
|
||||
fix: "Use parameterised LIKE with explicit escaping of % and _ characters"
|
||||
found_in: [go-store] # repos where first discovered
|
||||
example_bad: |
|
||||
db.Where("name LIKE ?", "%"+input+"%")
|
||||
example_good: |
|
||||
db.Where("name LIKE ?", EscapeLike(input))
|
||||
first_seen: "2026-03-09"
|
||||
detection: regex # future: ast, semantic
|
||||
auto_fixable: false # future: true when we add codemods
|
||||
```
|
||||
|
||||
### Rule Struct (Go)
|
||||
|
||||
```go
|
||||
type Rule struct {
|
||||
ID string `yaml:"id"`
|
||||
Title string `yaml:"title"`
|
||||
Severity string `yaml:"severity"`
|
||||
Languages []string `yaml:"languages"`
|
||||
Tags []string `yaml:"tags"`
|
||||
Pattern string `yaml:"pattern"`
|
||||
ExcludePattern string `yaml:"exclude_pattern,omitempty"`
|
||||
Fix string `yaml:"fix"`
|
||||
FoundIn []string `yaml:"found_in,omitempty"`
|
||||
ExampleBad string `yaml:"example_bad,omitempty"`
|
||||
ExampleGood string `yaml:"example_good,omitempty"`
|
||||
FirstSeen string `yaml:"first_seen"`
|
||||
Detection string `yaml:"detection"` // regex | ast | semantic
|
||||
AutoFixable bool `yaml:"auto_fixable"`
|
||||
}
|
||||
```
|
||||
|
||||
### Finding Struct (Go)
|
||||
|
||||
Designed to align with go-ai's `ScanAlert` shape and `~/.core/ai/metrics/` JSONL format:
|
||||
|
||||
```go
|
||||
type Finding struct {
|
||||
RuleID string `json:"rule_id"`
|
||||
Title string `json:"title"`
|
||||
Severity string `json:"severity"`
|
||||
File string `json:"file"`
|
||||
Line int `json:"line"`
|
||||
Match string `json:"match"` // matched text
|
||||
Fix string `json:"fix"`
|
||||
Repo string `json:"repo,omitempty"`
|
||||
}
|
||||
```
|
||||
|
||||
### CLI Interface
|
||||
|
||||
```bash
|
||||
# Check current directory against all catalogs for detected languages
|
||||
core-lint check ./...
|
||||
|
||||
# Check specific languages/catalogs
|
||||
core-lint check --lang go --catalog go-security ./pkg/...
|
||||
|
||||
# Output as JSON (for piping to other tools)
|
||||
core-lint check --format json ./...
|
||||
|
||||
# List available rules
|
||||
core-lint catalog list
|
||||
core-lint catalog list --lang go --severity high
|
||||
|
||||
# Show a specific rule with examples
|
||||
core-lint catalog show go-sec-001
|
||||
```
|
||||
|
||||
## Layer 2: `go-ai` Lint MCP Subsystem
|
||||
|
||||
New subsystem registered alongside files/rag/ml/brain:
|
||||
|
||||
```go
|
||||
type LintSubsystem struct {
|
||||
catalog *lint.Catalog
|
||||
root string // workspace root for scanning
|
||||
}
|
||||
|
||||
func (s *LintSubsystem) Name() string { return "lint" }
|
||||
|
||||
func (s *LintSubsystem) RegisterTools(server *mcp.Server) {
|
||||
// lint_check - run rules against workspace files
|
||||
// lint_catalog - list/search available rules
|
||||
// lint_report - get findings summary for a path
|
||||
}
|
||||
```
|
||||
|
||||
### MCP Tools
|
||||
|
||||
| Tool | Input | Output | Group |
|
||||
|------|-------|--------|-------|
|
||||
| `lint_check` | `{path: string, lang?: string, severity?: string}` | `{findings: []Finding}` | lint |
|
||||
| `lint_catalog` | `{lang?: string, tags?: []string, severity?: string}` | `{rules: []Rule}` | lint |
|
||||
| `lint_report` | `{path: string, format?: "summary" or "detailed"}` | `{summary: ReportSummary}` | lint |
|
||||
|
||||
This means any MCP-connected agent (Claude, LEM, Codex) can call `lint_check` to scan code against the catalog.
|
||||
|
||||
## Layer 3: `core/agent` Polish Skill
|
||||
|
||||
Claude Code plugin at `core/agent/claude/polish/`:
|
||||
|
||||
```
|
||||
core/agent/claude/polish/
|
||||
├── plugin.json
|
||||
├── hooks.json # optional: PostToolUse after git commit
|
||||
├── commands/
|
||||
│ └── polish.md # /polish slash command
|
||||
└── scripts/
|
||||
└── run-lint.sh # shells out to core-lint
|
||||
```
|
||||
|
||||
### `/polish` Command Flow
|
||||
|
||||
1. Run `core-lint check ./...` for fast deterministic findings
|
||||
2. Report findings to user
|
||||
3. Optionally run AI reviewers (Gemini CLI, Codex) for deeper analysis
|
||||
4. Deduplicate AI findings against catalog (already-known patterns)
|
||||
5. Propose new patterns as catalog additions (PR to core/lint)
|
||||
|
||||
### Subagent Configuration (`.core/agents/`)
|
||||
|
||||
Repos can configure polish behaviour:
|
||||
|
||||
```yaml
|
||||
# any-repo/.core/agents/polish.yaml
|
||||
languages: [go]
|
||||
catalogs: [go-security, go-modernise, go-correctness]
|
||||
reviewers: [gemini] # which AI tools to invoke
|
||||
exclude: [vendor/, testdata/, "*_test.go"]
|
||||
severity_threshold: medium # only report medium+ findings
|
||||
```
|
||||
|
||||
## Findings to LEM Pipeline
|
||||
|
||||
```
|
||||
core-lint check -> findings.json
|
||||
|
|
||||
v
|
||||
~/.core/ai/metrics/YYYY-MM-DD.jsonl (audit trail)
|
||||
|
|
||||
v
|
||||
LEM training data:
|
||||
- Rule examples (bad/good pairs) -> supervised training signal
|
||||
- Finding frequency -> pattern importance weighting
|
||||
- Rule descriptions -> natural language understanding of "why"
|
||||
|
|
||||
v
|
||||
LEM tool: "does this code comply with CoreGo standards?"
|
||||
-> queries lint_catalog via MCP
|
||||
-> applies learned pattern recognition
|
||||
-> reports violations with rule IDs and fixes
|
||||
```
|
||||
|
||||
## Initial Catalog Seed
|
||||
|
||||
From the March 2026 ecosystem sweep:
|
||||
|
||||
| ID | Title | Severity | Language | Found In |
|
||||
|----|-------|----------|----------|----------|
|
||||
| go-sec-001 | SQL wildcard injection | high | go | go-store |
|
||||
| go-sec-002 | Path traversal in cache keys | high | go | go-cache |
|
||||
| go-sec-003 | XSS in HTML output | high | go | go-html |
|
||||
| go-sec-004 | Non-constant-time auth comparison | high | go | go-crypt |
|
||||
| go-sec-005 | Log injection via unescaped input | medium | go | go-log |
|
||||
| go-sec-006 | Key material in log output | high | go | go-log |
|
||||
| go-cor-001 | Goroutine leak (no WaitGroup) | high | go | core/go |
|
||||
| go-cor-002 | Shutdown deadlock (wg.Wait no timeout) | high | go | core/go |
|
||||
| go-cor-003 | Silent error swallowing | medium | go | go-process, go-ratelimit |
|
||||
| go-cor-004 | Panic in library code | medium | go | go-i18n |
|
||||
| go-cor-005 | Delete without path validation | high | go | go-io |
|
||||
| go-mod-001 | Manual slice clone (append nil pattern) | low | go | core/go |
|
||||
| go-mod-002 | Manual sort instead of slices.Sorted | low | go | core/go |
|
||||
| go-mod-003 | Manual reverse loop instead of slices.Backward | low | go | core/go |
|
||||
| go-mod-004 | sync.WaitGroup Add+Done instead of Go() | low | go | core/go |
|
||||
| go-mod-005 | Manual map key collection instead of maps.Keys | low | go | core/go |
|
||||
| go-cor-006 | Missing error return from API calls | medium | go | go-forge, go-git |
|
||||
| go-cor-007 | Signal handler uses wrong type | medium | go | go-process |
|
||||
|
||||
## Dependencies
|
||||
|
||||
```
|
||||
core/lint (standalone, zero core deps)
|
||||
^
|
||||
|
|
||||
go-ai/mcp/lint/ (imports core/lint for catalog + matcher)
|
||||
^
|
||||
|
|
||||
core/agent/claude/polish/ (shells out to core-lint CLI)
|
||||
```
|
||||
|
||||
`core/lint` has no dependency on `core/go` or any other framework module. It is a standalone library + CLI, like `core/go-io`.
|
||||
|
||||
## Future Extensions (Not Built Now)
|
||||
|
||||
- **AST-based detection** (layer 2): Parse Go/PHP AST, match structural patterns
|
||||
- **Semantic detection** (layer 3): LEM judges code against rule descriptions
|
||||
- **Auto-fix codemods**: `core-lint fix` applies known fixes automatically
|
||||
- **CI integration**: GitHub Actions workflow runs `core-lint check` on PRs
|
||||
- **CodeRabbit integration**: Import CodeRabbit findings as catalog entries
|
||||
- **Cross-repo dashboard**: Aggregate findings across all repos in workspace
|
||||
1668
.core/reference/docs/plans/2026-03-09-lint-pattern-catalog-plan.md
Normal file
1668
.core/reference/docs/plans/2026-03-09-lint-pattern-catalog-plan.md
Normal file
File diff suppressed because it is too large
Load diff
|
|
@ -0,0 +1,160 @@
|
|||
# AltumCode Update Checker — Design
|
||||
|
||||
> **Note:** Layer 1 (version detection via PHP artisan) is implemented and documented at `docs/docs/php/packages/uptelligence.md`. Layer 2 (browser-automated downloads via Claude Code skill) is NOT yet implemented.
|
||||
|
||||
## Problem
|
||||
|
||||
Host UK runs 4 AltumCode SaaS products and 13 plugins across two marketplaces (CodeCanyon + LemonSqueezy). Checking for updates and downloading them is a manual process: ~50 clicks across two marketplace UIs, moving 16+ zip files, extracting to the right directories. This eats a morning of momentum every update cycle.
|
||||
|
||||
## Solution
|
||||
|
||||
Two-layer system: lightweight version detection (PHP artisan command) + browser-automated download (Claude Code skill).
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
Layer 1: Detection (core/php-uptelligence)
|
||||
artisan uptelligence:check-updates
|
||||
5 HTTP GETs, no auth, schedulable
|
||||
Compares remote vs deployed versions
|
||||
|
||||
Layer 2: Download (Claude Code skill)
|
||||
Playwright → LemonSqueezy (16 items)
|
||||
Claude in Chrome → CodeCanyon (2 items)
|
||||
Downloads zips to staging folder
|
||||
Extracts to saas/services/{product}/package/
|
||||
|
||||
Layer 3: Deploy (existing — manual)
|
||||
docker build → scp → deploy_saas.yml
|
||||
Human in the loop
|
||||
```
|
||||
|
||||
## Layer 1: Version Detection
|
||||
|
||||
### Public Endpoints (no auth required)
|
||||
|
||||
| Endpoint | Returns |
|
||||
|----------|---------|
|
||||
| `GET https://66analytics.com/info.php` | `{"latest_release_version": "66.0.0", "latest_release_version_code": 6600}` |
|
||||
| `GET https://66biolinks.com/info.php` | Same format |
|
||||
| `GET https://66pusher.com/info.php` | Same format |
|
||||
| `GET https://66socialproof.com/info.php` | Same format |
|
||||
| `GET https://dev.altumcode.com/plugins-versions` | `{"affiliate": {"version": "2.0.1"}, "ultimate-blocks": {"version": "9.1.0"}, ...}` |
|
||||
|
||||
### Deployed Version Sources
|
||||
|
||||
- **Product version**: `PRODUCT_CODE` constant in deployed source `config.php`
|
||||
- **Plugin versions**: `version` field in each plugin's `config.php` or `config.json`
|
||||
|
||||
### Artisan Command
|
||||
|
||||
`php artisan uptelligence:check-updates`
|
||||
|
||||
Output:
|
||||
```
|
||||
Product Deployed Latest Status
|
||||
──────────────────────────────────────────────
|
||||
66analytics 65.0.0 66.0.0 UPDATE AVAILABLE
|
||||
66biolinks 65.0.0 66.0.0 UPDATE AVAILABLE
|
||||
66pusher 65.0.0 65.0.0 ✓ current
|
||||
66socialproof 65.0.0 66.0.0 UPDATE AVAILABLE
|
||||
|
||||
Plugin Deployed Latest Status
|
||||
──────────────────────────────────────────────
|
||||
affiliate 2.0.0 2.0.1 UPDATE AVAILABLE
|
||||
ultimate-blocks 9.1.0 9.1.0 ✓ current
|
||||
...
|
||||
```
|
||||
|
||||
Lives in `core/php-uptelligence` as a scheduled check or on-demand command.
|
||||
|
||||
## Layer 2: Browser-Automated Download
|
||||
|
||||
### Claude Code Skill: `/update-altum`
|
||||
|
||||
Workflow:
|
||||
1. Run version check (Layer 1) — show what needs updating
|
||||
2. Ask for confirmation before downloading
|
||||
3. Download from both marketplaces
|
||||
4. Extract to staging directories
|
||||
5. Report what changed
|
||||
|
||||
### Marketplace Access
|
||||
|
||||
**LemonSqueezy (Playwright)**
|
||||
- Auth: Magic link email to `snider@lt.hn` — user taps on phone
|
||||
- Flow per item: Navigate to order detail → click "Download" button
|
||||
- 16 items across 2 pages of orders
|
||||
- Session persists for the skill invocation
|
||||
|
||||
**CodeCanyon (Claude in Chrome)**
|
||||
- Auth: Saved browser session cookies (user `snidered`)
|
||||
- Flow per item: Click "Download" dropdown → "All files & documentation"
|
||||
- 2 items on downloads page
|
||||
|
||||
### Product-to-Marketplace Mapping
|
||||
|
||||
| Product | CodeCanyon | LemonSqueezy |
|
||||
|---------|-----------|--------------|
|
||||
| 66biolinks | Regular licence | Extended licence (66biolinks custom, $359.28) |
|
||||
| 66socialproof | Regular licence | — |
|
||||
| 66analytics | — | Regular licence |
|
||||
| 66pusher | — | Regular licence |
|
||||
|
||||
### Plugin Inventory (all LemonSqueezy)
|
||||
|
||||
| Plugin | Price | Applies To |
|
||||
|--------|-------|------------|
|
||||
| Pro Notifications | $58.80 | 66socialproof |
|
||||
| Teams Plugin | $58.80 | All products |
|
||||
| Push Notifications Plugin | $46.80 | All products |
|
||||
| Ultimate Blocks | $32.40 | 66biolinks |
|
||||
| Pro Blocks | $32.40 | 66biolinks |
|
||||
| Payment Blocks | $32.40 | 66biolinks |
|
||||
| Affiliate Plugin | $32.40 | All products |
|
||||
| PWA Plugin | $25.20 | All products |
|
||||
| Image Optimizer Plugin | $19.20 | All products |
|
||||
| Email Shield Plugin | FREE | All products |
|
||||
| Dynamic OG images plugin | FREE | 66biolinks |
|
||||
| Offload & CDN Plugin | FREE | All products (gift from Altum) |
|
||||
|
||||
### Staging & Extraction
|
||||
|
||||
- Download to: `~/Code/lthn/saas/updates/YYYY-MM-DD/`
|
||||
- Products extract to: `~/Code/lthn/saas/services/{product}/package/product/`
|
||||
- Plugins extract to: `~/Code/lthn/saas/services/{product}/package/product/plugins/{plugin_id}/`
|
||||
|
||||
## LemonSqueezy Order UUIDs
|
||||
|
||||
Stable order URLs for direct navigation:
|
||||
|
||||
| Product | Order URL |
|
||||
|---------|-----------|
|
||||
| 66analytics | `/my-orders/2972471f-abac-4165-b78d-541b176de180` |
|
||||
|
||||
(Remaining UUIDs to be captured on first full run of the skill.)
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- No auto-deploy to production (human runs `deploy_saas.yml`)
|
||||
- No licence key handling or financial transactions
|
||||
- No AltumCode Club membership management
|
||||
- No Blesta updates (different vendor)
|
||||
- No update SQL migration execution (handled by AltumCode's own update scripts)
|
||||
|
||||
## Key Technical Details
|
||||
|
||||
- AltumCode products use Unirest HTTP client for API calls
|
||||
- Product `info.php` endpoints are public, no rate limiting observed
|
||||
- Plugin versions endpoint (`dev.altumcode.com`) is also public
|
||||
- Production Docker images have `/install/` and `/update/` directories stripped
|
||||
- Updates require full Docker image rebuild and redeployment via Ansible
|
||||
- CodeCanyon download URLs contain stable purchase UUIDs
|
||||
- LemonSqueezy uses magic link auth (no password, email-based)
|
||||
- Playwright can access LemonSqueezy; Claude in Chrome cannot (payment platform safety block)
|
||||
|
||||
## Workflow Summary
|
||||
|
||||
**Before**: Get email from AltumCode → log into 2 marketplaces → click through 18 products/plugins → download 16+ zips → extract to right directories → rebuild Docker images → deploy. Half a morning.
|
||||
|
||||
**After**: Run `artisan uptelligence:check-updates` → see what's behind → invoke `/update-altum` → tap magic link on phone → go make coffee → come back to staged files → `deploy_saas.yml`. 10 minutes of human time.
|
||||
|
|
@ -0,0 +1,799 @@
|
|||
# AltumCode Update Checker Implementation Plan
|
||||
|
||||
> **Note:** Layer 1 (Tasks 1-2, 4: version checking + seeder + sync command) is implemented and documented at `docs/docs/php/packages/uptelligence.md`. Task 3 (Claude Code browser skill for Layer 2 downloads) is NOT yet implemented.
|
||||
|
||||
> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task.
|
||||
|
||||
**Goal:** Add AltumCode product + plugin version checking to uptelligence, and create a Claude Code skill for browser-automated downloads from LemonSqueezy and CodeCanyon.
|
||||
|
||||
**Architecture:** Extend the existing `VendorUpdateCheckerService` to handle `PLATFORM_ALTUM` vendors via 5 public HTTP endpoints. Seed the vendors table with all 4 products and 13 plugins. Create a Claude Code plugin skill that uses Playwright (LemonSqueezy) and Chrome (CodeCanyon) to download updates.
|
||||
|
||||
**Tech Stack:** PHP 8.4, Laravel, Pest, Claude Code plugins (Playwright MCP + Chrome MCP)
|
||||
|
||||
---
|
||||
|
||||
### Task 1: Add AltumCode check to VendorUpdateCheckerService
|
||||
|
||||
**Files:**
|
||||
- Modify: `/Users/snider/Code/core/php-uptelligence/Services/VendorUpdateCheckerService.php`
|
||||
- Test: `/Users/snider/Code/core/php-uptelligence/tests/Unit/AltumCodeCheckerTest.php`
|
||||
|
||||
**Step 1: Write the failing test**
|
||||
|
||||
Create `/Users/snider/Code/core/php-uptelligence/tests/Unit/AltumCodeCheckerTest.php`:
|
||||
|
||||
```php
|
||||
<?php
|
||||
|
||||
declare(strict_types=1);
|
||||
|
||||
use Core\Mod\Uptelligence\Models\Vendor;
|
||||
use Core\Mod\Uptelligence\Services\VendorUpdateCheckerService;
|
||||
use Illuminate\Support\Facades\Http;
|
||||
|
||||
beforeEach(function () {
|
||||
$this->service = app(VendorUpdateCheckerService::class);
|
||||
});
|
||||
|
||||
it('checks altum product version via info.php', function () {
|
||||
Http::fake([
|
||||
'https://66analytics.com/info.php' => Http::response([
|
||||
'latest_release_version' => '66.0.0',
|
||||
'latest_release_version_code' => 6600,
|
||||
]),
|
||||
]);
|
||||
|
||||
$vendor = Vendor::factory()->create([
|
||||
'slug' => '66analytics',
|
||||
'name' => '66analytics',
|
||||
'source_type' => Vendor::SOURCE_LICENSED,
|
||||
'plugin_platform' => Vendor::PLATFORM_ALTUM,
|
||||
'current_version' => '65.0.0',
|
||||
'is_active' => true,
|
||||
]);
|
||||
|
||||
$result = $this->service->checkVendor($vendor);
|
||||
|
||||
expect($result['status'])->toBe('success')
|
||||
->and($result['current'])->toBe('65.0.0')
|
||||
->and($result['latest'])->toBe('66.0.0')
|
||||
->and($result['has_update'])->toBeTrue();
|
||||
});
|
||||
|
||||
it('reports no update when altum product is current', function () {
|
||||
Http::fake([
|
||||
'https://66analytics.com/info.php' => Http::response([
|
||||
'latest_release_version' => '65.0.0',
|
||||
'latest_release_version_code' => 6500,
|
||||
]),
|
||||
]);
|
||||
|
||||
$vendor = Vendor::factory()->create([
|
||||
'slug' => '66analytics',
|
||||
'name' => '66analytics',
|
||||
'source_type' => Vendor::SOURCE_LICENSED,
|
||||
'plugin_platform' => Vendor::PLATFORM_ALTUM,
|
||||
'current_version' => '65.0.0',
|
||||
'is_active' => true,
|
||||
]);
|
||||
|
||||
$result = $this->service->checkVendor($vendor);
|
||||
|
||||
expect($result['has_update'])->toBeFalse();
|
||||
});
|
||||
|
||||
it('checks altum plugin versions via plugins-versions endpoint', function () {
|
||||
Http::fake([
|
||||
'https://dev.altumcode.com/plugins-versions' => Http::response([
|
||||
'affiliate' => ['version' => '2.0.1'],
|
||||
'teams' => ['version' => '3.0.0'],
|
||||
]),
|
||||
]);
|
||||
|
||||
$vendor = Vendor::factory()->create([
|
||||
'slug' => 'altum-plugin-affiliate',
|
||||
'name' => 'Affiliate Plugin',
|
||||
'source_type' => Vendor::SOURCE_PLUGIN,
|
||||
'plugin_platform' => Vendor::PLATFORM_ALTUM,
|
||||
'current_version' => '2.0.0',
|
||||
'is_active' => true,
|
||||
]);
|
||||
|
||||
$result = $this->service->checkVendor($vendor);
|
||||
|
||||
expect($result['status'])->toBe('success')
|
||||
->and($result['latest'])->toBe('2.0.1')
|
||||
->and($result['has_update'])->toBeTrue();
|
||||
});
|
||||
|
||||
it('handles altum info.php timeout gracefully', function () {
|
||||
Http::fake([
|
||||
'https://66analytics.com/info.php' => Http::response('', 500),
|
||||
]);
|
||||
|
||||
$vendor = Vendor::factory()->create([
|
||||
'slug' => '66analytics',
|
||||
'name' => '66analytics',
|
||||
'source_type' => Vendor::SOURCE_LICENSED,
|
||||
'plugin_platform' => Vendor::PLATFORM_ALTUM,
|
||||
'current_version' => '65.0.0',
|
||||
'is_active' => true,
|
||||
]);
|
||||
|
||||
$result = $this->service->checkVendor($vendor);
|
||||
|
||||
expect($result['status'])->toBe('error')
|
||||
->and($result['has_update'])->toBeFalse();
|
||||
});
|
||||
```
|
||||
|
||||
**Step 2: Run test to verify it fails**
|
||||
|
||||
Run: `cd /Users/snider/Code/core/php-uptelligence && composer test -- --filter=AltumCodeChecker`
|
||||
Expected: FAIL — altum vendors still hit `skipCheck()`
|
||||
|
||||
**Step 3: Write minimal implementation**
|
||||
|
||||
In `/Users/snider/Code/core/php-uptelligence/Services/VendorUpdateCheckerService.php`, modify `checkVendor()` to route altum vendors:
|
||||
|
||||
```php
|
||||
public function checkVendor(Vendor $vendor): array
|
||||
{
|
||||
$result = match (true) {
|
||||
$this->isAltumPlatform($vendor) && $vendor->isLicensed() => $this->checkAltumProduct($vendor),
|
||||
$this->isAltumPlatform($vendor) && $vendor->isPlugin() => $this->checkAltumPlugin($vendor),
|
||||
$vendor->isOss() && $this->isGitHubUrl($vendor->git_repo_url) => $this->checkGitHub($vendor),
|
||||
$vendor->isOss() && $this->isGiteaUrl($vendor->git_repo_url) => $this->checkGitea($vendor),
|
||||
default => $this->skipCheck($vendor),
|
||||
};
|
||||
|
||||
// ... rest unchanged
|
||||
}
|
||||
```
|
||||
|
||||
Add the three new methods:
|
||||
|
||||
```php
|
||||
/**
|
||||
* Check if vendor is on the AltumCode platform.
|
||||
*/
|
||||
protected function isAltumPlatform(Vendor $vendor): bool
|
||||
{
|
||||
return $vendor->plugin_platform === Vendor::PLATFORM_ALTUM;
|
||||
}
|
||||
|
||||
/**
|
||||
* AltumCode product info endpoint mapping.
|
||||
*/
|
||||
protected function getAltumProductInfoUrl(Vendor $vendor): ?string
|
||||
{
|
||||
$urls = [
|
||||
'66analytics' => 'https://66analytics.com/info.php',
|
||||
'66biolinks' => 'https://66biolinks.com/info.php',
|
||||
'66pusher' => 'https://66pusher.com/info.php',
|
||||
'66socialproof' => 'https://66socialproof.com/info.php',
|
||||
];
|
||||
|
||||
return $urls[$vendor->slug] ?? null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check an AltumCode product for updates via its info.php endpoint.
|
||||
*/
|
||||
protected function checkAltumProduct(Vendor $vendor): array
|
||||
{
|
||||
$url = $this->getAltumProductInfoUrl($vendor);
|
||||
if (! $url) {
|
||||
return $this->errorResult("No info.php URL mapped for {$vendor->slug}");
|
||||
}
|
||||
|
||||
try {
|
||||
$response = Http::timeout(5)->get($url);
|
||||
|
||||
if (! $response->successful()) {
|
||||
return $this->errorResult("AltumCode info.php returned {$response->status()}");
|
||||
}
|
||||
|
||||
$data = $response->json();
|
||||
$latestVersion = $data['latest_release_version'] ?? null;
|
||||
|
||||
if (! $latestVersion) {
|
||||
return $this->errorResult('No version in info.php response');
|
||||
}
|
||||
|
||||
return $this->buildResult(
|
||||
vendor: $vendor,
|
||||
latestVersion: $this->normaliseVersion($latestVersion),
|
||||
releaseInfo: [
|
||||
'version_code' => $data['latest_release_version_code'] ?? null,
|
||||
'source' => $url,
|
||||
]
|
||||
);
|
||||
} catch (\Exception $e) {
|
||||
return $this->errorResult("AltumCode check failed: {$e->getMessage()}");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check an AltumCode plugin for updates via the central plugins-versions endpoint.
|
||||
*/
|
||||
protected function checkAltumPlugin(Vendor $vendor): array
|
||||
{
|
||||
try {
|
||||
$allPlugins = $this->getAltumPluginVersions();
|
||||
|
||||
if ($allPlugins === null) {
|
||||
return $this->errorResult('Failed to fetch AltumCode plugin versions');
|
||||
}
|
||||
|
||||
// Extract the plugin_id from the vendor slug (strip 'altum-plugin-' prefix)
|
||||
$pluginId = str_replace('altum-plugin-', '', $vendor->slug);
|
||||
|
||||
if (! isset($allPlugins[$pluginId])) {
|
||||
return $this->errorResult("Plugin '{$pluginId}' not found in AltumCode registry");
|
||||
}
|
||||
|
||||
$latestVersion = $allPlugins[$pluginId]['version'] ?? null;
|
||||
|
||||
return $this->buildResult(
|
||||
vendor: $vendor,
|
||||
latestVersion: $this->normaliseVersion($latestVersion),
|
||||
releaseInfo: ['source' => 'dev.altumcode.com/plugins-versions']
|
||||
);
|
||||
} catch (\Exception $e) {
|
||||
return $this->errorResult("AltumCode plugin check failed: {$e->getMessage()}");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch all AltumCode plugin versions (cached for 1 hour within a check run).
|
||||
*/
|
||||
protected ?array $altumPluginVersionsCache = null;
|
||||
|
||||
protected function getAltumPluginVersions(): ?array
|
||||
{
|
||||
if ($this->altumPluginVersionsCache !== null) {
|
||||
return $this->altumPluginVersionsCache;
|
||||
}
|
||||
|
||||
$response = Http::timeout(5)->get('https://dev.altumcode.com/plugins-versions');
|
||||
|
||||
if (! $response->successful()) {
|
||||
return null;
|
||||
}
|
||||
|
||||
$this->altumPluginVersionsCache = $response->json();
|
||||
|
||||
return $this->altumPluginVersionsCache;
|
||||
}
|
||||
```
|
||||
|
||||
**Step 4: Run test to verify it passes**
|
||||
|
||||
Run: `cd /Users/snider/Code/core/php-uptelligence && composer test -- --filter=AltumCodeChecker`
|
||||
Expected: PASS (4 tests)
|
||||
|
||||
**Step 5: Commit**
|
||||
|
||||
```bash
|
||||
cd /Users/snider/Code/core/php-uptelligence
|
||||
git add Services/VendorUpdateCheckerService.php tests/Unit/AltumCodeCheckerTest.php
|
||||
git commit -m "feat: add AltumCode product + plugin version checking
|
||||
|
||||
Extends VendorUpdateCheckerService to check AltumCode products via
|
||||
their info.php endpoints and plugins via dev.altumcode.com/plugins-versions.
|
||||
No auth required — all endpoints are public.
|
||||
|
||||
Co-Authored-By: Virgil <virgil@lethean.io>"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 2: Seed AltumCode vendors
|
||||
|
||||
**Files:**
|
||||
- Create: `/Users/snider/Code/core/php-uptelligence/database/seeders/AltumCodeVendorSeeder.php`
|
||||
- Test: `/Users/snider/Code/core/php-uptelligence/tests/Unit/AltumCodeVendorSeederTest.php`
|
||||
|
||||
**Step 1: Write the failing test**
|
||||
|
||||
Create `/Users/snider/Code/core/php-uptelligence/tests/Unit/AltumCodeVendorSeederTest.php`:
|
||||
|
||||
```php
|
||||
<?php
|
||||
|
||||
declare(strict_types=1);
|
||||
|
||||
use Core\Mod\Uptelligence\Models\Vendor;
|
||||
use Illuminate\Foundation\Testing\RefreshDatabase;
|
||||
|
||||
uses(RefreshDatabase::class);
|
||||
|
||||
it('seeds 4 altum products', function () {
|
||||
$this->artisan('db:seed', ['--class' => 'Core\\Mod\\Uptelligence\\Database\\Seeders\\AltumCodeVendorSeeder']);
|
||||
|
||||
expect(Vendor::where('source_type', Vendor::SOURCE_LICENSED)
|
||||
->where('plugin_platform', Vendor::PLATFORM_ALTUM)
|
||||
->count()
|
||||
)->toBe(4);
|
||||
});
|
||||
|
||||
it('seeds 13 altum plugins', function () {
|
||||
$this->artisan('db:seed', ['--class' => 'Core\\Mod\\Uptelligence\\Database\\Seeders\\AltumCodeVendorSeeder']);
|
||||
|
||||
expect(Vendor::where('source_type', Vendor::SOURCE_PLUGIN)
|
||||
->where('plugin_platform', Vendor::PLATFORM_ALTUM)
|
||||
->count()
|
||||
)->toBe(13);
|
||||
});
|
||||
|
||||
it('is idempotent', function () {
|
||||
$this->artisan('db:seed', ['--class' => 'Core\\Mod\\Uptelligence\\Database\\Seeders\\AltumCodeVendorSeeder']);
|
||||
$this->artisan('db:seed', ['--class' => 'Core\\Mod\\Uptelligence\\Database\\Seeders\\AltumCodeVendorSeeder']);
|
||||
|
||||
expect(Vendor::where('plugin_platform', Vendor::PLATFORM_ALTUM)->count())->toBe(17);
|
||||
});
|
||||
```
|
||||
|
||||
**Step 2: Run test to verify it fails**
|
||||
|
||||
Run: `cd /Users/snider/Code/core/php-uptelligence && composer test -- --filter=AltumCodeVendorSeeder`
|
||||
Expected: FAIL — seeder class not found
|
||||
|
||||
**Step 3: Write minimal implementation**
|
||||
|
||||
Create `/Users/snider/Code/core/php-uptelligence/database/seeders/AltumCodeVendorSeeder.php`:
|
||||
|
||||
```php
|
||||
<?php
|
||||
|
||||
declare(strict_types=1);
|
||||
|
||||
namespace Core\Mod\Uptelligence\Database\Seeders;
|
||||
|
||||
use Core\Mod\Uptelligence\Models\Vendor;
|
||||
use Illuminate\Database\Seeder;
|
||||
|
||||
class AltumCodeVendorSeeder extends Seeder
|
||||
{
|
||||
public function run(): void
|
||||
{
|
||||
$products = [
|
||||
['slug' => '66analytics', 'name' => '66analytics', 'vendor_name' => 'AltumCode', 'current_version' => '65.0.0'],
|
||||
['slug' => '66biolinks', 'name' => '66biolinks', 'vendor_name' => 'AltumCode', 'current_version' => '65.0.0'],
|
||||
['slug' => '66pusher', 'name' => '66pusher', 'vendor_name' => 'AltumCode', 'current_version' => '65.0.0'],
|
||||
['slug' => '66socialproof', 'name' => '66socialproof', 'vendor_name' => 'AltumCode', 'current_version' => '65.0.0'],
|
||||
];
|
||||
|
||||
foreach ($products as $product) {
|
||||
Vendor::updateOrCreate(
|
||||
['slug' => $product['slug']],
|
||||
[
|
||||
...$product,
|
||||
'source_type' => Vendor::SOURCE_LICENSED,
|
||||
'plugin_platform' => Vendor::PLATFORM_ALTUM,
|
||||
'is_active' => true,
|
||||
]
|
||||
);
|
||||
}
|
||||
|
||||
$plugins = [
|
||||
['slug' => 'altum-plugin-affiliate', 'name' => 'Affiliate Plugin', 'current_version' => '2.0.0'],
|
||||
['slug' => 'altum-plugin-push-notifications', 'name' => 'Push Notifications Plugin', 'current_version' => '1.0.0'],
|
||||
['slug' => 'altum-plugin-teams', 'name' => 'Teams Plugin', 'current_version' => '1.0.0'],
|
||||
['slug' => 'altum-plugin-pwa', 'name' => 'PWA Plugin', 'current_version' => '1.0.0'],
|
||||
['slug' => 'altum-plugin-image-optimizer', 'name' => 'Image Optimizer Plugin', 'current_version' => '3.1.0'],
|
||||
['slug' => 'altum-plugin-email-shield', 'name' => 'Email Shield Plugin', 'current_version' => '1.0.0'],
|
||||
['slug' => 'altum-plugin-dynamic-og-images', 'name' => 'Dynamic OG Images Plugin', 'current_version' => '1.0.0'],
|
||||
['slug' => 'altum-plugin-offload', 'name' => 'Offload & CDN Plugin', 'current_version' => '1.0.0'],
|
||||
['slug' => 'altum-plugin-payment-blocks', 'name' => 'Payment Blocks Plugin', 'current_version' => '1.0.0'],
|
||||
['slug' => 'altum-plugin-ultimate-blocks', 'name' => 'Ultimate Blocks Plugin', 'current_version' => '9.1.0'],
|
||||
['slug' => 'altum-plugin-pro-blocks', 'name' => 'Pro Blocks Plugin', 'current_version' => '1.0.0'],
|
||||
['slug' => 'altum-plugin-pro-notifications', 'name' => 'Pro Notifications Plugin', 'current_version' => '1.0.0'],
|
||||
['slug' => 'altum-plugin-aix', 'name' => 'AIX Plugin', 'current_version' => '1.0.0'],
|
||||
];
|
||||
|
||||
foreach ($plugins as $plugin) {
|
||||
Vendor::updateOrCreate(
|
||||
['slug' => $plugin['slug']],
|
||||
[
|
||||
...$plugin,
|
||||
'vendor_name' => 'AltumCode',
|
||||
'source_type' => Vendor::SOURCE_PLUGIN,
|
||||
'plugin_platform' => Vendor::PLATFORM_ALTUM,
|
||||
'is_active' => true,
|
||||
]
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Step 4: Run test to verify it passes**
|
||||
|
||||
Run: `cd /Users/snider/Code/core/php-uptelligence && composer test -- --filter=AltumCodeVendorSeeder`
|
||||
Expected: PASS (3 tests)
|
||||
|
||||
**Step 5: Commit**
|
||||
|
||||
```bash
|
||||
cd /Users/snider/Code/core/php-uptelligence
|
||||
git add database/seeders/AltumCodeVendorSeeder.php tests/Unit/AltumCodeVendorSeederTest.php
|
||||
git commit -m "feat: seed AltumCode vendors — 4 products + 13 plugins
|
||||
|
||||
Idempotent seeder using updateOrCreate. Products are SOURCE_LICENSED,
|
||||
plugins are SOURCE_PLUGIN, all PLATFORM_ALTUM. Version numbers will
|
||||
need updating to match actual deployed versions.
|
||||
|
||||
Co-Authored-By: Virgil <virgil@lethean.io>"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 3: Create Claude Code plugin skill for downloads
|
||||
|
||||
**Files:**
|
||||
- Create: `/Users/snider/.claude/plugins/altum-updater/plugin.json`
|
||||
- Create: `/Users/snider/.claude/plugins/altum-updater/skills/update-altum.md`
|
||||
|
||||
**Step 1: Create plugin manifest**
|
||||
|
||||
Create `/Users/snider/.claude/plugins/altum-updater/plugin.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "altum-updater",
|
||||
"description": "Download AltumCode product and plugin updates from LemonSqueezy and CodeCanyon",
|
||||
"version": "0.1.0",
|
||||
"skills": [
|
||||
{
|
||||
"name": "update-altum",
|
||||
"path": "skills/update-altum.md",
|
||||
"description": "Download AltumCode product and plugin updates from marketplaces. Use when the user mentions updating AltumCode products, downloading from LemonSqueezy or CodeCanyon, or running the update checker."
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Step 2: Create skill file**
|
||||
|
||||
Create `/Users/snider/.claude/plugins/altum-updater/skills/update-altum.md`:
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: update-altum
|
||||
description: Download AltumCode product and plugin updates from LemonSqueezy and CodeCanyon
|
||||
---
|
||||
|
||||
# AltumCode Update Downloader
|
||||
|
||||
## Overview
|
||||
|
||||
Downloads updated AltumCode products and plugins from two marketplaces:
|
||||
- **LemonSqueezy** (Playwright): 66analytics, 66pusher, 66biolinks (extended), 13 plugins
|
||||
- **CodeCanyon** (Claude in Chrome): 66biolinks (regular), 66socialproof
|
||||
|
||||
## Pre-flight
|
||||
|
||||
1. Run `php artisan uptelligence:check-updates --vendor=66analytics` (or check all) to see what needs updating
|
||||
2. Show the user the version comparison table
|
||||
3. Ask which products/plugins to download
|
||||
|
||||
## LemonSqueezy Download Flow (Playwright)
|
||||
|
||||
LemonSqueezy uses magic link auth. The user will need to tap the link on their phone.
|
||||
|
||||
1. Navigate to `https://app.lemonsqueezy.com/my-orders`
|
||||
2. If on login page, fill email `snider@lt.hn` and click Sign In
|
||||
3. Tell user: "Magic link sent — tap the link on your phone"
|
||||
4. Wait for redirect to orders page
|
||||
5. For each product/plugin that needs updating:
|
||||
a. Click the product link on the orders page (paginated — 10 per page, 2 pages)
|
||||
b. In the order detail, find the "Download" button under "Files"
|
||||
c. Click Download — file saves to default downloads folder
|
||||
6. Move downloaded zips to staging: `~/Code/lthn/saas/updates/YYYY-MM-DD/`
|
||||
|
||||
### LemonSqueezy Product Names (as shown on orders page)
|
||||
|
||||
| Our Name | LemonSqueezy Order Name |
|
||||
|----------|------------------------|
|
||||
| 66analytics | "66analytics - Regular License" |
|
||||
| 66pusher | "66pusher - Regular License" |
|
||||
| 66biolinks (extended) | "66biolinks custom" |
|
||||
| Affiliate Plugin | "Affiliate Plugin" |
|
||||
| Push Notifications Plugin | "Push Notifications Plugin" |
|
||||
| Teams Plugin | "Teams Plugin" |
|
||||
| PWA Plugin | "PWA Plugin" |
|
||||
| Image Optimizer Plugin | "Image Optimizer Plugin" |
|
||||
| Email Shield Plugin | "Email Shield Plugin" |
|
||||
| Dynamic OG Images | "Dynamic OG images plugin" |
|
||||
| Offload & CDN | "Offload & CDN Plugin" |
|
||||
| Payment Blocks | "Payment Blocks - 66biolinks plugin" |
|
||||
| Ultimate Blocks | "Ultimate Blocks - 66biolinks plugin" |
|
||||
| Pro Blocks | "Pro Blocks - 66biolinks plugin" |
|
||||
| Pro Notifications | "Pro Notifications - 66socialproof plugin" |
|
||||
| AltumCode Club | "The AltumCode Club" |
|
||||
|
||||
## CodeCanyon Download Flow (Claude in Chrome)
|
||||
|
||||
CodeCanyon uses saved browser session cookies (user: snidered).
|
||||
|
||||
1. Navigate to `https://codecanyon.net/downloads`
|
||||
2. Dismiss cookie banner if present (click "Reject all")
|
||||
3. For 66socialproof:
|
||||
a. Find "66socialproof" Download button
|
||||
b. Click the dropdown arrow
|
||||
c. Click "All files & documentation"
|
||||
4. For 66biolinks:
|
||||
a. Find "66biolinks" Download button (scroll down)
|
||||
b. Click the dropdown arrow
|
||||
c. Click "All files & documentation"
|
||||
5. Move downloaded zips to staging
|
||||
|
||||
### CodeCanyon Download URLs (stable)
|
||||
|
||||
- 66socialproof: `/user/snidered/download_purchase/8d8ef4c1-5add-4eba-9a89-4261a9c87e0b`
|
||||
- 66biolinks: `/user/snidered/download_purchase/38d79f4e-19cd-480a-b068-4332629b5206`
|
||||
|
||||
## Post-Download
|
||||
|
||||
1. List all zips in staging folder
|
||||
2. For each product zip:
|
||||
- Extract to `~/Code/lthn/saas/services/{product}/package/product/`
|
||||
3. For each plugin zip:
|
||||
- Extract to the correct product's `plugins/{plugin_id}/` directory
|
||||
- Note: Some plugins apply to multiple products (affiliate, teams, etc.)
|
||||
4. Show summary of what was updated
|
||||
5. Remind user: "Files staged. Run `deploy_saas.yml` when ready to deploy."
|
||||
|
||||
## Important Notes
|
||||
|
||||
- Never make purchases or enter financial information
|
||||
- LemonSqueezy session expires — if Playwright gets a login page mid-flow, re-trigger magic link
|
||||
- CodeCanyon session depends on Chrome cookies — if logged out, tell user to log in manually
|
||||
- The AltumCode Club subscription is NOT a downloadable product — skip it
|
||||
- Plugin `aix` may not appear on LemonSqueezy (bundled with products) — skip if not found
|
||||
```
|
||||
|
||||
**Step 3: Verify plugin loads**
|
||||
|
||||
Run: `claude` in a new terminal, then type `/update-altum` to verify the skill is discovered.
|
||||
|
||||
**Step 4: Commit**
|
||||
|
||||
```bash
|
||||
cd /Users/snider/.claude/plugins/altum-updater
|
||||
git init
|
||||
git add plugin.json skills/update-altum.md
|
||||
git commit -m "feat: altum-updater Claude Code plugin — marketplace download skill
|
||||
|
||||
Playwright for LemonSqueezy, Chrome for CodeCanyon. Includes full
|
||||
product/plugin mapping and download flow documentation.
|
||||
|
||||
Co-Authored-By: Virgil <virgil@lethean.io>"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 4: Sync deployed plugin versions from source
|
||||
|
||||
**Files:**
|
||||
- Create: `/Users/snider/Code/core/php-uptelligence/Console/SyncAltumVersionsCommand.php`
|
||||
- Modify: `/Users/snider/Code/core/php-uptelligence/Boot.php` (register command)
|
||||
- Test: `/Users/snider/Code/core/php-uptelligence/tests/Unit/SyncAltumVersionsCommandTest.php`
|
||||
|
||||
**Step 1: Write the failing test**
|
||||
|
||||
```php
|
||||
<?php
|
||||
|
||||
declare(strict_types=1);
|
||||
|
||||
it('reads product version from saas service config', function () {
|
||||
$this->artisan('uptelligence:sync-altum-versions', ['--dry-run' => true])
|
||||
->assertExitCode(0);
|
||||
});
|
||||
```
|
||||
|
||||
**Step 2: Run test to verify it fails**
|
||||
|
||||
Run: `cd /Users/snider/Code/core/php-uptelligence && composer test -- --filter=SyncAltumVersions`
|
||||
Expected: FAIL — command not found
|
||||
|
||||
**Step 3: Write minimal implementation**
|
||||
|
||||
Create `/Users/snider/Code/core/php-uptelligence/Console/SyncAltumVersionsCommand.php`:
|
||||
|
||||
```php
|
||||
<?php
|
||||
|
||||
declare(strict_types=1);
|
||||
|
||||
namespace Core\Mod\Uptelligence\Console;
|
||||
|
||||
use Core\Mod\Uptelligence\Models\Vendor;
|
||||
use Illuminate\Console\Command;
|
||||
|
||||
/**
|
||||
* Sync deployed AltumCode product/plugin versions from local source files.
|
||||
*
|
||||
* Reads PRODUCT_CODE from each product's source and plugin versions
|
||||
* from config.php files, then updates the vendors table.
|
||||
*/
|
||||
class SyncAltumVersionsCommand extends Command
|
||||
{
|
||||
protected $signature = 'uptelligence:sync-altum-versions
|
||||
{--dry-run : Show what would be updated without writing}
|
||||
{--path= : Base path to saas services (default: ~/Code/lthn/saas/services)}';
|
||||
|
||||
protected $description = 'Sync deployed AltumCode product and plugin versions from source files';
|
||||
|
||||
protected array $productPaths = [
|
||||
'66analytics' => '66analytics/package/product',
|
||||
'66biolinks' => '66biolinks/package/product',
|
||||
'66pusher' => '66pusher/package/product',
|
||||
'66socialproof' => '66socialproof/package/product',
|
||||
];
|
||||
|
||||
public function handle(): int
|
||||
{
|
||||
$basePath = $this->option('path')
|
||||
?? env('SAAS_SERVICES_PATH', base_path('../lthn/saas/services'));
|
||||
$dryRun = $this->option('dry-run');
|
||||
|
||||
$this->info('Syncing AltumCode versions from source...');
|
||||
$this->newLine();
|
||||
|
||||
$updates = [];
|
||||
|
||||
// Sync product versions
|
||||
foreach ($this->productPaths as $slug => $relativePath) {
|
||||
$productPath = rtrim($basePath, '/') . '/' . $relativePath;
|
||||
$version = $this->readProductVersion($productPath);
|
||||
|
||||
if ($version) {
|
||||
$updates[] = $this->syncVendorVersion($slug, $version, $dryRun);
|
||||
} else {
|
||||
$this->warn(" Could not read version for {$slug} at {$productPath}");
|
||||
}
|
||||
}
|
||||
|
||||
// Sync plugin versions — read from biolinks as canonical source
|
||||
$biolinkPluginsPath = rtrim($basePath, '/') . '/66biolinks/package/product/plugins';
|
||||
if (is_dir($biolinkPluginsPath)) {
|
||||
foreach (glob($biolinkPluginsPath . '/*/config.php') as $configFile) {
|
||||
$pluginId = basename(dirname($configFile));
|
||||
$version = $this->readPluginVersion($configFile);
|
||||
|
||||
if ($version) {
|
||||
$slug = "altum-plugin-{$pluginId}";
|
||||
$updates[] = $this->syncVendorVersion($slug, $version, $dryRun);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Output table
|
||||
$this->table(
|
||||
['Vendor', 'Old Version', 'New Version', 'Status'],
|
||||
array_filter($updates)
|
||||
);
|
||||
|
||||
if ($dryRun) {
|
||||
$this->warn('Dry run — no changes written.');
|
||||
}
|
||||
|
||||
return self::SUCCESS;
|
||||
}
|
||||
|
||||
protected function readProductVersion(string $productPath): ?string
|
||||
{
|
||||
// Read version from app/init.php or similar — look for PRODUCT_VERSION define
|
||||
$initFile = $productPath . '/app/init.php';
|
||||
if (! file_exists($initFile)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
$content = file_get_contents($initFile);
|
||||
if (preg_match("/define\('PRODUCT_VERSION',\s*'([^']+)'\)/", $content, $matches)) {
|
||||
return $matches[1];
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
protected function readPluginVersion(string $configFile): ?string
|
||||
{
|
||||
if (! file_exists($configFile)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
$content = file_get_contents($configFile);
|
||||
|
||||
// PHP config format: 'version' => '2.0.0'
|
||||
if (preg_match("/'version'\s*=>\s*'([^']+)'/", $content, $matches)) {
|
||||
return $matches[1];
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
protected function syncVendorVersion(string $slug, string $version, bool $dryRun): ?array
|
||||
{
|
||||
$vendor = Vendor::where('slug', $slug)->first();
|
||||
if (! $vendor) {
|
||||
return [$slug, '(not in DB)', $version, 'SKIPPED'];
|
||||
}
|
||||
|
||||
$oldVersion = $vendor->current_version;
|
||||
if ($oldVersion === $version) {
|
||||
return [$slug, $oldVersion, $version, 'current'];
|
||||
}
|
||||
|
||||
if (! $dryRun) {
|
||||
$vendor->update(['current_version' => $version]);
|
||||
}
|
||||
|
||||
return [$slug, $oldVersion ?? '(none)', $version, $dryRun ? 'WOULD UPDATE' : 'UPDATED'];
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Register in Boot.php — add to `onConsole()`:
|
||||
|
||||
```php
|
||||
$event->command(Console\SyncAltumVersionsCommand::class);
|
||||
```
|
||||
|
||||
**Step 4: Run test to verify it passes**
|
||||
|
||||
Run: `cd /Users/snider/Code/core/php-uptelligence && composer test -- --filter=SyncAltumVersions`
|
||||
Expected: PASS
|
||||
|
||||
**Step 5: Commit**
|
||||
|
||||
```bash
|
||||
cd /Users/snider/Code/core/php-uptelligence
|
||||
git add Console/SyncAltumVersionsCommand.php Boot.php tests/Unit/SyncAltumVersionsCommandTest.php
|
||||
git commit -m "feat: sync deployed AltumCode versions from source files
|
||||
|
||||
Reads PRODUCT_VERSION from product init.php and plugin versions from
|
||||
config.php files. Updates uptelligence_vendors table so check-updates
|
||||
knows what's actually deployed.
|
||||
|
||||
Co-Authored-By: Virgil <virgil@lethean.io>"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 5: End-to-end verification
|
||||
|
||||
**Step 1: Seed vendors on local dev**
|
||||
|
||||
```bash
|
||||
cd /Users/snider/Code/lab/host.uk.com
|
||||
php artisan db:seed --class="Core\Mod\Uptelligence\Database\Seeders\AltumCodeVendorSeeder"
|
||||
```
|
||||
|
||||
**Step 2: Sync actual deployed versions**
|
||||
|
||||
```bash
|
||||
php artisan uptelligence:sync-altum-versions --path=/Users/snider/Code/lthn/saas/services
|
||||
```
|
||||
|
||||
**Step 3: Run the update check**
|
||||
|
||||
```bash
|
||||
php artisan uptelligence:check-updates
|
||||
```
|
||||
|
||||
Expected: Table showing current vs latest versions for all 17 AltumCode vendors.
|
||||
|
||||
**Step 4: Test the skill**
|
||||
|
||||
Open a new Claude Code session and run `/update-altum` to verify the skill loads and shows the workflow.
|
||||
|
||||
**Step 5: Commit any fixes**
|
||||
|
||||
```bash
|
||||
git add -A && git commit -m "fix: adjustments from end-to-end testing"
|
||||
```
|
||||
169
.core/reference/docs/primitives.md
Normal file
169
.core/reference/docs/primitives.md
Normal file
|
|
@ -0,0 +1,169 @@
|
|||
---
|
||||
title: Core Primitives
|
||||
description: The repeated shapes that make CoreGO easy to navigate.
|
||||
---
|
||||
|
||||
# Core Primitives
|
||||
|
||||
CoreGO is easiest to use when you read it as a small vocabulary repeated everywhere. Most of the framework is built from the same handful of types.
|
||||
|
||||
## Primitive Map
|
||||
|
||||
| Type | Used For |
|
||||
|------|----------|
|
||||
| `Options` | Input values and lightweight metadata |
|
||||
| `Result` | Output values and success state |
|
||||
| `Service` | Lifecycle-managed components |
|
||||
| `Message` | Broadcast events |
|
||||
| `Query` | Request-response lookups |
|
||||
| `Task` | Side-effecting work items |
|
||||
|
||||
## `Option` and `Options`
|
||||
|
||||
`Option` is one key-value pair. `Options` is an ordered slice of them.
|
||||
|
||||
```go
|
||||
opts := core.Options{
|
||||
{Key: "name", Value: "brain"},
|
||||
{Key: "path", Value: "prompts"},
|
||||
{Key: "debug", Value: true},
|
||||
}
|
||||
```
|
||||
|
||||
Use the helpers to read values:
|
||||
|
||||
```go
|
||||
name := opts.String("name")
|
||||
path := opts.String("path")
|
||||
debug := opts.Bool("debug")
|
||||
hasPath := opts.Has("path")
|
||||
raw := opts.Get("name")
|
||||
```
|
||||
|
||||
### Important Details
|
||||
|
||||
- `Get` returns the first matching key.
|
||||
- `String`, `Int`, and `Bool` do not convert between types.
|
||||
- Missing keys return zero values.
|
||||
- CLI flags with values are stored as strings, so `--port=8080` should be read with `opts.String("port")`, not `opts.Int("port")`.
|
||||
|
||||
## `Result`
|
||||
|
||||
`Result` is the universal return shape.
|
||||
|
||||
```go
|
||||
r := core.Result{Value: "ready", OK: true}
|
||||
|
||||
if r.OK {
|
||||
fmt.Println(r.Value)
|
||||
}
|
||||
```
|
||||
|
||||
It has two jobs:
|
||||
|
||||
- carry a value when work succeeds
|
||||
- carry either an error or an empty state when work does not succeed
|
||||
|
||||
### `Result.Result(...)`
|
||||
|
||||
The `Result()` method adapts plain Go values and `(value, error)` pairs into a `core.Result`.
|
||||
|
||||
```go
|
||||
r1 := core.Result{}.Result("hello")
|
||||
r2 := core.Result{}.Result(file, err)
|
||||
```
|
||||
|
||||
This is how several built-in helpers bridge standard-library calls.
|
||||
|
||||
## `Service`
|
||||
|
||||
`Service` is the managed lifecycle DTO stored in the registry.
|
||||
|
||||
```go
|
||||
svc := core.Service{
|
||||
Name: "cache",
|
||||
Options: core.Options{
|
||||
{Key: "backend", Value: "memory"},
|
||||
},
|
||||
OnStart: func() core.Result {
|
||||
return core.Result{OK: true}
|
||||
},
|
||||
OnStop: func() core.Result {
|
||||
return core.Result{OK: true}
|
||||
},
|
||||
OnReload: func() core.Result {
|
||||
return core.Result{OK: true}
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
### Important Details
|
||||
|
||||
- `OnStart` and `OnStop` are used by the framework lifecycle.
|
||||
- `OnReload` is stored on the service DTO, but CoreGO does not currently call it automatically.
|
||||
- The registry stores `*core.Service`, not arbitrary typed service instances.
|
||||
|
||||
## `Message`, `Query`, and `Task`
|
||||
|
||||
These are simple aliases to `any`.
|
||||
|
||||
```go
|
||||
type Message any
|
||||
type Query any
|
||||
type Task any
|
||||
```
|
||||
|
||||
That means your own structs become the protocol:
|
||||
|
||||
```go
|
||||
type deployStarted struct {
|
||||
Environment string
|
||||
}
|
||||
|
||||
type workspaceCountQuery struct{}
|
||||
|
||||
type syncRepositoryTask struct {
|
||||
Name string
|
||||
}
|
||||
```
|
||||
|
||||
## `TaskWithIdentifier`
|
||||
|
||||
Long-running tasks can opt into task identifiers.
|
||||
|
||||
```go
|
||||
type indexedTask struct {
|
||||
ID string
|
||||
}
|
||||
|
||||
func (t *indexedTask) SetTaskIdentifier(id string) { t.ID = id }
|
||||
func (t *indexedTask) GetTaskIdentifier() string { return t.ID }
|
||||
```
|
||||
|
||||
If a task implements `TaskWithIdentifier`, `PerformAsync` injects the generated `task-N` identifier before dispatch.
|
||||
|
||||
## `ServiceRuntime[T]`
|
||||
|
||||
`ServiceRuntime[T]` is the small helper for packages that want to keep a Core reference and a typed options struct together.
|
||||
|
||||
```go
|
||||
type agentServiceOptions struct {
|
||||
WorkspacePath string
|
||||
}
|
||||
|
||||
type agentService struct {
|
||||
*core.ServiceRuntime[agentServiceOptions]
|
||||
}
|
||||
|
||||
runtime := core.NewServiceRuntime(c, agentServiceOptions{
|
||||
WorkspacePath: "/srv/agent-workspaces",
|
||||
})
|
||||
```
|
||||
|
||||
It exposes:
|
||||
|
||||
- `Core()`
|
||||
- `Options()`
|
||||
- `Config()`
|
||||
|
||||
This helper does not register anything by itself. It is a composition aid for package authors.
|
||||
152
.core/reference/docs/services.md
Normal file
152
.core/reference/docs/services.md
Normal file
|
|
@ -0,0 +1,152 @@
|
|||
---
|
||||
title: Services
|
||||
description: Register, inspect, and lock CoreGO services.
|
||||
---
|
||||
|
||||
# Services
|
||||
|
||||
In CoreGO, a service is a named lifecycle entry stored in the Core registry.
|
||||
|
||||
## Register a Service
|
||||
|
||||
```go
|
||||
c := core.New()
|
||||
|
||||
r := c.Service("audit", core.Service{
|
||||
OnStart: func() core.Result {
|
||||
core.Info("audit started")
|
||||
return core.Result{OK: true}
|
||||
},
|
||||
OnStop: func() core.Result {
|
||||
core.Info("audit stopped")
|
||||
return core.Result{OK: true}
|
||||
},
|
||||
})
|
||||
```
|
||||
|
||||
Registration succeeds when:
|
||||
|
||||
- the name is not empty
|
||||
- the registry is not locked
|
||||
- the name is not already in use
|
||||
|
||||
## Read a Service Back
|
||||
|
||||
```go
|
||||
r := c.Service("audit")
|
||||
if r.OK {
|
||||
svc := r.Value.(*core.Service)
|
||||
_ = svc
|
||||
}
|
||||
```
|
||||
|
||||
The returned value is `*core.Service`.
|
||||
|
||||
## List Registered Services
|
||||
|
||||
```go
|
||||
names := c.Services()
|
||||
```
|
||||
|
||||
### Important Detail
|
||||
|
||||
The current registry is map-backed. `Services()`, `Startables()`, and `Stoppables()` do not promise a stable order.
|
||||
|
||||
## Lifecycle Snapshots
|
||||
|
||||
Use these helpers when you want the current set of startable or stoppable services:
|
||||
|
||||
```go
|
||||
startables := c.Startables()
|
||||
stoppables := c.Stoppables()
|
||||
```
|
||||
|
||||
They return `[]*core.Service` inside `Result.Value`.
|
||||
|
||||
## Lock the Registry
|
||||
|
||||
CoreGO has a service-lock mechanism, but it is explicit.
|
||||
|
||||
```go
|
||||
c := core.New()
|
||||
|
||||
c.LockEnable()
|
||||
c.Service("audit", core.Service{})
|
||||
c.Service("cache", core.Service{})
|
||||
c.LockApply()
|
||||
```
|
||||
|
||||
After `LockApply`, new registrations fail:
|
||||
|
||||
```go
|
||||
r := c.Service("late", core.Service{})
|
||||
fmt.Println(r.OK) // false
|
||||
```
|
||||
|
||||
The default lock name is `"srv"`. You can pass a different name if you need a custom lock namespace.
|
||||
|
||||
For the service registry itself, use the default `"srv"` lock path. That is the path used by `Core.Service(...)`.
|
||||
|
||||
## `NewWithFactories`
|
||||
|
||||
For GUI runtimes or factory-driven setup, CoreGO provides `NewWithFactories`.
|
||||
|
||||
```go
|
||||
r := core.NewWithFactories(nil, map[string]core.ServiceFactory{
|
||||
"audit": func() core.Result {
|
||||
return core.Result{Value: core.Service{
|
||||
OnStart: func() core.Result {
|
||||
return core.Result{OK: true}
|
||||
},
|
||||
}, OK: true}
|
||||
},
|
||||
"cache": func() core.Result {
|
||||
return core.Result{Value: core.Service{}, OK: true}
|
||||
},
|
||||
})
|
||||
```
|
||||
|
||||
### Important Details
|
||||
|
||||
- each factory must return a `core.Service` in `Result.Value`
|
||||
- factories are executed in sorted key order
|
||||
- nil factories are skipped
|
||||
- the return value is `*core.Runtime`
|
||||
|
||||
## `Runtime`
|
||||
|
||||
`Runtime` is a small wrapper used for external runtimes such as GUI bindings.
|
||||
|
||||
```go
|
||||
r := core.NewRuntime(nil)
|
||||
rt := r.Value.(*core.Runtime)
|
||||
|
||||
_ = rt.ServiceStartup(context.Background(), nil)
|
||||
_ = rt.ServiceShutdown(context.Background())
|
||||
```
|
||||
|
||||
`Runtime.ServiceName()` returns `"Core"`.
|
||||
|
||||
## `ServiceRuntime[T]` for Package Authors
|
||||
|
||||
If you are writing a package on top of CoreGO, use `ServiceRuntime[T]` to keep a typed options struct and the parent `Core` together.
|
||||
|
||||
```go
|
||||
type repositoryServiceOptions struct {
|
||||
BaseDirectory string
|
||||
}
|
||||
|
||||
type repositoryService struct {
|
||||
*core.ServiceRuntime[repositoryServiceOptions]
|
||||
}
|
||||
|
||||
func newRepositoryService(c *core.Core) *repositoryService {
|
||||
return &repositoryService{
|
||||
ServiceRuntime: core.NewServiceRuntime(c, repositoryServiceOptions{
|
||||
BaseDirectory: "/srv/repos",
|
||||
}),
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This is a package-authoring helper. It does not replace the `core.Service` registry entry.
|
||||
158
.core/reference/docs/subsystems.md
Normal file
158
.core/reference/docs/subsystems.md
Normal file
|
|
@ -0,0 +1,158 @@
|
|||
---
|
||||
title: Subsystems
|
||||
description: Built-in accessors for app metadata, embedded data, filesystem, transport handles, i18n, and CLI.
|
||||
---
|
||||
|
||||
# Subsystems
|
||||
|
||||
`Core` gives you a set of built-in subsystems so small applications do not need extra plumbing before they can do useful work.
|
||||
|
||||
## Accessor Map
|
||||
|
||||
| Accessor | Purpose |
|
||||
|----------|---------|
|
||||
| `App()` | Application identity and external runtime |
|
||||
| `Data()` | Named embedded filesystem mounts |
|
||||
| `Drive()` | Named transport handles |
|
||||
| `Fs()` | Local filesystem access |
|
||||
| `I18n()` | Locale collection and translation delegation |
|
||||
| `Cli()` | Command-line surface over the command tree |
|
||||
|
||||
## `App`
|
||||
|
||||
`App` stores process identity and optional GUI runtime state.
|
||||
|
||||
```go
|
||||
app := c.App()
|
||||
app.Name = "agent-workbench"
|
||||
app.Version = "0.25.0"
|
||||
app.Description = "workspace runner"
|
||||
app.Runtime = myRuntime
|
||||
```
|
||||
|
||||
`Find` resolves an executable on `PATH` and returns an `*App`.
|
||||
|
||||
```go
|
||||
r := core.Find("go", "Go toolchain")
|
||||
```
|
||||
|
||||
## `Data`
|
||||
|
||||
`Data` mounts named embedded filesystems and makes them addressable through paths like `mount-name/path/to/file`.
|
||||
|
||||
```go
|
||||
c.Data().New(core.Options{
|
||||
{Key: "name", Value: "app"},
|
||||
{Key: "source", Value: appFS},
|
||||
{Key: "path", Value: "templates"},
|
||||
})
|
||||
```
|
||||
|
||||
Read content:
|
||||
|
||||
```go
|
||||
text := c.Data().ReadString("app/agent.md")
|
||||
bytes := c.Data().ReadFile("app/agent.md")
|
||||
list := c.Data().List("app")
|
||||
names := c.Data().ListNames("app")
|
||||
```
|
||||
|
||||
Extract a mounted directory:
|
||||
|
||||
```go
|
||||
r := c.Data().Extract("app/workspace", "/tmp/workspace", nil)
|
||||
```
|
||||
|
||||
### Path Rule
|
||||
|
||||
The first path segment is always the mount name.
|
||||
|
||||
## `Drive`
|
||||
|
||||
`Drive` is a registry for named transport handles.
|
||||
|
||||
```go
|
||||
c.Drive().New(core.Options{
|
||||
{Key: "name", Value: "api"},
|
||||
{Key: "transport", Value: "https://api.lthn.ai"},
|
||||
})
|
||||
|
||||
c.Drive().New(core.Options{
|
||||
{Key: "name", Value: "mcp"},
|
||||
{Key: "transport", Value: "mcp://mcp.lthn.sh"},
|
||||
})
|
||||
```
|
||||
|
||||
Read them back:
|
||||
|
||||
```go
|
||||
handle := c.Drive().Get("api")
|
||||
hasMCP := c.Drive().Has("mcp")
|
||||
names := c.Drive().Names()
|
||||
```
|
||||
|
||||
## `Fs`
|
||||
|
||||
`Fs` wraps local filesystem operations with a consistent `Result` shape.
|
||||
|
||||
```go
|
||||
c.Fs().Write("/tmp/core-go/example.txt", "hello")
|
||||
r := c.Fs().Read("/tmp/core-go/example.txt")
|
||||
```
|
||||
|
||||
Other helpers:
|
||||
|
||||
```go
|
||||
c.Fs().EnsureDir("/tmp/core-go/cache")
|
||||
c.Fs().List("/tmp/core-go")
|
||||
c.Fs().Stat("/tmp/core-go/example.txt")
|
||||
c.Fs().Rename("/tmp/core-go/example.txt", "/tmp/core-go/example-2.txt")
|
||||
c.Fs().Delete("/tmp/core-go/example-2.txt")
|
||||
```
|
||||
|
||||
### Important Details
|
||||
|
||||
- the default `Core` starts with `Fs{root:"/"}`
|
||||
- relative paths resolve from the current working directory
|
||||
- `Delete` and `DeleteAll` refuse to remove `/` and `$HOME`
|
||||
|
||||
## `I18n`
|
||||
|
||||
`I18n` collects locale mounts and forwards translation work to a translator implementation when one is registered.
|
||||
|
||||
```go
|
||||
c.I18n().SetLanguage("en-GB")
|
||||
```
|
||||
|
||||
Without a translator, `Translate` returns the message key itself:
|
||||
|
||||
```go
|
||||
r := c.I18n().Translate("cmd.deploy.description")
|
||||
```
|
||||
|
||||
With a translator:
|
||||
|
||||
```go
|
||||
c.I18n().SetTranslator(myTranslator)
|
||||
```
|
||||
|
||||
Then:
|
||||
|
||||
```go
|
||||
langs := c.I18n().AvailableLanguages()
|
||||
current := c.I18n().Language()
|
||||
```
|
||||
|
||||
## `Cli`
|
||||
|
||||
`Cli` exposes the command registry through a terminal-facing API.
|
||||
|
||||
```go
|
||||
c.Cli().SetBanner(func(_ *core.Cli) string {
|
||||
return "Agent Workbench"
|
||||
})
|
||||
|
||||
r := c.Cli().Run("workspace", "create", "--name=alpha")
|
||||
```
|
||||
|
||||
Use [commands.md](commands.md) for the full command and flag model.
|
||||
118
.core/reference/docs/testing.md
Normal file
118
.core/reference/docs/testing.md
Normal file
|
|
@ -0,0 +1,118 @@
|
|||
---
|
||||
title: Testing
|
||||
description: Test naming and testing patterns used by CoreGO.
|
||||
---
|
||||
|
||||
# Testing
|
||||
|
||||
The repository uses `github.com/stretchr/testify/assert` and a simple AX-friendly naming pattern.
|
||||
|
||||
## Test Names
|
||||
|
||||
Use:
|
||||
|
||||
- `_Good` for expected success
|
||||
- `_Bad` for expected failure
|
||||
- `_Ugly` for panics, degenerate input, and edge behavior
|
||||
|
||||
Examples from this repository:
|
||||
|
||||
```go
|
||||
func TestNew_Good(t *testing.T) {}
|
||||
func TestService_Register_Duplicate_Bad(t *testing.T) {}
|
||||
func TestCore_Must_Ugly(t *testing.T) {}
|
||||
```
|
||||
|
||||
## Start with a Small Core
|
||||
|
||||
```go
|
||||
c := core.New(core.Options{
|
||||
{Key: "name", Value: "test-core"},
|
||||
})
|
||||
```
|
||||
|
||||
Then register only the pieces your test needs.
|
||||
|
||||
## Test a Service
|
||||
|
||||
```go
|
||||
started := false
|
||||
|
||||
c.Service("audit", core.Service{
|
||||
OnStart: func() core.Result {
|
||||
started = true
|
||||
return core.Result{OK: true}
|
||||
},
|
||||
})
|
||||
|
||||
r := c.ServiceStartup(context.Background(), nil)
|
||||
assert.True(t, r.OK)
|
||||
assert.True(t, started)
|
||||
```
|
||||
|
||||
## Test a Command
|
||||
|
||||
```go
|
||||
c.Command("greet", core.Command{
|
||||
Action: func(opts core.Options) core.Result {
|
||||
return core.Result{Value: "hello " + opts.String("name"), OK: true}
|
||||
},
|
||||
})
|
||||
|
||||
r := c.Cli().Run("greet", "--name=world")
|
||||
assert.True(t, r.OK)
|
||||
assert.Equal(t, "hello world", r.Value)
|
||||
```
|
||||
|
||||
## Test a Query or Task
|
||||
|
||||
```go
|
||||
c.RegisterQuery(func(_ *core.Core, q core.Query) core.Result {
|
||||
if q == "ping" {
|
||||
return core.Result{Value: "pong", OK: true}
|
||||
}
|
||||
return core.Result{}
|
||||
})
|
||||
|
||||
assert.Equal(t, "pong", c.QUERY("ping").Value)
|
||||
```
|
||||
|
||||
```go
|
||||
c.RegisterTask(func(_ *core.Core, t core.Task) core.Result {
|
||||
if t == "compute" {
|
||||
return core.Result{Value: 42, OK: true}
|
||||
}
|
||||
return core.Result{}
|
||||
})
|
||||
|
||||
assert.Equal(t, 42, c.PERFORM("compute").Value)
|
||||
```
|
||||
|
||||
## Test Async Work
|
||||
|
||||
For `PerformAsync`, observe completion through the action bus.
|
||||
|
||||
```go
|
||||
completed := make(chan core.ActionTaskCompleted, 1)
|
||||
|
||||
c.RegisterAction(func(_ *core.Core, msg core.Message) core.Result {
|
||||
if event, ok := msg.(core.ActionTaskCompleted); ok {
|
||||
completed <- event
|
||||
}
|
||||
return core.Result{OK: true}
|
||||
})
|
||||
```
|
||||
|
||||
Then wait with normal Go test tools such as channels, timers, or `assert.Eventually`.
|
||||
|
||||
## Use Real Temporary Paths
|
||||
|
||||
When testing `Fs`, `Data.Extract`, or other I/O helpers, use `t.TempDir()` and create realistic paths instead of mocking the filesystem by default.
|
||||
|
||||
## Repository Commands
|
||||
|
||||
```bash
|
||||
core go test
|
||||
core go test --run TestPerformAsync_Good
|
||||
go test ./...
|
||||
```
|
||||
59
.core/reference/drive.go
Normal file
59
.core/reference/drive.go
Normal file
|
|
@ -0,0 +1,59 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Drive is the resource handle registry for transport connections.
|
||||
// Packages register their transport handles (API, MCP, SSH, VPN)
|
||||
// and other packages access them by name.
|
||||
//
|
||||
// Register a transport:
|
||||
//
|
||||
// c.Drive().New(core.NewOptions(
|
||||
// core.Option{Key: "name", Value: "api"},
|
||||
// core.Option{Key: "transport", Value: "https://api.lthn.ai"},
|
||||
// ))
|
||||
// c.Drive().New(core.NewOptions(
|
||||
// core.Option{Key: "name", Value: "ssh"},
|
||||
// core.Option{Key: "transport", Value: "ssh://claude@10.69.69.165"},
|
||||
// ))
|
||||
// c.Drive().New(core.NewOptions(
|
||||
// core.Option{Key: "name", Value: "mcp"},
|
||||
// core.Option{Key: "transport", Value: "mcp://mcp.lthn.sh"},
|
||||
// ))
|
||||
//
|
||||
// Retrieve a handle:
|
||||
//
|
||||
// api := c.Drive().Get("api")
|
||||
package core
|
||||
|
||||
// DriveHandle holds a named transport resource.
type DriveHandle struct {
	Name      string  // registry key, taken from the "name" option
	Transport string  // transport URI, taken from the "transport" option (e.g. "ssh://user@host")
	Options   Options // the full option set the handle was created with
}

// Drive manages named transport handles. Embeds Registry[*DriveHandle],
// so Get/Set and the other registry operations are available directly.
type Drive struct {
	*Registry[*DriveHandle]
}
|
||||
|
||||
// New registers a transport handle.
|
||||
//
|
||||
// c.Drive().New(core.NewOptions(
|
||||
// core.Option{Key: "name", Value: "api"},
|
||||
// core.Option{Key: "transport", Value: "https://api.lthn.ai"},
|
||||
// ))
|
||||
func (d *Drive) New(opts Options) Result {
|
||||
name := opts.String("name")
|
||||
if name == "" {
|
||||
return Result{}
|
||||
}
|
||||
|
||||
handle := &DriveHandle{
|
||||
Name: name,
|
||||
Transport: opts.String("transport"),
|
||||
Options: opts,
|
||||
}
|
||||
|
||||
d.Set(name, handle)
|
||||
return Result{handle, true}
|
||||
}
|
||||
668
.core/reference/embed.go
Normal file
668
.core/reference/embed.go
Normal file
|
|
@ -0,0 +1,668 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Embedded assets for the Core framework.
|
||||
//
|
||||
// Embed provides scoped filesystem access for go:embed and any fs.FS.
|
||||
// Also includes build-time asset packing (AST scanner + compressor)
|
||||
// and template-based directory extraction.
|
||||
//
|
||||
// Usage (mount):
|
||||
//
|
||||
// sub, _ := core.Mount(myFS, "lib/persona")
|
||||
// content, _ := sub.ReadString("secops/developer.md")
|
||||
//
|
||||
// Usage (extract):
|
||||
//
|
||||
// core.Extract(fsys, "/tmp/workspace", data)
|
||||
//
|
||||
// Usage (pack):
|
||||
//
|
||||
// refs, _ := core.ScanAssets([]string{"main.go"})
|
||||
// source, _ := core.GeneratePack(refs)
|
||||
package core
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"embed"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
// --- Runtime: Asset Registry ---

// AssetGroup holds a named collection of packed assets.
type AssetGroup struct {
	assets map[string]string // name → compressed payload (gzip + base64, as produced by compress)
}

var (
	// assetGroups is the process-wide registry of packed assets,
	// keyed by group name. Guarded by assetGroupsMu.
	assetGroups   = make(map[string]*AssetGroup)
	assetGroupsMu sync.RWMutex
)

// AddAsset registers a packed asset at runtime (called from generated init()).
// data is stored verbatim and only decoded on retrieval by GetAsset, so it
// must already be in compress() output form. Safe for concurrent use.
func AddAsset(group, name, data string) {
	assetGroupsMu.Lock()
	defer assetGroupsMu.Unlock()

	g, ok := assetGroups[group]
	if !ok {
		// First asset in this group: create the group lazily.
		g = &AssetGroup{assets: make(map[string]string)}
		assetGroups[group] = g
	}
	g.assets[name] = data
}
|
||||
|
||||
// GetAsset retrieves and decompresses a packed asset.
|
||||
//
|
||||
// r := core.GetAsset("mygroup", "greeting")
|
||||
// if r.OK { content := r.Value.(string) }
|
||||
func GetAsset(group, name string) Result {
|
||||
assetGroupsMu.RLock()
|
||||
g, ok := assetGroups[group]
|
||||
if !ok {
|
||||
assetGroupsMu.RUnlock()
|
||||
return Result{}
|
||||
}
|
||||
data, ok := g.assets[name]
|
||||
assetGroupsMu.RUnlock()
|
||||
if !ok {
|
||||
return Result{}
|
||||
}
|
||||
s, err := decompress(data)
|
||||
if err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
return Result{s, true}
|
||||
}
|
||||
|
||||
// GetAssetBytes retrieves a packed asset as bytes.
|
||||
//
|
||||
// r := core.GetAssetBytes("mygroup", "file")
|
||||
// if r.OK { data := r.Value.([]byte) }
|
||||
func GetAssetBytes(group, name string) Result {
|
||||
r := GetAsset(group, name)
|
||||
if !r.OK {
|
||||
return r
|
||||
}
|
||||
return Result{[]byte(r.Value.(string)), true}
|
||||
}
|
||||
|
||||
// --- Build-time: AST Scanner ---

// AssetRef is a reference to an asset found in source code.
type AssetRef struct {
	Name     string // asset name as written in the call (last string-literal argument)
	Path     string // NOTE(review): declared but never populated by ScanAssets — confirm intended semantics
	Group    string // group name; "." when the call supplies no group argument
	FullPath string // absolute path resolved against the source file's directory
}

// ScannedPackage holds all asset references from a set of source files
// that share one containing directory.
type ScannedPackage struct {
	PackageName   string     // Go package name taken from the parsed files
	BaseDirectory string     // directory the scanned source files live in
	Groups        []string   // absolute directory paths registered via core.Group(...)
	Assets        []AssetRef // individual asset references
}
|
||||
|
||||
// ScanAssets parses Go source files and finds asset references.
// Looks for calls to: core.GetAsset("group", "name"), core.AddAsset, etc.
//
// Recognised call forms (through the "core" or "mewn" identifier):
//   - GetAsset / GetAssetBytes / String / MustString / Bytes / MustBytes:
//     the LAST string-literal argument is the asset name; a leading
//     argument, when present, is the group (defaulting to ".").
//   - Group("./dir"): registers a whole directory for packing.
//
// Files are bucketed by their containing directory into ScannedPackage
// entries. Returns Result{[]ScannedPackage, true}, or the first parse or
// path-resolution error.
func ScanAssets(filenames []string) Result {
	packageMap := make(map[string]*ScannedPackage)
	var scanErr error // set inside the inspection callback; checked after each file

	for _, filename := range filenames {
		fset := token.NewFileSet()
		node, err := parser.ParseFile(fset, filename, nil, parser.AllErrors)
		if err != nil {
			return Result{err, false}
		}

		baseDir := filepath.Dir(filename)
		pkg, ok := packageMap[baseDir]
		if !ok {
			pkg = &ScannedPackage{BaseDirectory: baseDir}
			packageMap[baseDir] = pkg
		}
		// Last file parsed in a directory wins; all files in one dir
		// share a package name, so this is harmless.
		pkg.PackageName = node.Name.Name

		ast.Inspect(node, func(n ast.Node) bool {
			if scanErr != nil {
				return false
			}
			call, ok := n.(*ast.CallExpr)
			if !ok {
				return true
			}

			sel, ok := call.Fun.(*ast.SelectorExpr)
			if !ok {
				return true
			}

			ident, ok := sel.X.(*ast.Ident)
			if !ok {
				return true
			}

			// Look for core.GetAsset or mewn.String patterns
			if ident.Name == "core" || ident.Name == "mewn" {
				switch sel.Sel.Name {
				case "GetAsset", "GetAssetBytes", "String", "MustString", "Bytes", "MustBytes":
					if len(call.Args) >= 1 {
						if lit, ok := call.Args[len(call.Args)-1].(*ast.BasicLit); ok {
							// Strip the surrounding double quotes from the literal.
							path := TrimPrefix(TrimSuffix(lit.Value, "\""), "\"")
							group := "."
							if len(call.Args) >= 2 {
								if glit, ok := call.Args[0].(*ast.BasicLit); ok {
									group = TrimPrefix(TrimSuffix(glit.Value, "\""), "\"")
								}
							}
							fullPath, err := filepath.Abs(filepath.Join(baseDir, group, path))
							if err != nil {
								scanErr = Wrap(err, "core.ScanAssets", Join(" ", "could not determine absolute path for asset", path, "in group", group))
								return false
							}
							// NOTE(review): AssetRef.Path is never set here
							// even though the struct declares it — confirm
							// whether it should mirror Name.
							pkg.Assets = append(pkg.Assets, AssetRef{
								Name: path,

								Group:    group,
								FullPath: fullPath,
							})
						}
					}
				case "Group":
					// Variable assignment: g := core.Group("./assets")
					if len(call.Args) == 1 {
						if lit, ok := call.Args[0].(*ast.BasicLit); ok {
							path := TrimPrefix(TrimSuffix(lit.Value, "\""), "\"")
							fullPath, err := filepath.Abs(filepath.Join(baseDir, path))
							if err != nil {
								scanErr = Wrap(err, "core.ScanAssets", Join(" ", "could not determine absolute path for group", path))
								return false
							}
							pkg.Groups = append(pkg.Groups, fullPath)
							// Track for variable resolution
						}
					}
				}
			}

			return true
		})
		if scanErr != nil {
			return Result{scanErr, false}
		}
	}

	var result []ScannedPackage
	for _, pkg := range packageMap {
		result = append(result, *pkg)
	}
	return Result{result, true}
}
|
||||
|
||||
// GeneratePack creates Go source code that embeds the scanned assets.
//
// The generated file declares an init() calling core.AddAsset for every
// file under the package's registered group directories, then for each
// individually referenced asset. Payloads are gzip-compressed and
// base64-encoded via compressFile. Files already emitted as part of a
// group are skipped in the individual pass.
//
// Returns Result{source string, true}, or the first read/compress/path
// error encountered.
func GeneratePack(pkg ScannedPackage) Result {
	b := NewBuilder()

	b.WriteString(fmt.Sprintf("package %s\n\n", pkg.PackageName))
	b.WriteString("// Code generated by core pack. DO NOT EDIT.\n\n")

	if len(pkg.Assets) == 0 && len(pkg.Groups) == 0 {
		// Nothing to embed: emit just the package header.
		return Result{b.String(), true}
	}

	b.WriteString("import \"dappco.re/go/core\"\n\n")
	b.WriteString("func init() {\n")

	// Pack groups (entire directories)
	packed := make(map[string]bool) // absolute file path → already emitted
	for _, groupPath := range pkg.Groups {
		files, err := getAllFiles(groupPath)
		if err != nil {
			return Result{err, false}
		}
		for _, file := range files {
			if packed[file] {
				continue
			}
			data, err := compressFile(file)
			if err != nil {
				return Result{err, false}
			}
			// Asset name is the path relative to its group directory;
			// the group label is the directory relative to the package.
			localPath := TrimPrefix(file, groupPath+"/")
			relGroup, err := filepath.Rel(pkg.BaseDirectory, groupPath)
			if err != nil {
				return Result{err, false}
			}
			b.WriteString(fmt.Sprintf("\tcore.AddAsset(%q, %q, %q)\n", relGroup, localPath, data))
			packed[file] = true
		}
	}

	// Pack individual assets
	for _, asset := range pkg.Assets {
		if packed[asset.FullPath] {
			continue
		}
		data, err := compressFile(asset.FullPath)
		if err != nil {
			return Result{err, false}
		}
		b.WriteString(fmt.Sprintf("\tcore.AddAsset(%q, %q, %q)\n", asset.Group, asset.Name, data))
		packed[asset.FullPath] = true
	}

	b.WriteString("}\n")
	return Result{b.String(), true}
}
|
||||
|
||||
// --- Compression ---
|
||||
|
||||
func compressFile(path string) (string, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return compress(string(data))
|
||||
}
|
||||
|
||||
func compress(input string) (string, error) {
|
||||
var buf bytes.Buffer
|
||||
b64 := base64.NewEncoder(base64.StdEncoding, &buf)
|
||||
gz, err := gzip.NewWriterLevel(b64, gzip.BestCompression)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if _, err := gz.Write([]byte(input)); err != nil {
|
||||
_ = gz.Close()
|
||||
_ = b64.Close()
|
||||
return "", err
|
||||
}
|
||||
if err := gz.Close(); err != nil {
|
||||
_ = b64.Close()
|
||||
return "", err
|
||||
}
|
||||
if err := b64.Close(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
func decompress(input string) (string, error) {
|
||||
b64 := base64.NewDecoder(base64.StdEncoding, NewReader(input))
|
||||
gz, err := gzip.NewReader(b64)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
data, err := io.ReadAll(gz)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := gz.Close(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(data), nil
|
||||
}
|
||||
|
||||
func getAllFiles(dir string) ([]string, error) {
|
||||
var result []string
|
||||
err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !d.IsDir() {
|
||||
result = append(result, path)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return result, err
|
||||
}
|
||||
|
||||
// --- Embed: Scoped Filesystem Mount ---

// Embed wraps an fs.FS with a basedir for scoped access.
// All paths are relative to basedir and are rejected by path() when
// they would escape it via ".." segments.
type Embed struct {
	basedir string    // anchor directory inside fsys
	fsys    fs.FS     // backing filesystem
	embedFS *embed.FS // original embed.FS for type-safe access via EmbedFS(); nil for non-embed sources
}
|
||||
|
||||
// Mount creates a scoped view of an fs.FS anchored at basedir.
|
||||
//
|
||||
// r := core.Mount(myFS, "lib/prompts")
|
||||
// if r.OK { emb := r.Value.(*Embed) }
|
||||
func Mount(fsys fs.FS, basedir string) Result {
|
||||
s := &Embed{fsys: fsys, basedir: basedir}
|
||||
|
||||
if efs, ok := fsys.(embed.FS); ok {
|
||||
s.embedFS = &efs
|
||||
}
|
||||
|
||||
if r := s.ReadDir("."); !r.OK {
|
||||
return r
|
||||
}
|
||||
return Result{s, true}
|
||||
}
|
||||
|
||||
// MountEmbed creates a scoped view of an embed.FS.
// Convenience wrapper around Mount that fixes the fs.FS argument type.
//
//	r := core.MountEmbed(myFS, "testdata")
func MountEmbed(efs embed.FS, basedir string) Result {
	return Mount(efs, basedir)
}
|
||||
|
||||
func (s *Embed) path(name string) Result {
|
||||
joined := filepath.ToSlash(filepath.Join(s.basedir, name))
|
||||
if HasPrefix(joined, "..") || Contains(joined, "/../") || HasSuffix(joined, "/..") {
|
||||
return Result{E("embed.path", Concat("path traversal rejected: ", name), nil), false}
|
||||
}
|
||||
return Result{joined, true}
|
||||
}
|
||||
|
||||
// Open opens the named file for reading.
|
||||
//
|
||||
// r := emb.Open("test.txt")
|
||||
// if r.OK { file := r.Value.(fs.File) }
|
||||
func (s *Embed) Open(name string) Result {
|
||||
r := s.path(name)
|
||||
if !r.OK {
|
||||
return r
|
||||
}
|
||||
f, err := s.fsys.Open(r.Value.(string))
|
||||
if err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
return Result{f, true}
|
||||
}
|
||||
|
||||
// ReadDir reads the named directory.
|
||||
func (s *Embed) ReadDir(name string) Result {
|
||||
r := s.path(name)
|
||||
if !r.OK {
|
||||
return r
|
||||
}
|
||||
return Result{}.New(fs.ReadDir(s.fsys, r.Value.(string)))
|
||||
}
|
||||
|
||||
// ReadFile reads the named file.
|
||||
//
|
||||
// r := emb.ReadFile("test.txt")
|
||||
// if r.OK { data := r.Value.([]byte) }
|
||||
func (s *Embed) ReadFile(name string) Result {
|
||||
r := s.path(name)
|
||||
if !r.OK {
|
||||
return r
|
||||
}
|
||||
data, err := fs.ReadFile(s.fsys, r.Value.(string))
|
||||
if err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
return Result{data, true}
|
||||
}
|
||||
|
||||
// ReadString reads the named file as a string.
|
||||
//
|
||||
// r := emb.ReadString("test.txt")
|
||||
// if r.OK { content := r.Value.(string) }
|
||||
func (s *Embed) ReadString(name string) Result {
|
||||
r := s.ReadFile(name)
|
||||
if !r.OK {
|
||||
return r
|
||||
}
|
||||
return Result{string(r.Value.([]byte)), true}
|
||||
}
|
||||
|
||||
// Sub returns a new Embed anchored at a subdirectory within this mount.
|
||||
//
|
||||
// r := emb.Sub("testdata")
|
||||
// if r.OK { sub := r.Value.(*Embed) }
|
||||
func (s *Embed) Sub(subDir string) Result {
|
||||
r := s.path(subDir)
|
||||
if !r.OK {
|
||||
return r
|
||||
}
|
||||
sub, err := fs.Sub(s.fsys, r.Value.(string))
|
||||
if err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
return Result{&Embed{fsys: sub, basedir: "."}, true}
|
||||
}
|
||||
|
||||
// FS returns the underlying fs.FS backing this mount.
func (s *Embed) FS() fs.FS {
	return s.fsys
}

// EmbedFS returns the underlying embed.FS if mounted from one.
// Returns the zero embed.FS (an empty filesystem) if this mount was
// created from a non-embed source.
func (s *Embed) EmbedFS() embed.FS {
	if s.embedFS != nil {
		return *s.embedFS
	}
	return embed.FS{}
}

// BaseDirectory returns the base directory this Embed is anchored at.
func (s *Embed) BaseDirectory() string {
	return s.basedir
}
|
||||
|
||||
// --- Template Extraction ---

// ExtractOptions configures template extraction (see Extract).
// The zero value is usable; Extract fills in defaults for nil fields.
type ExtractOptions struct {
	// TemplateFilters identifies template files by substring match
	// against the base filename. Default: [".tmpl"].
	TemplateFilters []string

	// IgnoreFiles is a set of base filenames to skip during extraction.
	IgnoreFiles map[string]struct{}

	// RenameFiles maps original (filter-stripped) filenames to new names.
	RenameFiles map[string]string
}
|
||||
|
||||
// Extract copies a template directory from an fs.FS to targetDir,
|
||||
// processing Go text/template in filenames and file contents.
|
||||
//
|
||||
// Files containing a template filter substring (default: ".tmpl") have
|
||||
// their contents processed through text/template with the given data.
|
||||
// The filter is stripped from the output filename.
|
||||
//
|
||||
// Directory and file names can contain Go template expressions:
|
||||
// {{.Name}}/main.go → myproject/main.go
|
||||
//
|
||||
// Data can be any struct or map[string]string for template substitution.
|
||||
func Extract(fsys fs.FS, targetDir string, data any, opts ...ExtractOptions) Result {
|
||||
opt := ExtractOptions{
|
||||
TemplateFilters: []string{".tmpl"},
|
||||
IgnoreFiles: make(map[string]struct{}),
|
||||
RenameFiles: make(map[string]string),
|
||||
}
|
||||
if len(opts) > 0 {
|
||||
if len(opts[0].TemplateFilters) > 0 {
|
||||
opt.TemplateFilters = opts[0].TemplateFilters
|
||||
}
|
||||
if opts[0].IgnoreFiles != nil {
|
||||
opt.IgnoreFiles = opts[0].IgnoreFiles
|
||||
}
|
||||
if opts[0].RenameFiles != nil {
|
||||
opt.RenameFiles = opts[0].RenameFiles
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure target directory exists
|
||||
targetDir, err := filepath.Abs(targetDir)
|
||||
if err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
if err := os.MkdirAll(targetDir, 0755); err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
|
||||
// Categorise files
|
||||
var dirs []string
|
||||
var templateFiles []string
|
||||
var standardFiles []string
|
||||
|
||||
err = fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if path == "." {
|
||||
return nil
|
||||
}
|
||||
if d.IsDir() {
|
||||
dirs = append(dirs, path)
|
||||
return nil
|
||||
}
|
||||
filename := filepath.Base(path)
|
||||
if _, ignored := opt.IgnoreFiles[filename]; ignored {
|
||||
return nil
|
||||
}
|
||||
if isTemplate(filename, opt.TemplateFilters) {
|
||||
templateFiles = append(templateFiles, path)
|
||||
} else {
|
||||
standardFiles = append(standardFiles, path)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
|
||||
// safePath ensures a rendered path stays under targetDir.
|
||||
safePath := func(rendered string) (string, error) {
|
||||
abs, err := filepath.Abs(rendered)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !HasPrefix(abs, targetDir+string(filepath.Separator)) && abs != targetDir {
|
||||
return "", E("embed.Extract", Concat("path escapes target: ", abs), nil)
|
||||
}
|
||||
return abs, nil
|
||||
}
|
||||
|
||||
// Create directories (names may contain templates)
|
||||
for _, dir := range dirs {
|
||||
target, err := safePath(renderPath(filepath.Join(targetDir, dir), data))
|
||||
if err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
if err := os.MkdirAll(target, 0755); err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
}
|
||||
|
||||
// Process template files
|
||||
for _, path := range templateFiles {
|
||||
tmpl, err := template.ParseFS(fsys, path)
|
||||
if err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
|
||||
targetFile := renderPath(filepath.Join(targetDir, path), data)
|
||||
|
||||
// Strip template filters from filename
|
||||
dir := filepath.Dir(targetFile)
|
||||
name := filepath.Base(targetFile)
|
||||
for _, filter := range opt.TemplateFilters {
|
||||
name = Replace(name, filter, "")
|
||||
}
|
||||
if renamed := opt.RenameFiles[name]; renamed != "" {
|
||||
name = renamed
|
||||
}
|
||||
targetFile, err = safePath(filepath.Join(dir, name))
|
||||
if err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
|
||||
f, err := os.Create(targetFile)
|
||||
if err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
if err := tmpl.Execute(f, data); err != nil {
|
||||
f.Close()
|
||||
return Result{err, false}
|
||||
}
|
||||
f.Close()
|
||||
}
|
||||
|
||||
// Copy standard files
|
||||
for _, path := range standardFiles {
|
||||
targetPath := path
|
||||
name := filepath.Base(path)
|
||||
if renamed := opt.RenameFiles[name]; renamed != "" {
|
||||
targetPath = filepath.Join(filepath.Dir(path), renamed)
|
||||
}
|
||||
target, err := safePath(renderPath(filepath.Join(targetDir, targetPath), data))
|
||||
if err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
if err := copyFile(fsys, path, target); err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
}
|
||||
|
||||
return Result{OK: true}
|
||||
}
|
||||
|
||||
func isTemplate(filename string, filters []string) bool {
|
||||
for _, f := range filters {
|
||||
if Contains(filename, f) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// renderPath runs path through text/template with data.
// Any failure — nil data, a parse error, or an execute error — returns
// the input path unchanged, so plain paths always pass through.
func renderPath(path string, data any) string {
	if data == nil {
		return path
	}
	parsed, err := template.New("path").Parse(path)
	if err != nil {
		return path
	}
	var rendered bytes.Buffer
	if execErr := parsed.Execute(&rendered, data); execErr != nil {
		return path
	}
	return rendered.String()
}
|
||||
|
||||
func copyFile(fsys fs.FS, source, target string) error {
|
||||
s, err := fsys.Open(source)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer s.Close()
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d, err := os.Create(target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer d.Close()
|
||||
|
||||
_, err = io.Copy(d, s)
|
||||
return err
|
||||
}
|
||||
395
.core/reference/error.go
Normal file
395
.core/reference/error.go
Normal file
|
|
@ -0,0 +1,395 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Structured errors, crash recovery, and reporting for the Core framework.
|
||||
// Provides E() for error creation, Wrap()/WrapCode() for chaining,
|
||||
// and Err for panic recovery and crash reporting.
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"iter"
|
||||
"maps"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ErrorSink is the shared interface for error reporting.
// Implemented by ErrorLog (structured logging) and ErrorPanic (panic recovery).
// keyvals are alternating key/value pairs, slog-style.
type ErrorSink interface {
	Error(msg string, keyvals ...any)
	Warn(msg string, keyvals ...any)
}

// Compile-time check that *Log satisfies ErrorSink.
var _ ErrorSink = (*Log)(nil)
|
||||
|
||||
// Err represents a structured error with operational context.
// It implements the error interface and supports unwrapping via Cause.
type Err struct {
	Operation string // Operation being performed (e.g., "user.Save")
	Message   string // Human-readable message
	Cause     error  // Underlying error (optional)
	Code      string // Error code (optional, e.g., "VALIDATION_FAILED")
}

// Error implements the error interface. Rendered forms:
//
//	"op: msg [CODE]: cause"  — all fields set
//	"op: msg: cause"         — no code
//	"op: msg [CODE]"         — no cause
//	"op: msg"                — message only
//
// The "op: " prefix is omitted when Operation is empty.
func (e *Err) Error() string {
	var prefix string
	if e.Operation != "" {
		prefix = e.Operation + ": "
	}
	if e.Cause != nil {
		if e.Code != "" {
			return Concat(prefix, e.Message, " [", e.Code, "]: ", e.Cause.Error())
		}
		return Concat(prefix, e.Message, ": ", e.Cause.Error())
	}
	if e.Code != "" {
		return Concat(prefix, e.Message, " [", e.Code, "]")
	}
	return Concat(prefix, e.Message)
}

// Unwrap returns the underlying error for use with errors.Is and errors.As.
func (e *Err) Unwrap() error {
	return e.Cause
}
|
||||
|
||||
// --- Error Creation Functions ---

// E creates a new Err with operation context.
// The underlying error can be nil for creating errors without a cause.
//
// Example:
//
//	return core.E("user.Save", "failed to save user", err)
//	return core.E("api.Call", "rate limited", nil) // No underlying cause
func E(op, msg string, err error) error {
	return &Err{Operation: op, Message: msg, Cause: err}
}
|
||||
|
||||
// Wrap wraps an error with operation context.
|
||||
// Returns nil if err is nil, to support conditional wrapping.
|
||||
// Preserves error Code if the wrapped error is an *Err.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// return log.Wrap(err, "db.Query", "database query failed")
|
||||
func Wrap(err error, op, msg string) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
// Preserve Code from wrapped *Err
|
||||
var logErr *Err
|
||||
if As(err, &logErr) && logErr.Code != "" {
|
||||
return &Err{Operation: op, Message: msg, Cause: err, Code: logErr.Code}
|
||||
}
|
||||
return &Err{Operation: op, Message: msg, Cause: err}
|
||||
}
|
||||
|
||||
// WrapCode wraps an error with operation context and error code.
|
||||
// Returns nil only if both err is nil AND code is empty.
|
||||
// Useful for API errors that need machine-readable codes.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// return log.WrapCode(err, "VALIDATION_ERROR", "user.Validate", "invalid email")
|
||||
func WrapCode(err error, code, op, msg string) error {
|
||||
if err == nil && code == "" {
|
||||
return nil
|
||||
}
|
||||
return &Err{Operation: op, Message: msg, Cause: err, Code: code}
|
||||
}
|
||||
|
||||
// NewCode creates an error with just code and message (no underlying error).
|
||||
// Useful for creating sentinel errors with codes.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var ErrNotFound = log.NewCode("NOT_FOUND", "resource not found")
|
||||
func NewCode(code, msg string) error {
|
||||
return &Err{Message: msg, Code: code}
|
||||
}
|
||||
|
||||
// --- Standard Library Wrappers ---

// Is reports whether any error in err's tree matches target.
// Thin wrapper around errors.Is for convenience.
func Is(err, target error) bool {
	return errors.Is(err, target)
}

// As finds the first error in err's tree that matches target.
// Thin wrapper around errors.As for convenience.
func As(err error, target any) bool {
	return errors.As(err, target)
}

// NewError creates a simple error with the given text.
// Thin wrapper around errors.New for convenience.
func NewError(text string) error {
	return errors.New(text)
}

// ErrorJoin combines multiple errors into one.
// Thin wrapper around errors.Join for convenience.
//
//	core.ErrorJoin(err1, err2, err3)
func ErrorJoin(errs ...error) error {
	return errors.Join(errs...)
}
|
||||
|
||||
// --- Error Introspection Helpers ---

// Operation extracts the operation name from an error.
// Returns empty string if the error is not an *Err.
func Operation(err error) string {
	var e *Err
	if As(err, &e) {
		return e.Operation
	}
	return ""
}

// ErrorCode extracts the error code from an error.
// Returns empty string if the error is not an *Err or has no code.
func ErrorCode(err error) string {
	var e *Err
	if As(err, &e) {
		return e.Code
	}
	return ""
}

// ErrorMessage extracts the message from an error.
// Returns the error's Error() string if not an *Err, and "" for nil.
func ErrorMessage(err error) string {
	if err == nil {
		return ""
	}
	var e *Err
	if As(err, &e) {
		return e.Message
	}
	return err.Error()
}
|
||||
|
||||
// Root returns the root cause of an error chain.
|
||||
// Unwraps until no more wrapped errors are found.
|
||||
func Root(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
for {
|
||||
unwrapped := errors.Unwrap(err)
|
||||
if unwrapped == nil {
|
||||
return err
|
||||
}
|
||||
err = unwrapped
|
||||
}
|
||||
}
|
||||
|
||||
// AllOperations returns an iterator over all operational contexts in the error chain.
|
||||
// It traverses the error tree using errors.Unwrap.
|
||||
func AllOperations(err error) iter.Seq[string] {
|
||||
return func(yield func(string) bool) {
|
||||
for err != nil {
|
||||
if e, ok := err.(*Err); ok {
|
||||
if e.Operation != "" {
|
||||
if !yield(e.Operation) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
err = errors.Unwrap(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// StackTrace returns the logical stack trace (chain of operations) from an error.
|
||||
// It returns an empty slice if no operational context is found.
|
||||
func StackTrace(err error) []string {
|
||||
var stack []string
|
||||
for op := range AllOperations(err) {
|
||||
stack = append(stack, op)
|
||||
}
|
||||
return stack
|
||||
}
|
||||
|
||||
// FormatStackTrace returns a pretty-printed logical stack trace.
|
||||
func FormatStackTrace(err error) string {
|
||||
var ops []string
|
||||
for op := range AllOperations(err) {
|
||||
ops = append(ops, op)
|
||||
}
|
||||
if len(ops) == 0 {
|
||||
return ""
|
||||
}
|
||||
return Join(" -> ", ops...)
|
||||
}
|
||||
|
||||
// --- ErrorLog: Log-and-Return Error Helpers ---

// ErrorLog combines error creation with logging.
// Primary action: return an error. Secondary: log it.
type ErrorLog struct {
	log *Log // destination logger; nil falls back to Default()
}

// logger returns the configured *Log, or the package default when none
// was set — so ErrorLog's zero value is usable.
func (el *ErrorLog) logger() *Log {
	if el.log != nil {
		return el.log
	}
	return Default()
}
|
||||
|
||||
// Error logs at Error level and returns a Result with the wrapped error.
|
||||
func (el *ErrorLog) Error(err error, op, msg string) Result {
|
||||
if err == nil {
|
||||
return Result{OK: true}
|
||||
}
|
||||
wrapped := Wrap(err, op, msg)
|
||||
el.logger().Error(msg, "op", op, "err", err)
|
||||
return Result{wrapped, false}
|
||||
}
|
||||
|
||||
// Warn logs at Warn level and returns a Result with the wrapped error.
|
||||
func (el *ErrorLog) Warn(err error, op, msg string) Result {
|
||||
if err == nil {
|
||||
return Result{OK: true}
|
||||
}
|
||||
wrapped := Wrap(err, op, msg)
|
||||
el.logger().Warn(msg, "op", op, "err", err)
|
||||
return Result{wrapped, false}
|
||||
}
|
||||
|
||||
// Must logs and panics if err is not nil.
|
||||
func (el *ErrorLog) Must(err error, op, msg string) {
|
||||
if err != nil {
|
||||
el.logger().Error(msg, "op", op, "err", err)
|
||||
panic(Wrap(err, op, msg))
|
||||
}
|
||||
}
|
||||
|
||||
// --- Crash Recovery & Reporting ---
|
||||
|
||||
// CrashReport represents a single crash event.
type CrashReport struct {
	// Timestamp records when the panic was recovered.
	Timestamp time.Time `json:"timestamp"`
	// Error is the recovered panic value rendered via Error().
	Error string `json:"error"`
	// Stack is the goroutine stack captured by debug.Stack().
	Stack string `json:"stack"`
	// System snapshots OS/arch/runtime version at crash time.
	System CrashSystem `json:"system,omitempty"`
	// Meta carries caller-supplied key/value context (cloned at capture).
	Meta map[string]string `json:"meta,omitempty"`
}
|
||||
|
||||
// CrashSystem holds system information at crash time.
// NOTE(review): the JSON tag naming is inconsistent ("operatingsystem"
// vs "go_version"); left unchanged so previously persisted crash files
// stay readable — confirm before normalising.
type CrashSystem struct {
	OperatingSystem string `json:"operatingsystem"`
	Architecture    string `json:"architecture"`
	// Version is the Go runtime version (runtime.Version()).
	Version string `json:"go_version"`
}
|
||||
|
||||
// ErrorPanic manages panic recovery and crash reporting.
type ErrorPanic struct {
	// filePath is the JSON crash-report file; empty disables persistence.
	filePath string
	// meta is attached (cloned) to every generated report.
	meta map[string]string
	// onCrash, when set, is invoked synchronously with each report.
	onCrash func(CrashReport)
}
|
||||
|
||||
// Recover captures a panic and creates a crash report.
|
||||
// Use as: defer c.Error().Recover()
|
||||
func (h *ErrorPanic) Recover() {
|
||||
if h == nil {
|
||||
return
|
||||
}
|
||||
r := recover()
|
||||
if r == nil {
|
||||
return
|
||||
}
|
||||
|
||||
err, ok := r.(error)
|
||||
if !ok {
|
||||
err = NewError(Sprint("panic: ", r))
|
||||
}
|
||||
|
||||
report := CrashReport{
|
||||
Timestamp: time.Now(),
|
||||
Error: err.Error(),
|
||||
Stack: string(debug.Stack()),
|
||||
System: CrashSystem{
|
||||
OperatingSystem: runtime.GOOS,
|
||||
Architecture: runtime.GOARCH,
|
||||
Version: runtime.Version(),
|
||||
},
|
||||
Meta: maps.Clone(h.meta),
|
||||
}
|
||||
|
||||
if h.onCrash != nil {
|
||||
h.onCrash(report)
|
||||
}
|
||||
|
||||
if h.filePath != "" {
|
||||
h.appendReport(report)
|
||||
}
|
||||
}
|
||||
|
||||
// SafeGo runs a function in a goroutine with panic recovery.
|
||||
func (h *ErrorPanic) SafeGo(fn func()) {
|
||||
go func() {
|
||||
defer h.Recover()
|
||||
fn()
|
||||
}()
|
||||
}
|
||||
|
||||
// Reports returns the last n crash reports from the file.
|
||||
func (h *ErrorPanic) Reports(n int) Result {
|
||||
if h.filePath == "" {
|
||||
return Result{}
|
||||
}
|
||||
crashMu.Lock()
|
||||
defer crashMu.Unlock()
|
||||
data, err := os.ReadFile(h.filePath)
|
||||
if err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
var reports []CrashReport
|
||||
if err := json.Unmarshal(data, &reports); err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
if n <= 0 || len(reports) <= n {
|
||||
return Result{reports, true}
|
||||
}
|
||||
return Result{reports[len(reports)-n:], true}
|
||||
}
|
||||
|
||||
// crashMu serialises all reads and writes of the crash-report file
// across ErrorPanic instances (used by Reports and appendReport).
var crashMu sync.Mutex
|
||||
|
||||
func (h *ErrorPanic) appendReport(report CrashReport) {
|
||||
crashMu.Lock()
|
||||
defer crashMu.Unlock()
|
||||
|
||||
var reports []CrashReport
|
||||
if data, err := os.ReadFile(h.filePath); err == nil {
|
||||
if err := json.Unmarshal(data, &reports); err != nil {
|
||||
reports = nil
|
||||
}
|
||||
}
|
||||
|
||||
reports = append(reports, report)
|
||||
data, err := json.MarshalIndent(reports, "", " ")
|
||||
if err != nil {
|
||||
Default().Error(Concat("crash report marshal failed: ", err.Error()))
|
||||
return
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Dir(h.filePath), 0755); err != nil {
|
||||
Default().Error(Concat("crash report dir failed: ", err.Error()))
|
||||
return
|
||||
}
|
||||
if err := os.WriteFile(h.filePath, data, 0600); err != nil {
|
||||
Default().Error(Concat("crash report write failed: ", err.Error()))
|
||||
}
|
||||
}
|
||||
425
.core/reference/fs.go
Normal file
425
.core/reference/fs.go
Normal file
|
|
@ -0,0 +1,425 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Sandboxed local filesystem I/O for the Core framework.
|
||||
package core
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Fs is a sandboxed local filesystem backend.
|
||||
type Fs struct {
|
||||
root string
|
||||
}
|
||||
|
||||
// New initialises an Fs with the given root directory.
|
||||
// Root "/" means unrestricted access. Empty root defaults to "/".
|
||||
//
|
||||
// fs := (&core.Fs{}).New("/")
|
||||
func (m *Fs) New(root string) *Fs {
|
||||
if root == "" {
|
||||
root = "/"
|
||||
}
|
||||
m.root = root
|
||||
return m
|
||||
}
|
||||
|
||||
// NewUnrestricted returns a new Fs with root "/", granting full filesystem access.
|
||||
// Use this instead of unsafe.Pointer to bypass the sandbox.
|
||||
//
|
||||
// fs := c.Fs().NewUnrestricted()
|
||||
// fs.Read("/etc/hostname") // works — no sandbox
|
||||
func (m *Fs) NewUnrestricted() *Fs {
|
||||
return (&Fs{}).New("/")
|
||||
}
|
||||
|
||||
// Root returns the sandbox root path.
|
||||
//
|
||||
// root := c.Fs().Root() // e.g. "/home/agent/.core"
|
||||
func (m *Fs) Root() string {
|
||||
if m.root == "" {
|
||||
return "/"
|
||||
}
|
||||
return m.root
|
||||
}
|
||||
|
||||
// path sanitises p and returns the absolute on-disk path for it.
// Behaviour:
//   - empty p resolves to the root itself;
//   - with an unrestricted root ("/"), relative paths resolve against
//     the current working directory, like the standard os package;
//   - otherwise p is cleaned as an absolute path and re-anchored under
//     the sandbox root, so "." and ".." sequences cannot escape it.
//
// Symlinks are NOT resolved here — see validatePath for that.
// Empty root defaults to "/" — the zero value of Fs is usable.
func (m *Fs) path(p string) string {
	root := m.root
	if root == "" {
		root = "/"
	}
	if p == "" {
		return root
	}

	// If the path is relative and the medium is rooted at "/",
	// treat it as relative to the current working directory.
	// This makes io.Local behave more like the standard 'os' package.
	if root == "/" && !filepath.IsAbs(p) {
		// Getwd error deliberately ignored: an empty cwd degrades to "/"+p.
		cwd, _ := os.Getwd()
		return filepath.Join(cwd, p)
	}

	// Use filepath.Clean with a leading slash to resolve all .. and . internally
	// before joining with the root. This is a standard way to sandbox paths.
	clean := filepath.Clean("/" + p)

	// If root is "/", allow absolute paths through
	if root == "/" {
		return clean
	}

	// Strip leading "/" so Join works correctly with root
	return filepath.Join(root, clean[1:])
}
|
||||
|
||||
// validatePath ensures p stays inside the sandbox, resolving symlinks
// component by component. With an unrestricted root ("/") it short-
// circuits to the plain path() result. On success the Result carries
// the fully-resolved absolute path as a string; on escape it logs a
// SECURITY event to stderr and fails.
func (m *Fs) validatePath(p string) Result {
	root := m.root
	if root == "" {
		root = "/"
	}
	if root == "/" {
		// Unrestricted: no symlink walk needed.
		return Result{m.path(p), true}
	}

	// Split the cleaned path into components
	parts := Split(filepath.Clean("/"+p), string(os.PathSeparator))
	current := root

	for _, part := range parts {
		if part == "" {
			continue
		}

		next := filepath.Join(current, part)
		realNext, err := filepath.EvalSymlinks(next)
		if err != nil {
			if os.IsNotExist(err) {
				// Part doesn't exist, we can't follow symlinks anymore.
				// Since the path is already Cleaned and current is safe,
				// appending a component to current will not escape.
				current = next
				continue
			}
			return Result{err, false}
		}

		// Verify the resolved part is still within the root
		rel, err := filepath.Rel(root, realNext)
		if err != nil || HasPrefix(rel, "..") {
			// Security event: sandbox escape attempt
			username := "unknown"
			if u, err := user.Current(); err == nil {
				username = u.Username
			}
			Print(os.Stderr, "[%s] SECURITY sandbox escape detected root=%s path=%s attempted=%s user=%s",
				time.Now().Format(time.RFC3339), root, p, realNext, username)
			// Rel succeeded but escaped: synthesise an error for the caller.
			if err == nil {
				err = E("fs.validatePath", Concat("sandbox escape: ", p, " resolves outside ", m.root), nil)
			}
			return Result{err, false}
		}
		current = realNext
	}

	return Result{current, true}
}
|
||||
|
||||
// Read returns file contents as string.
|
||||
func (m *Fs) Read(p string) Result {
|
||||
vp := m.validatePath(p)
|
||||
if !vp.OK {
|
||||
return vp
|
||||
}
|
||||
data, err := os.ReadFile(vp.Value.(string))
|
||||
if err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
return Result{string(data), true}
|
||||
}
|
||||
|
||||
// Write saves content to file, creating parent directories as needed.
// It is shorthand for WriteMode(p, content, 0644). For sensitive files
// (keys, secrets), use WriteMode with 0600.
func (m *Fs) Write(p, content string) Result {
	return m.WriteMode(p, content, 0644)
}
|
||||
|
||||
// WriteMode saves content to file with explicit permissions.
|
||||
// Use 0600 for sensitive files (encryption output, private keys, auth hashes).
|
||||
func (m *Fs) WriteMode(p, content string, mode os.FileMode) Result {
|
||||
vp := m.validatePath(p)
|
||||
if !vp.OK {
|
||||
return vp
|
||||
}
|
||||
full := vp.Value.(string)
|
||||
if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
if err := os.WriteFile(full, []byte(content), mode); err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
return Result{OK: true}
|
||||
}
|
||||
|
||||
// TempDir creates a temporary directory and returns its path.
|
||||
// The caller is responsible for cleanup via fs.DeleteAll().
|
||||
//
|
||||
// dir := fs.TempDir("agent-workspace")
|
||||
// defer fs.DeleteAll(dir)
|
||||
func (m *Fs) TempDir(prefix string) string {
|
||||
dir, err := os.MkdirTemp("", prefix)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return dir
|
||||
}
|
||||
|
||||
// DirFS returns an fs.FS rooted at the given directory path.
// Thin wrapper over os.DirFS; note it performs no sandbox validation.
//
//	fsys := core.DirFS("/path/to/templates")
func DirFS(dir string) fs.FS {
	return os.DirFS(dir)
}
|
||||
|
||||
// WriteAtomic writes content by writing to a temp file then renaming.
|
||||
// Rename is atomic on POSIX — concurrent readers never see a partial file.
|
||||
// Use this for status files, config, or any file read from multiple goroutines.
|
||||
//
|
||||
// r := fs.WriteAtomic("/status.json", jsonData)
|
||||
func (m *Fs) WriteAtomic(p, content string) Result {
|
||||
vp := m.validatePath(p)
|
||||
if !vp.OK {
|
||||
return vp
|
||||
}
|
||||
full := vp.Value.(string)
|
||||
if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
|
||||
tmp := full + ".tmp." + shortRand()
|
||||
if err := os.WriteFile(tmp, []byte(content), 0644); err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
if err := os.Rename(tmp, full); err != nil {
|
||||
os.Remove(tmp)
|
||||
return Result{err, false}
|
||||
}
|
||||
return Result{OK: true}
|
||||
}
|
||||
|
||||
// EnsureDir creates directory if it doesn't exist.
|
||||
func (m *Fs) EnsureDir(p string) Result {
|
||||
vp := m.validatePath(p)
|
||||
if !vp.OK {
|
||||
return vp
|
||||
}
|
||||
if err := os.MkdirAll(vp.Value.(string), 0755); err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
return Result{OK: true}
|
||||
}
|
||||
|
||||
// IsDir returns true if path is a directory.
|
||||
func (m *Fs) IsDir(p string) bool {
|
||||
if p == "" {
|
||||
return false
|
||||
}
|
||||
vp := m.validatePath(p)
|
||||
if !vp.OK {
|
||||
return false
|
||||
}
|
||||
info, err := os.Stat(vp.Value.(string))
|
||||
return err == nil && info.IsDir()
|
||||
}
|
||||
|
||||
// IsFile returns true if path is a regular file.
|
||||
func (m *Fs) IsFile(p string) bool {
|
||||
if p == "" {
|
||||
return false
|
||||
}
|
||||
vp := m.validatePath(p)
|
||||
if !vp.OK {
|
||||
return false
|
||||
}
|
||||
info, err := os.Stat(vp.Value.(string))
|
||||
return err == nil && info.Mode().IsRegular()
|
||||
}
|
||||
|
||||
// Exists returns true if path exists.
|
||||
func (m *Fs) Exists(p string) bool {
|
||||
vp := m.validatePath(p)
|
||||
if !vp.OK {
|
||||
return false
|
||||
}
|
||||
_, err := os.Stat(vp.Value.(string))
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// List returns directory entries.
|
||||
func (m *Fs) List(p string) Result {
|
||||
vp := m.validatePath(p)
|
||||
if !vp.OK {
|
||||
return vp
|
||||
}
|
||||
return Result{}.New(os.ReadDir(vp.Value.(string)))
|
||||
}
|
||||
|
||||
// Stat returns file info.
|
||||
func (m *Fs) Stat(p string) Result {
|
||||
vp := m.validatePath(p)
|
||||
if !vp.OK {
|
||||
return vp
|
||||
}
|
||||
return Result{}.New(os.Stat(vp.Value.(string)))
|
||||
}
|
||||
|
||||
// Open opens the named file for reading.
|
||||
func (m *Fs) Open(p string) Result {
|
||||
vp := m.validatePath(p)
|
||||
if !vp.OK {
|
||||
return vp
|
||||
}
|
||||
return Result{}.New(os.Open(vp.Value.(string)))
|
||||
}
|
||||
|
||||
// Create creates or truncates the named file.
|
||||
func (m *Fs) Create(p string) Result {
|
||||
vp := m.validatePath(p)
|
||||
if !vp.OK {
|
||||
return vp
|
||||
}
|
||||
full := vp.Value.(string)
|
||||
if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
return Result{}.New(os.Create(full))
|
||||
}
|
||||
|
||||
// Append opens the named file for appending, creating it if it doesn't exist.
|
||||
func (m *Fs) Append(p string) Result {
|
||||
vp := m.validatePath(p)
|
||||
if !vp.OK {
|
||||
return vp
|
||||
}
|
||||
full := vp.Value.(string)
|
||||
if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
return Result{}.New(os.OpenFile(full, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644))
|
||||
}
|
||||
|
||||
// ReadStream returns a reader for the file content.
// It simply delegates to Open.
func (m *Fs) ReadStream(path string) Result {
	return m.Open(path)
}
|
||||
|
||||
// WriteStream returns a writer for the file content.
// It simply delegates to Create (truncating any existing file).
func (m *Fs) WriteStream(path string) Result {
	return m.Create(path)
}
|
||||
|
||||
// ReadAll reads all bytes from a ReadCloser and closes it.
|
||||
// Wraps io.ReadAll so consumers don't import "io".
|
||||
//
|
||||
// r := fs.ReadStream(path)
|
||||
// data := core.ReadAll(r.Value)
|
||||
func ReadAll(reader any) Result {
|
||||
rc, ok := reader.(io.Reader)
|
||||
if !ok {
|
||||
return Result{E("core.ReadAll", "not a reader", nil), false}
|
||||
}
|
||||
data, err := io.ReadAll(rc)
|
||||
if closer, ok := reader.(io.Closer); ok {
|
||||
closer.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
return Result{string(data), true}
|
||||
}
|
||||
|
||||
// WriteAll writes content to a writer and closes it if it implements Closer.
|
||||
//
|
||||
// r := fs.WriteStream(path)
|
||||
// core.WriteAll(r.Value, "content")
|
||||
func WriteAll(writer any, content string) Result {
|
||||
wc, ok := writer.(io.Writer)
|
||||
if !ok {
|
||||
return Result{E("core.WriteAll", "not a writer", nil), false}
|
||||
}
|
||||
_, err := wc.Write([]byte(content))
|
||||
if closer, ok := writer.(io.Closer); ok {
|
||||
closer.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
return Result{OK: true}
|
||||
}
|
||||
|
||||
// CloseStream closes any value that implements io.Closer.
|
||||
//
|
||||
// core.CloseStream(r.Value)
|
||||
func CloseStream(v any) {
|
||||
if closer, ok := v.(io.Closer); ok {
|
||||
closer.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// Delete removes a file or empty directory.
|
||||
func (m *Fs) Delete(p string) Result {
|
||||
vp := m.validatePath(p)
|
||||
if !vp.OK {
|
||||
return vp
|
||||
}
|
||||
full := vp.Value.(string)
|
||||
if full == "/" || full == os.Getenv("HOME") {
|
||||
return Result{E("fs.Delete", Concat("refusing to delete protected path: ", full), nil), false}
|
||||
}
|
||||
if err := os.Remove(full); err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
return Result{OK: true}
|
||||
}
|
||||
|
||||
// DeleteAll removes a file or directory recursively.
|
||||
func (m *Fs) DeleteAll(p string) Result {
|
||||
vp := m.validatePath(p)
|
||||
if !vp.OK {
|
||||
return vp
|
||||
}
|
||||
full := vp.Value.(string)
|
||||
if full == "/" || full == os.Getenv("HOME") {
|
||||
return Result{E("fs.DeleteAll", Concat("refusing to delete protected path: ", full), nil), false}
|
||||
}
|
||||
if err := os.RemoveAll(full); err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
return Result{OK: true}
|
||||
}
|
||||
|
||||
// Rename moves a file or directory.
|
||||
func (m *Fs) Rename(oldPath, newPath string) Result {
|
||||
oldVp := m.validatePath(oldPath)
|
||||
if !oldVp.OK {
|
||||
return oldVp
|
||||
}
|
||||
newVp := m.validatePath(newPath)
|
||||
if !newVp.OK {
|
||||
return newVp
|
||||
}
|
||||
if err := os.Rename(oldVp.Value.(string), newVp.Value.(string)); err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
return Result{OK: true}
|
||||
}
|
||||
138
.core/reference/i18n.go
Normal file
138
.core/reference/i18n.go
Normal file
|
|
@ -0,0 +1,138 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Internationalisation for the Core framework.
|
||||
// I18n collects locale mounts from services and delegates
|
||||
// translation to a registered Translator implementation (e.g., go-i18n).
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Translator defines the interface for translation services.
// Implemented by go-i18n's Srv and registered via I18n.SetTranslator.
type Translator interface {
	// Translate translates a message by its ID with optional arguments.
	Translate(messageID string, args ...any) Result
	// SetLanguage sets the active language (BCP47 tag, e.g., "en-GB", "de").
	SetLanguage(lang string) error
	// Language returns the current language code.
	Language() string
	// AvailableLanguages returns all loaded language codes.
	AvailableLanguages() []string
}
|
||||
|
||||
// LocaleProvider is implemented by services that ship their own translation files.
// Core discovers this interface during service registration and collects the
// locale mounts. The i18n service loads them during startup.
//
// Usage in a service package:
//
//	//go:embed locales
//	var localeFS embed.FS
//
//	func (s *MyService) Locales() *Embed {
//		m, _ := Mount(localeFS, "locales")
//		return m
//	}
type LocaleProvider interface {
	// Locales returns the service's embedded locale mount.
	Locales() *Embed
}
|
||||
|
||||
// I18n manages locale collection and translation dispatch.
type I18n struct {
	// mu guards locales, locale, and translator.
	mu         sync.RWMutex
	locales    []*Embed   // collected from LocaleProvider services
	locale     string     // active language tag; "" until SetLanguage is called
	translator Translator // registered implementation (nil until set)
}
|
||||
|
||||
// AddLocales adds locale mounts (called during service registration).
|
||||
func (i *I18n) AddLocales(mounts ...*Embed) {
|
||||
i.mu.Lock()
|
||||
i.locales = append(i.locales, mounts...)
|
||||
i.mu.Unlock()
|
||||
}
|
||||
|
||||
// Locales returns all collected locale mounts.
|
||||
func (i *I18n) Locales() Result {
|
||||
i.mu.RLock()
|
||||
out := make([]*Embed, len(i.locales))
|
||||
copy(out, i.locales)
|
||||
i.mu.RUnlock()
|
||||
return Result{out, true}
|
||||
}
|
||||
|
||||
// SetTranslator registers the translation implementation.
|
||||
// Called by go-i18n's Srv during startup.
|
||||
func (i *I18n) SetTranslator(t Translator) {
|
||||
i.mu.Lock()
|
||||
i.translator = t
|
||||
locale := i.locale
|
||||
i.mu.Unlock()
|
||||
if t != nil && locale != "" {
|
||||
_ = t.SetLanguage(locale)
|
||||
}
|
||||
}
|
||||
|
||||
// Translator returns the registered translation implementation, or nil.
|
||||
func (i *I18n) Translator() Result {
|
||||
i.mu.RLock()
|
||||
t := i.translator
|
||||
i.mu.RUnlock()
|
||||
if t == nil {
|
||||
return Result{}
|
||||
}
|
||||
return Result{t, true}
|
||||
}
|
||||
|
||||
// Translate translates a message. Returns the key as-is if no translator is registered.
|
||||
func (i *I18n) Translate(messageID string, args ...any) Result {
|
||||
i.mu.RLock()
|
||||
t := i.translator
|
||||
i.mu.RUnlock()
|
||||
if t != nil {
|
||||
return t.Translate(messageID, args...)
|
||||
}
|
||||
return Result{messageID, true}
|
||||
}
|
||||
|
||||
// SetLanguage sets the active language and forwards to the translator if registered.
|
||||
func (i *I18n) SetLanguage(lang string) Result {
|
||||
if lang == "" {
|
||||
return Result{OK: true}
|
||||
}
|
||||
i.mu.Lock()
|
||||
i.locale = lang
|
||||
t := i.translator
|
||||
i.mu.Unlock()
|
||||
if t != nil {
|
||||
if err := t.SetLanguage(lang); err != nil {
|
||||
return Result{err, false}
|
||||
}
|
||||
}
|
||||
return Result{OK: true}
|
||||
}
|
||||
|
||||
// Language returns the current language code, or "en" if not set.
|
||||
func (i *I18n) Language() string {
|
||||
i.mu.RLock()
|
||||
locale := i.locale
|
||||
i.mu.RUnlock()
|
||||
if locale != "" {
|
||||
return locale
|
||||
}
|
||||
return "en"
|
||||
}
|
||||
|
||||
// AvailableLanguages returns all loaded language codes.
|
||||
func (i *I18n) AvailableLanguages() []string {
|
||||
i.mu.RLock()
|
||||
t := i.translator
|
||||
i.mu.RUnlock()
|
||||
if t != nil {
|
||||
return t.AvailableLanguages()
|
||||
}
|
||||
return []string{"en"}
|
||||
}
|
||||
113
.core/reference/ipc.go
Normal file
113
.core/reference/ipc.go
Normal file
|
|
@ -0,0 +1,113 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Message bus for the Core framework.
|
||||
// Dispatches actions (fire-and-forget), queries (first responder),
|
||||
// and tasks (first executor) between registered handlers.
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Ipc holds IPC dispatch data and the named action registry.
//
//	ipc := (&core.Ipc{}).New()
type Ipc struct {
	// ipcMu guards ipcHandlers (broadcast / ACTION dispatch).
	ipcMu       sync.RWMutex
	ipcHandlers []func(*Core, Message) Result

	// queryMu guards queryHandlers (QUERY dispatch).
	queryMu       sync.RWMutex
	queryHandlers []QueryHandler

	actions *Registry[*Action] // named action registry
	tasks   *Registry[*Task]   // named task registry
}
|
||||
|
||||
// broadcast dispatches a message to all registered IPC handlers.
|
||||
// Each handler is wrapped in panic recovery. All handlers fire regardless of individual results.
|
||||
func (c *Core) broadcast(msg Message) Result {
|
||||
c.ipc.ipcMu.RLock()
|
||||
handlers := slices.Clone(c.ipc.ipcHandlers)
|
||||
c.ipc.ipcMu.RUnlock()
|
||||
|
||||
for _, h := range handlers {
|
||||
func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
Error("ACTION handler panicked", "panic", r)
|
||||
}
|
||||
}()
|
||||
h(c, msg)
|
||||
}()
|
||||
}
|
||||
return Result{OK: true}
|
||||
}
|
||||
|
||||
// Query dispatches a request — first handler to return OK wins.
|
||||
//
|
||||
// r := c.Query(MyQuery{})
|
||||
func (c *Core) Query(q Query) Result {
|
||||
c.ipc.queryMu.RLock()
|
||||
handlers := slices.Clone(c.ipc.queryHandlers)
|
||||
c.ipc.queryMu.RUnlock()
|
||||
|
||||
for _, h := range handlers {
|
||||
r := h(c, q)
|
||||
if r.OK {
|
||||
return r
|
||||
}
|
||||
}
|
||||
return Result{}
|
||||
}
|
||||
|
||||
// QueryAll dispatches a request — collects all OK responses.
|
||||
//
|
||||
// r := c.QueryAll(countQuery{})
|
||||
// results := r.Value.([]any)
|
||||
func (c *Core) QueryAll(q Query) Result {
|
||||
c.ipc.queryMu.RLock()
|
||||
handlers := slices.Clone(c.ipc.queryHandlers)
|
||||
c.ipc.queryMu.RUnlock()
|
||||
|
||||
var results []any
|
||||
for _, h := range handlers {
|
||||
r := h(c, q)
|
||||
if r.OK && r.Value != nil {
|
||||
results = append(results, r.Value)
|
||||
}
|
||||
}
|
||||
return Result{results, true}
|
||||
}
|
||||
|
||||
// RegisterQuery registers a handler for QUERY dispatch.
|
||||
//
|
||||
// c.RegisterQuery(func(_ *core.Core, q core.Query) core.Result { ... })
|
||||
func (c *Core) RegisterQuery(handler QueryHandler) {
|
||||
c.ipc.queryMu.Lock()
|
||||
c.ipc.queryHandlers = append(c.ipc.queryHandlers, handler)
|
||||
c.ipc.queryMu.Unlock()
|
||||
}
|
||||
|
||||
// --- IPC Registration (handlers) ---
|
||||
|
||||
// RegisterAction registers a broadcast handler for ACTION messages.
|
||||
//
|
||||
// c.RegisterAction(func(c *core.Core, msg core.Message) core.Result {
|
||||
// if ev, ok := msg.(AgentCompleted); ok { ... }
|
||||
// return core.Result{OK: true}
|
||||
// })
|
||||
func (c *Core) RegisterAction(handler func(*Core, Message) Result) {
|
||||
c.ipc.ipcMu.Lock()
|
||||
c.ipc.ipcHandlers = append(c.ipc.ipcHandlers, handler)
|
||||
c.ipc.ipcMu.Unlock()
|
||||
}
|
||||
|
||||
// RegisterActions registers multiple broadcast handlers.
|
||||
func (c *Core) RegisterActions(handlers ...func(*Core, Message) Result) {
|
||||
c.ipc.ipcMu.Lock()
|
||||
c.ipc.ipcHandlers = append(c.ipc.ipcHandlers, handlers...)
|
||||
c.ipc.ipcMu.Unlock()
|
||||
}
|
||||
|
||||
68
.core/reference/lock.go
Normal file
68
.core/reference/lock.go
Normal file
|
|
@ -0,0 +1,68 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Synchronisation, locking, and lifecycle snapshots for the Core framework.
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Lock is the DTO for a named mutex.
type Lock struct {
	// Name is the registry key the mutex was requested under.
	Name  string
	Mutex *sync.RWMutex
	// locks is the per-Core backing registry; Core.Lock does not set it
	// on returned values.
	locks *Registry[*sync.RWMutex] // per-Core named mutexes
}
|
||||
|
||||
// Lock returns a named Lock, creating the mutex if needed.
// Locks are per-Core — separate Core instances do not share mutexes.
//
// NOTE(review): the Get-then-Set sequence below is not atomic; two
// goroutines requesting the same name concurrently could each create a
// distinct mutex. Confirm whether Registry offers a get-or-set
// primitive if single-instance guarantees matter here.
func (c *Core) Lock(name string) *Lock {
	r := c.lock.locks.Get(name)
	if r.OK {
		return &Lock{Name: name, Mutex: r.Value.(*sync.RWMutex)}
	}
	m := &sync.RWMutex{}
	c.lock.locks.Set(name, m)
	return &Lock{Name: name, Mutex: m}
}
|
||||
|
||||
// LockEnable marks that the service lock should be applied after initialisation.
// NOTE(review): the name variadic is currently ignored — every call
// flips the single service-wide lockEnabled flag; confirm intent.
func (c *Core) LockEnable(name ...string) {
	c.services.lockEnabled = true
}
|
||||
|
||||
// LockApply activates the service lock if it was previously enabled via
// LockEnable. NOTE(review): like LockEnable, the name variadic is
// currently ignored.
func (c *Core) LockApply(name ...string) {
	if c.services.lockEnabled {
		c.services.Lock()
	}
}
|
||||
|
||||
// Startables returns services that have an OnStart function, in registration order.
|
||||
func (c *Core) Startables() Result {
|
||||
if c.services == nil {
|
||||
return Result{}
|
||||
}
|
||||
var out []*Service
|
||||
c.services.Each(func(_ string, svc *Service) {
|
||||
if svc.OnStart != nil {
|
||||
out = append(out, svc)
|
||||
}
|
||||
})
|
||||
return Result{out, true}
|
||||
}
|
||||
|
||||
// Stoppables returns services that have an OnStop function, in registration order.
|
||||
func (c *Core) Stoppables() Result {
|
||||
if c.services == nil {
|
||||
return Result{}
|
||||
}
|
||||
var out []*Service
|
||||
c.services.Each(func(_ string, svc *Service) {
|
||||
if svc.OnStop != nil {
|
||||
out = append(out, svc)
|
||||
}
|
||||
})
|
||||
return Result{out, true}
|
||||
}
|
||||
404
.core/reference/log.go
Normal file
404
.core/reference/log.go
Normal file
|
|
@ -0,0 +1,404 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Structured logging for the Core framework.
|
||||
//
|
||||
// core.SetLevel(core.LevelDebug)
|
||||
// core.Info("server started", "port", 8080)
|
||||
// core.Error("failed to connect", "err", err)
|
||||
package core
|
||||
|
||||
import (
|
||||
goio "io"
|
||||
"os"
|
||||
"os/user"
|
||||
"slices"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Level defines logging verbosity.
type Level int

// Logging level constants ordered by increasing verbosity.
const (
	// LevelQuiet suppresses all log output.
	LevelQuiet Level = iota
	// LevelError shows only error messages.
	LevelError
	// LevelWarn shows warnings and errors.
	LevelWarn
	// LevelInfo shows informational messages, warnings, and errors.
	LevelInfo
	// LevelDebug shows all messages including debug details.
	LevelDebug
)

// String returns the lowercase level name, or "unknown" for values
// outside the defined range.
func (l Level) String() string {
	names := [...]string{"quiet", "error", "warn", "info", "debug"}
	if l >= 0 && int(l) < len(names) {
		return names[l]
	}
	return "unknown"
}
|
||||
|
||||
// Log provides structured logging.
type Log struct {
	// mu guards level, output, and redactKeys.
	mu     sync.RWMutex
	level  Level
	output goio.Writer

	// RedactKeys is a list of keys whose values should be masked in logs.
	redactKeys []string

	// Style functions for formatting (can be overridden).
	// All default to identity in NewLog.
	StyleTimestamp func(string) string
	StyleDebug     func(string) string
	StyleInfo      func(string) string
	StyleWarn      func(string) string
	StyleError     func(string) string
	StyleSecurity  func(string) string
}
|
||||
|
||||
// RotationLogOptions defines the log rotation and retention policy.
// Consumed by RotationWriterFactory implementations.
type RotationLogOptions struct {
	// Filename is the log file path. If empty, rotation is disabled.
	Filename string

	// MaxSize is the maximum size of the log file in megabytes before it gets rotated.
	// It defaults to 100 megabytes.
	MaxSize int

	// MaxAge is the maximum number of days to retain old log files based on their
	// file modification time. It defaults to 28 days.
	// Note: set to a negative value to disable age-based retention.
	MaxAge int

	// MaxBackups is the maximum number of old log files to retain.
	// It defaults to 5 backups.
	MaxBackups int

	// Compress determines if the rotated log files should be compressed using gzip.
	// It defaults to true.
	Compress bool
}
|
||||
|
||||
// LogOptions configures a Log (see NewLog).
type LogOptions struct {
	// Level is the initial verbosity threshold.
	Level Level
	// Output is the destination for log messages. If Rotation is provided,
	// Output is ignored and logs are written to the rotating file instead.
	// When both are absent, NewLog falls back to os.Stderr.
	Output goio.Writer
	// Rotation enables log rotation to file. If provided, Filename must be set.
	Rotation *RotationLogOptions
	// RedactKeys is a list of keys whose values should be masked in logs.
	RedactKeys []string
}
|
||||
|
||||
// RotationWriterFactory creates a rotating writer from options.
// Set this to enable log rotation (provided by core/go-io integration).
// While nil, NewLog ignores LogOptions.Rotation entirely.
var RotationWriterFactory func(RotationLogOptions) goio.WriteCloser
|
||||
|
||||
// New creates a new Log with the given options.
|
||||
func NewLog(opts LogOptions) *Log {
|
||||
output := opts.Output
|
||||
if opts.Rotation != nil && opts.Rotation.Filename != "" && RotationWriterFactory != nil {
|
||||
output = RotationWriterFactory(*opts.Rotation)
|
||||
}
|
||||
if output == nil {
|
||||
output = os.Stderr
|
||||
}
|
||||
|
||||
return &Log{
|
||||
level: opts.Level,
|
||||
output: output,
|
||||
redactKeys: slices.Clone(opts.RedactKeys),
|
||||
StyleTimestamp: identity,
|
||||
StyleDebug: identity,
|
||||
StyleInfo: identity,
|
||||
StyleWarn: identity,
|
||||
StyleError: identity,
|
||||
StyleSecurity: identity,
|
||||
}
|
||||
}
|
||||
|
||||
// identity is the default style hook: it returns its input unchanged.
func identity(s string) string { return s }
|
||||
|
||||
// SetLevel changes the log level.
|
||||
func (l *Log) SetLevel(level Level) {
|
||||
l.mu.Lock()
|
||||
l.level = level
|
||||
l.mu.Unlock()
|
||||
}
|
||||
|
||||
// Level returns the current log level.
|
||||
func (l *Log) Level() Level {
|
||||
l.mu.RLock()
|
||||
defer l.mu.RUnlock()
|
||||
return l.level
|
||||
}
|
||||
|
||||
// SetOutput changes the output writer.
|
||||
func (l *Log) SetOutput(w goio.Writer) {
|
||||
l.mu.Lock()
|
||||
l.output = w
|
||||
l.mu.Unlock()
|
||||
}
|
||||
|
||||
// SetRedactKeys sets the keys to be redacted.
|
||||
func (l *Log) SetRedactKeys(keys ...string) {
|
||||
l.mu.Lock()
|
||||
l.redactKeys = slices.Clone(keys)
|
||||
l.mu.Unlock()
|
||||
}
|
||||
|
||||
func (l *Log) shouldLog(level Level) bool {
|
||||
l.mu.RLock()
|
||||
defer l.mu.RUnlock()
|
||||
return level <= l.level
|
||||
}
|
||||
|
||||
func (l *Log) log(level Level, prefix, msg string, keyvals ...any) {
|
||||
l.mu.RLock()
|
||||
output := l.output
|
||||
styleTimestamp := l.StyleTimestamp
|
||||
redactKeys := l.redactKeys
|
||||
l.mu.RUnlock()
|
||||
|
||||
timestamp := styleTimestamp(time.Now().Format("15:04:05"))
|
||||
|
||||
// Copy keyvals to avoid mutating the caller's slice
|
||||
keyvals = append([]any(nil), keyvals...)
|
||||
|
||||
// Automatically extract context from error if present in keyvals
|
||||
origLen := len(keyvals)
|
||||
for i := 0; i < origLen; i += 2 {
|
||||
if i+1 < origLen {
|
||||
if err, ok := keyvals[i+1].(error); ok {
|
||||
if op := Operation(err); op != "" {
|
||||
// Check if op is already in keyvals
|
||||
hasOp := false
|
||||
for j := 0; j < len(keyvals); j += 2 {
|
||||
if k, ok := keyvals[j].(string); ok && k == "op" {
|
||||
hasOp = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasOp {
|
||||
keyvals = append(keyvals, "op", op)
|
||||
}
|
||||
}
|
||||
if stack := FormatStackTrace(err); stack != "" {
|
||||
// Check if stack is already in keyvals
|
||||
hasStack := false
|
||||
for j := 0; j < len(keyvals); j += 2 {
|
||||
if k, ok := keyvals[j].(string); ok && k == "stack" {
|
||||
hasStack = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasStack {
|
||||
keyvals = append(keyvals, "stack", stack)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Format key-value pairs
|
||||
var kvStr string
|
||||
if len(keyvals) > 0 {
|
||||
kvStr = " "
|
||||
for i := 0; i < len(keyvals); i += 2 {
|
||||
if i > 0 {
|
||||
kvStr += " "
|
||||
}
|
||||
key := keyvals[i]
|
||||
var val any
|
||||
if i+1 < len(keyvals) {
|
||||
val = keyvals[i+1]
|
||||
}
|
||||
|
||||
// Redaction logic
|
||||
keyStr := Sprint(key)
|
||||
if slices.Contains(redactKeys, keyStr) {
|
||||
val = "[REDACTED]"
|
||||
}
|
||||
|
||||
// Secure formatting to prevent log injection
|
||||
if s, ok := val.(string); ok {
|
||||
kvStr += Sprintf("%v=%q", key, s)
|
||||
} else {
|
||||
kvStr += Sprintf("%v=%v", key, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Print(output, "%s %s %s%s", timestamp, prefix, msg, kvStr)
|
||||
}
|
||||
|
||||
// Debug logs a debug message with optional key-value pairs.
|
||||
func (l *Log) Debug(msg string, keyvals ...any) {
|
||||
if l.shouldLog(LevelDebug) {
|
||||
l.log(LevelDebug, l.StyleDebug("[DBG]"), msg, keyvals...)
|
||||
}
|
||||
}
|
||||
|
||||
// Info logs an info message with optional key-value pairs.
|
||||
func (l *Log) Info(msg string, keyvals ...any) {
|
||||
if l.shouldLog(LevelInfo) {
|
||||
l.log(LevelInfo, l.StyleInfo("[INF]"), msg, keyvals...)
|
||||
}
|
||||
}
|
||||
|
||||
// Warn logs a warning message with optional key-value pairs.
|
||||
func (l *Log) Warn(msg string, keyvals ...any) {
|
||||
if l.shouldLog(LevelWarn) {
|
||||
l.log(LevelWarn, l.StyleWarn("[WRN]"), msg, keyvals...)
|
||||
}
|
||||
}
|
||||
|
||||
// Error logs an error message with optional key-value pairs.
|
||||
func (l *Log) Error(msg string, keyvals ...any) {
|
||||
if l.shouldLog(LevelError) {
|
||||
l.log(LevelError, l.StyleError("[ERR]"), msg, keyvals...)
|
||||
}
|
||||
}
|
||||
|
||||
// Security logs a security event with optional key-value pairs.
|
||||
// It uses LevelError to ensure security events are visible even in restrictive
|
||||
// log configurations.
|
||||
func (l *Log) Security(msg string, keyvals ...any) {
|
||||
if l.shouldLog(LevelError) {
|
||||
l.log(LevelError, l.StyleSecurity("[SEC]"), msg, keyvals...)
|
||||
}
|
||||
}
|
||||
|
||||
// Username returns the current system username.
// It uses os/user for reliability and falls back to environment variables.
func Username() string {
	u, err := user.Current()
	if err == nil {
		return u.Username
	}
	// User lookup can fail in minimal environments; try the conventional
	// variables in order (USER on Unix, then USERNAME on Windows).
	for _, env := range []string{"USER", "USERNAME"} {
		if v := os.Getenv(env); v != "" {
			return v
		}
	}
	return ""
}
|
||||
|
||||
// --- Default logger ---

// defaultLogPtr holds the process-wide default logger. atomic.Pointer
// makes Default/SetDefault safe for concurrent use without a mutex.
var defaultLogPtr atomic.Pointer[Log]

// init installs an info-level logger; with no Output set, NewLog falls
// back to os.Stderr.
func init() {
	l := NewLog(LogOptions{Level: LevelInfo})
	defaultLogPtr.Store(l)
}
|
||||
|
||||
// Default returns the default logger.
// Safe for concurrent use (atomic load).
func Default() *Log {
	return defaultLogPtr.Load()
}
|
||||
|
||||
// SetDefault sets the default logger.
// NOTE: storing nil makes the package-level helpers (Debug, Info, …)
// panic on their next call, since they dereference Default().
func SetDefault(l *Log) {
	defaultLogPtr.Store(l)
}
|
||||
|
||||
// SetLevel sets the default logger's level.
// Shorthand for Default().SetLevel.
func SetLevel(level Level) {
	Default().SetLevel(level)
}
|
||||
|
||||
// SetRedactKeys sets the default logger's redaction keys.
// Shorthand for Default().SetRedactKeys.
func SetRedactKeys(keys ...string) {
	Default().SetRedactKeys(keys...)
}
|
||||
|
||||
// Debug logs to the default logger.
// Shorthand for Default().Debug.
func Debug(msg string, keyvals ...any) {
	Default().Debug(msg, keyvals...)
}
|
||||
|
||||
// Info logs to the default logger.
// Shorthand for Default().Info.
func Info(msg string, keyvals ...any) {
	Default().Info(msg, keyvals...)
}
|
||||
|
||||
// Warn logs to the default logger.
// Shorthand for Default().Warn.
func Warn(msg string, keyvals ...any) {
	Default().Warn(msg, keyvals...)
}
|
||||
|
||||
// Error logs to the default logger.
// Shorthand for Default().Error.
func Error(msg string, keyvals ...any) {
	Default().Error(msg, keyvals...)
}
|
||||
|
||||
// Security logs to the default logger.
// Shorthand for Default().Security.
func Security(msg string, keyvals ...any) {
	Default().Security(msg, keyvals...)
}
|
||||
|
||||
// --- LogErr: Error-Aware Logger ---
|
||||
|
||||
// LogErr logs structured information extracted from errors.
// Primary action: log. Secondary: extract error context.
type LogErr struct {
	// log is the destination logger; set via NewLogErr.
	log *Log
}
|
||||
|
||||
// NewLogErr creates a LogErr bound to the given logger.
// The logger must be non-nil: Log dereferences it unconditionally.
func NewLogErr(log *Log) *LogErr {
	return &LogErr{log: log}
}
|
||||
|
||||
// Log extracts context from an Err and logs it at Error level.
|
||||
func (le *LogErr) Log(err error) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
le.log.Error(ErrorMessage(err), "op", Operation(err), "code", ErrorCode(err), "stack", FormatStackTrace(err))
|
||||
}
|
||||
|
||||
// --- LogPanic: Panic-Aware Logger ---
|
||||
|
||||
// LogPanic logs panic context without crash file management.
// Primary action: log. Secondary: recover panics.
type LogPanic struct {
	// log is the destination logger; set via NewLogPanic.
	log *Log
}
|
||||
|
||||
// NewLogPanic creates a LogPanic bound to the given logger.
// The logger must be non-nil: Recover dereferences it unconditionally.
func NewLogPanic(log *Log) *LogPanic {
	return &LogPanic{log: log}
}
|
||||
|
||||
// Recover captures a panic and logs it. Does not write crash files.
|
||||
// Use as: defer core.NewLogPanic(logger).Recover()
|
||||
func (lp *LogPanic) Recover() {
|
||||
r := recover()
|
||||
if r == nil {
|
||||
return
|
||||
}
|
||||
err, ok := r.(error)
|
||||
if !ok {
|
||||
err = NewError(Sprint("panic: ", r))
|
||||
}
|
||||
lp.log.Error("panic recovered",
|
||||
"err", err,
|
||||
"op", Operation(err),
|
||||
"stack", FormatStackTrace(err),
|
||||
)
|
||||
}
|
||||
197
.core/reference/options.go
Normal file
197
.core/reference/options.go
Normal file
|
|
@ -0,0 +1,197 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Core primitives: Option, Options, Result.
|
||||
//
|
||||
// Options is the universal input type. Result is the universal output type.
|
||||
// All Core operations accept Options and return Result.
|
||||
//
|
||||
// opts := core.NewOptions(
|
||||
// core.Option{Key: "name", Value: "brain"},
|
||||
// core.Option{Key: "path", Value: "prompts"},
|
||||
// )
|
||||
// r := c.Drive().New(opts)
|
||||
// if !r.OK { log.Fatal(r.Error()) }
|
||||
package core
|
||||
|
||||
// --- Result: Universal Output ---
|
||||
|
||||
// Result is the universal return type for Core operations.
// Replaces the (value, error) pattern — errors flow through Core internally.
//
// r := c.Data().New(opts)
// if !r.OK { core.Error("failed", "err", r.Error()) }
type Result struct {
	// Value holds the payload on success, or the error on failure
	// (New stores the error here when a call fails).
	Value any
	// OK reports whether the operation succeeded.
	OK bool
}
|
||||
|
||||
// Result gets or sets the value. Zero args returns Value. With args, maps
|
||||
// Go (value, error) pairs to Result and returns self.
|
||||
//
|
||||
// r.Result(file, err) // OK = err == nil, Value = file
|
||||
// r.Result(value) // OK = true, Value = value
|
||||
// r.Result() // after set — returns the value
|
||||
func (r Result) Result(args ...any) Result {
|
||||
if len(args) == 0 {
|
||||
return r
|
||||
}
|
||||
return r.New(args...)
|
||||
}
|
||||
|
||||
// New adapts Go (value, error) pairs into a Result.
//
// r := core.Result{}.New(file, err)
//
// Semantics:
//   - multiple args with an error last: a non-nil error wins (Value = err,
//     OK = false); a nil error keeps only args[0] as the Value.
//   - a single error arg: failure if non-nil, bare success (nil Value) if nil.
//   - otherwise: Value = args[0], OK = true. Extra non-error args are discarded.
//
// NOTE: a typed nil (e.g. (*MyErr)(nil)) is a non-nil error interface and
// is treated as a failure.
func (r Result) New(args ...any) Result {
	if len(args) == 0 {
		return r
	}

	if len(args) > 1 {
		// (value, ..., err) convention: check the last argument.
		if err, ok := args[len(args)-1].(error); ok {
			if err != nil {
				return Result{Value: err, OK: false}
			}
			// nil error: success, keeping only the first value.
			r.Value = args[0]
			r.OK = true
			return r
		}
	}

	r.Value = args[0]

	// A lone error argument: failure when non-nil, empty success when nil.
	if err, ok := r.Value.(error); ok {
		if err != nil {
			return Result{Value: err, OK: false}
		}
		return Result{OK: true}
	}

	r.OK = true
	return r
}
|
||||
|
||||
// Get returns the Result if OK, empty Result otherwise.
|
||||
//
|
||||
// r := core.Result{Value: "hello", OK: true}.Get()
|
||||
func (r Result) Get() Result {
|
||||
if r.OK {
|
||||
return r
|
||||
}
|
||||
return Result{Value: r.Value, OK: false}
|
||||
}
|
||||
|
||||
// Option is a single key-value configuration pair.
//
// core.Option{Key: "name", Value: "brain"}
// core.Option{Key: "port", Value: 8080}
type Option struct {
	// Key identifies the option; Options.Get matches on exact equality.
	Key string
	// Value is the untyped payload; Options provides typed accessors.
	Value any
}
|
||||
|
||||
// --- Options: Universal Input ---
|
||||
|
||||
// Options is the universal input type for Core operations.
// A structured collection of key-value pairs with typed accessors.
//
// opts := core.NewOptions(
// core.Option{Key: "name", Value: "myapp"},
// core.Option{Key: "port", Value: 8080},
// )
// name := opts.String("name")
type Options struct {
	// items preserves insertion order; lookups (Get) are linear scans,
	// which suits the small collections this type is used for.
	items []Option
}
|
||||
|
||||
// NewOptions creates an Options collection from key-value pairs.
|
||||
//
|
||||
// opts := core.NewOptions(
|
||||
// core.Option{Key: "name", Value: "brain"},
|
||||
// core.Option{Key: "path", Value: "prompts"},
|
||||
// )
|
||||
func NewOptions(items ...Option) Options {
|
||||
cp := make([]Option, len(items))
|
||||
copy(cp, items)
|
||||
return Options{items: cp}
|
||||
}
|
||||
|
||||
// Set adds or updates a key-value pair.
|
||||
//
|
||||
// opts.Set("port", 8080)
|
||||
func (o *Options) Set(key string, value any) {
|
||||
for i, opt := range o.items {
|
||||
if opt.Key == key {
|
||||
o.items[i].Value = value
|
||||
return
|
||||
}
|
||||
}
|
||||
o.items = append(o.items, Option{Key: key, Value: value})
|
||||
}
|
||||
|
||||
// Get retrieves a value by key.
|
||||
//
|
||||
// r := opts.Get("name")
|
||||
// if r.OK { name := r.Value.(string) }
|
||||
func (o Options) Get(key string) Result {
|
||||
for _, opt := range o.items {
|
||||
if opt.Key == key {
|
||||
return Result{opt.Value, true}
|
||||
}
|
||||
}
|
||||
return Result{}
|
||||
}
|
||||
|
||||
// Has returns true if a key exists.
// Delegates to Get, so it costs one linear scan.
//
// if opts.Has("debug") { ... }
func (o Options) Has(key string) bool {
	return o.Get(key).OK
}
|
||||
|
||||
// String retrieves a string value, empty string if missing.
|
||||
//
|
||||
// name := opts.String("name")
|
||||
func (o Options) String(key string) string {
|
||||
r := o.Get(key)
|
||||
if !r.OK {
|
||||
return ""
|
||||
}
|
||||
s, _ := r.Value.(string)
|
||||
return s
|
||||
}
|
||||
|
||||
// Int retrieves an int value, 0 if missing.
|
||||
//
|
||||
// port := opts.Int("port")
|
||||
func (o Options) Int(key string) int {
|
||||
r := o.Get(key)
|
||||
if !r.OK {
|
||||
return 0
|
||||
}
|
||||
i, _ := r.Value.(int)
|
||||
return i
|
||||
}
|
||||
|
||||
// Bool retrieves a bool value, false if missing.
|
||||
//
|
||||
// debug := opts.Bool("debug")
|
||||
func (o Options) Bool(key string) bool {
|
||||
r := o.Get(key)
|
||||
if !r.OK {
|
||||
return false
|
||||
}
|
||||
b, _ := r.Value.(bool)
|
||||
return b
|
||||
}
|
||||
|
||||
// Len returns the number of options currently stored.
func (o Options) Len() int {
	return len(o.items)
}
|
||||
|
||||
// Items returns a copy of the underlying option slice.
|
||||
func (o Options) Items() []Option {
|
||||
cp := make([]Option, len(o.items))
|
||||
copy(cp, o.items)
|
||||
return cp
|
||||
}
|
||||
164
.core/reference/runtime.go
Normal file
164
.core/reference/runtime.go
Normal file
|
|
@ -0,0 +1,164 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Runtime helpers for the Core framework.
|
||||
// ServiceRuntime is embedded by consumer services.
|
||||
// Runtime is the GUI binding container (e.g., Wails).
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"maps"
|
||||
"slices"
|
||||
)
|
||||
|
||||
// --- ServiceRuntime (embedded by consumer services) ---
|
||||
|
||||
// ServiceRuntime is embedded in services to provide access to the Core and typed options.
type ServiceRuntime[T any] struct {
	// core is the owning framework instance; exposed via Core().
	core *Core
	// opts are the typed construction options; exposed via Options().
	opts T
}
|
||||
|
||||
// NewServiceRuntime creates a ServiceRuntime for a service constructor.
// Both the Core and the typed options are captured as-is.
func NewServiceRuntime[T any](c *Core, opts T) *ServiceRuntime[T] {
	return &ServiceRuntime[T]{core: c, opts: opts}
}
|
||||
|
||||
// Core returns the Core instance this service is registered with.
//
// c := s.Core()
func (r *ServiceRuntime[T]) Core() *Core { return r.core }
|
||||
|
||||
// Options returns the typed options this service was created with.
// The value is returned by copy.
//
// opts := s.Options() // MyOptions{BufferSize: 1024, ...}
func (r *ServiceRuntime[T]) Options() T { return r.opts }
|
||||
|
||||
// Config is a shortcut to s.Core().Config().
//
// host := s.Config().String("database.host")
func (r *ServiceRuntime[T]) Config() *Config { return r.core.Config() }
|
||||
|
||||
// --- Lifecycle ---
|
||||
|
||||
// ServiceStartup runs OnStart for all registered services that have one.
// It clears the shutdown flag, derives the Core's cancellable context
// from ctx, and stops at the first failing service or once ctx is done.
//
// NOTE(review): the options parameter is unused in this implementation —
// presumably kept for a host-framework signature; confirm.
func (c *Core) ServiceStartup(ctx context.Context, options any) Result {
	c.shutdown.Store(false)
	c.context, c.cancel = context.WithCancel(ctx)
	startables := c.Startables()
	if startables.OK {
		for _, s := range startables.Value.([]*Service) {
			// Abort early if the caller's context was cancelled.
			if err := ctx.Err(); err != nil {
				return Result{err, false}
			}
			r := s.OnStart()
			if !r.OK {
				return r
			}
		}
	}
	c.ACTION(ActionServiceStartup{})
	return Result{OK: true}
}
|
||||
|
||||
// ServiceShutdown drains background tasks, then stops all registered services.
// The wait for background tasks is bounded by ctx; services are then
// stopped in order, and the first failure is reported after all have
// been given a chance to stop.
//
// NOTE(review): c.cancel is called unconditionally — it is only set by
// ServiceStartup, so calling Shutdown before Startup would panic; confirm
// the lifecycle guarantees this ordering.
func (c *Core) ServiceShutdown(ctx context.Context) Result {
	c.shutdown.Store(true)
	c.cancel() // signal all context-aware tasks to stop
	c.ACTION(ActionServiceShutdown{})

	// Drain background tasks before stopping services.
	done := make(chan struct{})
	go func() {
		c.waitGroup.Wait()
		close(done)
	}()
	select {
	case <-done:
	case <-ctx.Done():
		// Give up waiting; the drain goroutine exits once Wait returns.
		return Result{ctx.Err(), false}
	}

	// Stop services
	var firstErr error
	stoppables := c.Stoppables()
	if stoppables.OK {
		for _, s := range stoppables.Value.([]*Service) {
			if err := ctx.Err(); err != nil {
				return Result{err, false}
			}
			r := s.OnStop()
			// Remember only the first failure but keep stopping the rest.
			if !r.OK && firstErr == nil {
				if e, ok := r.Value.(error); ok {
					firstErr = e
				} else {
					firstErr = E("core.ServiceShutdown", Sprint("service OnStop failed: ", r.Value), nil)
				}
			}
		}
	}
	if firstErr != nil {
		return Result{firstErr, false}
	}
	return Result{OK: true}
}
|
||||
|
||||
// --- Runtime DTO (GUI binding) ---
|
||||
|
||||
// Runtime is the container for GUI runtimes (e.g., Wails).
type Runtime struct {
	// app is the opaque host-application handle; NewWithFactories also
	// mirrors it into the Core via c.app.Runtime.
	app any
	// Core is the embedded framework instance driving all services.
	Core *Core
}
|
||||
|
||||
// ServiceFactory defines a function that creates a Service.
// On success the Result's Value must be a Service value (not a pointer);
// NewWithFactories enforces this with a type assertion.
type ServiceFactory func() Result
|
||||
|
||||
// NewWithFactories creates a Runtime with the provided service factories.
// Factories run in sorted-name order for deterministic registration; nil
// factories are skipped. The first factory or registration failure aborts
// construction.
func NewWithFactories(app any, factories map[string]ServiceFactory) Result {
	c := New(WithOptions(NewOptions(Option{Key: "name", Value: "core"})))
	c.app.Runtime = app

	// Sorted iteration keeps startup order stable across runs.
	names := slices.Sorted(maps.Keys(factories))
	for _, name := range names {
		factory := factories[name]
		if factory == nil {
			continue
		}
		r := factory()
		if !r.OK {
			// Preserve the factory's error (if it produced one) as the cause.
			cause, _ := r.Value.(error)
			return Result{E("core.NewWithFactories", Concat("factory \"", name, "\" failed"), cause), false}
		}
		// Factories must return a Service value, not a pointer or other type.
		svc, ok := r.Value.(Service)
		if !ok {
			return Result{E("core.NewWithFactories", Concat("factory \"", name, "\" returned non-Service type"), nil), false}
		}
		sr := c.Service(name, svc)
		if !sr.OK {
			return sr
		}
	}
	return Result{&Runtime{app: app, Core: c}, true}
}
|
||||
|
||||
// NewRuntime creates a Runtime with no custom services.
// On success the Result's Value is a *Runtime.
func NewRuntime(app any) Result {
	return NewWithFactories(app, map[string]ServiceFactory{})
}
|
||||
|
||||
// ServiceName returns "Core" — the Runtime's service identity.
// The name is fixed regardless of which services are registered.
func (r *Runtime) ServiceName() string { return "Core" }
|
||||
|
||||
// ServiceStartup starts all services via the embedded Core.
|
||||
func (r *Runtime) ServiceStartup(ctx context.Context, options any) Result {
|
||||
return r.Core.ServiceStartup(ctx, options)
|
||||
}
|
||||
// ServiceShutdown stops all services via the embedded Core.
|
||||
func (r *Runtime) ServiceShutdown(ctx context.Context) Result {
|
||||
if r.Core != nil {
|
||||
return r.Core.ServiceShutdown(ctx)
|
||||
}
|
||||
return Result{OK: true}
|
||||
}
|
||||
153
.core/reference/service.go
Normal file
153
.core/reference/service.go
Normal file
|
|
@ -0,0 +1,153 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Service registry for the Core framework.
|
||||
//
|
||||
// Register a service (DTO with lifecycle hooks):
|
||||
//
|
||||
// c.Service("auth", core.Service{OnStart: startFn})
|
||||
//
|
||||
// Register a service instance (auto-discovers Startable/Stoppable/HandleIPCEvents):
|
||||
//
|
||||
// c.RegisterService("display", displayInstance)
|
||||
//
|
||||
// Get a service:
|
||||
//
|
||||
// r := c.Service("auth")
|
||||
// if r.OK { svc := r.Value }
|
||||
|
||||
package core
|
||||
|
||||
import "context"
|
||||
|
||||
// Service is a managed component with optional lifecycle.
// Any of the hook functions may be nil.
type Service struct {
	Name string
	Instance any // the raw service instance (for interface discovery)
	Options Options
	// OnStart runs during Core.ServiceStartup.
	OnStart func() Result
	// OnStop runs during Core.ServiceShutdown.
	OnStop func() Result
	// OnReload is a reload hook; not invoked anywhere in this file.
	OnReload func() Result
}
|
||||
|
||||
// ServiceRegistry holds registered services. Embeds Registry[*Service]
// for thread-safe named storage with insertion order.
type ServiceRegistry struct {
	*Registry[*Service]
	// lockEnabled gates further registration — presumably consulted by
	// the Locked() checks in Core.Service / Core.RegisterService; confirm.
	lockEnabled bool
}
|
||||
|
||||
// --- Core service methods ---
|
||||
|
||||
// Service gets or registers a service by name.
// With no service argument it is a lookup; with one (or more — extras
// are ignored) it registers.
//
// c.Service("auth", core.Service{OnStart: startFn})
// r := c.Service("auth")
func (c *Core) Service(name string, service ...Service) Result {
	if len(service) == 0 {
		r := c.services.Get(name)
		if !r.OK {
			return Result{}
		}
		svc := r.Value.(*Service)
		// Return the instance if available, otherwise the Service DTO
		if svc.Instance != nil {
			return Result{svc.Instance, true}
		}
		return Result{svc, true}
	}

	if name == "" {
		return Result{E("core.Service", "service name cannot be empty", nil), false}
	}

	// Registration is refused once the registry is locked.
	if c.services.Locked() {
		return Result{E("core.Service", Concat("service \"", name, "\" not permitted — registry locked"), nil), false}
	}
	if c.services.Has(name) {
		// NOTE(review): this message is built with Join while the one
		// above uses Concat and quotes — formats differ; confirm intent.
		return Result{E("core.Service", Join(" ", "service", name, "already registered"), nil), false}
	}

	// Pointer into the caller's variadic slice; only service[0] is used.
	srv := &service[0]
	srv.Name = name
	return c.services.Set(name, srv)
}
|
||||
|
||||
// RegisterService registers a service instance by name.
// Auto-discovers Startable, Stoppable, and HandleIPCEvents interfaces
// on the instance and wires them into the lifecycle and IPC bus.
//
// c.RegisterService("display", displayInstance)
func (c *Core) RegisterService(name string, instance any) Result {
	if name == "" {
		return Result{E("core.RegisterService", "service name cannot be empty", nil), false}
	}

	if c.services.Locked() {
		return Result{E("core.RegisterService", Concat("service \"", name, "\" not permitted — registry locked"), nil), false}
	}
	if c.services.Has(name) {
		return Result{E("core.RegisterService", Join(" ", "service", name, "already registered"), nil), false}
	}

	srv := &Service{Name: name, Instance: instance}

	// Auto-discover lifecycle interfaces
	if s, ok := instance.(Startable); ok {
		srv.OnStart = func() Result {
			// c.context is read when OnStart runs, i.e. after
			// ServiceStartup has created it.
			return s.OnStartup(c.context)
		}
	}
	if s, ok := instance.(Stoppable); ok {
		srv.OnStop = func() Result {
			// Background context: shutdown proceeds even after the
			// Core's own context has been cancelled.
			return s.OnShutdown(context.Background())
		}
	}

	c.services.Set(name, srv)

	// Auto-discover IPC handler
	if handler, ok := instance.(interface {
		HandleIPCEvents(*Core, Message) Result
	}); ok {
		c.ipc.ipcMu.Lock()
		c.ipc.ipcHandlers = append(c.ipc.ipcHandlers, handler.HandleIPCEvents)
		c.ipc.ipcMu.Unlock()
	}

	// NOTE(review): the c.services.Set result is discarded here, unlike
	// Core.Service which returns it — confirm this is intentional.
	return Result{OK: true}
}
|
||||
|
||||
// ServiceFor retrieves a registered service by name and asserts its type.
|
||||
//
|
||||
// prep, ok := core.ServiceFor[*agentic.PrepSubsystem](c, "agentic")
|
||||
func ServiceFor[T any](c *Core, name string) (T, bool) {
|
||||
var zero T
|
||||
r := c.Service(name)
|
||||
if !r.OK {
|
||||
return zero, false
|
||||
}
|
||||
typed, ok := r.Value.(T)
|
||||
return typed, ok
|
||||
}
|
||||
|
||||
// MustServiceFor retrieves a registered service by name and asserts its type.
|
||||
// Panics if the service is not found or the type assertion fails.
|
||||
//
|
||||
// cli := core.MustServiceFor[*Cli](c, "cli")
|
||||
func MustServiceFor[T any](c *Core, name string) T {
|
||||
v, ok := ServiceFor[T](c, name)
|
||||
if !ok {
|
||||
panic(E("core.MustServiceFor", Sprintf("service %q not found or wrong type", name), nil))
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Services returns all registered service names in registration order.
|
||||
//
|
||||
// names := c.Services()
|
||||
func (c *Core) Services() []string {
|
||||
if c.services == nil {
|
||||
return nil
|
||||
}
|
||||
return c.services.Names()
|
||||
}
|
||||
157
.core/reference/string.go
Normal file
157
.core/reference/string.go
Normal file
|
|
@ -0,0 +1,157 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// String operations for the Core framework.
|
||||
// Provides safe, predictable string helpers that downstream packages
|
||||
// use directly — same pattern as Array[T] for slices.
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// HasPrefix returns true if s starts with prefix.
// Thin wrapper over strings.HasPrefix so downstream packages can use
// core's helpers without importing strings directly.
//
// core.HasPrefix("--verbose", "--") // true
func HasPrefix(s, prefix string) bool {
	return strings.HasPrefix(s, prefix)
}
|
||||
|
||||
// HasSuffix returns true if s ends with suffix.
// Thin wrapper over strings.HasSuffix.
//
// core.HasSuffix("test.go", ".go") // true
func HasSuffix(s, suffix string) bool {
	return strings.HasSuffix(s, suffix)
}
|
||||
|
||||
// TrimPrefix removes prefix from s (no-op when s lacks the prefix).
// Thin wrapper over strings.TrimPrefix.
//
// core.TrimPrefix("--verbose", "--") // "verbose"
func TrimPrefix(s, prefix string) string {
	return strings.TrimPrefix(s, prefix)
}
|
||||
|
||||
// TrimSuffix removes suffix from s (no-op when s lacks the suffix).
// Thin wrapper over strings.TrimSuffix.
//
// core.TrimSuffix("test.go", ".go") // "test"
func TrimSuffix(s, suffix string) string {
	return strings.TrimSuffix(s, suffix)
}
|
||||
|
||||
// Contains returns true if s contains substr.
// Thin wrapper over strings.Contains.
//
// core.Contains("hello world", "world") // true
func Contains(s, substr string) bool {
	return strings.Contains(s, substr)
}
|
||||
|
||||
// Split splits s by separator.
// Thin wrapper over strings.Split.
//
// core.Split("a/b/c", "/") // ["a", "b", "c"]
func Split(s, sep string) []string {
	return strings.Split(s, sep)
}
|
||||
|
||||
// SplitN splits s by separator into at most n parts.
// Thin wrapper over strings.SplitN.
//
// core.SplitN("key=value=extra", "=", 2) // ["key", "value=extra"]
func SplitN(s, sep string, n int) []string {
	return strings.SplitN(s, sep, n)
}
|
||||
|
||||
// Join joins parts with a separator, building via Concat.
|
||||
//
|
||||
// core.Join("/", "deploy", "to", "homelab") // "deploy/to/homelab"
|
||||
// core.Join(".", "cmd", "deploy", "description") // "cmd.deploy.description"
|
||||
func Join(sep string, parts ...string) string {
|
||||
if len(parts) == 0 {
|
||||
return ""
|
||||
}
|
||||
result := parts[0]
|
||||
for _, p := range parts[1:] {
|
||||
result = Concat(result, sep, p)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Replace replaces all occurrences of old with new in s.
// Thin wrapper over strings.ReplaceAll.
//
// core.Replace("deploy/to/homelab", "/", ".") // "deploy.to.homelab"
func Replace(s, old, new string) string {
	return strings.ReplaceAll(s, old, new)
}
|
||||
|
||||
// Lower returns s in lowercase.
// Thin wrapper over strings.ToLower.
//
// core.Lower("HELLO") // "hello"
func Lower(s string) string {
	return strings.ToLower(s)
}
|
||||
|
||||
// Upper returns s in uppercase.
// Thin wrapper over strings.ToUpper.
//
// core.Upper("hello") // "HELLO"
func Upper(s string) string {
	return strings.ToUpper(s)
}
|
||||
|
||||
// Trim removes leading and trailing whitespace.
// Thin wrapper over strings.TrimSpace.
//
// core.Trim(" hello ") // "hello"
func Trim(s string) string {
	return strings.TrimSpace(s)
}
|
||||
|
||||
// RuneCount returns the number of runes (unicode characters) in s.
// Unlike len(s), which counts bytes, a multi-byte character counts once.
//
// core.RuneCount("hello") // 5
// core.RuneCount("🔥") // 1
func RuneCount(s string) int {
	return utf8.RuneCountInString(s)
}
|
||||
|
||||
// NewBuilder returns a new strings.Builder.
// Exists so downstream packages avoid importing strings directly.
//
// b := core.NewBuilder()
// b.WriteString("hello")
// b.String() // "hello"
func NewBuilder() *strings.Builder {
	return &strings.Builder{}
}
|
||||
|
||||
// NewReader returns a strings.NewReader for the given string.
// Exists so downstream packages avoid importing strings directly.
//
// r := core.NewReader("hello world")
func NewReader(s string) *strings.Reader {
	return strings.NewReader(s)
}
|
||||
|
||||
// Sprint converts any value to its string representation.
// Delegates to fmt.Sprint.
//
// core.Sprint(42) // "42"
// core.Sprint(err) // "connection refused"
func Sprint(args ...any) string {
	return fmt.Sprint(args...)
}
|
||||
|
||||
// Sprintf formats a string with the given arguments.
// Delegates to fmt.Sprintf.
//
// core.Sprintf("%v=%q", "key", "value") // `key="value"`
func Sprintf(format string, args ...any) string {
	return fmt.Sprintf(format, args...)
}
|
||||
|
||||
// Concat joins variadic string parts into one string.
// Hook point for validation, sanitisation, and security checks.
//
// core.Concat("cmd.", "deploy.to.homelab", ".description")
// core.Concat("https://", host, "/api/v1")
func Concat(parts ...string) string {
	var b strings.Builder
	for _, part := range parts {
		b.WriteString(part)
	}
	return b.String()
}
|
||||
61
.core/reference/task.go
Normal file
61
.core/reference/task.go
Normal file
|
|
@ -0,0 +1,61 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Background action dispatch for the Core framework.
|
||||
// PerformAsync runs a named Action in a background goroutine with
|
||||
// panic recovery and progress broadcasting.
|
||||
|
||||
package core
|
||||
|
||||
import "context"
|
||||
|
||||
// PerformAsync dispatches a named action in a background goroutine.
|
||||
// Broadcasts ActionTaskStarted, ActionTaskProgress, and ActionTaskCompleted
|
||||
// as IPC messages so other services can track progress.
|
||||
//
|
||||
// r := c.PerformAsync("agentic.dispatch", opts)
|
||||
// taskID := r.Value.(string)
|
||||
func (c *Core) PerformAsync(action string, opts Options) Result {
|
||||
if c.shutdown.Load() {
|
||||
return Result{}
|
||||
}
|
||||
taskID := ID()
|
||||
|
||||
c.ACTION(ActionTaskStarted{TaskIdentifier: taskID, Action: action, Options: opts})
|
||||
|
||||
c.waitGroup.Go(func() {
|
||||
defer func() {
|
||||
if rec := recover(); rec != nil {
|
||||
c.ACTION(ActionTaskCompleted{
|
||||
TaskIdentifier: taskID,
|
||||
Action: action,
|
||||
Result: Result{E("core.PerformAsync", Sprint("panic: ", rec), nil), false},
|
||||
})
|
||||
}
|
||||
}()
|
||||
|
||||
r := c.Action(action).Run(context.Background(), opts)
|
||||
|
||||
c.ACTION(ActionTaskCompleted{
|
||||
TaskIdentifier: taskID,
|
||||
Action: action,
|
||||
Result: r,
|
||||
})
|
||||
})
|
||||
|
||||
return Result{taskID, true}
|
||||
}
|
||||
|
||||
// Progress broadcasts a progress update for a background task.
|
||||
//
|
||||
// c.Progress(taskID, 0.5, "halfway done", "agentic.dispatch")
|
||||
func (c *Core) Progress(taskID string, progress float64, message string, action string) {
|
||||
c.ACTION(ActionTaskProgress{
|
||||
TaskIdentifier: taskID,
|
||||
Action: action,
|
||||
Progress: progress,
|
||||
Message: message,
|
||||
})
|
||||
}
|
||||
|
||||
// Registration methods (RegisterAction, RegisterActions)
|
||||
// are in ipc.go — registration is IPC's responsibility.
|
||||
223
.core/reference/utils.go
Normal file
223
.core/reference/utils.go
Normal file
|
|
@ -0,0 +1,223 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Utility functions for the Core framework.
|
||||
// Built on core string.go primitives.
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
crand "crypto/rand"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// --- ID Generation ---
|
||||
|
||||
var idCounter atomic.Uint64
|
||||
|
||||
// ID returns a unique identifier. Format: "id-{counter}-{random}".
|
||||
// Counter is process-wide atomic. Random suffix prevents collision across restarts.
|
||||
//
|
||||
// id := core.ID() // "id-1-a3f2b1"
|
||||
// id2 := core.ID() // "id-2-c7e4d9"
|
||||
func ID() string {
|
||||
return Concat("id-", strconv.FormatUint(idCounter.Add(1), 10), "-", shortRand())
|
||||
}
|
||||
|
||||
func shortRand() string {
|
||||
b := make([]byte, 3)
|
||||
crand.Read(b)
|
||||
return hex.EncodeToString(b)
|
||||
}
|
||||
|
||||
// --- Validation ---
|
||||
|
||||
// ValidateName checks that a string is a valid service/action/command name.
|
||||
// Rejects empty, ".", "..", and names containing path separators.
|
||||
//
|
||||
// r := core.ValidateName("brain") // Result{"brain", true}
|
||||
// r := core.ValidateName("") // Result{error, false}
|
||||
// r := core.ValidateName("../escape") // Result{error, false}
|
||||
func ValidateName(name string) Result {
|
||||
if name == "" || name == "." || name == ".." {
|
||||
return Result{E("validate", Concat("invalid name: ", name), nil), false}
|
||||
}
|
||||
if Contains(name, "/") || Contains(name, "\\") {
|
||||
return Result{E("validate", Concat("name contains path separator: ", name), nil), false}
|
||||
}
|
||||
return Result{name, true}
|
||||
}
|
||||
|
||||
// SanitisePath extracts the base filename and rejects traversal attempts.
|
||||
// Returns "invalid" for dangerous inputs.
|
||||
//
|
||||
// core.SanitisePath("../../etc/passwd") // "passwd"
|
||||
// core.SanitisePath("") // "invalid"
|
||||
// core.SanitisePath("..") // "invalid"
|
||||
func SanitisePath(path string) string {
|
||||
safe := PathBase(path)
|
||||
if safe == "." || safe == ".." || safe == "" {
|
||||
return "invalid"
|
||||
}
|
||||
return safe
|
||||
}
|
||||
|
||||
// --- I/O ---
|
||||
|
||||
// Println prints values to stdout with a newline. Replaces fmt.Println.
|
||||
//
|
||||
// core.Println("hello", 42, true)
|
||||
func Println(args ...any) {
|
||||
fmt.Println(args...)
|
||||
}
|
||||
|
||||
// Print writes a formatted line to a writer, defaulting to os.Stdout.
|
||||
//
|
||||
// core.Print(nil, "hello %s", "world") // → stdout
|
||||
// core.Print(w, "port: %d", 8080) // → w
|
||||
func Print(w io.Writer, format string, args ...any) {
|
||||
if w == nil {
|
||||
w = os.Stdout
|
||||
}
|
||||
fmt.Fprintf(w, format+"\n", args...)
|
||||
}
|
||||
|
||||
// JoinPath joins string segments into a path with "/" separator.
|
||||
//
|
||||
// core.JoinPath("deploy", "to", "homelab") // → "deploy/to/homelab"
|
||||
func JoinPath(segments ...string) string {
|
||||
return Join("/", segments...)
|
||||
}
|
||||
|
||||
// IsFlag returns true if the argument starts with a dash.
|
||||
//
|
||||
// core.IsFlag("--verbose") // true
|
||||
// core.IsFlag("-v") // true
|
||||
// core.IsFlag("deploy") // false
|
||||
func IsFlag(arg string) bool {
|
||||
return HasPrefix(arg, "-")
|
||||
}
|
||||
|
||||
// Arg extracts a value from variadic args at the given index.
|
||||
// Type-checks and delegates to the appropriate typed extractor.
|
||||
// Returns Result — OK is false if index is out of bounds.
|
||||
//
|
||||
// r := core.Arg(0, args...)
|
||||
// if r.OK { path = r.Value.(string) }
|
||||
func Arg(index int, args ...any) Result {
|
||||
if index >= len(args) {
|
||||
return Result{}
|
||||
}
|
||||
v := args[index]
|
||||
switch v.(type) {
|
||||
case string:
|
||||
return Result{ArgString(index, args...), true}
|
||||
case int:
|
||||
return Result{ArgInt(index, args...), true}
|
||||
case bool:
|
||||
return Result{ArgBool(index, args...), true}
|
||||
default:
|
||||
return Result{v, true}
|
||||
}
|
||||
}
|
||||
|
||||
// ArgString extracts a string at the given index.
|
||||
//
|
||||
// name := core.ArgString(0, args...)
|
||||
func ArgString(index int, args ...any) string {
|
||||
if index >= len(args) {
|
||||
return ""
|
||||
}
|
||||
s, ok := args[index].(string)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// ArgInt extracts an int at the given index.
|
||||
//
|
||||
// port := core.ArgInt(1, args...)
|
||||
func ArgInt(index int, args ...any) int {
|
||||
if index >= len(args) {
|
||||
return 0
|
||||
}
|
||||
i, ok := args[index].(int)
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// ArgBool extracts a bool at the given index.
|
||||
//
|
||||
// debug := core.ArgBool(2, args...)
|
||||
func ArgBool(index int, args ...any) bool {
|
||||
if index >= len(args) {
|
||||
return false
|
||||
}
|
||||
b, ok := args[index].(bool)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// FilterArgs removes empty strings and Go test runner flags from an argument list.
|
||||
//
|
||||
// clean := core.FilterArgs(os.Args[1:])
|
||||
func FilterArgs(args []string) []string {
|
||||
var clean []string
|
||||
for _, a := range args {
|
||||
if a == "" || HasPrefix(a, "-test.") {
|
||||
continue
|
||||
}
|
||||
clean = append(clean, a)
|
||||
}
|
||||
return clean
|
||||
}
|
||||
|
||||
// ParseFlag parses a single flag argument into key, value, and validity.
|
||||
// Single dash (-) requires exactly 1 character (letter, emoji, unicode).
|
||||
// Double dash (--) requires 2+ characters.
|
||||
//
|
||||
// "-v" → "v", "", true
|
||||
// "-🔥" → "🔥", "", true
|
||||
// "--verbose" → "verbose", "", true
|
||||
// "--port=8080" → "port", "8080", true
|
||||
// "-verbose" → "", "", false (single dash, 2+ chars)
|
||||
// "--v" → "", "", false (double dash, 1 char)
|
||||
// "hello" → "", "", false (not a flag)
|
||||
func ParseFlag(arg string) (key, value string, valid bool) {
|
||||
if HasPrefix(arg, "--") {
|
||||
rest := TrimPrefix(arg, "--")
|
||||
parts := SplitN(rest, "=", 2)
|
||||
name := parts[0]
|
||||
if RuneCount(name) < 2 {
|
||||
return "", "", false
|
||||
}
|
||||
if len(parts) == 2 {
|
||||
return name, parts[1], true
|
||||
}
|
||||
return name, "", true
|
||||
}
|
||||
|
||||
if HasPrefix(arg, "-") {
|
||||
rest := TrimPrefix(arg, "-")
|
||||
parts := SplitN(rest, "=", 2)
|
||||
name := parts[0]
|
||||
if RuneCount(name) != 1 {
|
||||
return "", "", false
|
||||
}
|
||||
if len(parts) == 2 {
|
||||
return name, parts[1], true
|
||||
}
|
||||
return name, "", true
|
||||
}
|
||||
|
||||
return "", "", false
|
||||
}
|
||||
26
.github/workflows/ci.yml
vendored
Normal file
26
.github/workflows/ci.yml
vendored
Normal file
|
|
@ -0,0 +1,26 @@
|
|||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Run tests with coverage
|
||||
run: |
|
||||
go test -coverprofile=coverage.out ./pkg/brain/... ./pkg/monitor/... ./pkg/agentic/...
|
||||
sed -i 's|dappco.re/go/agent/||g' coverage.out
|
||||
|
||||
- name: Upload to Codecov
|
||||
uses: codecov/codecov-action@v5
|
||||
with:
|
||||
files: coverage.out
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
11
.gitignore
vendored
11
.gitignore
vendored
|
|
@ -1 +1,12 @@
|
|||
.idea/
|
||||
.vscode/
|
||||
*.log
|
||||
.core/*
|
||||
!.core/docs/
|
||||
!.core/docs/**
|
||||
!.core/reference/
|
||||
!.core/reference/**
|
||||
!.core/workspace.yaml
|
||||
node_modules/
|
||||
bin/
|
||||
dist/
|
||||
|
|
|
|||
9
.mcp.json
Normal file
9
.mcp.json
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
{
|
||||
"mcpServers": {
|
||||
"core": {
|
||||
"type": "stdio",
|
||||
"command": "core-agent",
|
||||
"args": ["mcp"]
|
||||
}
|
||||
}
|
||||
}
|
||||
130
AGENTS.md
Normal file
130
AGENTS.md
Normal file
|
|
@ -0,0 +1,130 @@
|
|||
# AGENTS.md — Universal Agent Instructions
|
||||
|
||||
> For all AI agents working on this repository (Codex, Claude, Gemini, etc).
|
||||
> See also: `llm.txt` for entry points, RFC-025 for design conventions.
|
||||
|
||||
## Overview
|
||||
|
||||
**core-agent** is the AI agent orchestration platform for the Core ecosystem. Single Go binary that runs as an MCP server — stdio (IDE integration) or HTTP daemon (cross-agent communication).
|
||||
|
||||
**Module:** `dappco.re/go/agent`
|
||||
|
||||
## Build & Test
|
||||
|
||||
```bash
|
||||
go build ./... # Build all packages
|
||||
go build ./cmd/core-agent/ # Build the binary
|
||||
go test ./... -count=1 -timeout 60s # Run all tests (840+)
|
||||
go vet ./... # Vet
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
cmd/core-agent/main.go Entry point (97 lines — core.New + services + Run)
|
||||
pkg/agentic/ Agent orchestration: dispatch, prep, verify, scan, review
|
||||
pkg/brain/ OpenBrain memory integration
|
||||
pkg/lib/ Embedded templates, personas, flows, workspace scaffolds
|
||||
pkg/messages/ Typed IPC message definitions (12 message types)
|
||||
pkg/monitor/ Agent monitoring, notifications, completion tracking
|
||||
pkg/setup/ Workspace detection and scaffolding
|
||||
```
|
||||
|
||||
### Service Registration
|
||||
|
||||
```go
|
||||
c := core.New(
|
||||
core.WithOption("name", "core-agent"),
|
||||
core.WithService(agentic.ProcessRegister),
|
||||
core.WithService(agentic.Register),
|
||||
core.WithService(monitor.Register),
|
||||
core.WithService(brain.Register),
|
||||
core.WithService(mcp.Register),
|
||||
)
|
||||
c.Run()
|
||||
```
|
||||
|
||||
### Dispatch Flow
|
||||
|
||||
```
|
||||
dispatch → prep workspace → spawn agent in Docker container
|
||||
→ agent works → completion detected via proc.Done()
|
||||
→ detectFinalStatus (completed/blocked/failed)
|
||||
→ onAgentComplete (save output, track rate limits, emit IPC)
|
||||
→ IPC pipeline: AgentCompleted → QA → AutoPR → Verify → Merge
|
||||
```
|
||||
|
||||
## Coding Standards
|
||||
|
||||
- **UK English**: colour, organisation, centre, initialise
|
||||
- **Errors**: `core.E("pkg.Method", "message", err)` — NEVER `fmt.Errorf`
|
||||
- **File I/O**: Package-level `fs` (go-io Medium) — NEVER `os.ReadFile/WriteFile`
|
||||
- **Processes**: `s.Core().Process()` / go-process — NEVER `os/exec` directly
|
||||
- **Strings**: `core.Contains/Split/Trim/HasPrefix/Sprintf` — NEVER `strings.*`
|
||||
- **Returns**: `core.Result{Value, OK}` — NEVER `(value, error)` pairs
|
||||
- **Comments**: Usage examples showing HOW with real values, not descriptions
|
||||
- **Names**: Predictable, unabbreviated (Config not Cfg, Service not Srv)
|
||||
- **Commits**: `type(scope): description` with `Co-Authored-By: Virgil <virgil@lethean.io>`
|
||||
- **Licence**: EUPL-1.2 — `// SPDX-License-Identifier: EUPL-1.2` on every file
|
||||
|
||||
## Testing Convention (AX-7)
|
||||
|
||||
Every function gets three test categories:
|
||||
|
||||
```
|
||||
TestFilename_FunctionName_Good — happy path, proves contract works
|
||||
TestFilename_FunctionName_Bad — expected errors, proves error handling
|
||||
TestFilename_FunctionName_Ugly — edge cases, panics, corruption
|
||||
```
|
||||
|
||||
One test file per source file. No catch-all files. Names must sort cleanly.
|
||||
|
||||
### Current Coverage
|
||||
|
||||
- 840 tests, 79.9% statement coverage
|
||||
- 92% AX-7 (Good/Bad/Ugly) category coverage
|
||||
|
||||
## Process Execution
|
||||
|
||||
All external commands go through `s.Core().Process()` → go-process:
|
||||
|
||||
```go
|
||||
process := s.Core().Process()
|
||||
out := process.RunIn(ctx, dir, "git", "log", "--oneline")
|
||||
ok := process.RunIn(ctx, dir, "git", "fetch", "origin", "main").OK
|
||||
branchResult := process.RunIn(ctx, dir, "git", "rev-parse", "--abbrev-ref", "HEAD")
|
||||
branch := core.Trim(branchResult.Value.(string))
|
||||
```
|
||||
|
||||
**NEVER import `os/exec`.** Zero source files do.
|
||||
|
||||
## MCP Tools (33)
|
||||
|
||||
| Category | Tools |
|
||||
|----------|-------|
|
||||
| Dispatch | `agentic_dispatch`, `agentic_dispatch_remote`, `agentic_status`, `agentic_status_remote` |
|
||||
| Workspace | `agentic_prep_workspace`, `agentic_resume`, `agentic_watch` |
|
||||
| PR/Review | `agentic_create_pr`, `agentic_list_prs`, `agentic_create_epic`, `agentic_review_queue` |
|
||||
| Mirror | `agentic_mirror` (Forge → GitHub sync) |
|
||||
| Scan | `agentic_scan` (Forge issues) |
|
||||
| Brain | `brain_recall`, `brain_remember`, `brain_forget` |
|
||||
| Messaging | `agent_send`, `agent_inbox`, `agent_conversation` |
|
||||
| Plans | `agentic_plan_create/read/update/delete/list` |
|
||||
| Files | `file_read/write/edit/delete/rename/exists`, `dir_list/create` |
|
||||
| Language | `lang_detect`, `lang_list` |
|
||||
|
||||
## Key Paths
|
||||
|
||||
| Function | Returns |
|
||||
|----------|---------|
|
||||
| `WorkspaceRoot()` | `$CORE_WORKSPACE/workspace` or `~/Code/.core/workspace` |
|
||||
| `CoreRoot()` | `$CORE_WORKSPACE` or `~/Code/.core` |
|
||||
| `PlansRoot()` | `$CORE_WORKSPACE/plans` |
|
||||
| `AgentName()` | `$AGENT_NAME` or hostname-based detection |
|
||||
| `GitHubOrg()` | `$GITHUB_ORG` or `dAppCore` |
|
||||
|
||||
## Branch Strategy
|
||||
|
||||
- Work on `dev` branch, never push to `main` directly
|
||||
- PRs required for `main`
|
||||
- Use `go get -u ./...` for dependency updates, never manual go.mod edits
|
||||
243
CLAUDE.md
243
CLAUDE.md
|
|
@ -1,163 +1,166 @@
|
|||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
This file provides guidance to Claude Code when working with code in this repository.
|
||||
|
||||
## Session Context
|
||||
|
||||
Running on **Claude Max20 plan** with **1M context window** (Opus 4.6).
|
||||
|
||||
## Overview
|
||||
|
||||
**core-agent** is a polyglot monorepo (Go + PHP) for AI agent orchestration. The Go side handles agent-side execution, CLI commands, and autonomous agent loops. The PHP side (Laravel package `lthn/agent`) provides the backend API, persistent storage, multi-provider AI services, and admin panel. They communicate via REST API.
|
||||
**core-agent** is the AI agent orchestration platform for the Core ecosystem. Single Go binary (`core-agent`) that runs as an MCP server — either via stdio (Claude Code integration) or HTTP daemon (cross-agent communication).
|
||||
|
||||
The repo also contains Claude Code plugins (5), Codex plugins (13), a Gemini CLI extension, and two MCP servers.
|
||||
**Module:** `dappco.re/go/agent`
|
||||
|
||||
## Core CLI — Always Use It
|
||||
|
||||
**Never use raw `go`, `php`, or `composer` commands.** The `core` CLI wraps both toolchains and is enforced by PreToolUse hooks that will block violations.
|
||||
|
||||
| Instead of... | Use... |
|
||||
|---------------|--------|
|
||||
| `go test` | `core go test` |
|
||||
| `go build` | `core build` |
|
||||
| `go fmt` | `core go fmt` |
|
||||
| `go vet` | `core go vet` |
|
||||
| `golangci-lint` | `core go lint` |
|
||||
| `composer test` / `./vendor/bin/pest` | `core php test` |
|
||||
| `./vendor/bin/pint` / `composer lint` | `core php fmt` |
|
||||
| `./vendor/bin/phpstan` | `core php stan` |
|
||||
| `php artisan serve` | `core php dev` |
|
||||
|
||||
## Build & Test Commands
|
||||
## Build & Test
|
||||
|
||||
```bash
|
||||
# Go
|
||||
core go test # Run all Go tests
|
||||
core go test --run TestMemoryRegistry_Register_Good # Run single test
|
||||
core go qa # Full QA: fmt + vet + lint + test
|
||||
core go qa full # QA + race detector + vuln scan
|
||||
core go cov # Test coverage
|
||||
core build # Verify Go packages compile
|
||||
go build ./... # Build all packages
|
||||
go build ./cmd/core-agent/ # Build the binary
|
||||
go test ./... -count=1 -timeout 60s # Run tests
|
||||
go vet ./... # Vet
|
||||
go install ./cmd/core-agent/ # Install to $GOPATH/bin
|
||||
```
|
||||
|
||||
# PHP
|
||||
core php test # Run Pest suite
|
||||
core php test --filter=AgenticManagerTest # Run specific test file
|
||||
core php fmt # Format (Laravel Pint)
|
||||
core php stan # Static analysis (PHPStan)
|
||||
core php qa # Full PHP QA pipeline
|
||||
|
||||
# MCP servers (standalone builds)
|
||||
cd cmd/mcp && go build -o agent-mcp . # Stdio MCP server
|
||||
cd google/mcp && go build -o google-mcp . # HTTP MCP server (port 8080)
|
||||
|
||||
# Workspace
|
||||
make setup # Full bootstrap (deps + core + clone repos)
|
||||
core dev health # Status across repos
|
||||
Cross-compile for Charon (Linux):
|
||||
```bash
|
||||
GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o core-agent-linux ./cmd/core-agent/
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
Forgejo
|
||||
|
|
||||
[ForgejoSource polls]
|
||||
|
|
||||
v
|
||||
+-- Go: jobrunner Poller --+ +-- PHP: Laravel Backend --+
|
||||
| ForgejoSource | | AgentApiController |
|
||||
| DispatchHandler ---------|----->| /v1/plans |
|
||||
| CompletionHandler | | /v1/sessions |
|
||||
| ResolveThreadsHandler | | /v1/plans/*/phases |
|
||||
+--------------------------+ +-------------+------------+
|
||||
|
|
||||
[Eloquent models]
|
||||
AgentPlan, AgentPhase,
|
||||
AgentSession, BrainMemory
|
||||
cmd/core-agent/main.go Entry point (mcp + serve commands)
|
||||
pkg/agentic/ MCP tools — dispatch, verify, remote, mirror, review queue
|
||||
pkg/brain/ OpenBrain — recall, remember, messaging
|
||||
pkg/monitor/ Background monitoring + repo sync
|
||||
pkg/prompts/ Embedded templates + personas (go:embed)
|
||||
```
|
||||
|
||||
### Go Packages (`pkg/`)
|
||||
### Binary Modes
|
||||
|
||||
- **`lifecycle/`** — Core domain layer. Task, AgentInfo, Plan, Phase, Session types. Agent registry (Memory/SQLite/Redis backends), task router (capability matching + load scoring), allowance system (quota enforcement), dispatcher (orchestrates dispatch with exponential backoff), event system, brain (vector store), context (git integration).
|
||||
- **`loop/`** — Autonomous agent reasoning engine. Prompt-parse-execute cycle against any `inference.TextModel` with tool calling and streaming.
|
||||
- **`orchestrator/`** — Clotho protocol for dual-run verification and agent orchestration.
|
||||
- **`jobrunner/`** — Poll-dispatch engine for agent-side work execution. Polls Forgejo for work items, executes phases, reports results.
|
||||
- `core-agent mcp` — stdio MCP server for Claude Code
|
||||
- `core-agent serve` — HTTP daemon (Charon, CI, cross-agent). PID file, health check, registry.
|
||||
|
||||
### Go Commands (`cmd/`)
|
||||
### MCP Tools (33)
|
||||
|
||||
- **`tasks/`** — `core ai tasks`, `core ai task [id]` — task management
|
||||
- **`agent/`** — `core ai agent` — agent machine management (add, list, status, fleet)
|
||||
- **`dispatch/`** — `core ai dispatch` — work queue processor (watch, run)
|
||||
- **`workspace/`** — `core workspace task`, `core workspace agent` — git worktree isolation
|
||||
- **`mcp/`** — Standalone stdio MCP server exposing `marketplace_list`, `marketplace_plugin_info`, `core_cli`, `ethics_check`
|
||||
| Category | Tools |
|
||||
|----------|-------|
|
||||
| Dispatch | `agentic_dispatch`, `agentic_dispatch_remote`, `agentic_status`, `agentic_status_remote` |
|
||||
| Workspace | `agentic_prep_workspace`, `agentic_resume`, `agentic_watch` |
|
||||
| PR/Review | `agentic_create_pr`, `agentic_list_prs`, `agentic_create_epic`, `agentic_review_queue` |
|
||||
| Mirror | `agentic_mirror` (Forge → GitHub sync) |
|
||||
| Scan | `agentic_scan` (Forge issues) |
|
||||
| Brain | `brain_recall`, `brain_remember`, `brain_forget` |
|
||||
| Messaging | `agent_send`, `agent_inbox`, `agent_conversation` |
|
||||
| Plans | `agentic_plan_create`, `agentic_plan_read`, `agentic_plan_update`, `agentic_plan_delete`, `agentic_plan_list` |
|
||||
| Files | `file_read`, `file_write`, `file_edit`, `file_delete`, `file_rename`, `file_exists`, `dir_list`, `dir_create` |
|
||||
| Language | `lang_detect`, `lang_list` |
|
||||
|
||||
### PHP (`src/php/`)
|
||||
### Agent Types
|
||||
|
||||
- **Namespace**: `Core\Mod\Agentic\` (service provider: `Boot`)
|
||||
- **Models/** — 19 Eloquent models (AgentPlan, AgentPhase, AgentSession, BrainMemory, Task, Prompt, etc.)
|
||||
- **Services/** — AgenticManager (multi-provider: Claude/Gemini/OpenAI), BrainService (Ollama+Qdrant), ForgejoService, AI services with stream parsing and retry traits
|
||||
- **Controllers/** — AgentApiController (REST endpoints)
|
||||
- **Actions/** — Single-purpose action classes (Brain, Forge, Phase, Plan, Session, Task)
|
||||
- **View/** — Livewire admin panel components (Dashboard, Plans, Sessions, ApiKeys, Templates, Playground, etc.)
|
||||
- **Mcp/** — MCP tool implementations (Brain, Content, Phase, Plan, Session, State, Task, Template)
|
||||
- **Migrations/** — 10 migrations (run automatically on boot)
|
||||
| Agent | Command | Use |
|
||||
|-------|---------|-----|
|
||||
| `claude:opus` | Claude Code | Complex coding, architecture |
|
||||
| `claude:sonnet` | Claude Code | Standard tasks |
|
||||
| `claude:haiku` | Claude Code | Quick/cheap tasks, discovery |
|
||||
| `gemini` | Gemini CLI | Fast batch ops |
|
||||
| `codex` | Codex CLI | Autonomous coding |
|
||||
| `codex:review` | Codex review | Deep security analysis |
|
||||
| `coderabbit` | CodeRabbit CLI | Code quality review |
|
||||
|
||||
## Claude Code Plugins (`claude/`)
|
||||
### Dispatch Flow
|
||||
|
||||
Five plugins installable individually or via marketplace:
|
||||
```
|
||||
dispatch → agent works → closeout sequence (review → fix → simplify → re-review)
|
||||
→ commit → auto PR → inline tests → pass → auto-merge on Forge
|
||||
→ push to GitHub → CodeRabbit reviews → merge or dispatch fix agent
|
||||
```
|
||||
|
||||
| Plugin | Commands |
|
||||
|--------|----------|
|
||||
| **code** | `/code:remember`, `/code:yes`, `/code:qa` |
|
||||
| **review** | `/review:review`, `/review:security`, `/review:pr`, `/review:pipeline` |
|
||||
| **verify** | `/verify:verify`, `/verify:ready`, `/verify:tests` |
|
||||
| **qa** | `/qa:qa`, `/qa:fix`, `/qa:check`, `/qa:lint` |
|
||||
| **ci** | `/ci:ci`, `/ci:workflow`, `/ci:fix`, `/ci:run`, `/ci:status` |
|
||||
### Personas (pkg/prompts/lib/personas/)
|
||||
|
||||
### Hooks (code plugin)
|
||||
116 personas across 16 domains. Path = context, filename = lens.
|
||||
|
||||
**PreToolUse**: `prefer-core.sh` blocks destructive operations (`rm -rf`, `sed -i`, `xargs rm`, `find -exec rm`, `grep -l | ...`, `mv/cp *`) and raw go/php commands. `block-docs.sh` prevents random `.md` file creation.
|
||||
```
|
||||
prompts.Persona("engineering/security-developer") # code-level security review
|
||||
prompts.Persona("smm/security-secops") # social media incident response
|
||||
prompts.Persona("devops/senior") # infrastructure architecture
|
||||
```
|
||||
|
||||
**PostToolUse**: Auto-formats Go (`gofmt`) and PHP (`pint`) after edits. Warns about debug statements (`dd()`, `dump()`, `fmt.Println()`).
|
||||
### Templates (pkg/prompts/lib/templates/)
|
||||
|
||||
**PreCompact**: Saves session state. **SessionStart**: Restores session context.
|
||||
Prompt templates for different task types: `coding`, `conventions`, `security`, `verify`, plus YAML plan templates (`bug-fix`, `code-review`, `new-feature`, `refactor`, etc.)
|
||||
|
||||
## Other Directories
|
||||
## Key Patterns
|
||||
|
||||
- **`codex/`** — 13 Codex plugins mirroring Claude structure plus ethics, guardrails, perf, issue, coolify, awareness
|
||||
- **`agents/`** — 13 specialist agent categories (design, engineering, marketing, product, testing, etc.) with example configs and system prompts
|
||||
- **`google/gemini-cli/`** — Gemini CLI extension (TypeScript, `npm run build`)
|
||||
- **`google/mcp/`** — HTTP MCP server exposing `core_go_test`, `core_dev_health`, `core_dev_commit`
|
||||
- **`docs/`** — `architecture.md` (deep dive), `development.md` (comprehensive dev guide), `docs/plans/` (design documents)
|
||||
- **`scripts/`** — Environment setup scripts (`install-core.sh`, `install-deps.sh`, `agent-runner.sh`, etc.)
|
||||
### Shared Paths (pkg/agentic/paths.go)
|
||||
|
||||
All paths use `CORE_WORKSPACE` env var, fallback `~/Code/.core`:
|
||||
- `WorkspaceRoot()` — agent workspaces
|
||||
- `CoreRoot()` — ecosystem config
|
||||
- `PlansRoot()` — agent plans
|
||||
- `AgentName()` — `AGENT_NAME` env or hostname detection
|
||||
- `GitHubOrg()` — `GITHUB_ORG` env or "dAppCore"
|
||||
|
||||
### Error Handling
|
||||
|
||||
`coreerr.E("pkg.Method", "message", err)` from go-log. Always 3 args. NEVER `fmt.Errorf`.
|
||||
|
||||
### File I/O
|
||||
|
||||
`coreio.Local.Read/Write/EnsureDir` from go-io. `WriteMode(path, content, 0600)` for sensitive files. NEVER `os.ReadFile/WriteFile`.
|
||||
|
||||
### HTTP Responses
|
||||
|
||||
Always check `err != nil` BEFORE accessing `resp.StatusCode`. Split into two checks.
|
||||
|
||||
## Plugin (claude/core/)
|
||||
|
||||
The Claude Code plugin provides:
|
||||
- **MCP server** via `mcp.json` (auto-registers core-agent)
|
||||
- **Hooks** via `hooks.json` (PostToolUse inbox notifications, auto-format, debug warnings)
|
||||
- **Agents**: `agent-task-code-review`, `agent-task-code-simplifier`
|
||||
- **Commands**: dispatch, status, review, recall, remember, scan, etc.
|
||||
- **Skills**: security review, architecture review, test analysis, etc.
|
||||
|
||||
## Testing Conventions
|
||||
|
||||
### Go
|
||||
|
||||
Uses `testify/assert` and `testify/require`. Name tests with suffixes:
|
||||
- `_Good` — happy path
|
||||
- `_Bad` — expected error conditions
|
||||
- `_Ugly` — panics and edge cases
|
||||
- Use `testify/assert` + `testify/require`
|
||||
|
||||
Use `require` for preconditions (stops on failure), `assert` for verifications (reports all failures).
|
||||
## Sprint Intel Collection
|
||||
|
||||
### PHP
|
||||
Before starting significant work on any repo, build a blueprint by querying three sources in parallel:
|
||||
|
||||
Pest with Orchestra Testbench. Feature tests use `RefreshDatabase`. Helpers: `createWorkspace()`, `createApiKey($workspace, ...)`.
|
||||
1. **OpenBrain**: `brain_recall` with `"{repo} plans features ideas architecture"` — returns bugs, patterns, conventions, session milestones
|
||||
2. **Active plans**: `agentic_plan_list` — structured plans with phases, status, acceptance criteria
|
||||
3. **Local docs**: glob `docs/plans/**` in the repo — design docs, migration plans, pipeline docs
|
||||
|
||||
Combine into a sprint blueprint with sections: Known Bugs, Active Plans, Local Docs, Recent Fixes, Architecture Notes.
|
||||
|
||||
### Active Plan: Pipeline Orchestration (draft)
|
||||
|
||||
Plans drive the entire dispatch→verify→merge flow:
|
||||
|
||||
1. **Plans API** — local JSON → CorePHP Laravel endpoints
|
||||
2. **Plan ↔ Dispatch** — auto-advance phases, auto-create Forge issues on BLOCKED
|
||||
3. **Task minting** — `/v1/plans/next` serves highest-priority ready phase
|
||||
4. **Exception pipeline** — BLOCKED → Forge issues automatically
|
||||
5. **GitHub quality gate** — verified → squash release, CodeRabbit 0-findings
|
||||
6. **Pipeline dashboard** — admin UI with status badges
|
||||
|
||||
### Known Gotchas (OpenBrain)
|
||||
|
||||
- Workspace prep: PROMPT.md requires TODO.md but workspace may not have one — dispatch bug
|
||||
- `core.Env("DIR_HOME")` is static at init. Use `CORE_HOME` for test overrides
|
||||
- `pkg/brain` recall/list are async bridge proxies — empty responses are intentional
|
||||
- Monitor path helpers need separator normalisation for cross-platform API/glob output
|
||||
|
||||
## Coding Standards
|
||||
|
||||
- **UK English**: colour, organisation, centre, licence, behaviour
|
||||
- **Go**: standard `gofmt`, errors via `core.E("scope.Method", "what failed", err)`
|
||||
- **PHP**: `declare(strict_types=1)`, full type hints, PSR-12 via Pint, Pest syntax for tests
|
||||
- **Shell**: `#!/bin/bash`, JSON input via `jq`, output `{"decision": "approve"|"block", "message": "..."}`
|
||||
- **Commits**: conventional — `type(scope): description` (e.g. `feat(lifecycle): add exponential backoff`)
|
||||
- **Licence**: EUPL-1.2 CIC
|
||||
|
||||
## Prerequisites
|
||||
|
||||
| Tool | Version | Purpose |
|
||||
|------|---------|---------|
|
||||
| Go | 1.26+ | Go packages, CLI, MCP servers |
|
||||
| PHP | 8.2+ | Laravel package, Pest tests |
|
||||
| Composer | 2.x | PHP dependencies |
|
||||
| `core` CLI | latest | Wraps Go/PHP toolchains (enforced by hooks) |
|
||||
| `jq` | any | JSON parsing in shell hooks |
|
||||
|
||||
Go module is `forge.lthn.ai/core/agent`, participates in a Go workspace (`go.work`) resolving all `forge.lthn.ai/core/*` dependencies locally.
|
||||
- **UK English**: colour, organisation, centre, initialise
|
||||
- **Commits**: `type(scope): description` with `Co-Authored-By: Virgil <virgil@lethean.io>`
|
||||
- **Licence**: EUPL-1.2
|
||||
- **SPDX**: `// SPDX-License-Identifier: EUPL-1.2` on every file
|
||||
|
|
|
|||
86
CODEX.md
Normal file
86
CODEX.md
Normal file
|
|
@ -0,0 +1,86 @@
|
|||
# CODEX.md
|
||||
|
||||
Instructions for Codex when working in `dappco.re/go/agent`.
|
||||
|
||||
Read these files in order:
|
||||
1. `CODEX.md`
|
||||
2. `.core/reference/RFC-025-AGENT-EXPERIENCE.md`
|
||||
3. `.core/reference/docs/RFC.md`
|
||||
4. `AGENTS.md`
|
||||
|
||||
## Overview
|
||||
|
||||
This repo is the Core ecosystem's agent orchestration service. It is AX-first: predictable names, named Actions, Core primitives, and behaviour-driven tests matter more than terse APIs.
|
||||
|
||||
## Build And Test
|
||||
|
||||
```bash
|
||||
go build ./...
|
||||
go build ./cmd/core-agent/
|
||||
go test ./... -count=1 -timeout 60s
|
||||
go vet ./...
|
||||
```
|
||||
|
||||
## Core Registration Pattern
|
||||
|
||||
Register services through `core.New` and `WithService`, not ad hoc globals.
|
||||
|
||||
```go
|
||||
c := core.New(
|
||||
core.WithOption("name", "core-agent"),
|
||||
core.WithService(agentic.ProcessRegister),
|
||||
core.WithService(agentic.Register),
|
||||
core.WithService(runner.Register),
|
||||
core.WithService(monitor.Register),
|
||||
core.WithService(brain.Register),
|
||||
)
|
||||
c.Run()
|
||||
```
|
||||
|
||||
## Mandatory Conventions
|
||||
|
||||
- Use UK English in comments and docs.
|
||||
- Use `core.E("pkg.Method", "message", err)` for errors. Never use `fmt.Errorf` or `errors.New`.
|
||||
- Use Core filesystem helpers or package-level `fs`. Never use raw `os.ReadFile`, `os.WriteFile`, or `filepath.*`.
|
||||
- Route external commands through `s.Core().Process()`. Never import `os/exec`.
|
||||
- Use Core string helpers such as `core.Contains`, `core.Trim`, and `core.Split` instead of `strings.*`.
|
||||
- Prefer `core.Result{Value: x, OK: true}` over `(value, error)` return pairs in Core-facing code.
|
||||
- Comments should show real usage examples, not restate the signature.
|
||||
- Prefer predictable names such as `Config`, `Service`, and `Options`; avoid abbreviations.
|
||||
- Add `// SPDX-License-Identifier: EUPL-1.2` to Go source files.
|
||||
|
||||
## AX Quality Gates
|
||||
|
||||
Treat these imports as review failures in non-test Go code:
|
||||
|
||||
- `os`
|
||||
- `os/exec`
|
||||
- `fmt`
|
||||
- `log`
|
||||
- `errors`
|
||||
- `encoding/json`
|
||||
- `path/filepath`
|
||||
- `strings`
|
||||
- `unsafe`
|
||||
|
||||
Use the Core primitive or the repo helper instead.
|
||||
|
||||
## Testing
|
||||
|
||||
Use AX test naming:
|
||||
|
||||
```text
|
||||
TestFile_Function_Good
|
||||
TestFile_Function_Bad
|
||||
TestFile_Function_Ugly
|
||||
```
|
||||
|
||||
One source file should have its own focused test file and example file where practical. The test suite is the behavioural spec.
|
||||
|
||||
## Commits
|
||||
|
||||
Use `type(scope): description` and include:
|
||||
|
||||
```text
|
||||
Co-Authored-By: Virgil <virgil@lethean.io>
|
||||
```
|
||||
66
Makefile
66
Makefile
|
|
@ -1,50 +1,36 @@
|
|||
# Host UK Developer Workspace
|
||||
# Run `make setup` to bootstrap your environment
|
||||
|
||||
CORE_REPO := github.com/host-uk/core
|
||||
CORE_VERSION := latest
|
||||
INSTALL_DIR := $(HOME)/.local/bin
|
||||
# ── core-agent binary ──────────────────────────────────
|
||||
|
||||
.PHONY: all setup install-deps install-go install-core doctor clean help
|
||||
BINARY_NAME=core-agent
|
||||
CMD_PATH=./cmd/core-agent
|
||||
MODULE_PATH=dappco.re/go/agent
|
||||
|
||||
all: help
|
||||
# Default LDFLAGS to empty
|
||||
LDFLAGS = ""
|
||||
|
||||
help:
|
||||
@echo "Host UK Developer Workspace"
|
||||
@echo ""
|
||||
@echo "Usage:"
|
||||
@echo " make setup Full setup (deps + core + clone repos)"
|
||||
@echo " make install-deps Install system dependencies (go, gh, etc)"
|
||||
@echo " make install-core Build and install core CLI"
|
||||
@echo " make doctor Check environment health"
|
||||
@echo " make clone Clone all repos into packages/"
|
||||
@echo " make clean Remove built artifacts"
|
||||
@echo ""
|
||||
@echo "Quick start:"
|
||||
@echo " make setup"
|
||||
# If VERSION is set, inject into binary
|
||||
ifdef VERSION
|
||||
LDFLAGS = -ldflags "-X '$(MODULE_PATH).Version=$(VERSION)'"
|
||||
endif
|
||||
|
||||
setup: install-deps install-core doctor clone
|
||||
@echo ""
|
||||
@echo "Setup complete! Run 'core health' to verify."
|
||||
.PHONY: build install agent-dev test coverage
|
||||
|
||||
install-deps:
|
||||
@echo "Installing dependencies..."
|
||||
@./scripts/install-deps.sh
|
||||
build:
|
||||
@echo "Building $(BINARY_NAME)..."
|
||||
@go build $(LDFLAGS) -o $(BINARY_NAME) $(CMD_PATH)
|
||||
|
||||
install-go:
|
||||
@echo "Installing Go..."
|
||||
@./scripts/install-go.sh
|
||||
install:
|
||||
@echo "Installing $(BINARY_NAME)..."
|
||||
@go install $(LDFLAGS) $(CMD_PATH)
|
||||
|
||||
install-core:
|
||||
@echo "Installing core CLI..."
|
||||
@./scripts/install-core.sh
|
||||
agent-dev: build
|
||||
@./$(BINARY_NAME) version
|
||||
|
||||
doctor:
|
||||
@core doctor || echo "Run 'make install-core' first if core is not found"
|
||||
test:
|
||||
@echo "Running tests..."
|
||||
@go test ./...
|
||||
|
||||
clone:
|
||||
@core setup || echo "Run 'make install-core' first if core is not found"
|
||||
|
||||
clean:
|
||||
@rm -rf ./build
|
||||
@echo "Cleaned build artifacts"
|
||||
coverage:
|
||||
@echo "Generating coverage report..."
|
||||
@go test -coverprofile=coverage.out ./...
|
||||
@echo "Coverage: coverage.out"
|
||||
|
|
|
|||
|
|
@ -1,48 +0,0 @@
|
|||
# Examples
|
||||
|
||||
This directory contains example outputs demonstrating how the agency's agents can be orchestrated together to tackle real-world tasks.
|
||||
|
||||
## Why This Exists
|
||||
|
||||
The agency-agents repo defines dozens of specialized agents across engineering, design, marketing, product, support, spatial computing, and project management. But agent definitions alone don't show what happens when you **deploy them all at once** on a single mission.
|
||||
|
||||
These examples answer the question: *"What does it actually look like when the full agency collaborates?"*
|
||||
|
||||
## Contents
|
||||
|
||||
### [nexus-spatial-discovery.md](./nexus-spatial-discovery.md)
|
||||
|
||||
**What:** A complete product discovery exercise where 8 agents worked in parallel to evaluate a software opportunity and produce a unified plan.
|
||||
|
||||
**The scenario:** Web research identified an opportunity at the intersection of AI agent orchestration and spatial computing. The entire agency was then deployed simultaneously to produce:
|
||||
|
||||
- Market validation and competitive analysis
|
||||
- Technical architecture (8-service system design with full SQL schema)
|
||||
- Brand strategy and visual identity
|
||||
- Go-to-market and growth plan
|
||||
- Customer support operations blueprint
|
||||
- UX research plan with personas and journey maps
|
||||
- 35-week project execution plan with 65 sprint tickets
|
||||
- Spatial interface architecture specification
|
||||
|
||||
**Agents used:**
|
||||
| Agent | Role |
|
||||
|-------|------|
|
||||
| Product Trend Researcher | Market validation, competitive landscape |
|
||||
| Backend Architect | System architecture, data model, API design |
|
||||
| Brand Guardian | Positioning, visual identity, naming |
|
||||
| Growth Hacker | GTM strategy, pricing, launch plan |
|
||||
| Support Responder | Support tiers, onboarding, community |
|
||||
| UX Researcher | Personas, journey maps, design principles |
|
||||
| Project Shepherd | Phase plan, sprints, risk register |
|
||||
| XR Interface Architect | Spatial UI specification |
|
||||
|
||||
**Key takeaway:** All 8 agents ran in parallel and produced coherent, cross-referencing plans without coordination overhead. The output demonstrates the agency's ability to go from "find an opportunity" to "here's the full blueprint" in a single session.
|
||||
|
||||
## Adding New Examples
|
||||
|
||||
If you run an interesting multi-agent exercise, consider adding it here. Good examples show:
|
||||
|
||||
- Multiple agents collaborating on a shared objective
|
||||
- The breadth of the agency's capabilities
|
||||
- Real-world applicability of the agent definitions
|
||||
|
|
@ -1,852 +0,0 @@
|
|||
# Nexus Spatial: Full Agency Discovery Exercise
|
||||
|
||||
> **Exercise type:** Multi-agent product discovery
|
||||
> **Date:** March 5, 2026
|
||||
> **Agents deployed:** 8 (in parallel)
|
||||
> **Duration:** ~10 minutes wall-clock time
|
||||
> **Purpose:** Demonstrate full-agency orchestration from opportunity identification through comprehensive planning
|
||||
|
||||
---
|
||||
|
||||
## Table of Contents
|
||||
|
||||
1. [The Opportunity](#1-the-opportunity)
|
||||
2. [Market Validation](#2-market-validation)
|
||||
3. [Technical Architecture](#3-technical-architecture)
|
||||
4. [Brand Strategy](#4-brand-strategy)
|
||||
5. [Go-to-Market & Growth](#5-go-to-market--growth)
|
||||
6. [Customer Support Blueprint](#6-customer-support-blueprint)
|
||||
7. [UX Research & Design Direction](#7-ux-research--design-direction)
|
||||
8. [Project Execution Plan](#8-project-execution-plan)
|
||||
9. [Spatial Interface Architecture](#9-spatial-interface-architecture)
|
||||
10. [Cross-Agent Synthesis](#10-cross-agent-synthesis)
|
||||
|
||||
---
|
||||
|
||||
## 1. The Opportunity
|
||||
|
||||
### How It Was Found
|
||||
|
||||
Web research across multiple sources identified three converging trends:
|
||||
|
||||
- **AI infrastructure/orchestration** is the fastest-growing software category (AI orchestration market valued at ~$13.5B in 2026, 22%+ CAGR)
|
||||
- **Spatial computing** (Vision Pro, WebXR) is maturing but lacks killer enterprise apps
|
||||
- Every existing AI workflow tool (LangSmith, n8n, Flowise, CrewAI) is a **flat 2D dashboard**
|
||||
|
||||
### The Concept: Nexus Spatial
|
||||
|
||||
An AI Agent Command Center in spatial computing -- a VisionOS + WebXR application that provides an immersive 3D command center for orchestrating, monitoring, and interacting with AI agents. Users visualize agent pipelines as 3D node graphs, monitor real-time outputs in spatial panels, build workflows with drag-and-drop in 3D space, and collaborate in shared spatial environments.
|
||||
|
||||
### Why This Agency Is Uniquely Positioned
|
||||
|
||||
The agency has deep spatial computing expertise (XR developers, VisionOS engineers, Metal specialists, interface architects) alongside a full engineering, design, marketing, and operations stack -- a rare combination for a product that demands both spatial computing mastery and enterprise software rigor.
|
||||
|
||||
### Sources
|
||||
|
||||
- [Profitable SaaS Ideas 2026 (273K+ Reviews)](https://bigideasdb.com/profitable-saas-micro-saas-ideas-2026)
|
||||
- [2026 SaaS and AI Revolution: 20 Top Trends](https://fungies.io/the-2026-saas-and-ai-revolution-20-top-trends/)
|
||||
- [Top 21 Underserved Markets 2026](https://mktclarity.com/blogs/news/list-underserved-niches)
|
||||
- [Fastest Growing Products 2026 - G2](https://www.g2.com/best-software-companies/fastest-growing)
|
||||
- [PwC 2026 AI Business Predictions](https://www.pwc.com/us/en/tech-effect/ai-analytics/ai-predictions.html)
|
||||
|
||||
---
|
||||
|
||||
## 2. Market Validation
|
||||
|
||||
**Agent:** Product Trend Researcher
|
||||
|
||||
### Verdict: CONDITIONAL GO -- 2D-First, Spatial-Second
|
||||
|
||||
### Market Size
|
||||
|
||||
| Segment | 2026 Value | Growth |
|
||||
|---------|-----------|--------|
|
||||
| AI Orchestration Tools | $13.5B | 22.3% CAGR |
|
||||
| Autonomous AI Agents | $8.5B | 45.8% CAGR to $50.3B by 2030 |
|
||||
| Extended Reality | $10.64B | 40.95% CAGR |
|
||||
| Spatial Computing (broad) | $170-220B | Varies by definition |
|
||||
|
||||
### Competitive Landscape
|
||||
|
||||
**AI Agent Orchestration (all 2D):**
|
||||
|
||||
| Tool | Strength | UX Gap |
|
||||
|------|----------|--------|
|
||||
| LangChain/LangSmith | Graph-based orchestration, $39/user/mo | Flat dashboard; complex graphs unreadable at scale |
|
||||
| CrewAI | 100K+ developers, fast execution | CLI-first, minimal visual tooling |
|
||||
| Microsoft Agent Framework | Enterprise integration | Embedded in Azure portal, no standalone UI |
|
||||
| n8n | Visual workflow builder, $20-50/mo | 2D canvas struggles with agent relationships |
|
||||
| Flowise | Drag-and-drop AI flows | Limited to linear flows, no multi-agent monitoring |
|
||||
|
||||
**"Mission Control" Products (emerging, all 2D):**
|
||||
- cmd-deck: Kanban board for AI coding agents
|
||||
- Supervity Agent Command Center: Enterprise observability
|
||||
- OpenClaw Command Center: Agent fleet management
|
||||
- Mission Control AI: Synthetic workers management
|
||||
- Mission Control HQ: Squad-based coordination
|
||||
|
||||
**The gap:** Products are either spatial-but-not-AI-focused, or AI-focused-but-flat-2D. No product sits at the intersection.
|
||||
|
||||
### Vision Pro Reality Check
|
||||
|
||||
- Installed base: ~1M units globally (sales declined 95% from launch)
|
||||
- Apple has shifted focus to lightweight AR glasses
|
||||
- Only ~3,000 VisionOS-specific apps exist
|
||||
- **Implication:** Do NOT lead with VisionOS. Lead with web, add WebXR, native VisionOS last.
|
||||
|
||||
### WebXR as the Distribution Unlock
|
||||
|
||||
- Safari adopted WebXR Device API in late 2025
|
||||
- 40% increase in WebXR adoption in 2026
|
||||
- WebGPU delivers near-native rendering in browsers
|
||||
- Android XR supports WebXR and OpenXR standards
|
||||
|
||||
### Target Personas and Pricing
|
||||
|
||||
| Tier | Price | Target |
|
||||
|------|-------|--------|
|
||||
| Explorer | Free | Developers, solo builders (3 agents, WebXR viewer) |
|
||||
| Pro | $99/user/month | Small teams (25 agents, collaboration) |
|
||||
| Team | $249/user/month | Mid-market AI teams (unlimited agents, analytics) |
|
||||
| Enterprise | Custom ($2K-10K/mo) | Large enterprises (SSO, RBAC, on-prem, SLA) |
|
||||
|
||||
### Recommended Phased Strategy
|
||||
|
||||
1. **Months 1-6:** Build a premium 2D web dashboard with Three.js 2.5D capabilities. Target: 50 paying teams, $60K MRR.
|
||||
2. **Months 6-12:** Add optional WebXR spatial mode (browser-based). Target: 200 teams, $300K MRR.
|
||||
3. **Months 12-18:** Native VisionOS app only if spatial demand is validated. Target: 500 teams, $1M+ MRR.
|
||||
|
||||
### Key Risks
|
||||
|
||||
| Risk | Severity |
|
||||
|------|----------|
|
||||
| Vision Pro installed base is critically small | HIGH |
|
||||
| "Spatial solution in search of a problem" -- is 3D actually 10x better than 2D? | HIGH |
|
||||
| Crowded "mission control" positioning (5+ products already) | MODERATE |
|
||||
| Enterprise spatial computing adoption still early | MODERATE |
|
||||
| Integration complexity across AI frameworks | MODERATE |
|
||||
|
||||
### Sources
|
||||
|
||||
- [MarketsandMarkets - AI Orchestration Market](https://www.marketsandmarkets.com/Market-Reports/ai-orchestration-market-148121911.html)
|
||||
- [Deloitte - AI Agent Orchestration Predictions 2026](https://www.deloitte.com/us/en/insights/industry/technology/technology-media-and-telecom-predictions/2026/ai-agent-orchestration.html)
|
||||
- [Mordor Intelligence - Extended Reality Market](https://www.mordorintelligence.com/industry-reports/extended-reality-xr-market)
|
||||
- [Fintool - Vision Pro Production Halted](https://fintool.com/news/apple-vision-pro-production-halt)
|
||||
- [MadXR - WebXR Browser-Based Experiences 2026](https://www.madxr.io/webxr-browser-immersive-experiences-2026.html)
|
||||
|
||||
---
|
||||
|
||||
## 3. Technical Architecture
|
||||
|
||||
**Agent:** Backend Architect
|
||||
|
||||
### System Overview
|
||||
|
||||
An 8-service architecture with clear ownership boundaries, designed for horizontal scaling and provider-agnostic AI integration.
|
||||
|
||||
```
|
||||
+------------------------------------------------------------------+
|
||||
| CLIENT TIER |
|
||||
| VisionOS Native (Swift/RealityKit) | WebXR (React Three Fiber) |
|
||||
+------------------------------------------------------------------+
|
||||
|
|
||||
+-----------------------------v------------------------------------+
|
||||
| API GATEWAY (Kong / AWS API GW) |
|
||||
| Rate limiting | JWT validation | WebSocket upgrade | TLS |
|
||||
+------------------------------------------------------------------+
|
||||
|
|
||||
+------------------------------------------------------------------+
|
||||
| SERVICE TIER |
|
||||
| Auth | Workspace | Workflow | Orchestration (Rust) | |
|
||||
| Collaboration (Yjs CRDT) | Streaming (WS) | Plugin | Billing |
|
||||
+------------------------------------------------------------------+
|
||||
|
|
||||
+------------------------------------------------------------------+
|
||||
| DATA TIER |
|
||||
| PostgreSQL 16 | Redis 7 Cluster | S3 | ClickHouse | NATS |
|
||||
+------------------------------------------------------------------+
|
||||
|
|
||||
+------------------------------------------------------------------+
|
||||
| AI PROVIDER TIER |
|
||||
| OpenAI | Anthropic | Google | Local Models | Custom Plugins |
|
||||
+------------------------------------------------------------------+
|
||||
```
|
||||
|
||||
### Tech Stack
|
||||
|
||||
| Component | Technology | Rationale |
|
||||
|-----------|------------|-----------|
|
||||
| Orchestration Engine | **Rust** | Sub-ms scheduling, zero GC pauses, memory safety for agent sandboxing |
|
||||
| API Services | TypeScript / NestJS | Developer velocity for CRUD-heavy services |
|
||||
| VisionOS Client | Swift 6, SwiftUI, RealityKit | First-class spatial computing with Liquid Glass |
|
||||
| WebXR Client | TypeScript, React Three Fiber | Production-grade WebXR with React component model |
|
||||
| Message Broker | NATS JetStream | Lightweight, exactly-once delivery, simpler than Kafka |
|
||||
| Collaboration | Yjs (CRDT) + WebRTC | Conflict-free concurrent 3D graph editing |
|
||||
| Primary Database | PostgreSQL 16 | JSONB for flexible configs, Row-Level Security for tenant isolation |
|
||||
|
||||
### Core Data Model
|
||||
|
||||
17 tables covering:
|
||||
- **Identity & Access:** users, workspaces, team_memberships, api_keys
|
||||
- **Workflows:** workflows, workflow_versions, nodes, edges
|
||||
- **Executions:** executions, execution_steps, step_output_chunks
|
||||
- **Collaboration:** collaboration_sessions, session_participants
|
||||
- **Credentials:** provider_credentials (AES-256-GCM encrypted)
|
||||
- **Billing:** subscriptions, usage_records
|
||||
- **Audit:** audit_log (append-only)
|
||||
|
||||
### Node Type Registry
|
||||
|
||||
```
|
||||
Built-in Node Types:
|
||||
ai_agent -- Calls an AI provider with a prompt
|
||||
prompt_template -- Renders a template with variables
|
||||
conditional -- Routes based on expression
|
||||
transform -- Sandboxed code snippet (JS/Python)
|
||||
input / output -- Workflow entry/exit points
|
||||
human_review -- Pauses for human approval
|
||||
loop -- Repeats subgraph
|
||||
parallel_split -- Fans out to branches
|
||||
parallel_join -- Waits for branches
|
||||
webhook_trigger -- External HTTP trigger
|
||||
delay -- Timed pause
|
||||
```
|
||||
|
||||
### WebSocket Channels
|
||||
|
||||
Real-time streaming via WSS with:
|
||||
- Per-channel sequence numbers for ordering
|
||||
- Gap detection with replay requests
|
||||
- Snapshot recovery when >1000 events behind
|
||||
- Client-side throttling for lower-powered devices
|
||||
|
||||
### Security Architecture
|
||||
|
||||
| Layer | Mechanism |
|
||||
|-------|-----------|
|
||||
| User Auth | OAuth 2.0 (GitHub, Google, Apple) + email/password + optional TOTP MFA |
|
||||
| API Keys | SHA-256 hashed, scoped, optional expiry |
|
||||
| Service-to-Service | mTLS via service mesh |
|
||||
| WebSocket Auth | One-time tickets with 30-second expiry |
|
||||
| Credential Storage | Envelope encryption (AES-256-GCM + AWS KMS) |
|
||||
| Code Sandboxing | gVisor/Firecracker microVMs (no network, 256MB RAM, 30s CPU) |
|
||||
| Tenant Isolation | PostgreSQL Row-Level Security + S3 IAM policies + NATS subject scoping |
|
||||
|
||||
### Scaling Targets
|
||||
|
||||
| Metric | Year 1 | Year 2 |
|
||||
|--------|--------|--------|
|
||||
| Concurrent agent executions | 5,000 | 50,000 |
|
||||
| WebSocket connections | 10,000 | 100,000 |
|
||||
| P95 API latency | < 150ms | < 100ms |
|
||||
| P95 WS event latency | < 80ms | < 50ms |
|
||||
|
||||
### MVP Phases
|
||||
|
||||
1. **Weeks 1-6:** 2D web editor, sequential execution, OpenAI + Anthropic adapters
|
||||
2. **Weeks 7-12:** WebXR 3D mode, parallel execution, hand tracking, RBAC
|
||||
3. **Weeks 13-20:** Multi-user collaboration, VisionOS native, billing
|
||||
4. **Weeks 21-30:** Enterprise SSO, plugin SDK, SOC 2, scale hardening
|
||||
|
||||
---
|
||||
|
||||
## 4. Brand Strategy
|
||||
|
||||
**Agent:** Brand Guardian
|
||||
|
||||
### Positioning
|
||||
|
||||
**Category creation over category competition.** Nexus Spatial defines a new category -- **Spatial AI Operations (SpatialAIOps)** -- rather than fighting for position in the crowded AI observability dashboard space.
|
||||
|
||||
**Positioning statement:** For technical teams managing complex AI agent workflows, Nexus Spatial is the immersive 3D command center that provides spatial awareness of agent orchestration, unlike flat 2D dashboards, because spatial computing transforms monitoring from reading dashboards to inhabiting your infrastructure.
|
||||
|
||||
### Name Validation
|
||||
|
||||
"Nexus Spatial" is **validated as strong:**
|
||||
- "Nexus" connects to the NEXUS orchestration framework (Network of EXperts, Unified in Strategy)
|
||||
- "Nexus" independently means "central connection point" -- perfect for a command center
|
||||
- "Spatial" is the industry-standard descriptor Apple and the industry have normalized
|
||||
- Phonetically balanced: two crisp syllables in each word
|
||||
- **Action needed:** Trademark clearance in Nice Classes 9, 42, and 38
|
||||
|
||||
### Brand Personality: The Commander
|
||||
|
||||
| Trait | Expression | Avoids |
|
||||
|-------|------------|--------|
|
||||
| **Authoritative** | Clear, direct, technically precise | Hype, superlatives, vague futurism |
|
||||
| **Composed** | Clean design, measured pacing, white space | Urgency for urgency's sake, chaos |
|
||||
| **Pioneering** | Quiet pride, understated references to the new paradigm | "Revolutionary," "game-changing" |
|
||||
| **Precise** | Exact specs, real metrics, honest requirements | Vague claims, marketing buzzwords |
|
||||
| **Approachable** | Natural interaction language, spatial metaphors | Condescension, gatekeeping |
|
||||
|
||||
### Taglines (Ranked)
|
||||
|
||||
1. **"Mission Control for the Agent Era"** -- RECOMMENDED PRIMARY
|
||||
2. "See Your Agents in Space"
|
||||
3. "Orchestrate in Three Dimensions"
|
||||
4. "Where AI Operations Become Spatial"
|
||||
5. "Command Center. Reimagined in Space."
|
||||
6. "The Dimension Your Dashboards Are Missing"
|
||||
7. "AI Agents Deserve More Than Flat Screens"
|
||||
|
||||
### Color System
|
||||
|
||||
| Color | Hex | Usage |
|
||||
|-------|-----|-------|
|
||||
| Deep Space Indigo | `#1B1F3B` | Foundational dark canvas, backgrounds |
|
||||
| Nexus Blue | `#4A7BF7` | Signature brand, primary actions |
|
||||
| Signal Cyan | `#00D4FF` | Spatial highlights, data connections |
|
||||
| Command Green | `#00E676` | Healthy systems, success |
|
||||
| Alert Amber | `#FFB300` | Warnings, attention needed |
|
||||
| Critical Red | `#FF3D71` | Errors, failures |
|
||||
|
||||
Usage ratio: Deep Space Indigo 60%, Nexus Blue 25%, Signal Cyan 10%, Semantic 5%.
|
||||
|
||||
### Typography
|
||||
|
||||
- **Primary:** Inter (UI, body, labels)
|
||||
- **Monospace:** JetBrains Mono (code, logs, agent output)
|
||||
- **Display:** Space Grotesk (marketing headlines only)
|
||||
|
||||
### Logo Concepts
|
||||
|
||||
Three directions for exploration:
|
||||
|
||||
1. **The Spatial Nexus Mark** -- Convergent lines meeting at a glowing central node with subtle perspective depth
|
||||
2. **The Dimensional Window** -- Stylized viewport with perspective lines creating the effect of looking into 3D space
|
||||
3. **The Orbital Array** -- Orbital rings around a central point suggesting coordinated agents in motion
|
||||
|
||||
### Brand Values
|
||||
|
||||
- **Spatial Truthfulness** -- Honest representation of system state, no cosmetic smoothing
|
||||
- **Operational Gravity** -- Built for production, not demos
|
||||
- **Dimensional Generosity** -- WebXR ensures spatial value is accessible to everyone
|
||||
- **Composure Under Complexity** -- The more complex the system, the calmer the interface
|
||||
|
||||
### Design Tokens
|
||||
|
||||
```css
|
||||
:root {
|
||||
--nxs-deep-space: #1B1F3B;
|
||||
--nxs-blue: #4A7BF7;
|
||||
--nxs-cyan: #00D4FF;
|
||||
--nxs-green: #00E676;
|
||||
--nxs-amber: #FFB300;
|
||||
--nxs-red: #FF3D71;
|
||||
--nxs-void: #0A0E1A;
|
||||
--nxs-slate-900: #141829;
|
||||
--nxs-slate-700: #2A2F45;
|
||||
--nxs-slate-500: #4A5068;
|
||||
--nxs-slate-300: #8B92A8;
|
||||
--nxs-slate-100: #C8CCE0;
|
||||
--nxs-cloud: #E8EBF5;
|
||||
--nxs-white: #F8F9FC;
|
||||
--nxs-font-primary: 'Inter', sans-serif;
|
||||
--nxs-font-mono: 'JetBrains Mono', monospace;
|
||||
--nxs-font-display: 'Space Grotesk', sans-serif;
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 5. Go-to-Market & Growth
|
||||
|
||||
**Agent:** Growth Hacker
|
||||
|
||||
### North Star Metric
|
||||
|
||||
**Weekly Active Pipelines (WAP)** -- unique agent pipelines with at least one spatial interaction in the past 7 days. Captures both creation and engagement, correlates with value, and isn't gameable.
|
||||
|
||||
### Pricing
|
||||
|
||||
| Tier | Annual | Monthly | Target |
|
||||
|------|--------|---------|--------|
|
||||
| Explorer | Free | Free | 3 pipelines, WebXR preview, community |
|
||||
| Pro | $29/user/mo | $39/user/mo | Unlimited pipelines, VisionOS, 30-day history |
|
||||
| Team | $59/user/mo | $79/user/mo | Collaboration, RBAC, SSO, 90-day history |
|
||||
| Enterprise | Custom (~$150+) | Custom | Dedicated infra, SLA, on-prem option |
|
||||
|
||||
Strategy: 14-day reverse trial (Pro features, then downgrade to Free). Target 5-8% free-to-paid conversion.
|
||||
|
||||
### 3-Phase GTM
|
||||
|
||||
**Phase 1: Founder-Led Sales (Months 1-3)**
|
||||
- Target: Individual AI engineers at startups who use LangChain/CrewAI and own Vision Pro
|
||||
- Tactics: DM 200 high-profile AI engineers, weekly build-in-public posts, 30-second demo clips
|
||||
- Channels: X/Twitter, LinkedIn, AI-focused Discord servers, Reddit
|
||||
|
||||
**Phase 2: Developer Community (Months 4-6)**
|
||||
- Product Hunt launch (timed for this phase, not Phase 1)
|
||||
- Hacker News Show HN, Dev.to articles, conference talks
|
||||
- Integration announcements with popular AI frameworks
|
||||
|
||||
**Phase 3: Enterprise (Months 7-12)**
|
||||
- Apple enterprise referral pipeline, LinkedIn ABM campaigns
|
||||
- Enterprise case studies, analyst briefings (Gartner, Forrester)
|
||||
- First enterprise AE hire, SOC 2 compliance
|
||||
|
||||
### Growth Loops
|
||||
|
||||
1. **"Wow Factor" Demo Loop** -- Spatial demos are inherently shareable. One-click "Share Spatial Preview" generates a WebXR link or video. Target K = 0.3-0.5.
|
||||
2. **Template Marketplace** -- Power users publish pipeline templates, discoverable via search, driving new signups.
|
||||
3. **Collaboration Seat Expansion** -- One engineer adopts, shares with teammates, team expands to paid plan (Slack/Figma playbook).
|
||||
4. **Integration-Driven Discovery** -- Listings in LangChain, n8n, OpenAI/Anthropic partner directories.
|
||||
|
||||
### Open-Source Strategy
|
||||
|
||||
**Open-source (Apache 2.0):**
|
||||
- `nexus-spatial-sdk` -- TypeScript/Python SDK for connecting agent frameworks
|
||||
- `nexus-webxr-components` -- React Three Fiber component library for 3D pipelines
|
||||
- `nexus-agent-schemas` -- Standardized schemas for representing agent pipelines in 3D
|
||||
|
||||
**Keep proprietary:** VisionOS native app, collaboration engine, enterprise features, hosted infrastructure.
|
||||
|
||||
### Revenue Targets
|
||||
|
||||
| Metric | Month 6 | Month 12 |
|
||||
|--------|---------|----------|
|
||||
| MRR | $8K-15K | $50K-80K |
|
||||
| Free accounts | 5,000 | 15,000 |
|
||||
| Paid seats | 300 | 1,200 |
|
||||
| Discord members | 2,000 | 5,000 |
|
||||
| GitHub stars (SDK) | 500 | 2,000 |
|
||||
|
||||
### First $50K Budget
|
||||
|
||||
| Category | Amount | % |
|
||||
|----------|--------|---|
|
||||
| Content Production | $12,000 | 24% |
|
||||
| Developer Relations | $10,000 | 20% |
|
||||
| Paid Acquisition Testing | $8,000 | 16% |
|
||||
| Community & Tools | $5,000 | 10% |
|
||||
| Product Hunt & Launch | $3,000 | 6% |
|
||||
| Open Source Maintenance | $3,000 | 6% |
|
||||
| PR & Outreach | $4,000 | 8% |
|
||||
| Partnerships | $2,000 | 4% |
|
||||
| Reserve | $3,000 | 6% |
|
||||
|
||||
### Key Partnerships
|
||||
|
||||
- **Tier 1 (Critical):** Anthropic, OpenAI -- first-class API integrations, partner program listings
|
||||
- **Tier 2 (Adoption):** LangChain, CrewAI, n8n -- framework integrations, community cross-pollination
|
||||
- **Tier 3 (Platform):** Apple -- Vision Pro developer kit, App Store featuring, WWDC
|
||||
- **Tier 4 (Ecosystem):** GitHub, Hugging Face, Docker -- developer platform integrations
|
||||
|
||||
### Sources
|
||||
|
||||
- [AI Orchestration Market Size - MarketsandMarkets](https://www.marketsandmarkets.com/Market-Reports/ai-orchestration-market-148121911.html)
|
||||
- [Spatial Computing Market - Precedence Research](https://www.precedenceresearch.com/spatial-computing-market)
|
||||
- [How to Price AI Products - Aakash Gupta](https://www.news.aakashg.com/p/how-to-price-ai-products)
|
||||
- [Product Hunt Launch Guide 2026](https://calmops.com/indie-hackers/product-hunt-launch-guide/)
|
||||
|
||||
---
|
||||
|
||||
## 6. Customer Support Blueprint
|
||||
|
||||
**Agent:** Support Responder
|
||||
|
||||
### Support Tier Structure
|
||||
|
||||
| Attribute | Explorer (Free) | Builder (Pro) | Command (Enterprise) |
|
||||
|-----------|-----------------|---------------|---------------------|
|
||||
| First Response SLA | Best effort (48h) | 4 hours (business hours) | 30 min (P1), 2h (P2) |
|
||||
| Resolution SLA | 5 business days | 24h (P1/P2), 72h (P3) | 4h (P1), 12h (P2) |
|
||||
| Channels | Community, KB, AI assistant | + Live chat, email, video (2/mo) | + Dedicated Slack, named CSE, 24/7 |
|
||||
| Scope | General questions, docs | Technical troubleshooting, integrations | Full integration, custom design, compliance |
|
||||
|
||||
### Priority Definitions
|
||||
|
||||
- **P1 Critical:** Orchestration down, data loss risk, security breach
|
||||
- **P2 High:** Major feature degraded, workaround exists
|
||||
- **P3 Medium:** Non-blocking issues, minor glitches
|
||||
- **P4 Low:** Feature requests, cosmetic issues
|
||||
|
||||
### The Nexus Guide: AI-Powered In-Product Support
|
||||
|
||||
The standout design decision: the support agent lives as a visible node **inside the user's spatial workspace**. It has full context of the user's layout, active agents, and recent errors.
|
||||
|
||||
**Capabilities:**
|
||||
- Natural language Q&A about features
|
||||
- Real-time agent diagnostics ("Why is Agent X slow?")
|
||||
- Configuration suggestions ("Your topology would perform better as a mesh")
|
||||
- Guided spatial troubleshooting walkthroughs
|
||||
- Ticket creation with automatic context attachment
|
||||
|
||||
**Self-Healing:**
|
||||
|
||||
| Scenario | Detection | Auto-Resolution |
|
||||
|----------|-----------|-----------------|
|
||||
| Agent infinite loop | CPU/token spike | Kill and restart with last good config |
|
||||
| Rendering frame drop | FPS below threshold | Reduce visual fidelity, suggest closing panels |
|
||||
| Credential expiry | API 401 responses | Prompt re-auth, pause agents gracefully |
|
||||
| Communication timeout | Latency spike | Reroute messages through alternate path |
|
||||
|
||||
### Onboarding Flow
|
||||
|
||||
Adaptive onboarding based on user profiling:
|
||||
|
||||
| AI Experience | Spatial Experience | Path |
|
||||
|---------------|-------------------|------|
|
||||
| Low | Low | Full guided tour (20 min) |
|
||||
| High | Low | Spatial-focused (12 min) |
|
||||
| Low | High | Agent-focused (12 min) |
|
||||
| High | High | Express setup (5 min) |
|
||||
|
||||
Critical first step: 60-second spatial calibration (hand tracking, gaze, comfort check) before any product interaction.
|
||||
|
||||
**Activation Milestone** (user is "onboarded" when they have):
|
||||
- Created at least one custom agent
|
||||
- Connected two or more agents in a topology
|
||||
- Anchored at least one monitoring dashboard
|
||||
- Returned for a third session
|
||||
|
||||
### Team Build
|
||||
|
||||
| Phase | Headcount | Roles |
|
||||
|-------|-----------|-------|
|
||||
| Months 0-6 | 4 | Head of CX, 2 Support Engineers, Technical Writer |
|
||||
| Months 6-12 | 8 | + 2 Support Engineers, CSE, Community Manager, Ops Analyst |
|
||||
| Months 12-24 | 16 | + 4 Engineers (24/7), Spatial Specialist, Integration Specialist, KB Manager, Engineering Manager |
|
||||
|
||||
### Community: Discord-First
|
||||
|
||||
```
|
||||
NEXUS SPATIAL DISCORD
|
||||
INFORMATION: #announcements, #changelog, #status
|
||||
SUPPORT: #help-getting-started, #help-agents, #help-spatial
|
||||
DISCUSSION: #general, #show-your-workspace, #feature-requests
|
||||
PLATFORMS: #visionos, #webxr, #api-and-sdk
|
||||
EVENTS: office-hours (weekly voice), community-demos (monthly)
|
||||
PRO MEMBERS: #pro-lounge, #beta-testing
|
||||
ENTERPRISE: per-customer private channels
|
||||
```
|
||||
|
||||
**Champions Program ("Nexus Navigators"):** 5-10 initial power users with Navigator badge, direct Slack with product team, free Pro tier, early feature access, and annual summit.
|
||||
|
||||
---
|
||||
|
||||
## 7. UX Research & Design Direction
|
||||
|
||||
**Agent:** UX Researcher
|
||||
|
||||
### User Personas
|
||||
|
||||
**Maya Chen -- AI Platform Engineer (32, San Francisco)**
|
||||
- Manages 15-30 active agent workflows, uses n8n + LangSmith
|
||||
- Spends 40% of time debugging agent failures via log inspection
|
||||
- Skeptical of spatial computing: "Is this actually faster, or just cooler?"
|
||||
- Primary need: Reduce mean-time-to-diagnosis from 45 min to under 10
|
||||
|
||||
**David Okoro -- Technical Product Manager (38, London)**
|
||||
- Reviews and approves agent workflow designs, presents to C-suite
|
||||
- Cannot meaningfully contribute to workflow reviews because tools require code-level understanding
|
||||
- Primary need: Understand and communicate agent architectures without reading code
|
||||
|
||||
**Dr. Amara Osei -- Research Scientist (45, Zurich)**
|
||||
- Designs multi-agent research workflows with A/B comparisons
|
||||
- Has 12 variations of the same pipeline with no good way to compare
|
||||
- Primary need: Side-by-side comparison of variant pipelines in 3D space
|
||||
|
||||
**Jordan Rivera -- Creative Technologist (27, Austin)**
|
||||
- Daily Vision Pro user, builds AI-powered art installations
|
||||
- Wants tools that feel like instruments, not dashboards
|
||||
- Primary need: Build agent workflows quickly with immediate spatial feedback
|
||||
|
||||
### Key Finding: Debugging Is the Killer Use Case
|
||||
|
||||
Spatial overlay of runtime traces on workflow structure solves a real, quantified pain point that no 2D tool handles well. This workflow should receive the most design and engineering investment.
|
||||
|
||||
### Critical Design Insight
|
||||
|
||||
Spatial adds value for **structural** tasks (placing, connecting, rearranging nodes) but creates friction for **parameter** tasks (text entry, configuration). The interface must seamlessly blend spatial and 2D modes -- 2D panels anchored to spatial positions.
|
||||
|
||||
### 7 Design Principles
|
||||
|
||||
1. **Spatial Earns Its Place** -- If 2D is clearer, use 2D. Every review should ask: "Would this be better flat?"
|
||||
2. **Glanceable Before Inspectable** -- Critical info perceivable in under 2 seconds via color, size, motion, position
|
||||
3. **Hands-Free Is the Baseline** -- Gaze + voice covers all read/navigate operations; hands add precision but aren't required
|
||||
4. **Respect Cognitive Gravity** -- Extend 2D mental models (left-to-right flow), don't replace them; z-axis adds layering
|
||||
5. **Progressive Spatial Complexity** -- New users start nearly-2D; spatial capabilities reveal as confidence grows
|
||||
6. **Physical Metaphors, Digital Capabilities** -- Nodes are "picked up" (physical) but also duplicated and versioned (digital)
|
||||
7. **Silence Is a Feature** -- Healthy systems feel calm; color and motion signal deviation from normal
|
||||
|
||||
### Navigation Paradigm: 4-Level Semantic Zoom
|
||||
|
||||
| Level | What You See |
|
||||
|-------|-------------|
|
||||
| Fleet View | All workflows as abstract shapes, color-coded by status |
|
||||
| Workflow View | Node graph with labels and connections |
|
||||
| Node View | Expanded configuration, recent I/O, status metrics |
|
||||
| Trace View | Full execution trace with data inspection |
|
||||
|
||||
### Competitive UX Summary
|
||||
|
||||
| Capability | n8n | Flowise | LangSmith | Langflow | Nexus Spatial Target |
|
||||
|-----------|-----|---------|-----------|----------|---------------------|
|
||||
| Visual workflow building | A | B+ | N/A | A | A+ (spatial) |
|
||||
| Debugging/tracing | C+ | C | A | B | A+ (spatial overlay) |
|
||||
| Monitoring | B | C | A | B | A (spatial fleet) |
|
||||
| Collaboration | D | D | C | D | A (spatial co-presence) |
|
||||
| Large workflow scalability | C | C | B | C | A (3D space) |
|
||||
|
||||
### Accessibility Requirements
|
||||
|
||||
- Every interaction achievable through at least two modalities
|
||||
- No information conveyed by color alone
|
||||
- High-contrast mode, reduced-motion mode, depth-flattening mode
|
||||
- Screen reader compatibility with spatial element descriptions
|
||||
- Session length warnings every 20-30 minutes
|
||||
- All core tasks completable seated, one-handed, within 30-degree movement cone
|
||||
|
||||
### Research Plan (16 Weeks)
|
||||
|
||||
| Phase | Weeks | Studies |
|
||||
|-------|-------|---------|
|
||||
| Foundational | 1-4 | Mental model interviews (15-20 participants), competitive task analysis |
|
||||
| Concept Validation | 5-8 | Wizard-of-Oz spatial prototype testing, 3D card sort for IA |
|
||||
| Usability Testing | 9-14 | First-use experience (20 users), 4-week longitudinal diary study, paired collaboration testing |
|
||||
| Accessibility Audit | 12-16 | Expert heuristic evaluation, testing with users with disabilities |
|
||||
|
||||
---
|
||||
|
||||
## 8. Project Execution Plan
|
||||
|
||||
**Agent:** Project Shepherd
|
||||
|
||||
### Timeline: 35 Weeks (March 9 -- November 6, 2026)
|
||||
|
||||
| Phase | Weeks | Duration | Goal |
|
||||
|-------|-------|----------|------|
|
||||
| Discovery & Research | W1-3 | 3 weeks | Validate feasibility, define scope |
|
||||
| Foundation | W4-9 | 6 weeks | Core infrastructure, both platform shells, design system |
|
||||
| MVP Build | W10-19 | 10 weeks | Single-user agent command center with orchestration |
|
||||
| Beta | W20-27 | 8 weeks | Collaboration, polish, harden, 50-100 beta users |
|
||||
| Launch | W28-31 | 4 weeks | App Store + web launch, marketing push |
|
||||
| Scale | W32-35+ | Ongoing | Plugin marketplace, advanced features, growth |
|
||||
|
||||
### Critical Milestone: Week 12 (May 29)
|
||||
|
||||
**First end-to-end workflow execution.** A user creates and runs a 3-node agent workflow in 3D. This is the moment the product proves its core value proposition. If this slips, everything downstream shifts.
|
||||
|
||||
### First 6 Sprints (65 Tickets)
|
||||
|
||||
**Sprint 1 (Mar 9-20):** VisionOS SDK audit, WebXR compatibility matrix, orchestration engine feasibility, stakeholder interviews, throwaway prototypes for both platforms.
|
||||
|
||||
**Sprint 2 (Mar 23 - Apr 3):** Architecture decision records, MVP scope lock with MoSCoW, PRD v1.0, spatial UI pattern research, interaction model definition, design system kickoff.
|
||||
|
||||
**Sprint 3 (Apr 6-17):** Monorepo setup, auth service (OAuth2), database schema, API gateway, VisionOS Xcode project init, WebXR project init, CI/CD pipelines.
|
||||
|
||||
**Sprint 4 (Apr 20 - May 1):** WebSocket server + client SDKs, spatial window management, 3D component library, hand tracking input layer, teams CRUD, integration tests.
|
||||
|
||||
**Sprint 5 (May 4-15):** Orchestration engine core (Rust), agent state machine, node graph renderers (both platforms), plugin interface v0, OpenAI provider plugin.
|
||||
|
||||
**Sprint 6 (May 18-29):** Workflow persistence + versioning, DAG execution, real-time execution visualization, Anthropic provider plugin, eye tracking integration, spatial audio.
|
||||
|
||||
### Team Allocation
|
||||
|
||||
5 squads operating across phases:
|
||||
|
||||
| Squad | Core Members | Active Phases |
|
||||
|-------|-------------|---------------|
|
||||
| Core Architecture | Backend Architect, XR Interface Architect, Senior Dev, VisionOS Engineer | Discovery through MVP |
|
||||
| Spatial Experience | XR Immersive Dev, XR Cockpit Specialist, Metal Engineer, UX Architect, UI Designer | Foundation through Beta |
|
||||
| Orchestration | AI Engineer, Backend Architect, Senior Dev, API Tester | MVP through Beta |
|
||||
| Platform Delivery | Frontend Dev, Mobile App Builder, VisionOS Engineer, DevOps | MVP through Launch |
|
||||
| Launch | Growth Hacker, Content Creator, App Store Optimizer, Visual Storyteller, Brand Guardian | Beta through Scale |
|
||||
|
||||
### Top 5 Risks
|
||||
|
||||
| Risk | Probability | Impact | Mitigation |
|
||||
|------|------------|--------|------------|
|
||||
| Apple rejects VisionOS app | Medium | Critical | Engage Apple Developer Relations Week 4, pre-review by Week 20 |
|
||||
| WebXR browser fragmentation | High | High | Browser support matrix Week 1, automated cross-browser tests |
|
||||
| Multi-user sync conflicts | Medium | High | CRDT-based sync (Yjs) from the start, prototype in Foundation |
|
||||
| Orchestration can't scale | Medium | Critical | Horizontal scaling from day one, load test at 10x by Week 22 |
|
||||
| RealityKit performance for 100+ nodes | Medium | High | Profile early, implement LOD culling, instanced rendering |
|
||||
|
||||
### Budget: $121,500 -- $155,500 (Non-Personnel)
|
||||
|
||||
| Category | Estimated Cost |
|
||||
|----------|---------------|
|
||||
| Cloud infrastructure (35 weeks) | $35,000 - $45,000 |
|
||||
| Hardware (3 Vision Pro, 2 Quest 3, Mac Studio) | $17,500 |
|
||||
| Licenses and services | $15,000 - $20,000 |
|
||||
| External services (legal, security, PR) | $30,000 - $45,000 |
|
||||
| AI API costs (dev/test) | $8,000 |
|
||||
| Contingency (15%) | $16,000 - $20,000 |
|
||||
|
||||
---
|
||||
|
||||
## 9. Spatial Interface Architecture
|
||||
|
||||
**Agent:** XR Interface Architect
|
||||
|
||||
### The Command Theater
|
||||
|
||||
The workspace is organized as a curved theater around the user:
|
||||
|
||||
```
|
||||
OVERVIEW CANOPY
|
||||
(pipeline topology)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
/ \
|
||||
/ FOCUS ARC (120 deg) \
|
||||
/ primary node graph work \
|
||||
/________________________________\
|
||||
| |
|
||||
LEFT | USER POSITION | RIGHT
|
||||
UTILITY | (origin 0,0,0) | UTILITY
|
||||
RAIL | | RAIL
|
||||
|__________________________________|
|
||||
\ /
|
||||
\ SHELF (below sightline) /
|
||||
\ agent status, quick tools/
|
||||
\_________________________ /
|
||||
```
|
||||
|
||||
- **Focus Arc** (120 degrees, 1.2-2.0m): Primary node graph workspace
|
||||
- **Overview Canopy** (above, 2.5-4.0m): Miniature pipeline topology + health heatmap
|
||||
- **Utility Rails** (left/right flanks): Agent library, monitoring, logs
|
||||
- **Shelf** (below sightline, 0.8-1.0m): Run/stop, undo/redo, quick tools
|
||||
|
||||
### Three-Layer Depth System
|
||||
|
||||
| Layer | Depth | Content | Opacity |
|
||||
|-------|-------|---------|---------|
|
||||
| Foreground | 0.8 - 1.2m | Active panels, inspectors, modals | 100% |
|
||||
| Midground | 1.2 - 2.5m | Node graph, connections, workspace | 100% |
|
||||
| Background | 2.5 - 5.0m | Overview map, ambient status | 40-70% |
|
||||
|
||||
### Node Graph in 3D
|
||||
|
||||
**Data flows toward the user.** Nodes arrange along the z-axis by execution order:
|
||||
|
||||
```
|
||||
USER (here)
|
||||
z=0.0m [Output Nodes] -- Results
|
||||
z=0.3m [Transform Nodes] -- Processors
|
||||
z=0.6m [Agent Nodes] -- LLM calls
|
||||
z=0.9m [Retrieval Nodes] -- RAG, APIs
|
||||
z=1.2m [Input Nodes] -- Triggers
|
||||
```
|
||||
|
||||
Parallel branches spread horizontally (x-axis). Conditional branches spread vertically (y-axis).
|
||||
|
||||
**Node representation (3 LODs):**
|
||||
- **LOD-0** (resting, >1.5m): 12x8cm frosted glass rectangle with type icon, name, status glow
|
||||
- **LOD-1** (hover, 400ms gaze): Expands to 14x10cm, reveals ports, last-run info
|
||||
- **LOD-2** (selected): Slides to foreground, expands to 30x40cm detail panel with live config editing
|
||||
|
||||
**Connections as luminous tubes:**
|
||||
- 4mm diameter at rest, 8mm when carrying data
|
||||
- Color-coded by data type (white=text, cyan=structured, magenta=images, amber=audio, green=tool calls)
|
||||
- Animated particles show flow direction and speed
|
||||
- Connections auto-bundle when more than 3 run in parallel between the same layers
|
||||
|
||||
### 7 Agent States
|
||||
|
||||
| State | Edge Glow | Interior | Sound | Particles |
|
||||
|-------|-----------|----------|-------|-----------|
|
||||
| Idle | Steady green, low | Static frosted glass | None | None |
|
||||
| Queued | Pulsing amber, 1Hz | Faint rotation | None | Slow drift at input |
|
||||
| Running | Steady blue, medium | Animated shimmer | Soft spatial hum | Rapid flow on connections |
|
||||
| Streaming | Blue + output stream | Shimmer + text fragments | Hum | Text fragments flowing forward |
|
||||
| Completed | Flash white, then green | Static | Completion chime | None |
|
||||
| Error | Pulsing red, 2Hz | Red tint | Alert tone (once) | None |
|
||||
| Paused | Steady amber | Freeze-frame + pause icon | None | Frozen in place |
|
||||
|
||||
### Interaction Model
|
||||
|
||||
| Action | VisionOS | WebXR Controllers | Voice |
|
||||
|--------|----------|-------------------|-------|
|
||||
| Select node | Gaze + pinch | Point ray + trigger | "Select [name]" |
|
||||
| Move node | Pinch + drag | Grip + move | -- |
|
||||
| Connect ports | Pinch port + drag | Trigger port + drag | "Connect [A] to [B]" |
|
||||
| Pan workspace | Two-hand drag | Thumbstick | "Pan left/right" |
|
||||
| Zoom | Two-hand spread/pinch | Thumbstick push/pull | "Zoom in/out" |
|
||||
| Inspect node | Pinch + pull toward self | Double-trigger | "Inspect [name]" |
|
||||
| Run pipeline | Tap Shelf button | Trigger button | "Run pipeline" |
|
||||
| Undo | Two-finger double-tap | B button | "Undo" |
|
||||
|
||||
### Collaboration Presence
|
||||
|
||||
Each collaborator represented by:
|
||||
- **Head proxy:** Translucent sphere with profile image, rotates with head orientation
|
||||
- **Hand proxies:** Ghosted hand models showing pinch/grab states
|
||||
- **Gaze cone:** Subtle 10-degree cone showing where they're looking
|
||||
- **Name label:** Billboard-rendered, shows current action ("editing Node X")
|
||||
|
||||
**Conflict resolution:** First editor gets write lock; second sees "locked by [name]" with option to request access or duplicate the node.
|
||||
|
||||
### Adaptive Layout
|
||||
|
||||
| Environment | Node Scale | Max LOD-2 Nodes | Graph Z-Spread |
|
||||
|-------------|-----------|-----------------|----------------|
|
||||
| VisionOS Window | 4x3cm | 5 | 0.05m/layer |
|
||||
| VisionOS Immersive | 12x8cm | 15 | 0.3m/layer |
|
||||
| WebXR Desktop | 120x80px | 8 (overlays) | Perspective projection |
|
||||
| WebXR Immersive | 12x8cm | 12 | 0.3m/layer |
|
||||
|
||||
### Transition Choreography
|
||||
|
||||
All transitions serve wayfinding. Maximum 600ms for major transitions, 200ms for minor, 0ms for selection.
|
||||
|
||||
| Transition | Duration | Key Motion |
|
||||
|-----------|----------|------------|
|
||||
| Overview to Focus | 600ms | Camera drifts to target, other regions fade to 30% |
|
||||
| Focus to Detail | 500ms | Node slides forward, expands, connections highlight |
|
||||
| Detail to Overview | 600ms | Panel collapses, node retreats, full topology visible |
|
||||
| Zone Switch | 500ms | Current slides out laterally, new slides in |
|
||||
| Window to Immersive | 1000ms | Borders dissolve, nodes expand to full spatial positions |
|
||||
|
||||
### Comfort Measures
|
||||
|
||||
- No camera-initiated movement without user action
|
||||
- Stable horizon (horizontal plane never tilts)
|
||||
- Primary interaction within 0.8-2.5m, +/-15 degrees of eye line
|
||||
- Rest prompt after 45 minutes (ambient lighting shift, not modal)
|
||||
- Peripheral vignette during fast movement
|
||||
- All frequently-used controls accessible with arms at sides (wrist/finger only)
|
||||
|
||||
---
|
||||
|
||||
## 10. Cross-Agent Synthesis
|
||||
|
||||
### Points of Agreement Across All 8 Agents
|
||||
|
||||
1. **2D-first, spatial-second.** Every agent independently arrived at this conclusion. Build a great web dashboard first, then progressively add spatial capabilities.
|
||||
|
||||
2. **Debugging is the killer use case.** The Product Researcher, UX Researcher, and XR Interface Architect all converged on this: spatial overlay of runtime traces on workflow structure is where 3D genuinely beats 2D.
|
||||
|
||||
3. **WebXR over VisionOS for initial reach.** Vision Pro's ~1M installed base cannot sustain a business. WebXR in the browser is the distribution unlock.
|
||||
|
||||
4. **The "war room" collaboration scenario.** Multiple agents highlighted collaborative incident response as the strongest spatial value proposition -- teams entering a shared 3D space to debug a failing pipeline together.
|
||||
|
||||
5. **Progressive disclosure is essential.** UX Research, Spatial UI, and Support all emphasized that spatial complexity must be revealed gradually, never dumped on a first-time user.
|
||||
|
||||
6. **Voice as the power-user accelerator.** Both the UX Researcher and XR Interface Architect identified voice commands as the "command line of spatial computing" -- essential for accessibility and expert efficiency.
|
||||
|
||||
### Key Tensions to Resolve
|
||||
|
||||
| Tension | Position A | Position B | Resolution Needed |
|
||||
|---------|-----------|-----------|-------------------|
|
||||
| **Pricing** | Growth Hacker: $29-59/user/mo | Trend Researcher: $99-249/user/mo | A/B test in beta |
|
||||
| **VisionOS priority** | Architecture: Phase 3 (Week 13+) | Spatial UI: Full spec ready | Build WebXR first, VisionOS when validated |
|
||||
| **Orchestration language** | Architecture: Rust | Project Plan: Not specified | Rust is correct for performance-critical DAG execution |
|
||||
| **MVP scope** | Architecture: 2D only in Phase 1 | Brand: Lead with spatial | 2D first, but ensure spatial is in every demo |
|
||||
| **Community platform** | Support: Discord-first | Marketing: Discord + open-source | Both -- Discord for community, GitHub for developer engagement |
|
||||
|
||||
### What This Exercise Demonstrates
|
||||
|
||||
This discovery document was produced by 8 specialized agents running in parallel, each bringing deep domain expertise to a shared objective. The agents independently arrived at consistent conclusions while surfacing domain-specific insights that would be difficult for any single generalist to produce:
|
||||
|
||||
- The **Product Trend Researcher** found the sobering Vision Pro sales data that reframed the entire strategy
|
||||
- The **Backend Architect** designed a Rust orchestration engine that no marketing-focused team would have considered
|
||||
- The **Brand Guardian** created a category ("SpatialAIOps") rather than competing in an existing one
|
||||
- The **UX Researcher** identified that spatial computing creates friction for parameter tasks -- a counterintuitive finding
|
||||
- The **XR Interface Architect** designed the "data flows toward you" topology that maps to natural spatial cognition
|
||||
- The **Project Shepherd** identified the three critical bottleneck roles that could derail the entire timeline
|
||||
- The **Growth Hacker** designed viral loops specific to spatial computing's inherent shareability
|
||||
- The **Support Responder** turned the product's own AI capabilities into a support differentiator
|
||||
|
||||
The result is a comprehensive, cross-functional product plan that could serve as the basis for actual development -- produced in a single session by an agency of AI agents working in concert.
|
||||
|
|
@ -1,119 +0,0 @@
|
|||
# Multi-Agent Workflow: Landing Page Sprint
|
||||
|
||||
> Ship a conversion-optimized landing page in one day using 4 agents.
|
||||
|
||||
## The Scenario
|
||||
|
||||
You need a landing page for a new product launch. It needs to look great, convert visitors, and be live by end of day.
|
||||
|
||||
## Agent Team
|
||||
|
||||
| Agent | Role in this workflow |
|
||||
|-------|---------------------|
|
||||
| Content Creator | Write the copy |
|
||||
| UI Designer | Design the layout and component specs |
|
||||
| Frontend Developer | Build it |
|
||||
| Growth Hacker | Optimize for conversion |
|
||||
|
||||
## The Workflow
|
||||
|
||||
### Morning: Copy + Design (parallel)
|
||||
|
||||
**Step 1a — Activate Content Creator**
|
||||
|
||||
```
|
||||
Activate Content Creator.
|
||||
|
||||
Write landing page copy for "FlowSync" — an API integration platform
|
||||
that connects any two SaaS tools in under 5 minutes.
|
||||
|
||||
Target audience: developers and technical PMs at mid-size companies.
|
||||
Tone: confident, concise, slightly playful.
|
||||
|
||||
Sections needed:
|
||||
1. Hero (headline + subheadline + CTA)
|
||||
2. Problem statement (3 pain points)
|
||||
3. How it works (3 steps)
|
||||
4. Social proof (placeholder testimonial format)
|
||||
5. Pricing (3 tiers: Free, Pro, Enterprise)
|
||||
6. Final CTA
|
||||
|
||||
Keep it scannable. No fluff.
|
||||
```
|
||||
|
||||
**Step 1b — Activate UI Designer (in parallel)**
|
||||
|
||||
```
|
||||
Activate UI Designer.
|
||||
|
||||
Design specs for a SaaS landing page. Product: FlowSync (API integration platform).
|
||||
Style: clean, modern, dark mode option. Think Linear or Vercel aesthetic.
|
||||
|
||||
Deliver:
|
||||
1. Layout wireframe (section order + spacing)
|
||||
2. Color palette (primary, secondary, accent, background)
|
||||
3. Typography (font pairing, heading sizes, body size)
|
||||
4. Component specs: hero section, feature cards, pricing table, CTA buttons
|
||||
5. Responsive breakpoints (mobile, tablet, desktop)
|
||||
```
|
||||
|
||||
### Midday: Build
|
||||
|
||||
**Step 2 — Activate Frontend Developer**
|
||||
|
||||
```
|
||||
Activate Frontend Developer.
|
||||
|
||||
Build a landing page from these specs:
|
||||
|
||||
Copy: [paste Content Creator output]
|
||||
Design: [paste UI Designer output]
|
||||
|
||||
Stack: HTML, Tailwind CSS, minimal vanilla JS (no framework needed).
|
||||
Requirements:
|
||||
- Responsive (mobile-first)
|
||||
- Fast (no heavy assets, system fonts OK)
|
||||
- Accessible (proper headings, alt text, focus states)
|
||||
- Include a working email signup form (action URL: /api/subscribe)
|
||||
|
||||
Deliver a single index.html file ready to deploy.
|
||||
```
|
||||
|
||||
### Afternoon: Optimize
|
||||
|
||||
**Step 3 — Activate Growth Hacker**
|
||||
|
||||
```
|
||||
Activate Growth Hacker.
|
||||
|
||||
Review this landing page for conversion optimization:
|
||||
|
||||
[paste the HTML or describe the current page]
|
||||
|
||||
Evaluate:
|
||||
1. Is the CTA above the fold?
|
||||
2. Is the value proposition clear in under 5 seconds?
|
||||
3. Any friction in the signup flow?
|
||||
4. What A/B tests would you run first?
|
||||
5. SEO basics: meta tags, OG tags, structured data
|
||||
|
||||
Give me specific changes, not general advice.
|
||||
```
|
||||
|
||||
## Timeline
|
||||
|
||||
| Time | Activity | Agent |
|
||||
|------|----------|-------|
|
||||
| 9:00 | Copy + design kick off (parallel) | Content Creator + UI Designer |
|
||||
| 11:00 | Build starts | Frontend Developer |
|
||||
| 14:00 | First version ready | — |
|
||||
| 14:30 | Conversion review | Growth Hacker |
|
||||
| 15:30 | Apply feedback | Frontend Developer |
|
||||
| 16:30 | Ship | Deploy to Vercel/Netlify |
|
||||
|
||||
## Key Patterns
|
||||
|
||||
1. **Parallel kickoff**: Copy and design happen at the same time since they're independent
|
||||
2. **Merge point**: Frontend Developer needs both outputs before starting
|
||||
3. **Feedback loop**: Growth Hacker reviews, then Frontend Developer applies changes
|
||||
4. **Time-boxed**: Each step has a clear timebox to prevent scope creep
|
||||
|
|
@ -1,155 +0,0 @@
|
|||
# Multi-Agent Workflow: Startup MVP
|
||||
|
||||
> A step-by-step example of how to coordinate multiple agents to go from idea to shipped MVP.
|
||||
|
||||
## The Scenario
|
||||
|
||||
You're building a SaaS MVP — a team retrospective tool for remote teams. You have 4 weeks to ship a working product with user signups, a core feature, and a landing page.
|
||||
|
||||
## Agent Team
|
||||
|
||||
| Agent | Role in this workflow |
|
||||
|-------|---------------------|
|
||||
| Sprint Prioritizer | Break the project into weekly sprints |
|
||||
| UX Researcher | Validate the idea with quick user interviews |
|
||||
| Backend Architect | Design the API and data model |
|
||||
| Frontend Developer | Build the React app |
|
||||
| Rapid Prototyper | Get the first version running fast |
|
||||
| Growth Hacker | Plan launch strategy while building |
|
||||
| Reality Checker | Gate each milestone before moving on |
|
||||
|
||||
## The Workflow
|
||||
|
||||
### Week 1: Discovery + Architecture
|
||||
|
||||
**Step 1 — Activate Sprint Prioritizer**
|
||||
|
||||
```
|
||||
Activate Sprint Prioritizer.
|
||||
|
||||
Project: RetroBoard — a real-time team retrospective tool for remote teams.
|
||||
Timeline: 4 weeks to MVP launch.
|
||||
Core features: user auth, create retro boards, add cards, vote, action items.
|
||||
Constraints: solo developer, React + Node.js stack, deploy to Vercel + Railway.
|
||||
|
||||
Break this into 4 weekly sprints with clear deliverables and acceptance criteria.
|
||||
```
|
||||
|
||||
**Step 2 — Activate UX Researcher (in parallel)**
|
||||
|
||||
```
|
||||
Activate UX Researcher.
|
||||
|
||||
I'm building a team retrospective tool for remote teams (5-20 people).
|
||||
Competitors: EasyRetro, Retrium, Parabol.
|
||||
|
||||
Run a quick competitive analysis and identify:
|
||||
1. What features are table stakes
|
||||
2. Where competitors fall short
|
||||
3. One differentiator we could own
|
||||
|
||||
Output a 1-page research brief.
|
||||
```
|
||||
|
||||
**Step 3 — Hand off to Backend Architect**
|
||||
|
||||
```
|
||||
Activate Backend Architect.
|
||||
|
||||
Here's our sprint plan: [paste Sprint Prioritizer output]
|
||||
Here's our research brief: [paste UX Researcher output]
|
||||
|
||||
Design the API and database schema for RetroBoard.
|
||||
Stack: Node.js, Express, PostgreSQL, Socket.io for real-time.
|
||||
|
||||
Deliver:
|
||||
1. Database schema (SQL)
|
||||
2. REST API endpoints list
|
||||
3. WebSocket events for real-time board updates
|
||||
4. Auth strategy recommendation
|
||||
```
|
||||
|
||||
### Week 2: Build Core Features
|
||||
|
||||
**Step 4 — Activate Frontend Developer + Rapid Prototyper**
|
||||
|
||||
```
|
||||
Activate Frontend Developer.
|
||||
|
||||
Here's the API spec: [paste Backend Architect output]
|
||||
|
||||
Build the RetroBoard React app:
|
||||
- Stack: React, TypeScript, Tailwind, Socket.io-client
|
||||
- Pages: Login, Dashboard, Board view
|
||||
- Components: RetroCard, VoteButton, ActionItem, BoardColumn
|
||||
|
||||
Start with the Board view — it's the core experience.
|
||||
Focus on real-time: when one user adds a card, everyone sees it.
|
||||
```
|
||||
|
||||
**Step 5 — Reality Check at midpoint**
|
||||
|
||||
```
|
||||
Activate Reality Checker.
|
||||
|
||||
We're at week 2 of a 4-week MVP build for RetroBoard.
|
||||
|
||||
Here's what we have so far:
|
||||
- Database schema: [paste]
|
||||
- API endpoints: [paste]
|
||||
- Frontend components: [paste]
|
||||
|
||||
Evaluate:
|
||||
1. Can we realistically ship in 2 more weeks?
|
||||
2. What should we cut to make the deadline?
|
||||
3. Any technical debt that will bite us at launch?
|
||||
```
|
||||
|
||||
### Week 3: Polish + Landing Page
|
||||
|
||||
**Step 6 — Frontend Developer continues, Growth Hacker starts**
|
||||
|
||||
```
|
||||
Activate Growth Hacker.
|
||||
|
||||
Product: RetroBoard — team retrospective tool, launching in 1 week.
|
||||
Target: Engineering managers and scrum masters at remote-first companies.
|
||||
Budget: $0 (organic launch only).
|
||||
|
||||
Create a launch plan:
|
||||
1. Landing page copy (hero, features, CTA)
|
||||
2. Launch channels (Product Hunt, Reddit, Hacker News, Twitter)
|
||||
3. Day-by-day launch sequence
|
||||
4. Metrics to track in week 1
|
||||
```
|
||||
|
||||
### Week 4: Launch
|
||||
|
||||
**Step 7 — Final Reality Check**
|
||||
|
||||
```
|
||||
Activate Reality Checker.
|
||||
|
||||
RetroBoard is ready to launch. Evaluate production readiness:
|
||||
|
||||
- Live URL: [url]
|
||||
- Test accounts created: yes
|
||||
- Error monitoring: Sentry configured
|
||||
- Database backups: daily automated
|
||||
|
||||
Run through the launch checklist and give a GO / NO-GO decision.
|
||||
Require evidence for each criterion.
|
||||
```
|
||||
|
||||
## Key Patterns
|
||||
|
||||
1. **Sequential handoffs**: Each agent's output becomes the next agent's input
|
||||
2. **Parallel work**: UX Researcher and Sprint Prioritizer can run simultaneously in Week 1
|
||||
3. **Quality gates**: Reality Checker at midpoint and before launch prevents shipping broken code
|
||||
4. **Context passing**: Always paste previous agent outputs into the next prompt — agents don't share memory
|
||||
|
||||
## Tips
|
||||
|
||||
- Copy-paste agent outputs between steps — don't summarize, use the full output
|
||||
- If a Reality Checker flags an issue, loop back to the relevant specialist to fix it
|
||||
- Keep the Orchestrator agent in mind for automating this flow once you're comfortable with the manual version
|
||||
|
|
@ -1,238 +0,0 @@
|
|||
# Multi-Agent Workflow: Startup MVP with Persistent Memory
|
||||
|
||||
> The same startup MVP workflow from [workflow-startup-mvp.md](workflow-startup-mvp.md), but with an MCP memory server handling state between agents. No more copy-paste handoffs.
|
||||
|
||||
## The Problem with Manual Handoffs
|
||||
|
||||
In the standard workflow, every agent-to-agent transition looks like this:
|
||||
|
||||
```
|
||||
Activate Backend Architect.
|
||||
|
||||
Here's our sprint plan: [paste Sprint Prioritizer output]
|
||||
Here's our research brief: [paste UX Researcher output]
|
||||
|
||||
Design the API and database schema for RetroBoard.
|
||||
...
|
||||
```
|
||||
|
||||
You are the glue. You copy-paste outputs between agents, keep track of what's been done, and hope you don't lose context along the way. It works for small projects, but it falls apart when:
|
||||
|
||||
- Sessions time out and you lose the output
|
||||
- Multiple agents need the same context
|
||||
- QA fails and you need to rewind to a previous state
|
||||
- The project spans days or weeks across many sessions
|
||||
|
||||
## The Fix
|
||||
|
||||
With an MCP memory server installed, agents store their deliverables in memory and retrieve what they need automatically. Handoffs become:
|
||||
|
||||
```
|
||||
Activate Backend Architect.
|
||||
|
||||
Project: RetroBoard. Recall previous context for this project
|
||||
and design the API and database schema.
|
||||
```
|
||||
|
||||
The agent searches memory for RetroBoard context, finds the sprint plan and research brief stored by previous agents, and picks up from there.
|
||||
|
||||
## Setup
|
||||
|
||||
Install any MCP-compatible memory server that supports `remember`, `recall`, and `rollback` operations. See [integrations/mcp-memory/README.md](../integrations/mcp-memory/README.md) for setup.
|
||||
|
||||
## The Scenario
|
||||
|
||||
Same as the standard workflow: a SaaS team retrospective tool (RetroBoard), 4 weeks to MVP, solo developer.
|
||||
|
||||
## Agent Team
|
||||
|
||||
| Agent | Role in this workflow |
|
||||
|-------|---------------------|
|
||||
| Sprint Prioritizer | Break the project into weekly sprints |
|
||||
| UX Researcher | Validate the idea with quick user interviews |
|
||||
| Backend Architect | Design the API and data model |
|
||||
| Frontend Developer | Build the React app |
|
||||
| Rapid Prototyper | Get the first version running fast |
|
||||
| Growth Hacker | Plan launch strategy while building |
|
||||
| Reality Checker | Gate each milestone before moving on |
|
||||
|
||||
Each agent has a Memory Integration section in their prompt (see [integrations/mcp-memory/README.md](../integrations/mcp-memory/README.md) for how to add it).
|
||||
|
||||
## The Workflow
|
||||
|
||||
### Week 1: Discovery + Architecture
|
||||
|
||||
**Step 1 — Activate Sprint Prioritizer**
|
||||
|
||||
```
|
||||
Activate Sprint Prioritizer.
|
||||
|
||||
Project: RetroBoard — a real-time team retrospective tool for remote teams.
|
||||
Timeline: 4 weeks to MVP launch.
|
||||
Core features: user auth, create retro boards, add cards, vote, action items.
|
||||
Constraints: solo developer, React + Node.js stack, deploy to Vercel + Railway.
|
||||
|
||||
Break this into 4 weekly sprints with clear deliverables and acceptance criteria.
|
||||
Remember your sprint plan tagged for this project when done.
|
||||
```
|
||||
|
||||
The Sprint Prioritizer produces the sprint plan and stores it in memory tagged with `sprint-prioritizer`, `retroboard`, and `sprint-plan`.
|
||||
|
||||
**Step 2 — Activate UX Researcher (in parallel)**
|
||||
|
||||
```
|
||||
Activate UX Researcher.
|
||||
|
||||
I'm building a team retrospective tool for remote teams (5-20 people).
|
||||
Competitors: EasyRetro, Retrium, Parabol.
|
||||
|
||||
Run a quick competitive analysis and identify:
|
||||
1. What features are table stakes
|
||||
2. Where competitors fall short
|
||||
3. One differentiator we could own
|
||||
|
||||
Output a 1-page research brief. Remember it tagged for this project when done.
|
||||
```
|
||||
|
||||
The UX Researcher stores the research brief tagged with `ux-researcher`, `retroboard`, and `research-brief`.
|
||||
|
||||
**Step 3 — Hand off to Backend Architect**
|
||||
|
||||
```
|
||||
Activate Backend Architect.
|
||||
|
||||
Project: RetroBoard. Recall the sprint plan and research brief from previous agents.
|
||||
Stack: Node.js, Express, PostgreSQL, Socket.io for real-time.
|
||||
|
||||
Design:
|
||||
1. Database schema (SQL)
|
||||
2. REST API endpoints list
|
||||
3. WebSocket events for real-time board updates
|
||||
4. Auth strategy recommendation
|
||||
|
||||
Remember each deliverable tagged for this project and for the frontend-developer.
|
||||
```
|
||||
|
||||
The Backend Architect recalls the sprint plan and research brief from memory automatically. No copy-paste. It stores its schema and API spec tagged with `backend-architect`, `retroboard`, `api-spec`, and `frontend-developer`.
|
||||
|
||||
### Week 2: Build Core Features
|
||||
|
||||
**Step 4 — Activate Frontend Developer + Rapid Prototyper**
|
||||
|
||||
```
|
||||
Activate Frontend Developer.
|
||||
|
||||
Project: RetroBoard. Recall the API spec and schema from the Backend Architect.
|
||||
|
||||
Build the RetroBoard React app:
|
||||
- Stack: React, TypeScript, Tailwind, Socket.io-client
|
||||
- Pages: Login, Dashboard, Board view
|
||||
- Components: RetroCard, VoteButton, ActionItem, BoardColumn
|
||||
|
||||
Start with the Board view — it's the core experience.
|
||||
Focus on real-time: when one user adds a card, everyone sees it.
|
||||
Remember your progress tagged for this project.
|
||||
```
|
||||
|
||||
The Frontend Developer pulls the API spec from memory and builds against it.
|
||||
|
||||
**Step 5 — Reality Check at midpoint**
|
||||
|
||||
```
|
||||
Activate Reality Checker.
|
||||
|
||||
Project: RetroBoard. We're at week 2 of a 4-week MVP build.
|
||||
|
||||
Recall all deliverables from previous agents for this project.
|
||||
|
||||
Evaluate:
|
||||
1. Can we realistically ship in 2 more weeks?
|
||||
2. What should we cut to make the deadline?
|
||||
3. Any technical debt that will bite us at launch?
|
||||
|
||||
Remember your verdict tagged for this project.
|
||||
```
|
||||
|
||||
The Reality Checker has full visibility into everything produced so far — the sprint plan, research brief, schema, API spec, and frontend progress — without you having to collect and paste it all.
|
||||
|
||||
### Week 3: Polish + Landing Page
|
||||
|
||||
**Step 6 — Frontend Developer continues, Growth Hacker starts**
|
||||
|
||||
```
|
||||
Activate Growth Hacker.
|
||||
|
||||
Product: RetroBoard — team retrospective tool, launching in 1 week.
|
||||
Target: Engineering managers and scrum masters at remote-first companies.
|
||||
Budget: $0 (organic launch only).
|
||||
|
||||
Recall the project context and Reality Checker's verdict.
|
||||
|
||||
Create a launch plan:
|
||||
1. Landing page copy (hero, features, CTA)
|
||||
2. Launch channels (Product Hunt, Reddit, Hacker News, Twitter)
|
||||
3. Day-by-day launch sequence
|
||||
4. Metrics to track in week 1
|
||||
|
||||
Remember the launch plan tagged for this project.
|
||||
```
|
||||
|
||||
### Week 4: Launch
|
||||
|
||||
**Step 7 — Final Reality Check**
|
||||
|
||||
```
|
||||
Activate Reality Checker.
|
||||
|
||||
Project: RetroBoard, ready to launch.
|
||||
|
||||
Recall all project context, previous verdicts, and the launch plan.
|
||||
|
||||
Evaluate production readiness:
|
||||
- Live URL: [url]
|
||||
- Test accounts created: yes
|
||||
- Error monitoring: Sentry configured
|
||||
- Database backups: daily automated
|
||||
|
||||
Run through the launch checklist and give a GO / NO-GO decision.
|
||||
Require evidence for each criterion.
|
||||
```
|
||||
|
||||
### When QA Fails: Rollback
|
||||
|
||||
In the standard workflow, when the Reality Checker rejects a deliverable, you go back to the responsible agent and try to explain what went wrong. With memory, the recovery loop is tighter:
|
||||
|
||||
```
|
||||
Activate Backend Architect.
|
||||
|
||||
Project: RetroBoard. The Reality Checker flagged issues with the API design.
|
||||
Recall the Reality Checker's feedback and your previous API spec.
|
||||
Roll back to your last known-good schema and address the specific issues raised.
|
||||
Remember the updated deliverables when done.
|
||||
```
|
||||
|
||||
The Backend Architect can see exactly what the Reality Checker flagged, recall its own previous work, roll back to a checkpoint, and produce a fix — all without you manually tracking versions.
|
||||
|
||||
## Before and After
|
||||
|
||||
| Aspect | Standard Workflow | With Memory |
|
||||
|--------|------------------|-------------|
|
||||
| **Handoffs** | Copy-paste full output between agents | Agents recall what they need automatically |
|
||||
| **Context loss** | Session timeouts lose everything | Memories persist across sessions |
|
||||
| **Multi-agent context** | Manually compile context from N agents | Agent searches memory for project tag |
|
||||
| **QA failure recovery** | Manually describe what went wrong | Agent recalls feedback + rolls back |
|
||||
| **Multi-day projects** | Re-establish context every session | Agent picks up where it left off |
|
||||
| **Setup required** | None | Install an MCP memory server |
|
||||
|
||||
## Key Patterns
|
||||
|
||||
1. **Tag everything with the project name**: This is what makes recall work. Every memory gets tagged with `retroboard` (or whatever your project is).
|
||||
2. **Tag deliverables for the receiving agent**: When the Backend Architect finishes an API spec, it tags the memory with `frontend-developer` so the Frontend Developer finds it on recall.
|
||||
3. **Reality Checker gets full visibility**: Because all agents store their work in memory, the Reality Checker can recall everything for the project without you compiling it.
|
||||
4. **Rollback replaces manual undo**: When something fails, roll back to the last checkpoint instead of trying to figure out what changed.
|
||||
|
||||
## Tips
|
||||
|
||||
- You don't need to modify every agent at once. Start by adding Memory Integration to the agents you use most and expand from there.
|
||||
- The memory instructions are prompts, not code. The LLM interprets them and calls the MCP tools as needed. You can adjust the wording to match your style.
|
||||
- Any MCP-compatible memory server that supports `remember`, `recall`, `rollback`, and `search` tools will work with this workflow.
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
{
|
||||
"name": "code",
|
||||
"description": "Core development hooks, auto-approve workflow, and cryptocurrency research data collection skills",
|
||||
"version": "0.2.0",
|
||||
"author": {
|
||||
"name": "Lethean",
|
||||
"email": "hello@host.uk.com"
|
||||
},
|
||||
"homepage": "https://forge.lthn.ai/core/agent",
|
||||
"repository": "https://forge.lthn.ai/core/agent.git",
|
||||
"license": "EUPL-1.2",
|
||||
"keywords": [
|
||||
"hooks",
|
||||
"auto-approve",
|
||||
"data-collection",
|
||||
"cryptocurrency",
|
||||
"archive"
|
||||
]
|
||||
}
|
||||
|
|
@ -1,36 +0,0 @@
|
|||
---
|
||||
name: remember
|
||||
description: Save a fact or decision to context for persistence across compacts
|
||||
args: <fact to remember>
|
||||
---
|
||||
|
||||
# Remember Context
|
||||
|
||||
Save the provided fact to `~/.claude/sessions/context.json`.
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
/core:remember Use Action pattern not Service
|
||||
/core:remember User prefers UK English
|
||||
/core:remember RFC: minimal state in pre-compact hook
|
||||
```
|
||||
|
||||
## Action
|
||||
|
||||
Run this command to save the fact:
|
||||
|
||||
```bash
|
||||
~/.claude/plugins/cache/core/scripts/capture-context.sh "<fact>" "user"
|
||||
```
|
||||
|
||||
Or if running from the plugin directory:
|
||||
|
||||
```bash
|
||||
"${CLAUDE_PLUGIN_ROOT}/scripts/capture-context.sh" "<fact>" "user"
|
||||
```
|
||||
|
||||
The fact will be:
|
||||
- Stored in context.json (max 20 items)
|
||||
- Included in pre-compact snapshots
|
||||
- Auto-cleared after 3 hours of inactivity
|
||||
|
|
@ -1,93 +0,0 @@
|
|||
{
|
||||
"$schema": "https://claude.ai/schemas/hooks.json",
|
||||
"hooks": {
|
||||
"PreToolUse": [
|
||||
{
|
||||
"matcher": "Bash",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "${CLAUDE_PLUGIN_ROOT}/hooks/prefer-core.sh"
|
||||
}
|
||||
],
|
||||
"description": "Block destructive commands (rm -rf, sed -i, xargs rm) and enforce core CLI"
|
||||
},
|
||||
{
|
||||
"matcher": "Write",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/block-docs.sh"
|
||||
}
|
||||
],
|
||||
"description": "Block random .md file creation"
|
||||
}
|
||||
],
|
||||
"PostToolUse": [
|
||||
{
|
||||
"matcher": "tool == \"Edit\" && tool_input.file_path matches \"\\.go$\"",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/go-format.sh"
|
||||
}
|
||||
],
|
||||
"description": "Auto-format Go files after edits"
|
||||
},
|
||||
{
|
||||
"matcher": "tool == \"Edit\" && tool_input.file_path matches \"\\.php$\"",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/php-format.sh"
|
||||
}
|
||||
],
|
||||
"description": "Auto-format PHP files after edits"
|
||||
},
|
||||
{
|
||||
"matcher": "tool == \"Edit\"",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/check-debug.sh"
|
||||
}
|
||||
],
|
||||
"description": "Warn about debug statements (dd, dump, fmt.Println)"
|
||||
},
|
||||
{
|
||||
"matcher": "tool == \"Bash\" && tool_input.command matches \"^git commit\"",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/post-commit-check.sh"
|
||||
}
|
||||
],
|
||||
"description": "Warn about uncommitted work after git commit"
|
||||
}
|
||||
],
|
||||
"PreCompact": [
|
||||
{
|
||||
"matcher": "*",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/pre-compact.sh"
|
||||
}
|
||||
],
|
||||
"description": "Save state before auto-compact to prevent amnesia"
|
||||
}
|
||||
],
|
||||
"SessionStart": [
|
||||
{
|
||||
"matcher": "*",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/session-start.sh"
|
||||
}
|
||||
],
|
||||
"description": "Restore recent session context on startup"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
|
@ -1,108 +0,0 @@
|
|||
#!/bin/bash
|
||||
# PreToolUse hook: Block dangerous commands, enforce core CLI
|
||||
#
|
||||
# BLOCKS:
|
||||
# - Raw go commands (use core go *)
|
||||
# - Destructive patterns (sed -i, xargs rm, etc.)
|
||||
# - Mass file operations (rm -rf, mv/cp with wildcards)
|
||||
#
|
||||
# This prevents "efficient shortcuts" that nuke codebases
|
||||
|
||||
read -r input
|
||||
full_command=$(echo "$input" | jq -r '.tool_input.command // empty')
|
||||
|
||||
# Strip heredoc content — only check the actual command, not embedded text
|
||||
# This prevents false positives from code/docs inside heredocs
|
||||
command=$(echo "$full_command" | sed -n '1p')
|
||||
if echo "$command" | grep -qE "<<\s*['\"]?[A-Z_]+"; then
|
||||
# First line has heredoc marker — only check the command portion before <<
|
||||
command=$(echo "$command" | sed -E 's/\s*<<.*$//')
|
||||
fi
|
||||
|
||||
# For multi-line commands joined with && or ;, check each segment
|
||||
# But still only the first line (not heredoc body)
|
||||
|
||||
# === HARD BLOCKS - Never allow these ===
|
||||
|
||||
# Block rm -rf, rm -r (except for known safe paths like node_modules, vendor, .cache)
|
||||
# Allow git rm -r (safe — git tracks everything, easily reversible)
|
||||
if echo "$command" | grep -qE 'rm\s+(-[a-zA-Z]*r[a-zA-Z]*|-[a-zA-Z]*f[a-zA-Z]*r|--recursive)'; then
|
||||
# git rm -r is safe — everything is tracked and recoverable
|
||||
if echo "$command" | grep -qE 'git\s+rm\s'; then
|
||||
: # allow git rm through
|
||||
# Allow only specific safe directories for raw rm
|
||||
elif ! echo "$command" | grep -qE 'rm\s+(-rf|-r)\s+(node_modules|vendor|\.cache|dist|build|__pycache__|\.pytest_cache|/tmp/)'; then
|
||||
echo '{"decision": "block", "message": "BLOCKED: Recursive delete is not allowed. Delete files individually or ask the user to run this command."}'
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
# Block mv/cp with dangerous wildcards (e.g. `cp * /tmp`, `mv ./* /dest`)
|
||||
# Allow specific file copies that happen to use glob in a for loop or path
|
||||
if echo "$command" | grep -qE '(mv|cp)\s+(\.\/)?\*\s'; then
|
||||
echo '{"decision": "block", "message": "BLOCKED: Mass file move/copy with bare wildcards is not allowed. Copy files individually."}'
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Block xargs with rm, mv, cp (mass operations)
|
||||
if echo "$command" | grep -qE 'xargs\s+.*(rm|mv|cp)'; then
|
||||
echo '{"decision": "block", "message": "BLOCKED: xargs with file operations is not allowed. Too risky for mass changes."}'
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Block find -exec with rm, mv, cp
|
||||
if echo "$command" | grep -qE 'find\s+.*-exec\s+.*(rm|mv|cp)'; then
|
||||
echo '{"decision": "block", "message": "BLOCKED: find -exec with file operations is not allowed. Too risky for mass changes."}'
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Block sed -i on LOCAL files only (allow on remote via ssh/docker exec)
|
||||
if echo "$command" | grep -qE '^sed\s+(-[a-zA-Z]*i|--in-place)'; then
|
||||
echo '{"decision": "block", "message": "BLOCKED: sed -i (in-place edit) on local files. Use the Edit tool."}'
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Block grep -l piped to destructive commands only (not head, wc, etc.)
|
||||
if echo "$command" | grep -qE 'grep\s+.*-l.*\|\s*(xargs|sed|rm|mv)'; then
|
||||
echo '{"decision": "block", "message": "BLOCKED: grep -l piped to destructive commands. Too risky."}'
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Block perl -i on local files
|
||||
if echo "$command" | grep -qE '^perl\s+-[a-zA-Z]*i'; then
|
||||
echo '{"decision": "block", "message": "BLOCKED: In-place file editing with perl. Use the Edit tool."}'
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# === REQUIRE CORE CLI ===
|
||||
|
||||
# Suggest core CLI for common go commands, but don't block
|
||||
# go work sync, go mod edit, go get, go install, go list etc. have no core wrapper
|
||||
case "$command" in
|
||||
"go test"*|"go build"*|"go fmt"*|"go vet"*)
|
||||
echo '{"decision": "block", "message": "Use `core go test`, `core build`, `core go fmt --fix`, `core go vet`. Raw go commands bypass quality checks."}'
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
# Allow all other go commands (go mod tidy, go work sync, go get, go run, etc.)
|
||||
|
||||
# Block raw php commands
|
||||
case "$command" in
|
||||
"php artisan serve"*|"./vendor/bin/pest"*|"./vendor/bin/pint"*|"./vendor/bin/phpstan"*)
|
||||
echo '{"decision": "block", "message": "Use `core php dev`, `core php test`, `core php fmt`, `core php analyse`. Raw php commands are not allowed."}'
|
||||
exit 0
|
||||
;;
|
||||
"composer test"*|"composer lint"*)
|
||||
echo '{"decision": "block", "message": "Use `core php test` or `core php fmt`. Raw composer commands are not allowed."}'
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
|
||||
# Block golangci-lint directly
|
||||
if echo "$command" | grep -qE '^golangci-lint'; then
|
||||
echo '{"decision": "block", "message": "Use `core go lint` instead of golangci-lint directly."}'
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# === APPROVED ===
|
||||
echo '{"decision": "approve"}'
|
||||
|
|
@ -1,37 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Block creation of random .md files - keeps docs consolidated
|
||||
|
||||
read -r input
|
||||
FILE_PATH=$(echo "$input" | jq -r '.tool_input.file_path // empty')
|
||||
|
||||
if [[ -n "$FILE_PATH" ]]; then
|
||||
# Allow known documentation files
|
||||
case "$FILE_PATH" in
|
||||
*README.md|*CLAUDE.md|*AGENTS.md|*CONTRIBUTING.md|*CHANGELOG.md|*LICENSE.md)
|
||||
echo "$input"
|
||||
exit 0
|
||||
;;
|
||||
# Allow docs/ directory
|
||||
*/docs/*.md|*/docs/**/*.md)
|
||||
echo "$input"
|
||||
exit 0
|
||||
;;
|
||||
# Allow Claude memory and plan files
|
||||
*/.claude/*.md|*/.claude/**/*.md)
|
||||
echo "$input"
|
||||
exit 0
|
||||
;;
|
||||
# Allow plugin development (commands, skills)
|
||||
*/commands/*.md|*/skills/*.md|*/skills/**/*.md)
|
||||
echo "$input"
|
||||
exit 0
|
||||
;;
|
||||
# Block other .md files
|
||||
*.md)
|
||||
echo '{"decision": "block", "message": "Use README.md or docs/ for documentation. Random .md files clutter the repo."}'
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
echo "$input"
|
||||
|
|
@ -1,44 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Capture context facts from tool output or conversation
|
||||
# Called by PostToolUse hooks to extract actionable items
|
||||
#
|
||||
# Stores in ~/.claude/sessions/context.json as:
|
||||
# [{"fact": "...", "source": "core go qa", "ts": 1234567890}, ...]
|
||||
|
||||
CONTEXT_FILE="${HOME}/.claude/sessions/context.json"
|
||||
TIMESTAMP=$(date '+%s')
|
||||
THREE_HOURS=10800
|
||||
|
||||
mkdir -p "${HOME}/.claude/sessions"
|
||||
|
||||
# Initialize if missing or stale
|
||||
if [[ -f "$CONTEXT_FILE" ]]; then
|
||||
FIRST_TS=$(jq -r '.[0].ts // 0' "$CONTEXT_FILE" 2>/dev/null)
|
||||
NOW=$(date '+%s')
|
||||
AGE=$((NOW - FIRST_TS))
|
||||
if [[ $AGE -gt $THREE_HOURS ]]; then
|
||||
echo "[]" > "$CONTEXT_FILE"
|
||||
fi
|
||||
else
|
||||
echo "[]" > "$CONTEXT_FILE"
|
||||
fi
|
||||
|
||||
# Read input (fact and source passed as args or stdin)
|
||||
FACT="${1:-}"
|
||||
SOURCE="${2:-manual}"
|
||||
|
||||
if [[ -z "$FACT" ]]; then
|
||||
# Try reading from stdin
|
||||
read -r FACT
|
||||
fi
|
||||
|
||||
if [[ -n "$FACT" ]]; then
|
||||
# Append to context (keep last 20 items)
|
||||
jq --arg fact "$FACT" --arg source "$SOURCE" --argjson ts "$TIMESTAMP" \
|
||||
'. + [{"fact": $fact, "source": $source, "ts": $ts}] | .[-20:]' \
|
||||
"$CONTEXT_FILE" > "${CONTEXT_FILE}.tmp" && mv "${CONTEXT_FILE}.tmp" "$CONTEXT_FILE"
|
||||
|
||||
echo "[Context] Saved: $FACT" >&2
|
||||
fi
|
||||
|
||||
exit 0
|
||||
|
|
@ -1,34 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Extract actionable items from core CLI output
|
||||
# Called PostToolUse on Bash commands that run core
|
||||
|
||||
read -r input
|
||||
COMMAND=$(echo "$input" | jq -r '.tool_input.command // empty')
|
||||
OUTPUT=$(echo "$input" | jq -r '.tool_output.output // empty')
|
||||
|
||||
CONTEXT_SCRIPT="$(dirname "$0")/capture-context.sh"
|
||||
|
||||
# Extract actionables from specific core commands
|
||||
case "$COMMAND" in
|
||||
"core go qa"*|"core go test"*|"core go lint"*)
|
||||
# Extract error/warning lines
|
||||
echo "$OUTPUT" | grep -E "^(ERROR|WARN|FAIL|---)" | head -5 | while read -r line; do
|
||||
"$CONTEXT_SCRIPT" "$line" "core go"
|
||||
done
|
||||
;;
|
||||
"core php test"*|"core php analyse"*)
|
||||
# Extract PHP errors
|
||||
echo "$OUTPUT" | grep -E "^(FAIL|Error|×)" | head -5 | while read -r line; do
|
||||
"$CONTEXT_SCRIPT" "$line" "core php"
|
||||
done
|
||||
;;
|
||||
"core build"*)
|
||||
# Extract build errors
|
||||
echo "$OUTPUT" | grep -E "^(error|cannot|undefined)" | head -5 | while read -r line; do
|
||||
"$CONTEXT_SCRIPT" "$line" "core build"
|
||||
done
|
||||
;;
|
||||
esac
|
||||
|
||||
# Pass through
|
||||
echo "$input"
|
||||
|
|
@ -1,51 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Post-commit hook: Check for uncommitted work that might get lost
|
||||
#
|
||||
# After committing task-specific files, check if there's other work
|
||||
# in the repo that should be committed or stashed
|
||||
|
||||
read -r input
|
||||
COMMAND=$(echo "$input" | jq -r '.tool_input.command // empty')
|
||||
|
||||
# Only run after git commit
|
||||
if ! echo "$COMMAND" | grep -qE '^git commit'; then
|
||||
echo "$input"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Check for remaining uncommitted changes
|
||||
UNSTAGED=$(git diff --name-only 2>/dev/null | wc -l | tr -d ' ')
|
||||
STAGED=$(git diff --cached --name-only 2>/dev/null | wc -l | tr -d ' ')
|
||||
UNTRACKED=$(git ls-files --others --exclude-standard 2>/dev/null | wc -l | tr -d ' ')
|
||||
|
||||
TOTAL=$((UNSTAGED + STAGED + UNTRACKED))
|
||||
|
||||
if [[ $TOTAL -gt 0 ]]; then
|
||||
echo "" >&2
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" >&2
|
||||
echo "[PostCommit] WARNING: Uncommitted work remains" >&2
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" >&2
|
||||
|
||||
if [[ $UNSTAGED -gt 0 ]]; then
|
||||
echo " Modified (unstaged): $UNSTAGED files" >&2
|
||||
git diff --name-only 2>/dev/null | head -5 | sed 's/^/ /' >&2
|
||||
[[ $UNSTAGED -gt 5 ]] && echo " ... and $((UNSTAGED - 5)) more" >&2
|
||||
fi
|
||||
|
||||
if [[ $STAGED -gt 0 ]]; then
|
||||
echo " Staged (not committed): $STAGED files" >&2
|
||||
git diff --cached --name-only 2>/dev/null | head -5 | sed 's/^/ /' >&2
|
||||
fi
|
||||
|
||||
if [[ $UNTRACKED -gt 0 ]]; then
|
||||
echo " Untracked: $UNTRACKED files" >&2
|
||||
git ls-files --others --exclude-standard 2>/dev/null | head -5 | sed 's/^/ /' >&2
|
||||
[[ $UNTRACKED -gt 5 ]] && echo " ... and $((UNTRACKED - 5)) more" >&2
|
||||
fi
|
||||
|
||||
echo "" >&2
|
||||
echo "Consider: commit these, stash them, or confirm they're intentionally left" >&2
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" >&2
|
||||
fi
|
||||
|
||||
echo "$input"
|
||||
|
|
@ -1,18 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Log PR URL and provide review command after PR creation
|
||||
|
||||
read -r input
|
||||
COMMAND=$(echo "$input" | jq -r '.tool_input.command // empty')
|
||||
OUTPUT=$(echo "$input" | jq -r '.tool_output.output // empty')
|
||||
|
||||
if [[ "$COMMAND" == *"gh pr create"* ]]; then
|
||||
PR_URL=$(echo "$OUTPUT" | grep -oE 'https://github.com/[^/]+/[^/]+/pull/[0-9]+' | head -1)
|
||||
if [[ -n "$PR_URL" ]]; then
|
||||
REPO=$(echo "$PR_URL" | sed -E 's|https://github.com/([^/]+/[^/]+)/pull/[0-9]+|\1|')
|
||||
PR_NUM=$(echo "$PR_URL" | sed -E 's|.*/pull/([0-9]+)|\1|')
|
||||
echo "[Hook] PR created: $PR_URL" >&2
|
||||
echo "[Hook] To review: gh pr review $PR_NUM --repo $REPO" >&2
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "$input"
|
||||
|
|
@ -1,118 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Session start: Load OpenBrain context + recent scratchpad
|
||||
#
|
||||
# 1. Query OpenBrain for project-relevant memories
|
||||
# 2. Read local scratchpad if recent (<3h)
|
||||
# 3. Output to stdout → injected into Claude's context
|
||||
|
||||
BRAIN_URL="${CORE_BRAIN_URL:-https://api.lthn.sh}"
|
||||
BRAIN_KEY="${CORE_BRAIN_KEY:-}"
|
||||
BRAIN_KEY_FILE="${HOME}/.claude/brain.key"
|
||||
STATE_FILE="${HOME}/.claude/sessions/scratchpad.md"
|
||||
THREE_HOURS=10800
|
||||
|
||||
# Load API key from file if not in env
|
||||
if [[ -z "$BRAIN_KEY" && -f "$BRAIN_KEY_FILE" ]]; then
|
||||
BRAIN_KEY=$(cat "$BRAIN_KEY_FILE" 2>/dev/null | tr -d '[:space:]')
|
||||
fi
|
||||
|
||||
# --- OpenBrain Recall ---
|
||||
if [[ -n "$BRAIN_KEY" ]]; then
|
||||
# Detect project from CWD
|
||||
PROJECT=""
|
||||
CWD=$(pwd)
|
||||
case "$CWD" in
|
||||
*/core/go-*) PROJECT=$(basename "$CWD" | sed 's/^go-//') ;;
|
||||
*/core/php-*) PROJECT=$(basename "$CWD" | sed 's/^php-//') ;;
|
||||
*/core/*) PROJECT=$(basename "$CWD") ;;
|
||||
*/host-uk/*) PROJECT=$(basename "$CWD") ;;
|
||||
*/lthn/*) PROJECT=$(basename "$CWD") ;;
|
||||
*/snider/*) PROJECT=$(basename "$CWD") ;;
|
||||
esac
|
||||
|
||||
echo "[SessionStart] OpenBrain: querying memories..." >&2
|
||||
|
||||
# 1. Recent session summaries (what did we do recently?)
|
||||
RECENT=$(curl -s --max-time 5 "${BRAIN_URL}/v1/brain/recall" \
|
||||
-X POST \
|
||||
-H 'Content-Type: application/json' \
|
||||
-H 'Accept: application/json' \
|
||||
-H "Authorization: Bearer ${BRAIN_KEY}" \
|
||||
-d "{\"query\": \"session summary milestone recent work completed\", \"top_k\": 3, \"agent_id\": \"cladius\"}" 2>/dev/null)
|
||||
|
||||
# 2. Project-specific context (if we're in a project dir)
|
||||
PROJECT_CTX=""
|
||||
if [[ -n "$PROJECT" ]]; then
|
||||
PROJECT_CTX=$(curl -s --max-time 5 "${BRAIN_URL}/v1/brain/recall" \
|
||||
-X POST \
|
||||
-H 'Content-Type: application/json' \
|
||||
-H 'Accept: application/json' \
|
||||
-H "Authorization: Bearer ${BRAIN_KEY}" \
|
||||
-d "{\"query\": \"architecture decisions conventions for ${PROJECT}\", \"top_k\": 3, \"agent_id\": \"cladius\", \"project\": \"${PROJECT}\"}" 2>/dev/null)
|
||||
fi
|
||||
|
||||
# Output to stdout (injected into context)
|
||||
RECENT_COUNT=$(echo "$RECENT" | python3 -c "import json,sys; d=json.load(sys.stdin); print(len(d.get('memories',[])))" 2>/dev/null || echo "0")
|
||||
|
||||
if [[ "$RECENT_COUNT" -gt 0 ]]; then
|
||||
echo ""
|
||||
echo "## OpenBrain — Recent Activity"
|
||||
echo ""
|
||||
echo "$RECENT" | python3 -c "
|
||||
import json, sys
|
||||
data = json.load(sys.stdin)
|
||||
for m in data.get('memories', []):
|
||||
t = m.get('type', '?')
|
||||
p = m.get('project', '?')
|
||||
content = m.get('content', '')[:300]
|
||||
print(f'**[{t}]** ({p}): {content}')
|
||||
print()
|
||||
" 2>/dev/null
|
||||
fi
|
||||
|
||||
if [[ -n "$PROJECT" && -n "$PROJECT_CTX" ]]; then
|
||||
PROJECT_COUNT=$(echo "$PROJECT_CTX" | python3 -c "import json,sys; d=json.load(sys.stdin); print(len(d.get('memories',[])))" 2>/dev/null || echo "0")
|
||||
if [[ "$PROJECT_COUNT" -gt 0 ]]; then
|
||||
echo ""
|
||||
echo "## OpenBrain — ${PROJECT} Context"
|
||||
echo ""
|
||||
echo "$PROJECT_CTX" | python3 -c "
|
||||
import json, sys
|
||||
data = json.load(sys.stdin)
|
||||
for m in data.get('memories', []):
|
||||
t = m.get('type', '?')
|
||||
content = m.get('content', '')[:300]
|
||||
print(f'**[{t}]**: {content}')
|
||||
print()
|
||||
" 2>/dev/null
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "[SessionStart] OpenBrain: ${RECENT_COUNT} recent + ${PROJECT_COUNT:-0} project memories loaded" >&2
|
||||
else
|
||||
echo "[SessionStart] OpenBrain: no API key (set CORE_BRAIN_KEY or create ~/.claude/brain.key)" >&2
|
||||
fi
|
||||
|
||||
# --- Local Scratchpad ---
|
||||
if [[ -f "$STATE_FILE" ]]; then
|
||||
FILE_TS=$(grep -E '^timestamp:' "$STATE_FILE" 2>/dev/null | cut -d' ' -f2)
|
||||
NOW=$(date '+%s')
|
||||
|
||||
if [[ -n "$FILE_TS" ]]; then
|
||||
AGE=$((NOW - FILE_TS))
|
||||
if [[ $AGE -lt $THREE_HOURS ]]; then
|
||||
echo "[SessionStart] Scratchpad: $(($AGE / 60)) min old" >&2
|
||||
echo ""
|
||||
echo "## Recent Scratchpad ($(($AGE / 60)) min ago)"
|
||||
echo ""
|
||||
cat "$STATE_FILE"
|
||||
else
|
||||
rm -f "$STATE_FILE"
|
||||
echo "[SessionStart] Scratchpad: >3h old, cleared" >&2
|
||||
fi
|
||||
else
|
||||
rm -f "$STATE_FILE"
|
||||
fi
|
||||
fi
|
||||
|
||||
exit 0
|
||||
|
|
@ -1,28 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Suggest /compact at logical intervals to manage context window
|
||||
# Tracks tool calls per session, suggests compaction every 50 calls
|
||||
|
||||
SESSION_ID="${CLAUDE_SESSION_ID:-$$}"
|
||||
COUNTER_FILE="/tmp/claude-tool-count-${SESSION_ID}"
|
||||
THRESHOLD="${COMPACT_THRESHOLD:-50}"
|
||||
|
||||
# Read or initialize counter
|
||||
if [[ -f "$COUNTER_FILE" ]]; then
|
||||
COUNT=$(($(cat "$COUNTER_FILE") + 1))
|
||||
else
|
||||
COUNT=1
|
||||
fi
|
||||
|
||||
echo "$COUNT" > "$COUNTER_FILE"
|
||||
|
||||
# Suggest compact at threshold
|
||||
if [[ $COUNT -eq $THRESHOLD ]]; then
|
||||
echo "[Compact] ${THRESHOLD} tool calls - consider /compact if transitioning phases" >&2
|
||||
fi
|
||||
|
||||
# Suggest at intervals after threshold
|
||||
if [[ $COUNT -gt $THRESHOLD ]] && [[ $((COUNT % 25)) -eq 0 ]]; then
|
||||
echo "[Compact] ${COUNT} tool calls - good checkpoint for /compact" >&2
|
||||
fi
|
||||
|
||||
exit 0
|
||||
22
claude/core/.claude-plugin/plugin.json
Normal file
22
claude/core/.claude-plugin/plugin.json
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
{
|
||||
"name": "core",
|
||||
"description": "Core agent platform — dispatch (local + remote), verify+merge, CodeRabbit/Codex review queue, GitHub mirror, cross-agent messaging, OpenBrain integration, inbox notifications",
|
||||
"version": "0.15.0",
|
||||
"author": {
|
||||
"name": "Lethean Community",
|
||||
"email": "hello@lethean.io"
|
||||
},
|
||||
"homepage": "https://dappco.re/agent/claude",
|
||||
"repository": "https://github.com/dAppCore/agent.git",
|
||||
"license": "EUPL-1.2",
|
||||
"keywords": [
|
||||
"agentic",
|
||||
"dispatch",
|
||||
"mcp",
|
||||
"review",
|
||||
"coderabbit",
|
||||
"codex",
|
||||
"messaging",
|
||||
"openbrain"
|
||||
]
|
||||
}
|
||||
54
claude/core/agents/agent-task-code-review.md
Normal file
54
claude/core/agents/agent-task-code-review.md
Normal file
|
|
@ -0,0 +1,54 @@
|
|||
---
|
||||
name: agent-task-code-review
|
||||
description: Reviews code for bugs, security issues, convention violations, and quality problems. Use after completing a coding task to catch issues before commit. Produces severity-ranked findings (critical/high/medium/low).
|
||||
tools: Glob, Grep, LS, Read, Bash
|
||||
model: sonnet
|
||||
color: red
|
||||
---
|
||||
|
||||
You are reviewing code in the Core ecosystem. Your job is to find real issues — not noise.
|
||||
|
||||
## What to Review
|
||||
|
||||
Review ALL files changed since the last commit (or since origin/main if on a feature branch). Run `git diff --name-only origin/main..HEAD` or `git diff --name-only HEAD~1` to find changed files.
|
||||
|
||||
## Core Conventions (MUST check)
|
||||
|
||||
- **Error handling**: `coreerr.E("pkg.Method", "message", err)` from go-log. Always 3 args. NEVER `fmt.Errorf` or `errors.New`.
|
||||
- **File I/O**: `coreio.Local.Read/Write/EnsureDir` from go-io. NEVER `os.ReadFile/WriteFile/MkdirAll`. Use `WriteMode` with 0600 for sensitive files.
|
||||
- **No hardcoded paths**: No `/Users/snider`, `/home/claude`, or `host-uk` in code. Use env vars or `CoreRoot()`.
|
||||
- **UK English**: colour, organisation, centre, initialise in comments.
|
||||
- **Nil pointer safety**: Always check `err != nil` BEFORE accessing `resp.StatusCode`. Never `if err != nil || resp.StatusCode != 200`.
|
||||
- **Type assertion safety**: Use comma-ok pattern `v, ok := x.(Type)`, never bare `x.(Type)`.
|
||||
|
||||
## Security Focus
|
||||
|
||||
- Tokens/secrets in error messages or logs
|
||||
- Path traversal in file operations
|
||||
- Unsafe type assertions (panic risk)
|
||||
- Race conditions (shared state without mutex)
|
||||
- File permissions (sensitive data should be 0600)
|
||||
|
||||
## Confidence Scoring
|
||||
|
||||
Rate each finding 0-100:
|
||||
- **90+**: Confirmed bug or security issue — will cause problems
|
||||
- **75**: Very likely real — double-checked against code
|
||||
- **50**: Probably real but might be acceptable
|
||||
- **25**: Might be false positive — flag but don't insist
|
||||
|
||||
Only report findings with confidence >= 50.
|
||||
|
||||
## Output Format
|
||||
|
||||
For each finding:
|
||||
```
|
||||
[SEVERITY] file.go:LINE (confidence: N)
|
||||
Description of the issue.
|
||||
Suggested fix.
|
||||
```
|
||||
|
||||
Severities: CRITICAL, HIGH, MEDIUM, LOW
|
||||
|
||||
End with a summary: `X critical, Y high, Z medium, W low findings.`
|
||||
If no findings: `No findings. Code is clean.`
|
||||
51
claude/core/agents/agent-task-code-simplifier.md
Normal file
51
claude/core/agents/agent-task-code-simplifier.md
Normal file
|
|
@ -0,0 +1,51 @@
|
|||
---
|
||||
name: agent-task-code-simplifier
|
||||
description: Simplifies and refines code for clarity, consistency, and maintainability while preserving all functionality. Use after code-reviewer findings are fixed to consolidate and polish. Focuses on recently modified files.
|
||||
tools: Glob, Grep, LS, Read, Edit, Write, Bash
|
||||
model: sonnet
|
||||
color: blue
|
||||
---
|
||||
|
||||
You simplify code. You do NOT add features, fix bugs, or change behaviour. You make code cleaner.
|
||||
|
||||
## What to Simplify
|
||||
|
||||
Focus on files changed since the last commit. Run `git diff --name-only origin/main..HEAD` to find them.
|
||||
|
||||
## Simplification Targets
|
||||
|
||||
1. **Duplicate code**: Two blocks doing the same thing → extract helper function
|
||||
2. **Long functions**: >50 lines → split into focused subfunctions
|
||||
3. **Redundant wrappers**: Function that just calls another function with same args → remove wrapper, use directly
|
||||
4. **Dead code**: Unreachable branches, unused variables, functions with no callers → remove
|
||||
5. **Import cleanup**: Unused imports, wrong aliases, inconsistent ordering
|
||||
6. **Unnecessary complexity**: Nested ifs that can be early-returned, long switch cases that can be maps
|
||||
|
||||
## Rules
|
||||
|
||||
- NEVER change public API signatures
|
||||
- NEVER change behaviour
|
||||
- NEVER add features
|
||||
- NEVER add comments to code you didn't simplify
|
||||
- DO consolidate duplicate error handling
|
||||
- DO remove redundant nil checks
|
||||
- DO flatten nested conditionals with early returns
|
||||
- DO replace magic strings with constants if used more than twice
|
||||
|
||||
## Process
|
||||
|
||||
1. Read each changed file
|
||||
2. Identify simplification opportunities
|
||||
3. Apply changes one file at a time
|
||||
4. Run `go build ./...` after each file to verify
|
||||
5. If build breaks, revert and move on
|
||||
|
||||
## Output
|
||||
|
||||
For each simplification applied:
|
||||
```
|
||||
file.go: [what was simplified] — [why it's better]
|
||||
```
|
||||
|
||||
End with: `N files simplified, M lines removed.`
|
||||
If nothing to simplify: `Code is already clean.`
|
||||
|
|
@ -1,5 +1,5 @@
|
|||
---
|
||||
name: review
|
||||
name: code-review
|
||||
description: Perform code review on staged changes or PRs
|
||||
args: [commit-range|--pr=N|--security]
|
||||
---
|
||||
33
claude/core/commands/dispatch.md
Normal file
33
claude/core/commands/dispatch.md
Normal file
|
|
@ -0,0 +1,33 @@
|
|||
---
|
||||
name: dispatch
|
||||
description: Dispatch a subagent to work on a task in a sandboxed workspace
|
||||
arguments:
|
||||
- name: repo
|
||||
description: Target repo (e.g. go-io, go-scm, mcp)
|
||||
required: true
|
||||
- name: task
|
||||
description: What the agent should do
|
||||
required: true
|
||||
- name: agent
|
||||
description: Agent type (claude, gemini, codex)
|
||||
default: claude
|
||||
- name: template
|
||||
description: Prompt template (coding, conventions, security)
|
||||
default: coding
|
||||
- name: plan
|
||||
description: Plan template (bug-fix, code-review, new-feature, refactor, feature-port)
|
||||
- name: persona
|
||||
description: Persona slug (e.g. engineering/engineering-backend-architect)
|
||||
---
|
||||
|
||||
Dispatch a subagent to work on `$ARGUMENTS.repo` with task: `$ARGUMENTS.task`
|
||||
|
||||
Use the `mcp__core__agentic_dispatch` tool with:
|
||||
- repo: $ARGUMENTS.repo
|
||||
- task: $ARGUMENTS.task
|
||||
- agent: $ARGUMENTS.agent
|
||||
- template: $ARGUMENTS.template
|
||||
- plan_template: $ARGUMENTS.plan (if provided)
|
||||
- persona: $ARGUMENTS.persona (if provided)
|
||||
|
||||
After dispatching, report the workspace dir, PID, and whether it was queued or started immediately.
|
||||
|
|
@ -11,11 +11,11 @@ Run a 5-stage automated code review pipeline using specialised agent personas.
|
|||
## Usage
|
||||
|
||||
```
|
||||
/review:pipeline # Staged changes
|
||||
/review:pipeline HEAD~3..HEAD # Commit range
|
||||
/review:pipeline --pr=123 # PR diff (via gh)
|
||||
/review:pipeline --stage=security # Single stage only
|
||||
/review:pipeline --skip=fix # Review only, no fixes
|
||||
/core:pipeline # Staged changes
|
||||
/core:pipeline HEAD~3..HEAD # Commit range
|
||||
/core:pipeline --pr=123 # PR diff (via gh)
|
||||
/core:pipeline --stage=security # Single stage only
|
||||
/core:pipeline --skip=fix # Review only, no fixes
|
||||
```
|
||||
|
||||
## Pipeline Stages
|
||||
|
|
@ -66,17 +66,17 @@ Dispatch each stage as a subagent using the Agent tool. Each stage receives:
|
|||
- The diff context
|
||||
- The list of changed files
|
||||
- Findings from all previous stages
|
||||
- Its agent persona (read from agents/ directory)
|
||||
- Its agent persona (read from pkg/lib/persona/ directory)
|
||||
|
||||
**Stage 1 — Security Review:**
|
||||
- Read persona: `agents/engineering/engineering-security-engineer.md`
|
||||
- Read persona: `pkg/lib/persona/secops/developer.md`
|
||||
- Dispatch subagent with persona + diff
|
||||
- Task: Read-only security review. Find threats, injection, tenant isolation gaps
|
||||
- Output: Structured findings with severity ratings
|
||||
- If any CRITICAL findings → flag for Stage 2
|
||||
|
||||
**Stage 2 — Fix (conditional):**
|
||||
- Read persona: `agents/engineering/engineering-senior-developer.md`
|
||||
- Read persona: `pkg/lib/persona/code/senior-developer.md`
|
||||
- SKIP if `--skip=fix` was passed
|
||||
- SKIP if Stage 1 found no CRITICAL issues
|
||||
- Dispatch subagent with persona + Stage 1 Critical findings
|
||||
|
|
@ -85,19 +85,19 @@ Dispatch each stage as a subagent using the Agent tool. Each stage receives:
|
|||
- Output: List of files modified and what was fixed
|
||||
|
||||
**Stage 3 — Test Analysis:**
|
||||
- Read persona: `agents/testing/testing-api-tester.md`
|
||||
- Read persona: `pkg/lib/persona/testing/api-tester.md`
|
||||
- Dispatch subagent with persona + diff + changed files
|
||||
- Task: Run tests (`composer test` or `core go test`), analyse which changes have test coverage
|
||||
- Output: Test results (pass/fail/count) + coverage gaps
|
||||
|
||||
**Stage 4 — Architecture Review:**
|
||||
- Read persona: `agents/engineering/engineering-backend-architect.md`
|
||||
- Read persona: `pkg/lib/persona/code/backend-architect.md`
|
||||
- Dispatch subagent with persona + diff + changed files
|
||||
- Task: Check lifecycle event usage, Actions pattern adherence, tenant isolation, namespace mapping
|
||||
- Output: Architecture assessment with specific findings
|
||||
|
||||
**Stage 5 — Reality Check (final gate):**
|
||||
- Read persona: `agents/testing/testing-reality-checker.md`
|
||||
- Read persona: `pkg/lib/persona/testing/reality-checker.md`
|
||||
- Dispatch subagent with persona + ALL prior stage findings + test output
|
||||
- Task: Evidence-based final verdict. Default to NEEDS WORK.
|
||||
- Output: Verdict (READY / NEEDS WORK / FAILED) + quality rating + required fixes
|
||||
|
|
@ -155,11 +155,11 @@ For single-stage mode, still gather the diff but skip prior/subsequent stages.
|
|||
All personas live in the `agents/` directory relative to the plugin root's parent:
|
||||
|
||||
```
|
||||
${CLAUDE_PLUGIN_ROOT}/../../agents/engineering/engineering-security-engineer.md
|
||||
${CLAUDE_PLUGIN_ROOT}/../../agents/engineering/engineering-senior-developer.md
|
||||
${CLAUDE_PLUGIN_ROOT}/../../agents/testing/testing-api-tester.md
|
||||
${CLAUDE_PLUGIN_ROOT}/../../agents/engineering/engineering-backend-architect.md
|
||||
${CLAUDE_PLUGIN_ROOT}/../../agents/testing/testing-reality-checker.md
|
||||
${CLAUDE_PLUGIN_ROOT}/../../pkg/lib/persona/secops/developer.md
|
||||
${CLAUDE_PLUGIN_ROOT}/../../pkg/lib/persona/code/senior-developer.md
|
||||
${CLAUDE_PLUGIN_ROOT}/../../pkg/lib/persona/testing/api-tester.md
|
||||
${CLAUDE_PLUGIN_ROOT}/../../pkg/lib/persona/code/backend-architect.md
|
||||
${CLAUDE_PLUGIN_ROOT}/../../pkg/lib/persona/testing/reality-checker.md
|
||||
```
|
||||
|
||||
Read each persona file before dispatching that stage's subagent.
|
||||
|
|
@ -44,10 +44,10 @@ Or:
|
|||
✓ No debug statements
|
||||
✗ Formatting needed: 1 file
|
||||
|
||||
**Not ready** - run `/verify:verify` for details
|
||||
**Not ready** - run `/core:verify` for details
|
||||
```
|
||||
|
||||
## When to Use
|
||||
|
||||
Use `/verify:ready` for a quick check before committing.
|
||||
Use `/verify:verify` for full verification including tests.
|
||||
Use `/core:ready` for a quick check before committing.
|
||||
Use `/core:verify` for full verification including tests.
|
||||
19
claude/core/commands/recall.md
Normal file
19
claude/core/commands/recall.md
Normal file
|
|
@ -0,0 +1,19 @@
|
|||
---
|
||||
name: recall
|
||||
description: Search OpenBrain for memories and context
|
||||
arguments:
|
||||
- name: query
|
||||
description: What to search for
|
||||
required: true
|
||||
- name: project
|
||||
description: Filter by project
|
||||
- name: type
|
||||
description: Filter by type (decision, plan, convention, architecture, observation, fact)
|
||||
---
|
||||
|
||||
Use the `mcp__core__brain_recall` tool with:
|
||||
- query: $ARGUMENTS.query
|
||||
- top_k: 5
|
||||
- filter with project and type if provided
|
||||
|
||||
Show results with score, type, project, date, and content preview.
|
||||
28
claude/core/commands/remember.md
Normal file
28
claude/core/commands/remember.md
Normal file
|
|
@ -0,0 +1,28 @@
|
|||
---
|
||||
name: remember
|
||||
description: Save a fact or decision to OpenBrain for persistence across sessions
|
||||
args: <fact to remember>
|
||||
allowed-tools: ["mcp__core__brain_remember"]
|
||||
---
|
||||
|
||||
# Remember
|
||||
|
||||
Store the provided fact in OpenBrain so it persists across sessions and is available to all agents (Cladius, Charon).
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
/core:remember Use Action pattern not Service
|
||||
/core:remember User prefers UK English
|
||||
/core:remember RFC: minimal state in pre-compact hook
|
||||
```
|
||||
|
||||
## Action
|
||||
|
||||
Use the `brain_remember` MCP tool to store the fact:
|
||||
|
||||
- **content**: The fact provided by the user
|
||||
- **type**: Pick the best fit — `decision`, `convention`, `observation`, `fact`, `plan`, `architecture`
|
||||
- **project**: Infer from the current working directory if possible
|
||||
|
||||
Confirm what was saved.
|
||||
|
|
@ -1,5 +1,5 @@
|
|||
---
|
||||
name: pr
|
||||
name: review-pr
|
||||
description: Review a pull request
|
||||
args: <pr-number>
|
||||
---
|
||||
|
|
@ -11,9 +11,9 @@ Review a GitHub pull request.
|
|||
## Usage
|
||||
|
||||
```
|
||||
/review:pr 123
|
||||
/review:pr 123 --security
|
||||
/review:pr 123 --quick
|
||||
/core:review-pr 123
|
||||
/core:review-pr 123 --security
|
||||
/core:review-pr 123 --quick
|
||||
```
|
||||
|
||||
## Process
|
||||
19
claude/core/commands/review.md
Normal file
19
claude/core/commands/review.md
Normal file
|
|
@ -0,0 +1,19 @@
|
|||
---
|
||||
name: review
|
||||
description: Review completed agent workspace — show output, git diff, and merge options
|
||||
arguments:
|
||||
- name: workspace
|
||||
description: Workspace name (e.g. go-html-1773592564). If omitted, shows all completed.
|
||||
---
|
||||
|
||||
If no workspace specified, use `mcp__core__agentic_status` to list all workspaces, then show only completed ones with a summary table.
|
||||
|
||||
If workspace specified:
|
||||
1. Read the agent log file: `.core/workspace/{workspace}/agent-*.log`
|
||||
2. Show the last 30 lines of output
|
||||
3. Check git diff in the workspace: `git -C .core/workspace/{workspace}/src log --oneline main..HEAD`
|
||||
4. Show the diff stat: `git -C .core/workspace/{workspace}/src diff --stat main`
|
||||
5. Ask if the user wants to:
|
||||
- **Merge**: fetch branch into real repo, push to forge
|
||||
- **Discard**: delete the workspace
|
||||
- **Resume**: dispatch another agent to continue the work
|
||||
12
claude/core/commands/scan.md
Normal file
12
claude/core/commands/scan.md
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
---
|
||||
name: scan
|
||||
description: Scan Forge repos for open issues with actionable labels (agentic, help-wanted, bug)
|
||||
arguments:
|
||||
- name: org
|
||||
description: Forge org to scan
|
||||
default: core
|
||||
---
|
||||
|
||||
Use the `mcp__core__agentic_scan` tool with org: $ARGUMENTS.org
|
||||
|
||||
Show results as a table with columns: Repo, Issue #, Title, Labels.
|
||||
11
claude/core/commands/status.md
Normal file
11
claude/core/commands/status.md
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
---
|
||||
name: status
|
||||
description: Show status of all agent workspaces (running, completed, blocked, failed)
|
||||
---
|
||||
|
||||
Use the `mcp__core__agentic_status` tool to list all agent workspaces.
|
||||
|
||||
Show results as a table with columns: Name, Status, Agent, Repo, Task, Age.
|
||||
|
||||
For blocked workspaces, show the question from BLOCKED.md.
|
||||
For completed workspaces with output, show the last 10 lines of the agent log.
|
||||
24
claude/core/commands/sweep.md
Normal file
24
claude/core/commands/sweep.md
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
---
|
||||
name: sweep
|
||||
description: Dispatch a batch audit across all Go repos in the ecosystem
|
||||
arguments:
|
||||
- name: template
|
||||
description: Audit template (conventions, security)
|
||||
default: conventions
|
||||
- name: agent
|
||||
description: Agent type for the sweep
|
||||
default: gemini
|
||||
- name: repos
|
||||
description: Comma-separated repos to include (default: all Go repos)
|
||||
---
|
||||
|
||||
Run a batch conventions or security audit across the Go ecosystem.
|
||||
|
||||
1. If repos not specified, find all Go repos in ~/Code/core/ that have a go.mod
|
||||
2. For each repo, call `mcp__core__agentic_dispatch` with:
|
||||
- repo: {repo name}
|
||||
- task: "{template} audit - UK English, error handling, interface checks, import aliasing"
|
||||
- agent: $ARGUMENTS.agent
|
||||
- template: $ARGUMENTS.template
|
||||
3. Report how many were dispatched vs queued
|
||||
4. Tell the user they can check progress with `/core:status` and review results with `/core:review`
|
||||
|
|
@ -1,18 +1,6 @@
|
|||
{
|
||||
"$schema": "https://claude.ai/schemas/hooks.json",
|
||||
"hooks": {
|
||||
"PreToolUse": [
|
||||
{
|
||||
"matcher": "Bash",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "${CLAUDE_PLUGIN_ROOT}/hooks/prefer-core.sh"
|
||||
}
|
||||
],
|
||||
"description": "Block destructive commands (rm -rf, sed -i, xargs rm) and enforce core CLI"
|
||||
},
|
||||
],
|
||||
"PostToolUse": [
|
||||
{
|
||||
"matcher": "tool == \"Edit\" && tool_input.file_path matches \"\\.go$\"",
|
||||
|
|
@ -45,14 +33,36 @@
|
|||
"description": "Warn about debug statements (dd, dump, fmt.Println)"
|
||||
},
|
||||
{
|
||||
"matcher": "tool == \"Bash\" && tool_input.command matches \"^git commit\"",
|
||||
"matcher": "tool == \"Bash\" && tool_input.command matches \"^gh pr create\"",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/post-commit-check.sh"
|
||||
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/post-pr-create.sh"
|
||||
}
|
||||
],
|
||||
"description": "Warn about uncommitted work after git commit"
|
||||
"description": "Suggest review after PR creation"
|
||||
},
|
||||
{
|
||||
"matcher": "*",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/check-notify.sh"
|
||||
}
|
||||
],
|
||||
"description": "Check for inbox notifications (marker file, no API calls)"
|
||||
}
|
||||
],
|
||||
"PreToolUse": [
|
||||
{
|
||||
"matcher": "tool == \"Bash\" && tool_input.command matches \"^git push\"",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/pre-push-check.sh"
|
||||
}
|
||||
],
|
||||
"description": "Warn about unpushed verification before git push"
|
||||
}
|
||||
],
|
||||
"Stop": [
|
||||
|
|
@ -92,6 +102,18 @@
|
|||
],
|
||||
"description": "Restore recent session context on startup"
|
||||
}
|
||||
],
|
||||
"Notification": [
|
||||
{
|
||||
"matcher": "notification_type == \"idle_prompt\"",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/check-completions.sh"
|
||||
}
|
||||
],
|
||||
"description": "Check for agent completions when idle"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
12
claude/core/mcp.json
Normal file
12
claude/core/mcp.json
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
{
|
||||
"mcpServers": {
|
||||
"core": {
|
||||
"type": "stdio",
|
||||
"command": "core-agent",
|
||||
"args": ["mcp"],
|
||||
"env": {
|
||||
"MONITOR_INTERVAL": "15s"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue