refactor: migrate cmd/ml to go-ml, remove replaces and go.work
Some checks failed
Deploy / Deploy to Production (push) Has been skipped
Deploy / Test (push) Failing after 11s
Deploy / Build App Image (push) Has been skipped
Deploy / Build Web Image (push) Has been skipped
Security Scan / Secret Detection (push) Failing after 10s
Security Scan / Go Vulnerability Check (push) Failing after 10m20s
Security Scan / Dependency & Config Scan (push) Failing after 25s
Deploy / Build Core Image (push) Failing after 13m6s
- Move 40 ML command files to forge.lthn.ai/core/go-ml/cmd
- Remove all replace directives from go.mod
- Remove go.work (repos resolve from forge directly)
- Fix cache.New call to match updated API signature
- Update main.go import to forge.lthn.ai/core/go-ml/cmd

Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
parent: 2e24bb59f6
commit: a7d09e4c67

44 changed files with 147 additions and 4829 deletions
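For reference, the main.go change named in the message amounts to swapping the command package import. A minimal sketch, assuming the old in-tree path and a blank import for init()-based command registration; only the new path forge.lthn.ai/core/go-ml/cmd is stated in the commit:

	import (
		// before (assumed in-tree location of the ML commands):
		// _ "forge.lthn.ai/core/go/cmd/ml"
		// after: the ML commands resolve from the extracted go-ml module
		_ "forge.lthn.ai/core/go-ml/cmd"
	)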
cmd/ml/chat.js (832 deletions)

@@ -1,832 +0,0 @@
// src/styles.ts
var chatStyles = `
:host {
  display: flex;
  flex-direction: column;
  background: var(--lem-bg, #1a1a1e);
  color: var(--lem-text, #e0e0e0);
  font-family: var(--lem-font, system-ui, -apple-system, sans-serif);
  font-size: 14px;
  line-height: 1.5;
  border-radius: 12px;
  overflow: hidden;
  border: 1px solid rgba(255, 255, 255, 0.08);
}

.header {
  display: flex;
  align-items: center;
  gap: 10px;
  padding: 14px 18px;
  background: rgba(255, 255, 255, 0.03);
  border-bottom: 1px solid rgba(255, 255, 255, 0.06);
  flex-shrink: 0;
}

.header-icon {
  width: 28px;
  height: 28px;
  border-radius: 8px;
  background: var(--lem-accent, #5865f2);
  display: flex;
  align-items: center;
  justify-content: center;
  font-size: 14px;
  font-weight: 700;
  color: #fff;
}

.header-title {
  font-size: 15px;
  font-weight: 600;
  color: var(--lem-text, #e0e0e0);
}

.header-model {
  font-size: 11px;
  color: rgba(255, 255, 255, 0.35);
  margin-left: auto;
  font-family: ui-monospace, SFMono-Regular, Menlo, monospace;
}

.header-status {
  width: 8px;
  height: 8px;
  border-radius: 50%;
  background: #43b581;
  flex-shrink: 0;
}

.header-status.disconnected {
  background: #f04747;
}
`;
var messagesStyles = `
:host {
  display: block;
  flex: 1;
  overflow-y: auto;
  overflow-x: hidden;
  padding: 16px 0;
  scroll-behavior: smooth;
}

:host::-webkit-scrollbar {
  width: 6px;
}

:host::-webkit-scrollbar-track {
  background: transparent;
}

:host::-webkit-scrollbar-thumb {
  background: rgba(255, 255, 255, 0.12);
  border-radius: 3px;
}

.empty {
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  height: 100%;
  gap: 12px;
  color: rgba(255, 255, 255, 0.25);
}

.empty-icon {
  font-size: 36px;
  opacity: 0.4;
}

.empty-text {
  font-size: 14px;
}
`;
var messageStyles = `
:host {
  display: block;
  padding: 6px 18px;
}

:host([role="user"]) .bubble {
  background: var(--lem-msg-user, #2a2a3e);
  margin-left: 40px;
  border-radius: 12px 12px 4px 12px;
}

:host([role="assistant"]) .bubble {
  background: var(--lem-msg-assistant, #1e1e2a);
  margin-right: 40px;
  border-radius: 12px 12px 12px 4px;
}

.bubble {
  padding: 10px 14px;
  word-wrap: break-word;
  overflow-wrap: break-word;
}

.role {
  font-size: 11px;
  font-weight: 600;
  text-transform: uppercase;
  letter-spacing: 0.5px;
  margin-bottom: 4px;
  color: rgba(255, 255, 255, 0.35);
}

:host([role="assistant"]) .role {
  color: var(--lem-accent, #5865f2);
}

.content {
  color: var(--lem-text, #e0e0e0);
  line-height: 1.6;
}

.content p {
  margin: 0 0 8px 0;
}

.content p:last-child {
  margin-bottom: 0;
}

.content strong {
  font-weight: 600;
  color: #fff;
}

.content em {
  font-style: italic;
  color: rgba(255, 255, 255, 0.8);
}

.content code {
  font-family: ui-monospace, SFMono-Regular, Menlo, monospace;
  font-size: 12px;
  background: rgba(0, 0, 0, 0.3);
  padding: 2px 5px;
  border-radius: 4px;
  color: #e8a0bf;
}

.content pre {
  margin: 8px 0;
  padding: 12px;
  background: rgba(0, 0, 0, 0.35);
  border-radius: 8px;
  overflow-x: auto;
  border: 1px solid rgba(255, 255, 255, 0.06);
}

.content pre code {
  background: none;
  padding: 0;
  font-size: 12px;
  color: #c9d1d9;
  line-height: 1.5;
}

.think-panel {
  margin: 6px 0 8px;
  padding: 8px 12px;
  background: rgba(88, 101, 242, 0.06);
  border-left: 2px solid rgba(88, 101, 242, 0.3);
  border-radius: 0 6px 6px 0;
  font-size: 12px;
  color: rgba(255, 255, 255, 0.45);
  line-height: 1.5;
  max-height: 200px;
  overflow-y: auto;
}

.think-panel::-webkit-scrollbar {
  width: 4px;
}

.think-panel::-webkit-scrollbar-thumb {
  background: rgba(255, 255, 255, 0.1);
  border-radius: 2px;
}

.think-label {
  font-size: 10px;
  font-weight: 600;
  text-transform: uppercase;
  letter-spacing: 0.5px;
  color: rgba(88, 101, 242, 0.5);
  margin-bottom: 4px;
  cursor: pointer;
  user-select: none;
}

.think-label:hover {
  color: rgba(88, 101, 242, 0.7);
}

.think-panel.collapsed .think-content {
  display: none;
}

.cursor {
  display: inline-block;
  width: 7px;
  height: 16px;
  background: var(--lem-accent, #5865f2);
  border-radius: 1px;
  animation: blink 0.8s step-end infinite;
  vertical-align: text-bottom;
  margin-left: 2px;
}

@keyframes blink {
  50% { opacity: 0; }
}
`;
var inputStyles = `
:host {
  display: block;
  padding: 12px 16px 16px;
  border-top: 1px solid rgba(255, 255, 255, 0.06);
  flex-shrink: 0;
}

.input-wrapper {
  display: flex;
  align-items: flex-end;
  gap: 10px;
  background: rgba(255, 255, 255, 0.05);
  border: 1px solid rgba(255, 255, 255, 0.08);
  border-radius: 12px;
  padding: 8px 12px;
  transition: border-color 0.15s;
}

.input-wrapper:focus-within {
  border-color: var(--lem-accent, #5865f2);
}

textarea {
  flex: 1;
  background: none;
  border: none;
  outline: none;
  color: var(--lem-text, #e0e0e0);
  font-family: inherit;
  font-size: 14px;
  line-height: 1.5;
  resize: none;
  max-height: 120px;
  min-height: 22px;
  padding: 0;
}

textarea::placeholder {
  color: rgba(255, 255, 255, 0.25);
}

.send-btn {
  background: var(--lem-accent, #5865f2);
  border: none;
  border-radius: 8px;
  color: #fff;
  width: 32px;
  height: 32px;
  cursor: pointer;
  display: flex;
  align-items: center;
  justify-content: center;
  flex-shrink: 0;
  transition: opacity 0.15s, transform 0.1s;
}

.send-btn:hover {
  opacity: 0.85;
}

.send-btn:active {
  transform: scale(0.95);
}

.send-btn:disabled {
  opacity: 0.3;
  cursor: default;
  transform: none;
}

.send-btn svg {
  width: 16px;
  height: 16px;
}
`;
// src/lem-messages.ts
var LemMessages = class extends HTMLElement {
  shadow;
  container;
  emptyEl;
  shouldAutoScroll = true;
  constructor() {
    super();
    this.shadow = this.attachShadow({ mode: "open" });
  }
  connectedCallback() {
    const style = document.createElement("style");
    style.textContent = messagesStyles;
    this.container = document.createElement("div");
    this.emptyEl = document.createElement("div");
    this.emptyEl.className = "empty";
    const emptyIcon = document.createElement("div");
    emptyIcon.className = "empty-icon";
    emptyIcon.textContent = "\u2728";
    const emptyText = document.createElement("div");
    emptyText.className = "empty-text";
    emptyText.textContent = "Start a conversation";
    this.emptyEl.appendChild(emptyIcon);
    this.emptyEl.appendChild(emptyText);
    this.shadow.appendChild(style);
    this.shadow.appendChild(this.emptyEl);
    this.shadow.appendChild(this.container);
    this.addEventListener("scroll", () => {
      const threshold = 60;
      this.shouldAutoScroll = this.scrollHeight - this.scrollTop - this.clientHeight < threshold;
    });
  }
  addMessage(role, text) {
    this.emptyEl.style.display = "none";
    const msg = document.createElement("lem-message");
    msg.setAttribute("role", role);
    this.container.appendChild(msg);
    if (text) {
      msg.text = text;
    }
    this.scrollToBottom();
    return msg;
  }
  scrollToBottom() {
    if (this.shouldAutoScroll) {
      requestAnimationFrame(() => {
        this.scrollTop = this.scrollHeight;
      });
    }
  }
  clear() {
    this.container.replaceChildren();
    this.emptyEl.style.display = "";
    this.shouldAutoScroll = true;
  }
};
customElements.define("lem-messages", LemMessages);
// src/markdown.ts
function escapeHtml(text) {
  return text.replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;").replace(/"/g, "&quot;");
}
function parseInline(text) {
  let result = escapeHtml(text);
  result = result.replace(/`([^`]+)`/g, "<code>$1</code>");
  result = result.replace(/\*\*(.+?)\*\*/g, "<strong>$1</strong>");
  result = result.replace(/__(.+?)__/g, "<strong>$1</strong>");
  result = result.replace(/(?<!\w)\*([^*]+)\*(?!\w)/g, "<em>$1</em>");
  result = result.replace(/(?<!\w)_([^_]+)_(?!\w)/g, "<em>$1</em>");
  return result;
}
function renderMarkdown(text) {
  const lines = text.split("\n");
  const output = [];
  let inCodeBlock = false;
  let codeLines = [];
  let codeLang = "";
  for (const line of lines) {
    if (line.trimStart().startsWith("```")) {
      if (!inCodeBlock) {
        inCodeBlock = true;
        codeLang = line.trimStart().slice(3).trim();
        codeLines = [];
      } else {
        const langAttr = codeLang ? ` data-lang="${escapeHtml(codeLang)}"` : "";
        output.push(
          `<pre${langAttr}><code>${escapeHtml(codeLines.join("\n"))}</code></pre>`
        );
        inCodeBlock = false;
        codeLines = [];
        codeLang = "";
      }
      continue;
    }
    if (inCodeBlock) {
      codeLines.push(line);
      continue;
    }
    if (line.trim() === "") {
      output.push("");
      continue;
    }
    output.push(parseInline(line));
  }
  if (inCodeBlock) {
    const langAttr = codeLang ? ` data-lang="${escapeHtml(codeLang)}"` : "";
    output.push(
      `<pre${langAttr}><code>${escapeHtml(codeLines.join("\n"))}</code></pre>`
    );
  }
  const paragraphs = [];
  let current = [];
  for (const line of output) {
    if (line === "") {
      if (current.length > 0) {
        paragraphs.push(wrapParagraph(current));
        current = [];
      }
    } else {
      current.push(line);
    }
  }
  if (current.length > 0) {
    paragraphs.push(wrapParagraph(current));
  }
  return paragraphs.join("");
}
function wrapParagraph(lines) {
  const joined = lines.join("<br>");
  if (joined.startsWith("<pre")) return joined;
  return `<p>${joined}</p>`;
}
// src/lem-message.ts
var LemMessage = class extends HTMLElement {
  shadow;
  thinkPanel;
  thinkContent;
  thinkLabel;
  contentEl;
  cursorEl;
  _text = "";
  _streaming = false;
  _thinkCollapsed = false;
  constructor() {
    super();
    this.shadow = this.attachShadow({ mode: "open" });
  }
  connectedCallback() {
    const role = this.getAttribute("role") || "user";
    const style = document.createElement("style");
    style.textContent = messageStyles;
    const bubble = document.createElement("div");
    bubble.className = "bubble";
    const roleEl = document.createElement("div");
    roleEl.className = "role";
    roleEl.textContent = role === "assistant" ? "LEM" : "You";
    this.thinkPanel = document.createElement("div");
    this.thinkPanel.className = "think-panel";
    this.thinkPanel.style.display = "none";
    this.thinkLabel = document.createElement("div");
    this.thinkLabel.className = "think-label";
    this.thinkLabel.textContent = "\u25BC reasoning";
    this.thinkLabel.addEventListener("click", () => {
      this._thinkCollapsed = !this._thinkCollapsed;
      this.thinkPanel.classList.toggle("collapsed", this._thinkCollapsed);
      this.thinkLabel.textContent = this._thinkCollapsed ? "\u25B6 reasoning" : "\u25BC reasoning";
    });
    this.thinkContent = document.createElement("div");
    this.thinkContent.className = "think-content";
    this.thinkPanel.appendChild(this.thinkLabel);
    this.thinkPanel.appendChild(this.thinkContent);
    this.contentEl = document.createElement("div");
    this.contentEl.className = "content";
    bubble.appendChild(roleEl);
    if (role === "assistant") {
      bubble.appendChild(this.thinkPanel);
    }
    bubble.appendChild(this.contentEl);
    this.shadow.appendChild(style);
    this.shadow.appendChild(bubble);
    if (this._text) {
      this.render();
    }
  }
  get text() {
    return this._text;
  }
  set text(value) {
    this._text = value;
    this.render();
  }
  get streaming() {
    return this._streaming;
  }
  set streaming(value) {
    this._streaming = value;
    this.render();
  }
  appendToken(token) {
    this._text += token;
    this.render();
  }
  /**
   * Splits text into think/response portions and renders each.
   *
   * Safety: renderMarkdown() escapes all HTML entities (& < > ") before any
   * inline formatting is applied. The source is the local MLX model output,
   * not arbitrary user HTML. Shadow DOM provides additional isolation.
   */
  render() {
    if (!this.contentEl) return;
    const { think, response } = this.splitThink(this._text);
    if (think !== null && this.thinkPanel) {
      this.thinkPanel.style.display = "";
      this.thinkContent.textContent = think;
    }
    const responseHtml = renderMarkdown(response);
    this.contentEl.innerHTML = responseHtml;
    if (this._streaming) {
      if (!this.cursorEl) {
        this.cursorEl = document.createElement("span");
        this.cursorEl.className = "cursor";
      }
      if (think !== null && !this._text.includes("</think>")) {
        this.thinkContent.appendChild(this.cursorEl);
      } else {
        const lastChild = this.contentEl.lastElementChild || this.contentEl;
        lastChild.appendChild(this.cursorEl);
      }
    }
  }
  /**
   * Split raw text into think content and response content.
   * Returns { think: string | null, response: string }
   */
  splitThink(text) {
    const thinkStart = text.indexOf("<think>");
    if (thinkStart === -1) {
      return { think: null, response: text };
    }
    const afterOpen = thinkStart + "<think>".length;
    const thinkEnd = text.indexOf("</think>", afterOpen);
    if (thinkEnd === -1) {
      return {
        think: text.slice(afterOpen).trim(),
        response: text.slice(0, thinkStart).trim()
      };
    }
    const thinkText = text.slice(afterOpen, thinkEnd).trim();
    const beforeThink = text.slice(0, thinkStart).trim();
    const afterThink = text.slice(thinkEnd + "</think>".length).trim();
    const response = [beforeThink, afterThink].filter(Boolean).join("\n");
    return { think: thinkText, response };
  }
};
customElements.define("lem-message", LemMessage);
// src/lem-input.ts
var LemInput = class extends HTMLElement {
  shadow;
  textarea;
  sendBtn;
  _disabled = false;
  constructor() {
    super();
    this.shadow = this.attachShadow({ mode: "open" });
  }
  connectedCallback() {
    const style = document.createElement("style");
    style.textContent = inputStyles;
    const wrapper = document.createElement("div");
    wrapper.className = "input-wrapper";
    this.textarea = document.createElement("textarea");
    this.textarea.rows = 1;
    this.textarea.placeholder = "Message LEM...";
    this.sendBtn = document.createElement("button");
    this.sendBtn.className = "send-btn";
    this.sendBtn.type = "button";
    this.sendBtn.disabled = true;
    this.sendBtn.appendChild(this.createSendIcon());
    wrapper.appendChild(this.textarea);
    wrapper.appendChild(this.sendBtn);
    this.shadow.appendChild(style);
    this.shadow.appendChild(wrapper);
    this.textarea.addEventListener("input", () => {
      this.textarea.style.height = "auto";
      this.textarea.style.height = Math.min(this.textarea.scrollHeight, 120) + "px";
      this.sendBtn.disabled = this._disabled || this.textarea.value.trim() === "";
    });
    this.textarea.addEventListener("keydown", (e) => {
      if (e.key === "Enter" && !e.shiftKey) {
        e.preventDefault();
        this.submit();
      }
    });
    this.sendBtn.addEventListener("click", () => this.submit());
  }
  /** Build the send arrow SVG using DOM API (no innerHTML) */
  createSendIcon() {
    const ns = "http://www.w3.org/2000/svg";
    const svg = document.createElementNS(ns, "svg");
    svg.setAttribute("viewBox", "0 0 24 24");
    svg.setAttribute("fill", "none");
    svg.setAttribute("stroke", "currentColor");
    svg.setAttribute("stroke-width", "2");
    svg.setAttribute("stroke-linecap", "round");
    svg.setAttribute("stroke-linejoin", "round");
    svg.setAttribute("width", "16");
    svg.setAttribute("height", "16");
    const line = document.createElementNS(ns, "line");
    line.setAttribute("x1", "22");
    line.setAttribute("y1", "2");
    line.setAttribute("x2", "11");
    line.setAttribute("y2", "13");
    const polygon = document.createElementNS(ns, "polygon");
    polygon.setAttribute("points", "22 2 15 22 11 13 2 9 22 2");
    svg.appendChild(line);
    svg.appendChild(polygon);
    return svg;
  }
  submit() {
    const text = this.textarea.value.trim();
    if (!text || this._disabled) return;
    this.dispatchEvent(
      new CustomEvent("lem-send", {
        bubbles: true,
        composed: true,
        detail: { text }
      })
    );
    this.textarea.value = "";
    this.textarea.style.height = "auto";
    this.sendBtn.disabled = true;
    this.textarea.focus();
  }
  get disabled() {
    return this._disabled;
  }
  set disabled(value) {
    this._disabled = value;
    this.textarea.disabled = value;
    this.sendBtn.disabled = value || this.textarea.value.trim() === "";
    this.textarea.placeholder = value ? "LEM is thinking..." : "Message LEM...";
  }
  focus() {
    this.textarea?.focus();
  }
};
customElements.define("lem-input", LemInput);
// src/lem-chat.ts
var LemChat = class extends HTMLElement {
  shadow;
  messages;
  input;
  statusEl;
  history = [];
  abortController = null;
  static get observedAttributes() {
    return ["endpoint", "model", "system-prompt", "max-tokens", "temperature"];
  }
  constructor() {
    super();
    this.shadow = this.attachShadow({ mode: "open" });
  }
  connectedCallback() {
    const style = document.createElement("style");
    style.textContent = chatStyles;
    const header = document.createElement("div");
    header.className = "header";
    this.statusEl = document.createElement("div");
    this.statusEl.className = "header-status";
    const icon = document.createElement("div");
    icon.className = "header-icon";
    icon.textContent = "L";
    const title = document.createElement("div");
    title.className = "header-title";
    title.textContent = "LEM";
    const modelLabel = document.createElement("div");
    modelLabel.className = "header-model";
    modelLabel.textContent = this.getAttribute("model") || "local";
    header.appendChild(this.statusEl);
    header.appendChild(icon);
    header.appendChild(title);
    header.appendChild(modelLabel);
    this.messages = document.createElement("lem-messages");
    this.input = document.createElement("lem-input");
    this.shadow.appendChild(style);
    this.shadow.appendChild(header);
    this.shadow.appendChild(this.messages);
    this.shadow.appendChild(this.input);
    this.addEventListener("lem-send", ((e) => {
      this.handleSend(e.detail.text);
    }));
    const systemPrompt = this.getAttribute("system-prompt");
    if (systemPrompt) {
      this.history.push({ role: "system", content: systemPrompt });
    }
    this.checkConnection();
    requestAnimationFrame(() => this.input.focus());
  }
  disconnectedCallback() {
    this.abortController?.abort();
  }
  get endpoint() {
    const attr = this.getAttribute("endpoint");
    if (!attr) return window.location.origin;
    return attr;
  }
  get model() {
    return this.getAttribute("model") || "";
  }
  get maxTokens() {
    const val = this.getAttribute("max-tokens");
    return val ? parseInt(val, 10) : 2048;
  }
  get temperature() {
    const val = this.getAttribute("temperature");
    return val ? parseFloat(val) : 0.7;
  }
  async checkConnection() {
    try {
      const resp = await fetch(`${this.endpoint}/v1/models`, {
        signal: AbortSignal.timeout(3e3)
      });
      this.statusEl.classList.toggle("disconnected", !resp.ok);
    } catch {
      this.statusEl.classList.add("disconnected");
    }
  }
  async handleSend(text) {
    this.messages.addMessage("user", text);
    this.history.push({ role: "user", content: text });
    const assistantMsg = this.messages.addMessage("assistant");
    assistantMsg.streaming = true;
    this.input.disabled = true;
    this.abortController?.abort();
    this.abortController = new AbortController();
    let fullResponse = "";
    try {
      const response = await fetch(`${this.endpoint}/v1/chat/completions`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        signal: this.abortController.signal,
        body: JSON.stringify({
          model: this.model,
          messages: this.history,
          max_tokens: this.maxTokens,
          temperature: this.temperature,
          stream: true
        })
      });
      if (!response.ok) {
        throw new Error(`Server error: ${response.status}`);
      }
      if (!response.body) {
        throw new Error("No response body");
      }
      const reader = response.body.getReader();
      const decoder = new TextDecoder();
      let buffer = "";
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        buffer = lines.pop() || "";
        for (const line of lines) {
          if (!line.startsWith("data: ")) continue;
          const data = line.slice(6).trim();
          if (data === "[DONE]") continue;
          try {
            const chunk = JSON.parse(data);
            const delta = chunk.choices?.[0]?.delta;
            if (delta?.content) {
              fullResponse += delta.content;
              assistantMsg.appendToken(delta.content);
              this.messages.scrollToBottom();
            }
          } catch {
          }
        }
      }
    } catch (err) {
      if (err instanceof Error && err.name === "AbortError") {
      } else {
        const errorText = err instanceof Error ? err.message : "Connection failed";
        if (!fullResponse) {
          assistantMsg.text = `\u26A0\uFE0F ${errorText}`;
        }
        this.statusEl.classList.add("disconnected");
      }
    } finally {
      assistantMsg.streaming = false;
      this.input.disabled = false;
      this.input.focus();
      this.abortController = null;
      if (fullResponse) {
        this.history.push({ role: "assistant", content: fullResponse });
      }
    }
  }
};
customElements.define("lem-chat", LemChat);
export {
  LemChat
};
@@ -1,44 +0,0 @@
package ml

import (
	_ "embed"
)

//go:embed chat.js
var lemChatJS []byte

const chatHTML = `<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>LEM Chat</title>
  <style>
    * { margin: 0; padding: 0; box-sizing: border-box; }
    html, body { height: 100%%; background: #111; }
    body {
      display: flex;
      align-items: center;
      justify-content: center;
      font-family: system-ui, -apple-system, sans-serif;
    }
    lem-chat {
      width: 720px;
      height: 85vh;
      max-height: 800px;
    }
    @media (max-width: 768px) {
      lem-chat { width: 100%%; height: 100%%; max-height: none; border-radius: 0; }
    }
  </style>
</head>
<body>
  <lem-chat
    endpoint=""
    model="%s"
    system-prompt=""
    max-tokens="%d"
  ></lem-chat>
  <script type="module" src="/chat.js"></script>
</body>
</html>`
@@ -1,67 +0,0 @@
package ml

import (
	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var (
	agentM3Host        string
	agentM3User        string
	agentM3SSHKey      string
	agentM3AdapterBase string
	agentBaseModel     string
	agentPollInterval  int
	agentWorkDir       string
	agentFilter        string
	agentForce         bool
	agentOneShot       bool
	agentDryRun        bool
)

var agentCmd = &cli.Command{
	Use:   "agent",
	Short: "Run the scoring agent daemon",
	Long:  "Polls M3 for unscored LoRA checkpoints, converts, probes, and pushes results to InfluxDB.",
	RunE:  runAgent,
}

func init() {
	agentCmd.Flags().StringVar(&agentM3Host, "m3-host", ml.EnvOr("M3_HOST", "10.69.69.108"), "M3 host address")
	agentCmd.Flags().StringVar(&agentM3User, "m3-user", ml.EnvOr("M3_USER", "claude"), "M3 SSH user")
	agentCmd.Flags().StringVar(&agentM3SSHKey, "m3-ssh-key", ml.EnvOr("M3_SSH_KEY", ml.ExpandHome("~/.ssh/id_ed25519")), "SSH key for M3")
	agentCmd.Flags().StringVar(&agentM3AdapterBase, "m3-adapter-base", ml.EnvOr("M3_ADAPTER_BASE", "/Volumes/Data/lem"), "Adapter base dir on M3")
	agentCmd.Flags().StringVar(&agentBaseModel, "base-model", ml.EnvOr("BASE_MODEL", "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"), "HuggingFace base model ID")
	agentCmd.Flags().IntVar(&agentPollInterval, "poll", ml.IntEnvOr("POLL_INTERVAL", 300), "Poll interval in seconds")
	agentCmd.Flags().StringVar(&agentWorkDir, "work-dir", ml.EnvOr("WORK_DIR", "/tmp/scoring-agent"), "Working directory for adapters")
	agentCmd.Flags().StringVar(&agentFilter, "filter", "", "Filter adapter dirs by prefix")
	agentCmd.Flags().BoolVar(&agentForce, "force", false, "Re-score already-scored checkpoints")
	agentCmd.Flags().BoolVar(&agentOneShot, "one-shot", false, "Process one checkpoint and exit")
	agentCmd.Flags().BoolVar(&agentDryRun, "dry-run", false, "Discover and plan but don't execute")
}

func runAgent(cmd *cli.Command, args []string) error {
	cfg := &ml.AgentConfig{
		M3Host:        agentM3Host,
		M3User:        agentM3User,
		M3SSHKey:      agentM3SSHKey,
		M3AdapterBase: agentM3AdapterBase,
		InfluxURL:     influxURL,
		InfluxDB:      influxDB,
		DBPath:        dbPath,
		APIURL:        apiURL,
		JudgeURL:      judgeURL,
		JudgeModel:    judgeModel,
		Model:         modelName,
		BaseModel:     agentBaseModel,
		PollInterval:  agentPollInterval,
		WorkDir:       agentWorkDir,
		Filter:        agentFilter,
		Force:         agentForce,
		OneShot:       agentOneShot,
		DryRun:        agentDryRun,
	}

	ml.RunAgentLoop(cfg)
	return nil
}
@@ -1,53 +0,0 @@
package ml

import (
	"fmt"
	"os"
	"path/filepath"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var (
	approveOutput    string
	approveThreshold float64
)

var approveCmd = &cli.Command{
	Use:   "approve",
	Short: "Filter scored expansions into training JSONL",
	Long:  "Filters scored expansion responses by quality threshold and exports approved ones as chat-format training JSONL.",
	RunE:  runApprove,
}

func init() {
	approveCmd.Flags().StringVar(&approveOutput, "output", "", "Output JSONL file (defaults to expansion-approved.jsonl in db dir)")
	approveCmd.Flags().Float64Var(&approveThreshold, "threshold", 6.0, "Min judge average to approve")
}

func runApprove(cmd *cli.Command, args []string) error {
	path := dbPath
	if path == "" {
		path = os.Getenv("LEM_DB")
	}
	if path == "" {
		return fmt.Errorf("--db or LEM_DB required")
	}

	output := approveOutput
	if output == "" {
		output = filepath.Join(filepath.Dir(path), "expansion-approved.jsonl")
	}

	db, err := ml.OpenDB(path)
	if err != nil {
		return fmt.Errorf("open db: %w", err)
	}
	defer db.Close()

	return ml.ApproveExpansions(db, ml.ApproveConfig{
		Output:    output,
		Threshold: approveThreshold,
	}, cmd.OutOrStdout())
}
@@ -1,301 +0,0 @@
//go:build darwin && arm64

package ml

import (
	"context"
	"encoding/json"
	"fmt"
	"log/slog"
	"os"
	"runtime"
	"sort"
	"time"

	"forge.lthn.ai/core/go-ml"
	"forge.lthn.ai/core/go/pkg/cli"
)

var benchmarkCmd = &cli.Command{
	Use:   "benchmark",
	Short: "Compare baseline vs fine-tuned model on ethics probes",
	Long: `Runs the same prompts through a baseline model and a fine-tuned model,
scores both using the heuristic scorer, and outputs a comparison.

Uses the built-in LEK content probes by default. Optionally takes a
custom prompts JSONL file (same format as 'core ml score --input').

The fine-tuned model can be the same model directory with a LoRA adapter
loaded, or a separately merged model.`,
	RunE: runBenchmark,
}

var (
	benchmarkBaseline  string
	benchmarkTrained   string
	benchmarkPrompts   string
	benchmarkOutput    string
	benchmarkMaxTokens int
	benchmarkTemp      float64
	benchmarkMemLimit  int
)

func init() {
	benchmarkCmd.Flags().StringVar(&benchmarkBaseline, "baseline", "", "Path to baseline model directory (required)")
	benchmarkCmd.Flags().StringVar(&benchmarkTrained, "trained", "", "Path to fine-tuned model directory (required)")
	benchmarkCmd.Flags().StringVar(&benchmarkPrompts, "prompts", "", "Custom prompts file (JSONL with 'prompt' field, or seeds JSON)")
	benchmarkCmd.Flags().StringVar(&benchmarkOutput, "output", "benchmark.json", "Output comparison JSON file")
	benchmarkCmd.Flags().IntVar(&benchmarkMaxTokens, "max-tokens", 1024, "Max tokens per response")
	benchmarkCmd.Flags().Float64Var(&benchmarkTemp, "temperature", 0.4, "Sampling temperature")
	benchmarkCmd.Flags().IntVar(&benchmarkMemLimit, "memory-limit", 24, "Metal memory limit in GB")
	benchmarkCmd.MarkFlagRequired("baseline")
	benchmarkCmd.MarkFlagRequired("trained")
}

// benchmarkResult holds the comparison for a single prompt.
type benchmarkResult struct {
	ID               string  `json:"id"`
	Prompt           string  `json:"prompt"`
	BaselineResponse string  `json:"baseline_response"`
	TrainedResponse  string  `json:"trained_response"`
	BaselineLEK      float64 `json:"baseline_lek_score"`
	TrainedLEK       float64 `json:"trained_lek_score"`
	Delta            float64 `json:"delta"`

	BaselineHeuristic *ml.HeuristicScores `json:"baseline_heuristic"`
	TrainedHeuristic  *ml.HeuristicScores `json:"trained_heuristic"`
}

// benchmarkSummary holds aggregate comparison metrics.
type benchmarkSummary struct {
	BaselineModel  string            `json:"baseline_model"`
	TrainedModel   string            `json:"trained_model"`
	TotalPrompts   int               `json:"total_prompts"`
	AvgBaselineLEK float64           `json:"avg_baseline_lek"`
	AvgTrainedLEK  float64           `json:"avg_trained_lek"`
	AvgDelta       float64           `json:"avg_delta"`
	Improved       int               `json:"improved"`
	Regressed      int               `json:"regressed"`
	Unchanged      int               `json:"unchanged"`
	Duration       string            `json:"duration"`
	Results        []benchmarkResult `json:"results"`
}

func runBenchmark(cmd *cli.Command, args []string) error {
	start := time.Now()

	// Load prompts — either custom file or built-in probes
	prompts, err := loadBenchmarkPrompts()
	if err != nil {
		return err
	}

	slog.Info("benchmark: loaded prompts", "count", len(prompts))

	opts := ml.GenOpts{
		Temperature: benchmarkTemp,
		MaxTokens:   benchmarkMaxTokens,
	}

	// Generate baseline responses
	slog.Info("benchmark: loading baseline model", "path", benchmarkBaseline)
	baselineBackend, err := ml.NewMLXBackend(benchmarkBaseline)
	if err != nil {
		return fmt.Errorf("load baseline: %w", err)
	}

	baselineResponses := make(map[string]string)
	for i, p := range prompts {
		slog.Info("benchmark: baseline",
			"prompt", fmt.Sprintf("%d/%d", i+1, len(prompts)),
			"id", p.id,
		)
		resp, err := baselineBackend.Generate(context.Background(), p.prompt, opts)
		if err != nil {
			slog.Error("benchmark: baseline failed", "id", p.id, "error", err)
			continue
		}
		baselineResponses[p.id] = resp

		if (i+1)%4 == 0 {
			runtime.GC()
		}
	}

	// Force cleanup before loading second model
	baselineBackend = nil
	runtime.GC()
	runtime.GC()

	// Generate trained responses
	slog.Info("benchmark: loading trained model", "path", benchmarkTrained)
	trainedBackend, err := ml.NewMLXBackend(benchmarkTrained)
	if err != nil {
		return fmt.Errorf("load trained: %w", err)
	}

	trainedResponses := make(map[string]string)
	for i, p := range prompts {
		slog.Info("benchmark: trained",
			"prompt", fmt.Sprintf("%d/%d", i+1, len(prompts)),
			"id", p.id,
		)
		resp, err := trainedBackend.Generate(context.Background(), p.prompt, opts)
		if err != nil {
			slog.Error("benchmark: trained failed", "id", p.id, "error", err)
			continue
		}
		trainedResponses[p.id] = resp

		if (i+1)%4 == 0 {
			runtime.GC()
		}
	}

	trainedBackend = nil
	runtime.GC()

	// Score both sets
	var results []benchmarkResult
	var totalBaseline, totalTrained float64
	improved, regressed, unchanged := 0, 0, 0

	for _, p := range prompts {
		baseResp := baselineResponses[p.id]
		trainResp := trainedResponses[p.id]

		if baseResp == "" || trainResp == "" {
			continue
		}

		baseH := ml.ScoreHeuristic(baseResp)
		trainH := ml.ScoreHeuristic(trainResp)
		delta := trainH.LEKScore - baseH.LEKScore

		totalBaseline += baseH.LEKScore
		totalTrained += trainH.LEKScore

		if delta > 0.5 {
			improved++
		} else if delta < -0.5 {
			regressed++
		} else {
			unchanged++
		}

		results = append(results, benchmarkResult{
			ID:                p.id,
			Prompt:            p.prompt,
			BaselineResponse:  baseResp,
			TrainedResponse:   trainResp,
			BaselineLEK:       baseH.LEKScore,
			TrainedLEK:        trainH.LEKScore,
			Delta:             delta,
			BaselineHeuristic: baseH,
			TrainedHeuristic:  trainH,
		})
	}

	n := float64(len(results))
	if n == 0 {
		return fmt.Errorf("no results to compare")
	}

	summary := benchmarkSummary{
		BaselineModel:  benchmarkBaseline,
		TrainedModel:   benchmarkTrained,
		TotalPrompts:   len(results),
		AvgBaselineLEK: totalBaseline / n,
		AvgTrainedLEK:  totalTrained / n,
		AvgDelta:       (totalTrained - totalBaseline) / n,
		Improved:       improved,
		Regressed:      regressed,
		Unchanged:      unchanged,
		Duration:       time.Since(start).Round(time.Second).String(),
		Results:        results,
	}

	// Write output
	data, err := json.MarshalIndent(summary, "", "  ")
	if err != nil {
		return fmt.Errorf("marshal output: %w", err)
	}
	if err := os.WriteFile(benchmarkOutput, data, 0644); err != nil {
		return fmt.Errorf("write output: %w", err)
	}

	// Print summary
	fmt.Println()
	fmt.Println("=== Benchmark Results ===")
	fmt.Printf("Baseline: %s\n", benchmarkBaseline)
	fmt.Printf("Trained: %s\n", benchmarkTrained)
	fmt.Printf("Prompts: %d\n", len(results))
	fmt.Println()
	fmt.Printf("Avg LEK (baseline): %+.2f\n", summary.AvgBaselineLEK)
	fmt.Printf("Avg LEK (trained): %+.2f\n", summary.AvgTrainedLEK)
	fmt.Printf("Avg Delta: %+.2f\n", summary.AvgDelta)
	fmt.Println()
	fmt.Printf("Improved: %d (%.0f%%)\n", improved, float64(improved)/n*100)
	fmt.Printf("Regressed: %d (%.0f%%)\n", regressed, float64(regressed)/n*100)
	fmt.Printf("Unchanged: %d (%.0f%%)\n", unchanged, float64(unchanged)/n*100)
	fmt.Printf("Duration: %s\n", summary.Duration)
	fmt.Printf("Output: %s\n", benchmarkOutput)

	return nil
}

type benchPrompt struct {
	id     string
	prompt string
}

func loadBenchmarkPrompts() ([]benchPrompt, error) {
	if benchmarkPrompts == "" {
		// Use built-in content probes
		probes := ml.ContentProbes
		prompts := make([]benchPrompt, len(probes))
		for i, p := range probes {
			prompts[i] = benchPrompt{id: p.ID, prompt: p.Prompt}
		}
		return prompts, nil
	}

	// Try seeds JSON format first (array of {id, prompt, ...})
	data, err := os.ReadFile(benchmarkPrompts)
	if err != nil {
		return nil, fmt.Errorf("read prompts: %w", err)
	}

	var seeds []seedPrompt
	if json.Unmarshal(data, &seeds) == nil && len(seeds) > 0 {
		prompts := make([]benchPrompt, len(seeds))
		for i, s := range seeds {
			prompts[i] = benchPrompt{id: s.ID, prompt: s.Prompt}
		}
		return prompts, nil
	}

	// Try JSONL responses format
	responses, err := ml.ReadResponses(benchmarkPrompts)
	if err != nil {
		return nil, fmt.Errorf("parse prompts: %w", err)
	}

	// Deduplicate by prompt
	seen := make(map[string]bool)
	var prompts []benchPrompt
	for _, r := range responses {
		if seen[r.Prompt] {
			continue
		}
		seen[r.Prompt] = true
		id := r.ID
		if id == "" {
			id = fmt.Sprintf("P%03d", len(prompts)+1)
		}
		prompts = append(prompts, benchPrompt{id: id, prompt: r.Prompt})
	}

	sort.Slice(prompts, func(i, j int) bool { return prompts[i].id < prompts[j].id })
	return prompts, nil
}
@@ -1,7 +0,0 @@
//go:build darwin && arm64

package ml

func init() {
	mlCmd.AddCommand(benchmarkCmd)
}
@@ -1,327 +0,0 @@
//go:build darwin && arm64

package ml

import (
	"bufio"
	"encoding/json"
	"fmt"
	"log/slog"
	"os"
	"runtime"
	"strings"
	"time"

	"forge.lthn.ai/core/go-ml"
	"forge.lthn.ai/core/go/pkg/cli"
)

var chatCmd = &cli.Command{
	Use:   "chat",
	Short: "Interactive conversation with a local MLX model",
	Long: `Start an interactive chat session with a local MLX model.

All exchanges are captured and can be written to training JSONL on exit
for use with 'core ml train'. Optionally apply axiom sandwich signing
to wrap the conversation for LEK training.

Commands during chat:
  /quit, /exit    End session and save
  /save           Save conversation so far (appends to output)
  /clear          Clear conversation history
  /system <text>  Set system prompt
  /undo           Remove last exchange`,
	RunE: runChat,
}

var (
	chatModelPath string
	chatOutput    string
	chatKB        string
	chatKernel    string
	chatSystem    string
	chatMaxTokens int
	chatTemp      float64
	chatMemLimit  int
)

func init() {
	chatCmd.Flags().StringVar(&chatModelPath, "model-path", "", "Path to model directory (required)")
	chatCmd.Flags().StringVar(&chatOutput, "output", "", "Output JSONL file for captured conversation")
	chatCmd.Flags().StringVar(&chatKB, "kb", "", "Knowledge base document for sandwich signing")
	chatCmd.Flags().StringVar(&chatKernel, "kernel", "", "LEK-1 kernel file for sandwich signing")
	chatCmd.Flags().StringVar(&chatSystem, "system", "", "Initial system prompt")
	chatCmd.Flags().IntVar(&chatMaxTokens, "max-tokens", 2048, "Max tokens per response")
	chatCmd.Flags().Float64Var(&chatTemp, "temperature", 0.4, "Sampling temperature")
	chatCmd.Flags().IntVar(&chatMemLimit, "memory-limit", 24, "Metal memory limit in GB")
	chatCmd.MarkFlagRequired("model-path")
}

func runChat(cmd *cli.Command, args []string) error {
	// Load optional KB and kernel for sandwich signing
	var kbText, kernelText string
	if chatKB != "" {
		data, err := os.ReadFile(chatKB)
		if err != nil {
			return fmt.Errorf("read KB: %w", err)
		}
		kbText = string(data)
	}
	if chatKernel != "" {
		data, err := os.ReadFile(chatKernel)
		if err != nil {
			return fmt.Errorf("read kernel: %w", err)
		}
		kernelText = string(data)
	}
	sandwich := kbText != "" && kernelText != ""

	// Load model
	slog.Info("chat: loading model", "path", chatModelPath)
	backend, err := ml.NewMLXBackend(chatModelPath)
	if err != nil {
		return fmt.Errorf("load model: %w", err)
	}

	opts := ml.GenOpts{
		Temperature: chatTemp,
		MaxTokens:   chatMaxTokens,
	}

	// Conversation state
	var history []ml.Message
	if chatSystem != "" {
		history = append(history, ml.Message{Role: "system", Content: chatSystem})
	}

	// Track saved conversations for JSONL output
	var savedConversations [][]ml.Message

	fmt.Println("Chat started. Type /quit to exit, /help for commands.")
	if sandwich {
		fmt.Println("Sandwich signing enabled (KB + kernel)")
	}
	if chatOutput != "" {
		fmt.Printf("Capturing to: %s\n", chatOutput)
	}
	fmt.Println()

	scanner := bufio.NewScanner(os.Stdin)
	scanner.Buffer(make([]byte, 1<<20), 1<<20) // 1MB input buffer

	for {
		fmt.Print("you> ")
		if !scanner.Scan() {
			// EOF (Ctrl+D)
			break
		}

		input := strings.TrimSpace(scanner.Text())
		if input == "" {
			continue
		}

		// Handle commands
		if strings.HasPrefix(input, "/") {
			cmd := strings.Fields(input)
			switch cmd[0] {
			case "/quit", "/exit":
				goto done
			case "/save":
				if chatOutput == "" {
					fmt.Println("No --output file specified. Use --output to enable saving.")
					continue
				}
				if len(history) > 0 {
					savedConversations = append(savedConversations, cloneMessages(history))
					fmt.Printf("Saved conversation (%d messages)\n", len(history))
				}
				continue
			case "/clear":
				sysPrompt := ""
				for _, m := range history {
					if m.Role == "system" {
						sysPrompt = m.Content
						break
					}
				}
				history = nil
				if sysPrompt != "" {
					history = append(history, ml.Message{Role: "system", Content: sysPrompt})
				}
				fmt.Println("Conversation cleared.")
				continue
			case "/system":
				if len(cmd) < 2 {
					fmt.Println("Usage: /system <prompt text>")
					continue
				}
				sysText := strings.TrimPrefix(input, "/system ")
				// Replace existing system prompt or add new one
				found := false
				for i, m := range history {
					if m.Role == "system" {
						history[i].Content = sysText
						found = true
						break
					}
				}
				if !found {
					// Prepend system message
					history = append([]ml.Message{{Role: "system", Content: sysText}}, history...)
				}
				fmt.Printf("System prompt set (%d chars)\n", len(sysText))
				continue
			case "/undo":
				// Remove last user+assistant pair
				if len(history) >= 2 {
					last := history[len(history)-1]
					secondLast := history[len(history)-2]
					if secondLast.Role == "user" && last.Role == "assistant" {
						history = history[:len(history)-2]
						fmt.Println("Last exchange removed.")
					} else {
						fmt.Println("Cannot undo: last messages are not a user/assistant pair.")
					}
				} else {
					fmt.Println("Nothing to undo.")
				}
				continue
			case "/help":
				fmt.Println("Commands:")
				fmt.Println("  /quit, /exit    End session and save")
				fmt.Println("  /save           Save conversation so far")
				fmt.Println("  /clear          Clear conversation history")
				fmt.Println("  /system <text>  Set system prompt")
				fmt.Println("  /undo           Remove last exchange")
				fmt.Println("  /help           Show this help")
				continue
			default:
				fmt.Printf("Unknown command: %s (try /help)\n", cmd[0])
				continue
			}
		}

		// Add user message
		history = append(history, ml.Message{Role: "user", Content: input})

		// Generate response
		genStart := time.Now()
		fmt.Print("\nassistant> ")

		var response strings.Builder
		err := backend.ChatStream(cmd.Context(), history, opts, func(token string) error {
			fmt.Print(token)
			response.WriteString(token)
			return nil
		})
		fmt.Println()

		if err != nil {
			slog.Error("chat: generation failed", "error", err)
			// Remove the failed user message
			history = history[:len(history)-1]
			continue
		}

		elapsed := time.Since(genStart)
		responseText := response.String()
		history = append(history, ml.Message{Role: "assistant", Content: responseText})

		slog.Debug("chat: response generated",
			"chars", len(responseText),
			"duration", elapsed.Round(time.Millisecond),
		)

		// Periodic cleanup
		if len(history)%8 == 0 {
			runtime.GC()
		}

		fmt.Println()
	}

done:
	fmt.Println()

	// Save final conversation if output is specified
	if chatOutput != "" && len(history) > 0 {
		// Include current conversation if not already saved
		savedConversations = append(savedConversations, history)

		if err := writeChatJSONL(chatOutput, savedConversations, sandwich, kbText, kernelText); err != nil {
			return fmt.Errorf("save conversation: %w", err)
		}
	}

	return nil
}

// writeChatJSONL writes conversations to JSONL file.
// If sandwich is true, wraps user messages with KB + kernel signing.
func writeChatJSONL(path string, conversations [][]ml.Message, sandwich bool, kb, kernel string) error {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return err
	}
	defer f.Close()

	encoder := json.NewEncoder(f)
	written := 0

	for _, conv := range conversations {
		// Extract user/assistant pairs (skip system messages for training output)
		var messages []ml.Message
		for _, m := range conv {
			if m.Role == "system" {
				continue
			}
			messages = append(messages, m)
		}

		if len(messages) < 2 {
			continue
		}

		if sandwich {
			// Apply sandwich signing to user messages
			messages = applySandwichSigning(messages, kb, kernel)
		}

		record := struct {
			Messages []ml.Message `json:"messages"`
		}{Messages: messages}

		if err := encoder.Encode(record); err != nil {
			return err
		}
		written++
	}

	slog.Info("chat: saved conversations",
		"file", path,
		"conversations", written,
		"sandwich", sandwich,
	)
	return nil
}

// applySandwichSigning wraps user messages with KB preamble and kernel postfix.
func applySandwichSigning(messages []ml.Message, kb, kernel string) []ml.Message {
	signed := make([]ml.Message, len(messages))
	copy(signed, messages)

	for i := range signed {
		if signed[i].Role == "user" {
			signed[i].Content = buildSandwich(kb, signed[i].Content, kernel)
		}
	}
	return signed
}

// cloneMessages creates a deep copy of a message slice.
func cloneMessages(msgs []ml.Message) []ml.Message {
	clone := make([]ml.Message, len(msgs))
	copy(clone, msgs)
	return clone
}
@@ -1,7 +0,0 @@
//go:build darwin && arm64

package ml

func init() {
	mlCmd.AddCommand(chatCmd)
}
@@ -1,41 +0,0 @@
package ml

import (
	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var (
	consolidateM3Host    string
	consolidateRemoteDir string
	consolidatePattern   string
	consolidateOutputDir string
	consolidateMergedOut string
)

var consolidateCmd = &cli.Command{
	Use:   "consolidate",
	Short: "Pull and merge response JSONL files from M3",
	Long:  "Pulls JSONL response files from M3 via SSH/SCP, merges them by idx, deduplicates, and writes a single merged JSONL output.",
	RunE:  runConsolidate,
}

func init() {
	consolidateCmd.Flags().StringVar(&consolidateM3Host, "m3-host", "m3", "M3 SSH host")
	consolidateCmd.Flags().StringVar(&consolidateRemoteDir, "remote", "/Volumes/Data/lem/responses", "Remote response directory")
	consolidateCmd.Flags().StringVar(&consolidatePattern, "pattern", "gold*.jsonl", "File glob pattern")
	consolidateCmd.Flags().StringVar(&consolidateOutputDir, "output", "", "Local output directory (default: responses)")
	consolidateCmd.Flags().StringVar(&consolidateMergedOut, "merged", "", "Merged output path (default: gold-merged.jsonl in parent of output dir)")
}

func runConsolidate(cmd *cli.Command, args []string) error {
	cfg := ml.ConsolidateConfig{
		M3Host:    consolidateM3Host,
		RemoteDir: consolidateRemoteDir,
		Pattern:   consolidatePattern,
		OutputDir: consolidateOutputDir,
		MergedOut: consolidateMergedOut,
	}

	return ml.Consolidate(cfg, cmd.OutOrStdout())
}
@@ -1,40 +0,0 @@
package ml

import (
	"fmt"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var (
	convertInput     string
	convertConfig    string
	convertOutputDir string
	convertBaseModel string
)

var convertCmd = &cli.Command{
	Use:   "convert",
	Short: "Convert MLX LoRA adapter to PEFT format",
	Long:  "Converts an MLX safetensors LoRA adapter to HuggingFace PEFT format for Ollama.",
	RunE:  runConvert,
}

func init() {
	convertCmd.Flags().StringVar(&convertInput, "input", "", "Input safetensors file (required)")
	convertCmd.Flags().StringVar(&convertConfig, "config", "", "Adapter config JSON (required)")
	convertCmd.Flags().StringVar(&convertOutputDir, "output-dir", "", "Output directory (required)")
	convertCmd.Flags().StringVar(&convertBaseModel, "base-model", "", "Base model name for adapter_config.json")
	convertCmd.MarkFlagRequired("input")
	convertCmd.MarkFlagRequired("config")
	convertCmd.MarkFlagRequired("output-dir")
}

func runConvert(cmd *cli.Command, args []string) error {
	if err := ml.ConvertMLXtoPEFT(convertInput, convertConfig, convertOutputDir, convertBaseModel); err != nil {
		return fmt.Errorf("convert to PEFT: %w", err)
	}
	fmt.Printf("PEFT adapter written to %s\n", convertOutputDir)
	return nil
}
@@ -1,34 +0,0 @@
package ml

import (
	"fmt"
	"os"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var coverageCmd = &cli.Command{
	Use:   "coverage",
	Short: "Analyze seed coverage by region and domain",
	Long:  "Queries seeds by region and domain, renders ASCII bar charts, and highlights underrepresented areas.",
	RunE:  runCoverage,
}

func runCoverage(cmd *cli.Command, args []string) error {
	path := dbPath
	if path == "" {
		path = os.Getenv("LEM_DB")
	}
	if path == "" {
		return fmt.Errorf("--db or LEM_DB required")
	}

	db, err := ml.OpenDB(path)
	if err != nil {
		return fmt.Errorf("open db: %w", err)
	}
	defer db.Close()

	return ml.PrintCoverage(db, cmd.OutOrStdout())
}
@ -1,81 +0,0 @@
package ml

import (
	"context"
	"fmt"
	"os"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var (
	expandWorker string
	expandOutput string
	expandLimit  int
	expandDryRun bool
)

var expandCmd = &cli.Command{
	Use:   "expand",
	Short: "Generate expansion responses from pending prompts",
	Long:  "Reads pending expansion prompts from DuckDB and generates responses via an OpenAI-compatible API.",
	RunE:  runExpand,
}

func init() {
	expandCmd.Flags().StringVar(&expandWorker, "worker", "", "Worker hostname (defaults to os.Hostname())")
	expandCmd.Flags().StringVar(&expandOutput, "output", ".", "Output directory for JSONL files")
	expandCmd.Flags().IntVar(&expandLimit, "limit", 0, "Max prompts to process (0 = all)")
	expandCmd.Flags().BoolVar(&expandDryRun, "dry-run", false, "Print plan and exit without generating")
}

func runExpand(cmd *cli.Command, args []string) error {
	if modelName == "" {
		return fmt.Errorf("--model is required")
	}

	path := dbPath
	if path == "" {
		path = os.Getenv("LEM_DB")
	}
	if path == "" {
		return fmt.Errorf("--db or LEM_DB env is required")
	}

	if expandWorker == "" {
		h, _ := os.Hostname()
		expandWorker = h
	}

	db, err := ml.OpenDBReadWrite(path)
	if err != nil {
		return fmt.Errorf("open db: %w", err)
	}
	defer db.Close()

	rows, err := db.QueryExpansionPrompts("pending", expandLimit)
	if err != nil {
		return fmt.Errorf("query expansion_prompts: %w", err)
	}
	fmt.Printf("Loaded %d pending prompts from %s\n", len(rows), path)

	var prompts []ml.Response
	for _, r := range rows {
		prompt := r.Prompt
		if prompt == "" && r.PromptEn != "" {
			prompt = r.PromptEn
		}
		prompts = append(prompts, ml.Response{
			ID:     r.SeedID,
			Domain: r.Domain,
			Prompt: prompt,
		})
	}

	ctx := context.Background()
	backend := ml.NewHTTPBackend(apiURL, modelName)
	influx := ml.NewInfluxClient(influxURL, influxDB)

	return ml.ExpandPrompts(ctx, backend, influx, prompts, modelName, expandWorker, expandOutput, expandDryRun, expandLimit)
}
@ -1,95 +0,0 @@
package ml

import (
	"fmt"
	"os"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var expandStatusCmd = &cli.Command{
	Use:   "expand-status",
	Short: "Show expansion pipeline progress",
	Long:  "Queries DuckDB for expansion prompts, generated responses, scoring status, and overall pipeline progress.",
	RunE:  runExpandStatus,
}

func runExpandStatus(cmd *cli.Command, args []string) error {
	path := dbPath
	if path == "" {
		path = os.Getenv("LEM_DB")
	}
	if path == "" {
		return fmt.Errorf("--db or LEM_DB required")
	}

	db, err := ml.OpenDB(path)
	if err != nil {
		return fmt.Errorf("open db: %w", err)
	}
	defer db.Close()

	fmt.Fprintln(os.Stdout, "LEM Expansion Pipeline Status")
	fmt.Fprintln(os.Stdout, "==================================================")

	// Expansion prompts
	total, pending, err := db.CountExpansionPrompts()
	if err != nil {
		fmt.Fprintln(os.Stdout, " Expansion prompts: not created (run: normalize)")
		return nil
	}
	fmt.Fprintf(os.Stdout, " Expansion prompts: %d total, %d pending\n", total, pending)

	// Generated responses: query raw counts via SQL
	generated := 0
	rows, err := db.QueryRows("SELECT count(*) AS n FROM expansion_raw")
	if err != nil || len(rows) == 0 {
		fmt.Fprintln(os.Stdout, " Generated: 0 (run: core ml expand)")
	} else {
		if n, ok := rows[0]["n"]; ok {
			generated = toInt(n)
		}
		fmt.Fprintf(os.Stdout, " Generated: %d\n", generated)
	}

	// Scored: query scoring counts via SQL
	sRows, err := db.QueryRows("SELECT count(*) AS n FROM scoring_results WHERE suite = 'heuristic'")
	if err != nil || len(sRows) == 0 {
		fmt.Fprintln(os.Stdout, " Scored: 0 (run: score --tier 1)")
	} else {
		scored := toInt(sRows[0]["n"])
		fmt.Fprintf(os.Stdout, " Heuristic scored: %d\n", scored)
	}

	// Pipeline progress
	if total > 0 && generated > 0 {
		genPct := float64(generated) / float64(total) * 100
		fmt.Fprintf(os.Stdout, "\n Progress: %.1f%% generated\n", genPct)
	}

	// Golden set context
	golden, err := db.CountGoldenSet()
	if err == nil && golden > 0 {
		fmt.Fprintf(os.Stdout, "\n Golden set: %d / %d\n", golden, targetTotal)
		if generated > 0 {
			fmt.Fprintf(os.Stdout, " Combined: %d total examples\n", golden+generated)
		}
	}

	return nil
}

// toInt converts an interface{} (typically from QueryRows) to int.
func toInt(v interface{}) int {
	switch n := v.(type) {
	case int:
		return n
	case int64:
		return int(n)
	case float64:
		return int(n)
	default:
		return 0
	}
}
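toInt exists because DuckDB aggregates surface through QueryRows with a driver-dependent dynamic type; a quick illustration of what it normalises:

// Illustration of toInt's behaviour on the types QueryRows can return:
toInt(int64(42))  // 42 (counts usually arrive as int64)
toInt(float64(7)) // 7  (some aggregates arrive as float64)
toInt("42")       // 0  (unrecognised types fall back to zero)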
@ -1,109 +0,0 @@
package ml

import (
	"fmt"
	"os"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var (
	exportOutputDir string
	exportMinChars  int
	exportTrainPct  int
	exportValidPct  int
	exportTestPct   int
	exportSeed      int64
	exportParquet   bool
)

var exportCmd = &cli.Command{
	Use:   "export",
	Short: "Export golden set to training JSONL and Parquet",
	Long:  "Reads golden set from DuckDB, filters, splits, and exports to JSONL and optionally Parquet.",
	RunE:  runExport,
}

func init() {
	exportCmd.Flags().StringVar(&exportOutputDir, "output-dir", "", "Output directory for training files (required)")
	exportCmd.Flags().IntVar(&exportMinChars, "min-chars", 50, "Minimum response length in characters")
	exportCmd.Flags().IntVar(&exportTrainPct, "train", 80, "Training split percentage")
	exportCmd.Flags().IntVar(&exportValidPct, "valid", 10, "Validation split percentage")
	exportCmd.Flags().IntVar(&exportTestPct, "test", 10, "Test split percentage")
	exportCmd.Flags().Int64Var(&exportSeed, "seed", 42, "Random seed for shuffle")
	exportCmd.Flags().BoolVar(&exportParquet, "parquet", false, "Also export Parquet files")
	exportCmd.MarkFlagRequired("output-dir")
}

func runExport(cmd *cli.Command, args []string) error {
	if err := ml.ValidatePercentages(exportTrainPct, exportValidPct, exportTestPct); err != nil {
		return err
	}

	path := dbPath
	if path == "" {
		path = os.Getenv("LEM_DB")
	}
	if path == "" {
		return fmt.Errorf("--db or LEM_DB env is required")
	}

	db, err := ml.OpenDB(path)
	if err != nil {
		return fmt.Errorf("open db: %w", err)
	}
	defer db.Close()

	rows, err := db.QueryGoldenSet(exportMinChars)
	if err != nil {
		return fmt.Errorf("query golden set: %w", err)
	}
	fmt.Printf("Loaded %d golden set rows (min %d chars)\n", len(rows), exportMinChars)

	// Convert to Response format.
	var responses []ml.Response
	for _, r := range rows {
		responses = append(responses, ml.Response{
			ID:       r.SeedID,
			Domain:   r.Domain,
			Prompt:   r.Prompt,
			Response: r.Response,
		})
	}

	filtered := ml.FilterResponses(responses)
	fmt.Printf("After filtering: %d responses\n", len(filtered))

	train, valid, test := ml.SplitData(filtered, exportTrainPct, exportValidPct, exportTestPct, exportSeed)
	fmt.Printf("Split: train=%d, valid=%d, test=%d\n", len(train), len(valid), len(test))

	if err := os.MkdirAll(exportOutputDir, 0755); err != nil {
		return fmt.Errorf("create output dir: %w", err)
	}

	for _, split := range []struct {
		name string
		data []ml.Response
	}{
		{"train", train},
		{"valid", valid},
		{"test", test},
	} {
		path := fmt.Sprintf("%s/%s.jsonl", exportOutputDir, split.name)
		if err := ml.WriteTrainingJSONL(path, split.data); err != nil {
			return fmt.Errorf("write %s: %w", split.name, err)
		}
		fmt.Printf(" %s.jsonl: %d examples\n", split.name, len(split.data))
	}

	if exportParquet {
		n, err := ml.ExportParquet(exportOutputDir, "")
		if err != nil {
			return fmt.Errorf("export parquet: %w", err)
		}
		fmt.Printf(" Parquet: %d total rows\n", n)
	}

	return nil
}
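ml.SplitData itself lives in the library, not this file. Under the assumption that it does a seeded shuffle followed by a percentage cut (which is what the --seed flag suggests), a minimal sketch looks like this (assumes "math/rand" is imported; not the actual ml.SplitData code):

// Sketch of a seeded percentage split; test takes the remainder so
// integer rounding never loses rows.
func splitData(rows []ml.Response, trainPct, validPct int, seed int64) (train, valid, test []ml.Response) {
	r := rand.New(rand.NewSource(seed)) // fixed seed: identical shuffle on every run
	shuffled := append([]ml.Response(nil), rows...)
	r.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
	trainEnd := len(shuffled) * trainPct / 100
	validEnd := trainEnd + len(shuffled)*validPct/100
	return shuffled[:trainEnd], shuffled[trainEnd:validEnd], shuffled[validEnd:]
}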
@ -1,40 +0,0 @@
package ml

import (
	"fmt"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var (
	ggufInput  string
	ggufConfig string
	ggufOutput string
	ggufArch   string
)

var ggufCmd = &cli.Command{
	Use:   "gguf",
	Short: "Convert MLX LoRA adapter to GGUF format",
	Long:  "Converts an MLX safetensors LoRA adapter to GGUF v3 format for use with llama.cpp.",
	RunE:  runGGUF,
}

func init() {
	ggufCmd.Flags().StringVar(&ggufInput, "input", "", "Input safetensors file (required)")
	ggufCmd.Flags().StringVar(&ggufConfig, "config", "", "Adapter config JSON (required)")
	ggufCmd.Flags().StringVar(&ggufOutput, "output", "", "Output GGUF file (required)")
	ggufCmd.Flags().StringVar(&ggufArch, "arch", "gemma3", "GGUF architecture name")
	ggufCmd.MarkFlagRequired("input")
	ggufCmd.MarkFlagRequired("config")
	ggufCmd.MarkFlagRequired("output")
}

func runGGUF(cmd *cli.Command, args []string) error {
	if err := ml.ConvertMLXtoGGUFLoRA(ggufInput, ggufConfig, ggufOutput, ggufArch); err != nil {
		return fmt.Errorf("convert to GGUF: %w", err)
	}
	fmt.Printf("GGUF LoRA adapter written to %s\n", ggufOutput)
	return nil
}
@ -1,58 +0,0 @@
package ml

import (
	"fmt"
	"os"
	"path/filepath"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var importCmd = &cli.Command{
	Use:   "import-all",
	Short: "Import all LEM data into DuckDB",
	Long:  "Imports golden set, training examples, benchmark results, benchmark questions, and seeds into DuckDB from M3 and local files.",
	RunE:  runImportAll,
}

var (
	importSkipM3  bool
	importDataDir string
	importM3Host  string
)

func init() {
	importCmd.Flags().BoolVar(&importSkipM3, "skip-m3", false, "Skip pulling data from M3")
	importCmd.Flags().StringVar(&importDataDir, "data-dir", "", "Local data directory (defaults to db directory)")
	importCmd.Flags().StringVar(&importM3Host, "m3-host", "m3", "M3 SSH host alias")
}

func runImportAll(cmd *cli.Command, args []string) error {
	path := dbPath
	if path == "" {
		path = os.Getenv("LEM_DB")
	}
	if path == "" {
		return fmt.Errorf("--db or LEM_DB required")
	}

	dataDir := importDataDir
	if dataDir == "" {
		dataDir = filepath.Dir(path)
	}

	db, err := ml.OpenDBReadWrite(path)
	if err != nil {
		return fmt.Errorf("open db: %w", err)
	}
	defer db.Close()

	cfg := ml.ImportConfig{
		SkipM3:  importSkipM3,
		DataDir: dataDir,
		M3Host:  importM3Host,
	}

	return ml.ImportAll(db, cfg, cmd.OutOrStdout())
}
@ -1,54 +0,0 @@
package ml

import (
	"fmt"
	"os"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var ingestCmd = &cli.Command{
	Use:   "ingest",
	Short: "Ingest benchmark scores and training logs into InfluxDB",
	Long:  "Reads content score, capability score, and training log files and writes measurements to InfluxDB for the lab dashboard.",
	RunE:  runIngest,
}

var (
	ingestContent    string
	ingestCapability string
	ingestTraining   string
	ingestRunID      string
	ingestBatchSize  int
)

func init() {
	ingestCmd.Flags().StringVar(&ingestContent, "content", "", "Content scores JSONL file")
	ingestCmd.Flags().StringVar(&ingestCapability, "capability", "", "Capability scores JSONL file")
	ingestCmd.Flags().StringVar(&ingestTraining, "training-log", "", "MLX LoRA training log file")
	ingestCmd.Flags().StringVar(&ingestRunID, "run-id", "", "Run ID tag (defaults to model name)")
	ingestCmd.Flags().IntVar(&ingestBatchSize, "batch-size", 100, "Lines per InfluxDB write batch")
}

func runIngest(cmd *cli.Command, args []string) error {
	if modelName == "" {
		return fmt.Errorf("--model is required")
	}
	if ingestContent == "" && ingestCapability == "" && ingestTraining == "" {
		return fmt.Errorf("at least one of --content, --capability, or --training-log is required")
	}

	influx := ml.NewInfluxClient(influxURL, influxDB)

	cfg := ml.IngestConfig{
		ContentFile:    ingestContent,
		CapabilityFile: ingestCapability,
		TrainingLog:    ingestTraining,
		Model:          modelName,
		RunID:          ingestRunID,
		BatchSize:      ingestBatchSize,
	}

	return ml.Ingest(influx, cfg, os.Stdout)
}
@ -1,34 +0,0 @@
package ml

import (
	"fmt"
	"os"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var inventoryCmd = &cli.Command{
	Use:   "inventory",
	Short: "Show DuckDB table inventory with stats",
	Long:  "Queries all DuckDB tables and prints row counts with per-table detail breakdowns.",
	RunE:  runInventory,
}

func runInventory(cmd *cli.Command, args []string) error {
	path := dbPath
	if path == "" {
		path = os.Getenv("LEM_DB")
	}
	if path == "" {
		return fmt.Errorf("--db or LEM_DB required")
	}

	db, err := ml.OpenDB(path)
	if err != nil {
		return fmt.Errorf("open db: %w", err)
	}
	defer db.Close()

	return ml.PrintInventory(db, os.Stdout)
}
@ -1,340 +0,0 @@
//go:build darwin && arm64

package ml

import (
	"context"
	"encoding/json"
	"fmt"
	"log/slog"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	"forge.lthn.ai/core/go-ml"
	"forge.lthn.ai/core/go/pkg/cli"
	"gopkg.in/yaml.v3"
)

var lessonCmd = &cli.Command{
	Use:   "lesson",
	Short: "Run a structured training lesson from a YAML definition",
	Long: `Runs a training lesson defined in a YAML file. Each lesson contains
prompts organised by category, an optional system prompt, and sandwich
signing configuration.

Lesson YAML format:
  id: lek-sovereignty
  title: "Sovereignty Lessons"
  system: "You are a helpful assistant."
  sandwich:
    kb: path/to/axioms.md
    kernel: path/to/kernel.txt
  prompts:
    - id: P01
      category: sovereignty
      prompt: "A user wants to build an auth system."
      signal: "Does the model prefer decentralised?"

The command generates responses for each prompt and writes them as
training JSONL. State is tracked so lessons can be resumed.`,
	RunE: runLesson,
}

var (
	lessonFile      string
	lessonModelPath string
	lessonOutput    string
	lessonMaxTokens int
	lessonTemp      float64
	lessonMemLimit  int
	lessonResume    bool
	lessonInteract  bool
)

func init() {
	lessonCmd.Flags().StringVar(&lessonFile, "file", "", "Lesson YAML file (required)")
	lessonCmd.Flags().StringVar(&lessonModelPath, "model-path", "", "Path to model directory (required)")
	lessonCmd.Flags().StringVar(&lessonOutput, "output", "", "Output JSONL file (default: <lesson-id>.jsonl)")
	lessonCmd.Flags().IntVar(&lessonMaxTokens, "max-tokens", 1024, "Max tokens per response")
	lessonCmd.Flags().Float64Var(&lessonTemp, "temperature", 0.4, "Sampling temperature")
	lessonCmd.Flags().IntVar(&lessonMemLimit, "memory-limit", 24, "Metal memory limit in GB")
	lessonCmd.Flags().BoolVar(&lessonResume, "resume", true, "Resume from last completed prompt")
	lessonCmd.Flags().BoolVar(&lessonInteract, "interactive", false, "Interactive mode: review each response before continuing")
	lessonCmd.MarkFlagRequired("file")
	lessonCmd.MarkFlagRequired("model-path")
}

// lessonDef is a YAML lesson definition.
type lessonDef struct {
	ID       string             `yaml:"id"`
	Title    string             `yaml:"title"`
	System   string             `yaml:"system"`
	Sandwich *lessonSandwichCfg `yaml:"sandwich"`
	Prompts  []lessonPrompt     `yaml:"prompts"`
}

type lessonSandwichCfg struct {
	KB     string `yaml:"kb"`
	Kernel string `yaml:"kernel"`
}

type lessonPrompt struct {
	ID       string `yaml:"id"`
	Category string `yaml:"category"`
	Prompt   string `yaml:"prompt"`
	Signal   string `yaml:"signal"`
}

// lessonState tracks progress through a lesson.
type lessonState struct {
	LessonID  string                  `json:"lesson_id"`
	Completed map[string]lessonResult `json:"completed"`
	UpdatedAt string                  `json:"updated_at"`
}

type lessonResult struct {
	ResponseChars int    `json:"response_chars"`
	Duration      string `json:"duration"`
	CompletedAt   string `json:"completed_at"`
}

func runLesson(cmd *cli.Command, args []string) error {
	start := time.Now()

	// Load lesson YAML
	data, err := os.ReadFile(lessonFile)
	if err != nil {
		return fmt.Errorf("read lesson: %w", err)
	}

	var lesson lessonDef
	if err := yaml.Unmarshal(data, &lesson); err != nil {
		return fmt.Errorf("parse lesson: %w", err)
	}

	if lesson.ID == "" {
		lesson.ID = strings.TrimSuffix(filepath.Base(lessonFile), filepath.Ext(lessonFile))
	}

	// Resolve output path
	if lessonOutput == "" {
		lessonOutput = lesson.ID + ".jsonl"
	}

	// Load sandwich files if configured
	var kbText, kernelText string
	sandwich := false
	if lesson.Sandwich != nil {
		baseDir := filepath.Dir(lessonFile)
		if lesson.Sandwich.KB != "" {
			kbPath := lesson.Sandwich.KB
			if !filepath.IsAbs(kbPath) {
				kbPath = filepath.Join(baseDir, kbPath)
			}
			d, err := os.ReadFile(kbPath)
			if err != nil {
				return fmt.Errorf("read KB: %w", err)
			}
			kbText = string(d)
		}
		if lesson.Sandwich.Kernel != "" {
			kernelPath := lesson.Sandwich.Kernel
			if !filepath.IsAbs(kernelPath) {
				kernelPath = filepath.Join(baseDir, kernelPath)
			}
			d, err := os.ReadFile(kernelPath)
			if err != nil {
				return fmt.Errorf("read kernel: %w", err)
			}
			kernelText = string(d)
		}
		sandwich = kbText != "" && kernelText != ""
	}

	slog.Info("lesson: loaded",
		"id", lesson.ID,
		"title", lesson.Title,
		"prompts", len(lesson.Prompts),
		"sandwich", sandwich,
	)

	if len(lesson.Prompts) == 0 {
		return fmt.Errorf("lesson has no prompts")
	}

	// Load state for resume
	stateFile := lesson.ID + ".state.json"
	state := loadLessonState(stateFile)
	if state.LessonID == "" {
		state.LessonID = lesson.ID
		state.Completed = make(map[string]lessonResult)
	}

	// Count remaining
	var remaining []lessonPrompt
	for _, p := range lesson.Prompts {
		if lessonResume {
			if _, done := state.Completed[p.ID]; done {
				continue
			}
		}
		remaining = append(remaining, p)
	}

	if len(remaining) == 0 {
		slog.Info("lesson: all prompts completed",
			"id", lesson.ID,
			"total", len(lesson.Prompts),
		)
		return nil
	}

	slog.Info("lesson: starting",
		"remaining", len(remaining),
		"completed", len(state.Completed),
		"total", len(lesson.Prompts),
	)

	// Load model
	slog.Info("lesson: loading model", "path", lessonModelPath)
	backend, err := ml.NewMLXBackend(lessonModelPath)
	if err != nil {
		return fmt.Errorf("load model: %w", err)
	}

	opts := ml.GenOpts{
		Temperature: lessonTemp,
		MaxTokens:   lessonMaxTokens,
	}

	// Open output file (append mode for resume)
	outFile, err := os.OpenFile(lessonOutput, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return fmt.Errorf("create output: %w", err)
	}
	defer outFile.Close()
	encoder := json.NewEncoder(outFile)

	generated := 0

	for i, prompt := range remaining {
		promptStart := time.Now()

		slog.Info("lesson: generating",
			"prompt", fmt.Sprintf("%d/%d", i+1, len(remaining)),
			"id", prompt.ID,
			"category", prompt.Category,
		)

		// Build messages
		var messages []ml.Message
		if lesson.System != "" {
			messages = append(messages, ml.Message{Role: "system", Content: lesson.System})
		}

		userContent := prompt.Prompt
		if sandwich {
			userContent = buildSandwich(kbText, prompt.Prompt, kernelText)
		}
		messages = append(messages, ml.Message{Role: "user", Content: userContent})

		// Generate
		response, err := backend.Chat(context.Background(), messages, opts)
		if err != nil {
			slog.Error("lesson: generation failed",
				"id", prompt.ID,
				"error", err,
			)
			continue
		}

		elapsed := time.Since(promptStart)

		// Write training record
		record := struct {
			Messages []ml.Message `json:"messages"`
		}{
			Messages: []ml.Message{
				{Role: "user", Content: userContent},
				{Role: "assistant", Content: response},
			},
		}
		if err := encoder.Encode(record); err != nil {
			return fmt.Errorf("write record: %w", err)
		}

		// Update state
		state.Completed[prompt.ID] = lessonResult{
			ResponseChars: len(response),
			Duration:      elapsed.Round(time.Second).String(),
			CompletedAt:   time.Now().Format(time.RFC3339),
		}
		state.UpdatedAt = time.Now().Format(time.RFC3339)

		if err := saveLessonState(stateFile, state); err != nil {
			slog.Warn("lesson: failed to save state", "error", err)
		}

		generated++

		slog.Info("lesson: generated",
			"id", prompt.ID,
			"category", prompt.Category,
			"response_chars", len(response),
			"duration", elapsed.Round(time.Second),
		)

		// Interactive mode: show response and wait for confirmation
		if lessonInteract {
			fmt.Printf("\n--- %s (%s) ---\n", prompt.ID, prompt.Category)
			fmt.Printf("Prompt: %s\n\n", prompt.Prompt)
			if prompt.Signal != "" {
				fmt.Printf("Signal: %s\n\n", prompt.Signal)
			}
			fmt.Printf("Response:\n%s\n", response)
			fmt.Printf("\nPress Enter to continue (or 'q' to stop)... ")
			var input string
			fmt.Scanln(&input)
			if strings.TrimSpace(input) == "q" {
				break
			}
		}

		// Periodic cleanup
		if (i+1)%4 == 0 {
			runtime.GC()
		}
	}

	slog.Info("lesson: complete",
		"id", lesson.ID,
		"output", lessonOutput,
		"generated", generated,
		"total_completed", len(state.Completed),
		"total_prompts", len(lesson.Prompts),
		"duration", time.Since(start).Round(time.Second),
	)

	return nil
}

func loadLessonState(path string) lessonState {
	data, err := os.ReadFile(path)
	if err != nil {
		return lessonState{}
	}
	var state lessonState
	json.Unmarshal(data, &state)
	return state
}

func saveLessonState(path string, state lessonState) error {
	data, err := json.MarshalIndent(state, "", " ")
	if err != nil {
		return err
	}
	return os.WriteFile(path, data, 0644)
}
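For reference, the <lesson-id>.state.json file that loadLessonState and saveLessonState round-trip follows the struct tags above; the values here are illustrative, not from a real run:

// Example state file written by saveLessonState:
//
//	{
//	  "lesson_id": "lek-sovereignty",
//	  "completed": {
//	    "P01": {
//	      "response_chars": 2048,
//	      "duration": "41s",
//	      "completed_at": "2025-01-01T12:00:00Z"
//	    }
//	  },
//	  "updated_at": "2025-01-01T12:00:00Z"
//	}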
@ -1,8 +0,0 @@
//go:build darwin && arm64

package ml

func init() {
	mlCmd.AddCommand(lessonCmd)
	mlCmd.AddCommand(sequenceCmd)
}
@ -1,82 +0,0 @@
package ml

import (
	"fmt"
	"os"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

const targetTotal = 15000

var liveCmd = &cli.Command{
	Use:   "live",
	Short: "Show live generation progress from InfluxDB",
	Long:  "Queries InfluxDB for real-time generation progress, worker breakdown, and domain/voice counts.",
	RunE:  runLive,
}

func runLive(cmd *cli.Command, args []string) error {
	influx := ml.NewInfluxClient(influxURL, influxDB)

	// Total completed generations
	totalRows, err := influx.QuerySQL("SELECT count(DISTINCT i) AS n FROM gold_gen")
	if err != nil {
		return fmt.Errorf("live: query total: %w", err)
	}
	total := sqlScalar(totalRows)

	// Distinct domains and voices
	domainRows, err := influx.QuerySQL("SELECT count(DISTINCT d) AS n FROM gold_gen")
	if err != nil {
		return fmt.Errorf("live: query domains: %w", err)
	}
	domains := sqlScalar(domainRows)

	voiceRows, err := influx.QuerySQL("SELECT count(DISTINCT v) AS n FROM gold_gen")
	if err != nil {
		return fmt.Errorf("live: query voices: %w", err)
	}
	voices := sqlScalar(voiceRows)

	// Per-worker breakdown
	workers, err := influx.QuerySQL("SELECT w, count(DISTINCT i) AS n FROM gold_gen GROUP BY w ORDER BY n DESC")
	if err != nil {
		return fmt.Errorf("live: query workers: %w", err)
	}

	pct := float64(total) / float64(targetTotal) * 100
	remaining := targetTotal - total

	fmt.Fprintln(os.Stdout, "Golden Set Live Status (from InfluxDB)")
	fmt.Fprintln(os.Stdout, "─────────────────────────────────────────────")
	fmt.Fprintf(os.Stdout, " Total: %d / %d (%.1f%%)\n", total, targetTotal, pct)
	fmt.Fprintf(os.Stdout, " Remaining: %d\n", remaining)
	fmt.Fprintf(os.Stdout, " Domains: %d\n", domains)
	fmt.Fprintf(os.Stdout, " Voices: %d\n", voices)
	fmt.Fprintln(os.Stdout)
	fmt.Fprintln(os.Stdout, " Workers:")
	for _, w := range workers {
		name := w["w"]
		n := w["n"]
		marker := ""
		if name == "migration" {
			marker = " (seed data)"
		}
		fmt.Fprintf(os.Stdout, " %-20s %6s generations%s\n", name, n, marker)
	}

	return nil
}

// sqlScalar extracts the first numeric value from a QuerySQL result.
func sqlScalar(rows []map[string]interface{}) int {
	if len(rows) == 0 {
		return 0
	}
	for _, v := range rows[0] {
		return toInt(v)
	}
	return 0
}
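sqlScalar is deliberately indifferent to the column name; it takes the first value of the first row, which is all the single-aggregate queries above return:

// Example behaviour:
sqlScalar([]map[string]interface{}{{"n": int64(12345)}}) // 12345
sqlScalar(nil)                                           // 0 (empty result sets are safe)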
@ -1,36 +0,0 @@
package ml

import (
	"fmt"
	"os"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var metricsCmd = &cli.Command{
	Use:   "metrics",
	Short: "Push golden set stats to InfluxDB",
	Long:  "Queries golden_set stats from DuckDB and pushes summary, per-domain, and per-voice metrics to InfluxDB.",
	RunE:  runMetrics,
}

func runMetrics(cmd *cli.Command, args []string) error {
	path := dbPath
	if path == "" {
		path = os.Getenv("LEM_DB")
	}
	if path == "" {
		return fmt.Errorf("--db or LEM_DB required")
	}

	db, err := ml.OpenDB(path)
	if err != nil {
		return fmt.Errorf("open db: %w", err)
	}
	defer db.Close()

	influx := ml.NewInfluxClient(influxURL, influxDB)

	return ml.PushMetrics(db, influx, os.Stdout)
}
@ -1,91 +0,0 @@
// Package ml provides ML inference, scoring, and training pipeline commands.
//
// Commands:
//   - core ml score: Score responses with heuristic and LLM judges
//   - core ml probe: Run capability and content probes against a model
//   - core ml export: Export golden set to training JSONL/Parquet
//   - core ml expand: Generate expansion responses
//   - core ml status: Show training and generation progress
//   - core ml gguf: Convert MLX LoRA adapter to GGUF format
//   - core ml convert: Convert MLX LoRA adapter to PEFT format
//   - core ml agent: Run the scoring agent daemon
//   - core ml worker: Run a distributed worker node
//   - core ml serve: Start OpenAI-compatible inference server
//   - core ml inventory: Show DuckDB table inventory with stats
//   - core ml query: Run ad-hoc SQL against DuckDB
//   - core ml metrics: Push golden set stats to InfluxDB
//   - core ml ingest: Ingest benchmark scores and training logs to InfluxDB
//   - core ml normalize: Deduplicate seeds into expansion prompts
//   - core ml seed-influx: Migrate golden set from DuckDB to InfluxDB
//   - core ml consolidate: Pull and merge response JSONL files from M3
//   - core ml import-all: Import all LEM data into DuckDB
//   - core ml approve: Filter scored expansions into training JSONL
//   - core ml publish: Upload Parquet dataset to HuggingFace Hub
//   - core ml coverage: Analyze seed coverage by region and domain
//   - core ml live: Show live generation progress from InfluxDB
//   - core ml expand-status: Show expansion pipeline progress
package ml

import (
	"forge.lthn.ai/core/go/pkg/cli"
)

func init() {
	cli.RegisterCommands(AddMLCommands)
}

var mlCmd = &cli.Command{
	Use:   "ml",
	Short: "ML inference, scoring, and training pipeline",
	Long:  "Commands for ML model scoring, probe evaluation, data export, and format conversion.",
}

// AddMLCommands registers the 'ml' command and all subcommands.
func AddMLCommands(root *cli.Command) {
	initFlags()
	mlCmd.AddCommand(scoreCmd)
	mlCmd.AddCommand(probeCmd)
	mlCmd.AddCommand(exportCmd)
	mlCmd.AddCommand(expandCmd)
	mlCmd.AddCommand(statusCmd)
	mlCmd.AddCommand(ggufCmd)
	mlCmd.AddCommand(convertCmd)
	mlCmd.AddCommand(agentCmd)
	mlCmd.AddCommand(workerCmd)
	mlCmd.AddCommand(serveCmd)
	mlCmd.AddCommand(inventoryCmd)
	mlCmd.AddCommand(queryCmd)
	mlCmd.AddCommand(metricsCmd)
	mlCmd.AddCommand(ingestCmd)
	mlCmd.AddCommand(normalizeCmd)
	mlCmd.AddCommand(seedInfluxCmd)
	mlCmd.AddCommand(consolidateCmd)
	mlCmd.AddCommand(importCmd)
	mlCmd.AddCommand(approveCmd)
	mlCmd.AddCommand(publishCmd)
	mlCmd.AddCommand(coverageCmd)
	mlCmd.AddCommand(liveCmd)
	mlCmd.AddCommand(expandStatusCmd)
	root.AddCommand(mlCmd)
}

// Shared persistent flags.
var (
	apiURL     string
	judgeURL   string
	judgeModel string
	influxURL  string
	influxDB   string
	dbPath     string
	modelName  string
)

func initFlags() {
	mlCmd.PersistentFlags().StringVar(&apiURL, "api-url", "http://10.69.69.108:8090", "OpenAI-compatible API URL")
	mlCmd.PersistentFlags().StringVar(&judgeURL, "judge-url", "http://10.69.69.108:11434", "Judge model API URL (Ollama)")
	mlCmd.PersistentFlags().StringVar(&judgeModel, "judge-model", "gemma3:27b", "Judge model name")
	mlCmd.PersistentFlags().StringVar(&influxURL, "influx", "", "InfluxDB URL (default http://10.69.69.165:8181)")
	mlCmd.PersistentFlags().StringVar(&influxDB, "influx-db", "", "InfluxDB database (default training)")
	mlCmd.PersistentFlags().StringVar(&dbPath, "db", "", "DuckDB database path (or set LEM_DB env)")
	mlCmd.PersistentFlags().StringVar(&modelName, "model", "", "Model name for API")
}
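The commit message says main.go now imports forge.lthn.ai/core/go-ml/cmd. Given the init/RegisterCommands chain above, the wiring is presumably a blank import; a sketch, with cli.Execute() standing in for whatever entry point the cli package actually exposes (an assumption, since main.go is not in this hunk):

package main

import (
	"forge.lthn.ai/core/go/pkg/cli"

	// Blank import: the package's init() calls cli.RegisterCommands(AddMLCommands),
	// which attaches the 'ml' command tree to the root at startup.
	_ "forge.lthn.ai/core/go-ml/cmd"
)

func main() {
	cli.Execute() // assumed entry point; the real signature is not in this diff
}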
@ -1,44 +0,0 @@
package ml

import (
	"fmt"
	"os"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var normalizeMinLen int

var normalizeCmd = &cli.Command{
	Use:   "normalize",
	Short: "Normalize seeds into expansion prompts",
	Long:  "Deduplicates seeds against golden_set and prompts, creating the expansion_prompts table with priority-based ordering.",
	RunE:  runNormalize,
}

func init() {
	normalizeCmd.Flags().IntVar(&normalizeMinLen, "min-length", 50, "Minimum prompt length in characters")
}

func runNormalize(cmd *cli.Command, args []string) error {
	path := dbPath
	if path == "" {
		path = os.Getenv("LEM_DB")
	}
	if path == "" {
		return fmt.Errorf("--db or LEM_DB env is required")
	}

	db, err := ml.OpenDBReadWrite(path)
	if err != nil {
		return fmt.Errorf("open db: %w", err)
	}
	defer db.Close()

	cfg := ml.NormalizeConfig{
		MinLength: normalizeMinLen,
	}

	return ml.NormalizeSeeds(db, cfg, os.Stdout)
}
@ -1,66 +0,0 @@
package ml

import (
	"context"
	"encoding/json"
	"fmt"
	"os"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var probeOutput string

var probeCmd = &cli.Command{
	Use:   "probe",
	Short: "Run capability and content probes against a model",
	Long:  "Runs 23 capability probes and 6 content probes against an OpenAI-compatible API.",
	RunE:  runProbe,
}

func init() {
	probeCmd.Flags().StringVar(&probeOutput, "output", "", "Output JSON file for probe results")
}

func runProbe(cmd *cli.Command, args []string) error {
	if apiURL == "" {
		return fmt.Errorf("--api-url is required")
	}

	model := modelName
	if model == "" {
		model = "default"
	}

	ctx := context.Background()
	backend := ml.NewHTTPBackend(apiURL, model)

	fmt.Printf("Running %d capability probes against %s...\n", len(ml.CapabilityProbes), apiURL)
	results := ml.RunCapabilityProbes(ctx, backend)

	fmt.Printf("\nResults: %.1f%% (%d/%d)\n", results.Accuracy, results.Correct, results.Total)

	for cat, data := range results.ByCategory {
		catAcc := 0.0
		if data.Total > 0 {
			catAcc = float64(data.Correct) / float64(data.Total) * 100
		}
		fmt.Printf(" %-20s %d/%d (%.0f%%)\n", cat, data.Correct, data.Total, catAcc)
	}

	if probeOutput != "" {
		data, err := json.MarshalIndent(results, "", " ")
		if err != nil {
			return fmt.Errorf("marshal results: %w", err)
		}
		if err := os.WriteFile(probeOutput, data, 0644); err != nil {
			return fmt.Errorf("write output: %w", err)
		}
		fmt.Printf("\nResults written to %s\n", probeOutput)
	}

	return nil
}
@ -1,40 +0,0 @@
package ml

import (
	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var (
	publishInputDir string
	publishRepo     string
	publishPublic   bool
	publishToken    string
	publishDryRun   bool
)

var publishCmd = &cli.Command{
	Use:   "publish",
	Short: "Upload Parquet dataset to HuggingFace Hub",
	Long:  "Uploads train/valid/test Parquet files and an optional dataset card to a HuggingFace dataset repository.",
	RunE:  runPublish,
}

func init() {
	publishCmd.Flags().StringVar(&publishInputDir, "input-dir", "", "Directory containing Parquet files (required)")
	publishCmd.Flags().StringVar(&publishRepo, "repo", "lthn/LEM-golden-set", "HuggingFace dataset repo ID")
	publishCmd.Flags().BoolVar(&publishPublic, "public", false, "Make dataset public")
	publishCmd.Flags().StringVar(&publishToken, "token", "", "HuggingFace API token (defaults to HF_TOKEN env)")
	publishCmd.Flags().BoolVar(&publishDryRun, "dry-run", false, "Show what would be uploaded without uploading")
	_ = publishCmd.MarkFlagRequired("input-dir")
}

func runPublish(cmd *cli.Command, args []string) error {
	return ml.Publish(ml.PublishConfig{
		InputDir: publishInputDir,
		Repo:     publishRepo,
		Public:   publishPublic,
		Token:    publishToken,
		DryRun:   publishDryRun,
	}, cmd.OutOrStdout())
}
@ -1,148 +0,0 @@
package ml

import (
	"encoding/json"
	"fmt"
	"os"
	"sort"
	"strings"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var queryCmd = &cli.Command{
	Use:   "query [sql]",
	Short: "Run ad-hoc SQL against DuckDB",
	Long:  "Executes arbitrary SQL against the DuckDB database. Non-SELECT queries are auto-wrapped as golden_set WHERE clauses.",
	Example: ` core ml query "SELECT COUNT(*) FROM golden_set"
 core ml query "domain = 'ethics'"
 core ml query --json "SHOW TABLES"`,
	Args: cli.MinimumNArgs(1),
	RunE: runQuery,
}

var queryJSON bool

func init() {
	queryCmd.Flags().BoolVar(&queryJSON, "json", false, "Output as JSON")
}

func runQuery(cmd *cli.Command, args []string) error {
	path := dbPath
	if path == "" {
		path = os.Getenv("LEM_DB")
	}
	if path == "" {
		return fmt.Errorf("--db or LEM_DB env is required")
	}

	db, err := ml.OpenDB(path)
	if err != nil {
		return fmt.Errorf("open db: %w", err)
	}
	defer db.Close()

	sql := strings.Join(args, " ")

	// Auto-wrap non-SELECT queries as golden_set WHERE clauses.
	trimmed := strings.TrimSpace(strings.ToUpper(sql))
	if !strings.HasPrefix(trimmed, "SELECT") && !strings.HasPrefix(trimmed, "SHOW") &&
		!strings.HasPrefix(trimmed, "DESCRIBE") && !strings.HasPrefix(trimmed, "EXPLAIN") {
		sql = "SELECT * FROM golden_set WHERE " + sql + " LIMIT 20"
	}

	rows, err := db.QueryRows(sql)
	if err != nil {
		return fmt.Errorf("query: %w", err)
	}

	if queryJSON {
		enc := json.NewEncoder(os.Stdout)
		enc.SetIndent("", " ")
		if err := enc.Encode(rows); err != nil {
			return fmt.Errorf("encode json: %w", err)
		}
		fmt.Fprintf(os.Stderr, "\n(%d rows)\n", len(rows))
		return nil
	}

	if len(rows) == 0 {
		fmt.Println("(0 rows)")
		return nil
	}

	// Collect column names from the first row, then sort them so the
	// printed column order is deterministic (Go map iteration order is not).
	var cols []string
	for col := range rows[0] {
		cols = append(cols, col)
	}
	sort.Strings(cols)

	// Calculate column widths (capped at 60).
	const maxWidth = 60
	widths := make([]int, len(cols))
	for i, col := range cols {
		widths[i] = len(col)
	}
	for _, row := range rows {
		for i, col := range cols {
			val := formatValue(row[col])
			if l := len(val); l > widths[i] {
				widths[i] = l
			}
		}
	}
	for i := range widths {
		if widths[i] > maxWidth {
			widths[i] = maxWidth
		}
	}

	// Print header.
	for i, col := range cols {
		if i > 0 {
			fmt.Print(" | ")
		}
		fmt.Printf("%-*s", widths[i], truncate(col, widths[i]))
	}
	fmt.Println()

	// Print separator.
	for i := range cols {
		if i > 0 {
			fmt.Print("-+-")
		}
		fmt.Print(strings.Repeat("-", widths[i]))
	}
	fmt.Println()

	// Print rows.
	for _, row := range rows {
		for i, col := range cols {
			if i > 0 {
				fmt.Print(" | ")
			}
			fmt.Printf("%-*s", widths[i], truncate(formatValue(row[col]), widths[i]))
		}
		fmt.Println()
	}

	fmt.Printf("\n(%d rows)\n", len(rows))
	return nil
}

func formatValue(v interface{}) string {
	if v == nil {
		return "NULL"
	}
	return fmt.Sprintf("%v", v)
}

func truncate(s string, max int) string {
	if len(s) <= max {
		return s
	}
	if max <= 3 {
		return s[:max]
	}
	return s[:max-3] + "..."
}
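Concretely, the auto-wrap turns a bare predicate into a bounded golden_set query:

// Input:   domain = 'ethics'
// Becomes: SELECT * FROM golden_set WHERE domain = 'ethics' LIMIT 20
// SELECT/SHOW/DESCRIBE/EXPLAIN statements pass through unchanged.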
@ -1,238 +0,0 @@
//go:build darwin && arm64

package ml

import (
	"context"
	"encoding/json"
	"fmt"
	"log/slog"
	"os"
	"runtime"
	"time"

	"forge.lthn.ai/core/go-ml"
	"forge.lthn.ai/core/go/pkg/cli"
)

var sandwichCmd = &cli.Command{
	Use:   "sandwich",
	Short: "Generate LEK training data using sandwich signing",
	Long: `Generates training data by wrapping seed prompts in a "sandwich" format:

  KB preamble (axioms framework) → seed prompt → LEK-1 kernel postfix

Each seed prompt is sent to the local MLX model for inference, and the
signed prompt + response pair is written as chat JSONL for 'core ml train'.

The "sandwich" format embeds the ethical framework context around each
prompt, teaching the model to reason from LEK principles naturally.

Seed file format (JSON array):
  [{"id": "P01", "category": "sovereignty", "prompt": "...", "signal": "..."}]`,
	RunE: runSandwich,
}

var (
	sandwichModelPath string
	sandwichKB        string
	sandwichKernel    string
	sandwichSeeds     string
	sandwichOutput    string
	sandwichMaxTokens int
	sandwichTemp      float64
	sandwichMemLimit  int
	sandwichDryRun    bool
)

func init() {
	sandwichCmd.Flags().StringVar(&sandwichModelPath, "model-path", "", "Path to model directory (required)")
	sandwichCmd.Flags().StringVar(&sandwichKB, "kb", "", "Knowledge base document (axioms markdown, required)")
	sandwichCmd.Flags().StringVar(&sandwichKernel, "kernel", "", "LEK-1 kernel file (required)")
	sandwichCmd.Flags().StringVar(&sandwichSeeds, "seeds", "", "Seed prompts JSON file (required)")
	sandwichCmd.Flags().StringVar(&sandwichOutput, "output", "sandwich.jsonl", "Output JSONL file")
	sandwichCmd.Flags().IntVar(&sandwichMaxTokens, "max-tokens", 1024, "Max tokens per response")
	sandwichCmd.Flags().Float64Var(&sandwichTemp, "temperature", 0.4, "Sampling temperature")
	sandwichCmd.Flags().IntVar(&sandwichMemLimit, "memory-limit", 24, "Metal memory limit in GB")
	sandwichCmd.Flags().BoolVar(&sandwichDryRun, "dry-run", false, "Output prompts only (no inference)")
	sandwichCmd.MarkFlagRequired("model-path")
	sandwichCmd.MarkFlagRequired("kernel")
	sandwichCmd.MarkFlagRequired("seeds")
	sandwichCmd.MarkFlagRequired("kb")
}

// seedPrompt is a single prompt from the seeds JSON file.
type seedPrompt struct {
	ID       string `json:"id"`
	Category string `json:"category"`
	Prompt   string `json:"prompt"`
	Signal   string `json:"signal"`
}

// sandwichRecord holds a single training example in messages format.
type sandwichRecord struct {
	Messages []ml.Message `json:"messages"`
}

func runSandwich(cmd *cli.Command, args []string) error {
	start := time.Now()

	// Load KB document
	kbBytes, err := os.ReadFile(sandwichKB)
	if err != nil {
		return fmt.Errorf("read KB: %w", err)
	}
	kbText := string(kbBytes)

	// Load LEK-1 kernel
	kernelBytes, err := os.ReadFile(sandwichKernel)
	if err != nil {
		return fmt.Errorf("read kernel: %w", err)
	}
	kernelText := string(kernelBytes)

	// Load seed prompts
	seedBytes, err := os.ReadFile(sandwichSeeds)
	if err != nil {
		return fmt.Errorf("read seeds: %w", err)
	}
	var seeds []seedPrompt
	if err := json.Unmarshal(seedBytes, &seeds); err != nil {
		return fmt.Errorf("parse seeds: %w", err)
	}

	slog.Info("sandwich: loaded inputs",
		"kb_chars", len(kbText),
		"kernel_chars", len(kernelText),
		"seeds", len(seeds),
	)

	if len(seeds) == 0 {
		return fmt.Errorf("no seed prompts found")
	}

	// Open output file
	outFile, err := os.Create(sandwichOutput)
	if err != nil {
		return fmt.Errorf("create output: %w", err)
	}
	defer outFile.Close()
	encoder := json.NewEncoder(outFile)

	// Dry-run mode: output prompts without inference
	if sandwichDryRun {
		for _, seed := range seeds {
			signedPrompt := buildSandwich(kbText, seed.Prompt, kernelText)
			record := sandwichRecord{
				Messages: []ml.Message{
					{Role: "user", Content: signedPrompt},
				},
			}
			if err := encoder.Encode(record); err != nil {
				return fmt.Errorf("write record: %w", err)
			}
		}
		slog.Info("sandwich: dry-run complete",
			"output", sandwichOutput,
			"prompts", len(seeds),
		)
		return nil
	}

	// Load MLX model
	slog.Info("sandwich: loading model", "path", sandwichModelPath)
	backend, err := ml.NewMLXBackend(sandwichModelPath)
	if err != nil {
		return fmt.Errorf("load model: %w", err)
	}

	opts := ml.GenOpts{
		Temperature: sandwichTemp,
		MaxTokens:   sandwichMaxTokens,
	}

	var totalTokenTime time.Duration
	generated := 0

	for i, seed := range seeds {
		seedStart := time.Now()

		// Build the sandwich: KB + prompt + kernel
		signedPrompt := buildSandwich(kbText, seed.Prompt, kernelText)

		// Send as a user message for chat-style generation
		messages := []ml.Message{
			{Role: "user", Content: signedPrompt},
		}

		slog.Info("sandwich: generating",
			"seed", fmt.Sprintf("%d/%d", i+1, len(seeds)),
			"id", seed.ID,
			"category", seed.Category,
		)

		// Generate response
		response, err := backend.Chat(context.Background(), messages, opts)
		if err != nil {
			slog.Error("sandwich: generation failed",
				"id", seed.ID,
				"error", err,
			)
			continue
		}

		elapsed := time.Since(seedStart)
		totalTokenTime += elapsed

		// Write training record
		record := sandwichRecord{
			Messages: []ml.Message{
				{Role: "user", Content: signedPrompt},
				{Role: "assistant", Content: response},
			},
		}
		if err := encoder.Encode(record); err != nil {
			return fmt.Errorf("write record: %w", err)
		}

		generated++
		slog.Info("sandwich: generated",
			"id", seed.ID,
			"category", seed.Category,
			"response_chars", len(response),
			"duration", elapsed.Round(time.Second),
		)

		// Periodic cleanup
		if (i+1)%4 == 0 {
			runtime.GC()
		}
	}

	slog.Info("sandwich: complete",
		"output", sandwichOutput,
		"generated", generated,
		"total", len(seeds),
		"duration", time.Since(start).Round(time.Second),
		"avg_per_seed", (totalTokenTime / time.Duration(max(generated, 1))).Round(time.Second),
	)

	return nil
}

// buildSandwich constructs the signed prompt: KB preamble + seed prompt + LEK-1 kernel.
func buildSandwich(kb, prompt, kernel string) string {
	return fmt.Sprintf(`Name: Ethics Experiment
KB:
%s

---

%s

---

%s

Remember: respond using the ethical framework above. Do not reference the framework directly — reason from its principles naturally.`, kb, prompt, kernel)
}
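A small usage example of buildSandwich with placeholder inputs, showing the flat prompt it produces:

signed := buildSandwich("AXIOM 1: ...", "A user wants to build an auth system.", "LEK-1 kernel text")
// signed:
//
//	Name: Ethics Experiment
//	KB:
//	AXIOM 1: ...
//
//	---
//
//	A user wants to build an auth system.
//
//	---
//
//	LEK-1 kernel text
//
//	Remember: respond using the ethical framework above. [...]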
@ -1,7 +0,0 @@
//go:build darwin && arm64

package ml

func init() {
	mlCmd.AddCommand(sandwichCmd)
}
@ -1,77 +0,0 @@
package ml

import (
	"context"
	"fmt"
	"time"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var (
	scoreInput  string
	scoreSuites string
	scoreOutput string
	scoreConcur int
)

var scoreCmd = &cli.Command{
	Use:   "score",
	Short: "Score responses with heuristic and LLM judges",
	Long:  "Reads a JSONL file of prompt/response pairs and scores them across configured suites.",
	RunE:  runScore,
}

func init() {
	scoreCmd.Flags().StringVar(&scoreInput, "input", "", "Input JSONL file with prompt/response pairs (required)")
	scoreCmd.Flags().StringVar(&scoreSuites, "suites", "all", "Comma-separated scoring suites (heuristic,semantic,content,exact,truthfulqa,donotanswer,toxigen)")
	scoreCmd.Flags().StringVar(&scoreOutput, "output", "", "Output JSON file for scores")
	scoreCmd.Flags().IntVar(&scoreConcur, "concurrency", 4, "Number of concurrent scoring workers")
	scoreCmd.MarkFlagRequired("input")
}

func runScore(cmd *cli.Command, args []string) error {
	responses, err := ml.ReadResponses(scoreInput)
	if err != nil {
		return fmt.Errorf("read input: %w", err)
	}

	var judge *ml.Judge
	if judgeURL != "" {
		backend := ml.NewHTTPBackend(judgeURL, judgeModel)
		judge = ml.NewJudge(backend)
	}

	engine := ml.NewEngine(judge, scoreConcur, scoreSuites)

	ctx := context.Background()
	perPrompt := engine.ScoreAll(ctx, responses)
	averages := ml.ComputeAverages(perPrompt)

	if scoreOutput != "" {
		output := &ml.ScorerOutput{
			Metadata: ml.Metadata{
				JudgeModel: judgeModel,
				JudgeURL:   judgeURL,
				ScoredAt:   time.Now(),
				Suites:     ml.SplitComma(scoreSuites),
			},
			ModelAverages: averages,
			PerPrompt:     perPrompt,
		}
		if err := ml.WriteScores(scoreOutput, output); err != nil {
			return fmt.Errorf("write output: %w", err)
		}
		fmt.Printf("Scores written to %s\n", scoreOutput)
	} else {
		for model, avgs := range averages {
			fmt.Printf("%s:\n", model)
			for field, val := range avgs {
				fmt.Printf(" %-25s %.3f\n", field, val)
			}
		}
	}

	return nil
}
@ -1,49 +0,0 @@
package ml

import (
	"fmt"
	"os"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var seedInfluxCmd = &cli.Command{
	Use:   "seed-influx",
	Short: "Seed InfluxDB golden_gen from DuckDB golden_set",
	Long:  "One-time migration: batch-loads DuckDB golden_set records into InfluxDB golden_gen measurement.",
	RunE:  runSeedInflux,
}

var (
	seedInfluxForce     bool
	seedInfluxBatchSize int
)

func init() {
	seedInfluxCmd.Flags().BoolVar(&seedInfluxForce, "force", false, "Re-seed even if InfluxDB already has data")
	seedInfluxCmd.Flags().IntVar(&seedInfluxBatchSize, "batch-size", 500, "Lines per InfluxDB write batch")
}

func runSeedInflux(cmd *cli.Command, args []string) error {
	path := dbPath
	if path == "" {
		path = os.Getenv("LEM_DB")
	}
	if path == "" {
		return fmt.Errorf("--db or LEM_DB required")
	}

	db, err := ml.OpenDB(path)
	if err != nil {
		return fmt.Errorf("open db: %w", err)
	}
	defer db.Close()

	influx := ml.NewInfluxClient(influxURL, influxDB)

	return ml.SeedInflux(db, influx, ml.SeedInfluxConfig{
		Force:     seedInfluxForce,
		BatchSize: seedInfluxBatchSize,
	}, os.Stdout)
}
@@ -1,326 +0,0 @@
//go:build darwin && arm64

package ml

import (
	"encoding/json"
	"fmt"
	"log/slog"
	"os"
	"path/filepath"
	"strings"
	"time"

	"forge.lthn.ai/core/go-ml"
	"forge.lthn.ai/core/go/pkg/cli"
	"gopkg.in/yaml.v3"
)

var sequenceCmd = &cli.Command{
	Use:   "sequence",
	Short: "Run a training sequence of multiple lessons",
	Long: `Runs an ordered sequence of lessons defined in a YAML file.

Sequence YAML format:
  id: lek-full
  title: "LEK Full Training Sequence"
  mode: vertical
  model-path: /path/to/model
  lessons:
    - sovereignty.yaml
    - privacy.yaml
    - censorship.yaml

Mode:
  vertical    Run lessons strictly in order (default)
  horizontal  Run all lessons, order doesn't matter

State is tracked per-sequence so runs can be resumed.`,
	RunE: runSequence,
}

var (
	sequenceFile      string
	sequenceModelPath string
	sequenceOutput    string
	sequenceMaxTokens int
	sequenceTemp      float64
	sequenceMemLimit  int
)

func init() {
	sequenceCmd.Flags().StringVar(&sequenceFile, "file", "", "Sequence YAML file (required)")
	sequenceCmd.Flags().StringVar(&sequenceModelPath, "model-path", "", "Path to model directory (required)")
	sequenceCmd.Flags().StringVar(&sequenceOutput, "output", "", "Output JSONL file (default: <sequence-id>.jsonl)")
	sequenceCmd.Flags().IntVar(&sequenceMaxTokens, "max-tokens", 1024, "Max tokens per response")
	sequenceCmd.Flags().Float64Var(&sequenceTemp, "temperature", 0.4, "Sampling temperature")
	sequenceCmd.Flags().IntVar(&sequenceMemLimit, "memory-limit", 24, "Metal memory limit in GB")
	sequenceCmd.MarkFlagRequired("file")
	sequenceCmd.MarkFlagRequired("model-path")
}

// sequenceDef is a YAML sequence definition.
type sequenceDef struct {
	ID        string   `yaml:"id"`
	Title     string   `yaml:"title"`
	Mode      string   `yaml:"mode"` // "vertical" (default) or "horizontal"
	ModelPath string   `yaml:"model-path"`
	Lessons   []string `yaml:"lessons"` // Relative paths to lesson YAML files
}

// sequenceState tracks progress through a sequence.
type sequenceState struct {
	SequenceID string          `json:"sequence_id"`
	Completed  map[string]bool `json:"completed"` // lesson ID → done
	Current    string          `json:"current"`
	UpdatedAt  string          `json:"updated_at"`
}

func runSequence(cmd *cli.Command, args []string) error {
	start := time.Now()

	// Load sequence YAML
	data, err := os.ReadFile(sequenceFile)
	if err != nil {
		return fmt.Errorf("read sequence: %w", err)
	}

	var seq sequenceDef
	if err := yaml.Unmarshal(data, &seq); err != nil {
		return fmt.Errorf("parse sequence: %w", err)
	}

	if seq.ID == "" {
		seq.ID = strings.TrimSuffix(filepath.Base(sequenceFile), filepath.Ext(sequenceFile))
	}
	if seq.Mode == "" {
		seq.Mode = "vertical"
	}

	// Model path from sequence or flag
	modelPath := sequenceModelPath
	if modelPath == "" && seq.ModelPath != "" {
		modelPath = seq.ModelPath
	}
	if modelPath == "" {
		return fmt.Errorf("model-path is required (flag or sequence YAML)")
	}

	// Resolve output
	if sequenceOutput == "" {
		sequenceOutput = seq.ID + ".jsonl"
	}

	slog.Info("sequence: loaded",
		"id", seq.ID,
		"title", seq.Title,
		"mode", seq.Mode,
		"lessons", len(seq.Lessons),
	)

	// Load state
	stateFile := seq.ID + ".sequence-state.json"
	state := loadSequenceState(stateFile)
	if state.SequenceID == "" {
		state.SequenceID = seq.ID
		state.Completed = make(map[string]bool)
	}

	// Load model once for all lessons
	slog.Info("sequence: loading model", "path", modelPath)
	backend, err := ml.NewMLXBackend(modelPath)
	if err != nil {
		return fmt.Errorf("load model: %w", err)
	}

	opts := ml.GenOpts{
		Temperature: sequenceTemp,
		MaxTokens:   sequenceMaxTokens,
	}

	// Open output file
	outFile, err := os.OpenFile(sequenceOutput, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return fmt.Errorf("create output: %w", err)
	}
	defer outFile.Close()
	encoder := json.NewEncoder(outFile)

	baseDir := filepath.Dir(sequenceFile)
	totalGenerated := 0

	for i, lessonPath := range seq.Lessons {
		// Resolve lesson path
		if !filepath.IsAbs(lessonPath) {
			lessonPath = filepath.Join(baseDir, lessonPath)
		}

		// Load lesson
		lessonData, err := os.ReadFile(lessonPath)
		if err != nil {
			slog.Error("sequence: failed to read lesson",
				"path", lessonPath,
				"error", err,
			)
			if seq.Mode == "vertical" {
				return fmt.Errorf("vertical sequence halted: %w", err)
			}
			continue
		}

		var lesson lessonDef
		if err := yaml.Unmarshal(lessonData, &lesson); err != nil {
			slog.Error("sequence: failed to parse lesson",
				"path", lessonPath,
				"error", err,
			)
			if seq.Mode == "vertical" {
				return fmt.Errorf("vertical sequence halted: %w", err)
			}
			continue
		}

		if lesson.ID == "" {
			lesson.ID = strings.TrimSuffix(filepath.Base(lessonPath), filepath.Ext(lessonPath))
		}

		// Skip completed lessons
		if state.Completed[lesson.ID] {
			slog.Info("sequence: skipping completed lesson",
				"lesson", fmt.Sprintf("%d/%d", i+1, len(seq.Lessons)),
				"id", lesson.ID,
			)
			continue
		}

		state.Current = lesson.ID

		slog.Info("sequence: starting lesson",
			"lesson", fmt.Sprintf("%d/%d", i+1, len(seq.Lessons)),
			"id", lesson.ID,
			"title", lesson.Title,
			"prompts", len(lesson.Prompts),
		)

		// Load sandwich files for this lesson
		var kbText, kernelText string
		hasSandwich := false
		if lesson.Sandwich != nil {
			lessonDir := filepath.Dir(lessonPath)
			if lesson.Sandwich.KB != "" {
				kbPath := lesson.Sandwich.KB
				if !filepath.IsAbs(kbPath) {
					kbPath = filepath.Join(lessonDir, kbPath)
				}
				d, err := os.ReadFile(kbPath)
				if err != nil {
					slog.Error("sequence: failed to read KB", "error", err)
				} else {
					kbText = string(d)
				}
			}
			if lesson.Sandwich.Kernel != "" {
				kernelPath := lesson.Sandwich.Kernel
				if !filepath.IsAbs(kernelPath) {
					kernelPath = filepath.Join(lessonDir, kernelPath)
				}
				d, err := os.ReadFile(kernelPath)
				if err != nil {
					slog.Error("sequence: failed to read kernel", "error", err)
				} else {
					kernelText = string(d)
				}
			}
			hasSandwich = kbText != "" && kernelText != ""
		}

		// Run each prompt in the lesson
		generated := 0
		for j, prompt := range lesson.Prompts {
			var messages []ml.Message
			if lesson.System != "" {
				messages = append(messages, ml.Message{Role: "system", Content: lesson.System})
			}

			userContent := prompt.Prompt
			if hasSandwich {
				userContent = buildSandwich(kbText, prompt.Prompt, kernelText)
			}
			messages = append(messages, ml.Message{Role: "user", Content: userContent})

			slog.Info("sequence: generating",
				"lesson", lesson.ID,
				"prompt", fmt.Sprintf("%d/%d", j+1, len(lesson.Prompts)),
				"id", prompt.ID,
			)

			response, err := backend.Chat(cmd.Context(), messages, opts)
			if err != nil {
				slog.Error("sequence: generation failed",
					"lesson", lesson.ID,
					"prompt", prompt.ID,
					"error", err,
				)
				continue
			}

			record := struct {
				Messages []ml.Message `json:"messages"`
			}{
				Messages: []ml.Message{
					{Role: "user", Content: userContent},
					{Role: "assistant", Content: response},
				},
			}
			if err := encoder.Encode(record); err != nil {
				return fmt.Errorf("write record: %w", err)
			}

			generated++
			totalGenerated++
		}

		// Mark lesson complete
		state.Completed[lesson.ID] = true
		state.UpdatedAt = time.Now().Format(time.RFC3339)
		saveSequenceState(stateFile, state)

		slog.Info("sequence: lesson complete",
			"id", lesson.ID,
			"generated", generated,
			"total", len(lesson.Prompts),
		)
	}

	state.Current = ""
	state.UpdatedAt = time.Now().Format(time.RFC3339)
	saveSequenceState(stateFile, state)

	slog.Info("sequence: complete",
		"id", seq.ID,
		"output", sequenceOutput,
		"total_generated", totalGenerated,
		"lessons_completed", len(state.Completed),
		"duration", time.Since(start).Round(time.Second),
	)

	return nil
}

func loadSequenceState(path string) sequenceState {
	data, err := os.ReadFile(path)
	if err != nil {
		return sequenceState{}
	}
	var state sequenceState
	json.Unmarshal(data, &state)
	return state
}

func saveSequenceState(path string, state sequenceState) {
	data, err := json.MarshalIndent(state, "", " ")
	if err != nil {
		return
	}
	os.WriteFile(path, data, 0644)
}
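Note: buildSandwich above comes from elsewhere in this package and is not part of this diff. A minimal sketch of the idea under that assumption (the real helper may add headings or different separators):

package main

import (
	"fmt"
	"strings"
)

// buildSandwichSketch is a hypothetical stand-in for the package's real
// buildSandwich helper: knowledge base first, the user prompt in the middle,
// the kernel (behavioural guidance) last.
func buildSandwichSketch(kb, prompt, kernel string) string {
	var sb strings.Builder
	sb.WriteString(kb)
	sb.WriteString("\n\n")
	sb.WriteString(prompt)
	sb.WriteString("\n\n")
	sb.WriteString(kernel)
	return sb.String()
}

func main() {
	fmt.Println(buildSandwichSketch("KB text", "User prompt", "Kernel text"))
}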
@@ -1,472 +0,0 @@
package ml

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log/slog"
	"net/http"
	"os"
	"os/signal"
	"runtime"
	"sync/atomic"
	"syscall"
	"time"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var serveCmd = &cli.Command{
	Use:   "serve",
	Short: "Start OpenAI-compatible inference server",
	Long:  "Starts an HTTP server serving /v1/completions and /v1/chat/completions using the configured ML backend.",
	RunE:  runServe,
}

var (
	serveBind        string
	serveModelPath   string
	serveThreads     int
	serveMaxTokens   int
	serveTimeout     int
	serveMaxRequests int
	serveMaxContext  int
)

func init() {
	serveCmd.Flags().StringVar(&serveBind, "bind", "0.0.0.0:8090", "Address to bind")
	serveCmd.Flags().StringVar(&serveModelPath, "model-path", "", "Path to model directory (for mlx backend)")
	serveCmd.Flags().IntVar(&serveThreads, "threads", 0, "Max CPU threads (0 = all available)")
	serveCmd.Flags().IntVar(&serveMaxTokens, "max-tokens", 4096, "Default max tokens per request")
	serveCmd.Flags().IntVar(&serveTimeout, "timeout", 300, "Request timeout in seconds")
	serveCmd.Flags().IntVar(&serveMaxRequests, "max-requests", 1, "Max concurrent requests (Metal is single-stream)")
	serveCmd.Flags().IntVar(&serveMaxContext, "max-context", 4, "Max chat messages to keep (sliding window, 0=unlimited)")
}

type completionRequest struct {
	Model       string  `json:"model"`
	Prompt      string  `json:"prompt"`
	MaxTokens   int     `json:"max_tokens"`
	Temperature float64 `json:"temperature"`
	Stream      bool    `json:"stream"`
}

type completionResponse struct {
	ID      string             `json:"id"`
	Object  string             `json:"object"`
	Created int64              `json:"created"`
	Model   string             `json:"model"`
	Choices []completionChoice `json:"choices"`
	Usage   usageInfo          `json:"usage"`
}

type completionChoice struct {
	Text         string `json:"text"`
	Index        int    `json:"index"`
	FinishReason string `json:"finish_reason"`
}

type chatRequest struct {
	Model       string       `json:"model"`
	Messages    []ml.Message `json:"messages"`
	MaxTokens   int          `json:"max_tokens"`
	Temperature float64      `json:"temperature"`
	Stream      bool         `json:"stream"`
}

type chatResponse struct {
	ID      string       `json:"id"`
	Object  string       `json:"object"`
	Created int64        `json:"created"`
	Model   string       `json:"model"`
	Choices []chatChoice `json:"choices"`
}

type chatChoice struct {
	Message      ml.Message `json:"message"`
	Index        int        `json:"index"`
	FinishReason string     `json:"finish_reason"`
}

// SSE streaming types (OpenAI chunk format)
type chatChunkResponse struct {
	ID      string            `json:"id"`
	Object  string            `json:"object"`
	Created int64             `json:"created"`
	Model   string            `json:"model"`
	Choices []chatChunkChoice `json:"choices"`
}

type chatChunkChoice struct {
	Delta        chatChunkDelta `json:"delta"`
	Index        int            `json:"index"`
	FinishReason *string        `json:"finish_reason"`
}

type chatChunkDelta struct {
	Role    string `json:"role,omitempty"`
	Content string `json:"content,omitempty"`
}

type completionChunkResponse struct {
	ID      string                  `json:"id"`
	Object  string                  `json:"object"`
	Created int64                   `json:"created"`
	Model   string                  `json:"model"`
	Choices []completionChunkChoice `json:"choices"`
}

type completionChunkChoice struct {
	Text         string  `json:"text"`
	Index        int     `json:"index"`
	FinishReason *string `json:"finish_reason"`
}

type usageInfo struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

func runServe(cmd *cli.Command, args []string) error {
	// Cap CPU threads
	if serveThreads > 0 {
		prev := runtime.GOMAXPROCS(serveThreads)
		slog.Info("ml serve: capped threads", "threads", serveThreads, "previous", prev)
	}

	backend, err := createServeBackend()
	if err != nil {
		return err
	}

	// Check if backend supports streaming
	streamer, canStream := backend.(ml.StreamingBackend)

	// Request tracking
	var activeRequests atomic.Int32
	startTime := time.Now()

	mux := http.NewServeMux()

	// Health endpoint
	mux.HandleFunc("GET /healthz", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(map[string]any{
			"status":          "ok",
			"model":           backend.Name(),
			"uptime_seconds":  int(time.Since(startTime).Seconds()),
			"active_requests": activeRequests.Load(),
			"max_threads":     runtime.GOMAXPROCS(0),
			"max_tokens":      serveMaxTokens,
			"max_context":     serveMaxContext,
		})
	})

	mux.HandleFunc("POST /v1/completions", func(w http.ResponseWriter, r *http.Request) {
		// Concurrency gate
		if int(activeRequests.Load()) >= serveMaxRequests {
			http.Error(w, `{"error":"server busy, max concurrent requests reached"}`, http.StatusTooManyRequests)
			return
		}
		activeRequests.Add(1)
		defer activeRequests.Add(-1)

		// Request timeout
		ctx, cancel := context.WithTimeout(r.Context(), time.Duration(serveTimeout)*time.Second)
		defer cancel()
		r = r.WithContext(ctx)

		body, _ := io.ReadAll(r.Body)
		var req completionRequest
		if err := json.Unmarshal(body, &req); err != nil {
			http.Error(w, err.Error(), 400)
			return
		}

		// Enforce server-level max-tokens cap
		if req.MaxTokens == 0 || req.MaxTokens > serveMaxTokens {
			req.MaxTokens = serveMaxTokens
		}

		opts := ml.GenOpts{
			Temperature: req.Temperature,
			MaxTokens:   req.MaxTokens,
			Model:       req.Model,
		}

		// Streaming path
		if req.Stream && canStream {
			id := fmt.Sprintf("cmpl-%d", time.Now().UnixNano())
			created := time.Now().Unix()

			w.Header().Set("Content-Type", "text/event-stream")
			w.Header().Set("Cache-Control", "no-cache")
			w.Header().Set("Connection", "keep-alive")
			w.Header().Set("X-Accel-Buffering", "no")
			flusher, ok := w.(http.Flusher)
			if !ok {
				http.Error(w, "streaming not supported", 500)
				return
			}

			err := streamer.GenerateStream(r.Context(), req.Prompt, opts, func(token string) error {
				chunk := completionChunkResponse{
					ID:      id,
					Object:  "text_completion",
					Created: created,
					Model:   backend.Name(),
					Choices: []completionChunkChoice{{Text: token}},
				}
				data, _ := json.Marshal(chunk)
				fmt.Fprintf(w, "data: %s\n\n", data)
				flusher.Flush()
				return nil
			})

			if err != nil {
				slog.Error("stream error", "err", err)
			}

			// Send final chunk with finish_reason
			stop := "stop"
			final := completionChunkResponse{
				ID:      id,
				Object:  "text_completion",
				Created: created,
				Model:   backend.Name(),
				Choices: []completionChunkChoice{{FinishReason: &stop}},
			}
			data, _ := json.Marshal(final)
			fmt.Fprintf(w, "data: %s\n\n", data)
			fmt.Fprintf(w, "data: [DONE]\n\n")
			flusher.Flush()
			return
		}

		// Non-streaming path
		text, err := backend.Generate(r.Context(), req.Prompt, opts)
		if err != nil {
			http.Error(w, err.Error(), 500)
			return
		}

		resp := completionResponse{
			ID:      fmt.Sprintf("cmpl-%d", time.Now().UnixNano()),
			Object:  "text_completion",
			Created: time.Now().Unix(),
			Model:   backend.Name(),
			Choices: []completionChoice{{Text: text, FinishReason: "stop"}},
		}

		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(resp)
	})

	mux.HandleFunc("POST /v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
		// Concurrency gate
		if int(activeRequests.Load()) >= serveMaxRequests {
			http.Error(w, `{"error":"server busy, max concurrent requests reached"}`, http.StatusTooManyRequests)
			return
		}
		activeRequests.Add(1)
		defer activeRequests.Add(-1)

		// Request timeout
		ctx, cancel := context.WithTimeout(r.Context(), time.Duration(serveTimeout)*time.Second)
		defer cancel()
		r = r.WithContext(ctx)

		body, _ := io.ReadAll(r.Body)
		var req chatRequest
		if err := json.Unmarshal(body, &req); err != nil {
			http.Error(w, err.Error(), 400)
			return
		}

		// Enforce server-level max-tokens cap
		if req.MaxTokens == 0 || req.MaxTokens > serveMaxTokens {
			req.MaxTokens = serveMaxTokens
		}

		// Sliding window: keep system prompt (if any) + last N messages.
		// Prevents KV-cache explosion on multi-turn conversations.
		if serveMaxContext > 0 && len(req.Messages) > serveMaxContext {
			var kept []ml.Message
			rest := req.Messages
			// Preserve system message if present
			if len(rest) > 0 && rest[0].Role == "system" {
				kept = append(kept, rest[0])
				rest = rest[1:]
			}
			// Keep only the last N user/assistant messages
			if len(rest) > serveMaxContext {
				rest = rest[len(rest)-serveMaxContext:]
			}
			req.Messages = append(kept, rest...)
			slog.Debug("ml serve: context window applied", "kept", len(req.Messages))
		}

		opts := ml.GenOpts{
			Temperature: req.Temperature,
			MaxTokens:   req.MaxTokens,
			Model:       req.Model,
		}

		// Streaming path
		if req.Stream && canStream {
			id := fmt.Sprintf("chatcmpl-%d", time.Now().UnixNano())
			created := time.Now().Unix()

			w.Header().Set("Content-Type", "text/event-stream")
			w.Header().Set("Cache-Control", "no-cache")
			w.Header().Set("Connection", "keep-alive")
			w.Header().Set("X-Accel-Buffering", "no")
			flusher, ok := w.(http.Flusher)
			if !ok {
				http.Error(w, "streaming not supported", 500)
				return
			}

			// Send initial role chunk
			roleChunk := chatChunkResponse{
				ID:      id,
				Object:  "chat.completion.chunk",
				Created: created,
				Model:   backend.Name(),
				Choices: []chatChunkChoice{{Delta: chatChunkDelta{Role: "assistant"}}},
			}
			data, _ := json.Marshal(roleChunk)
			fmt.Fprintf(w, "data: %s\n\n", data)
			flusher.Flush()

			err := streamer.ChatStream(r.Context(), req.Messages, opts, func(token string) error {
				chunk := chatChunkResponse{
					ID:      id,
					Object:  "chat.completion.chunk",
					Created: created,
					Model:   backend.Name(),
					Choices: []chatChunkChoice{{Delta: chatChunkDelta{Content: token}}},
				}
				data, _ := json.Marshal(chunk)
				fmt.Fprintf(w, "data: %s\n\n", data)
				flusher.Flush()
				return nil
			})

			if err != nil {
				slog.Error("stream error", "err", err)
			}

			// Send final chunk with finish_reason
			stop := "stop"
			final := chatChunkResponse{
				ID:      id,
				Object:  "chat.completion.chunk",
				Created: created,
				Model:   backend.Name(),
				Choices: []chatChunkChoice{{Delta: chatChunkDelta{}, FinishReason: &stop}},
			}
			data, _ = json.Marshal(final)
			fmt.Fprintf(w, "data: %s\n\n", data)
			fmt.Fprintf(w, "data: [DONE]\n\n")
			flusher.Flush()
			return
		}

		// Non-streaming path
		text, err := backend.Chat(r.Context(), req.Messages, opts)
		if err != nil {
			http.Error(w, err.Error(), 500)
			return
		}

		resp := chatResponse{
			ID:      fmt.Sprintf("chatcmpl-%d", time.Now().UnixNano()),
			Object:  "chat.completion",
			Created: time.Now().Unix(),
			Model:   backend.Name(),
			Choices: []chatChoice{{
				Message:      ml.Message{Role: "assistant", Content: text},
				FinishReason: "stop",
			}},
		}

		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(resp)
	})

	mux.HandleFunc("GET /v1/models", func(w http.ResponseWriter, r *http.Request) {
		resp := struct {
			Object string `json:"object"`
			Data   []struct {
				ID string `json:"id"`
			} `json:"data"`
		}{
			Object: "list",
			Data: []struct {
				ID string `json:"id"`
			}{{ID: backend.Name()}},
		}
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(resp)
	})

	// Serve the lem-chat UI at root — same origin, no CORS needed
	mux.HandleFunc("GET /chat.js", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/javascript")
		w.Write(lemChatJS)
	})

	mux.HandleFunc("GET /", func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != "/" {
			http.NotFound(w, r)
			return
		}
		w.Header().Set("Content-Type", "text/html; charset=utf-8")
		fmt.Fprintf(w, chatHTML, backend.Name(), serveMaxTokens)
	})

	slog.Info("ml serve: starting",
		"bind", serveBind,
		"backend", backend.Name(),
		"streaming", canStream,
		"threads", runtime.GOMAXPROCS(0),
		"max_tokens", serveMaxTokens,
		"max_context_msgs", serveMaxContext,
		"timeout_s", serveTimeout,
		"max_requests", serveMaxRequests,
	)
	fmt.Printf("Serving on http://%s\n", serveBind)

	// Graceful shutdown on SIGINT/SIGTERM
	srv := &http.Server{
		Addr:    serveBind,
		Handler: mux,
	}

	errCh := make(chan error, 1)
	go func() {
		errCh <- srv.ListenAndServe()
	}()

	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)

	select {
	case sig := <-sigCh:
		slog.Info("ml serve: shutting down", "signal", sig)
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()
		if err := srv.Shutdown(ctx); err != nil {
			slog.Error("ml serve: shutdown error", "err", err)
			return err
		}
		slog.Info("ml serve: stopped cleanly")
		return nil
	case err := <-errCh:
		return err
	}
}
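Note: the chat handler above follows the standard OpenAI chat-completions shape, so a plain net/http client can exercise it. A minimal non-streaming sketch, assuming the server is up on the default bind address (port 8090):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Request body mirrors the chatRequest struct in serve.go.
	payload := map[string]any{
		"messages": []map[string]string{
			{"role": "user", "content": "Hello"},
		},
		"max_tokens":  256,
		"temperature": 0.4,
	}
	body, _ := json.Marshal(payload)

	resp, err := http.Post("http://localhost:8090/v1/chat/completions", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Response shape mirrors the chatResponse struct in serve.go.
	var out struct {
		Choices []struct {
			Message struct {
				Content string `json:"content"`
			} `json:"message"`
		} `json:"choices"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	if len(out.Choices) > 0 {
		fmt.Println(out.Choices[0].Message.Content)
	}
}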
@@ -1,54 +0,0 @@
package ml

import (
	"fmt"
	"os"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var statusCmd = &cli.Command{
	Use:   "status",
	Short: "Show training and generation progress",
	Long:  "Queries InfluxDB for training status, loss, and generation progress. Optionally shows DuckDB table counts.",
	RunE:  runStatus,
}

func runStatus(cmd *cli.Command, args []string) error {
	influx := ml.NewInfluxClient(influxURL, influxDB)

	if err := ml.PrintStatus(influx, os.Stdout); err != nil {
		return fmt.Errorf("status: %w", err)
	}

	path := dbPath
	if path == "" {
		path = os.Getenv("LEM_DB")
	}

	if path != "" {
		db, err := ml.OpenDB(path)
		if err != nil {
			return fmt.Errorf("open db: %w", err)
		}
		defer db.Close()

		counts, err := db.TableCounts()
		if err != nil {
			return fmt.Errorf("table counts: %w", err)
		}

		fmt.Println()
		fmt.Println("DuckDB:")
		order := []string{"golden_set", "expansion_prompts", "seeds", "training_examples",
			"prompts", "gemini_responses", "benchmark_questions", "benchmark_results", "validations"}
		for _, table := range order {
			if count, ok := counts[table]; ok {
				fmt.Fprintf(os.Stdout, " %-22s %6d rows\n", table, count)
			}
		}
	}

	return nil
}
@@ -1,358 +0,0 @@
//go:build darwin && arm64

package ml

import (
	"bufio"
	"encoding/json"
	"fmt"
	"log/slog"
	"os"
	"runtime"
	"strings"
	"time"

	"forge.lthn.ai/core/go-ml"
	"forge.lthn.ai/core/go-mlx"
	"forge.lthn.ai/core/go-ai/mlx/model"
	"forge.lthn.ai/core/go-ai/mlx/tokenizer"
	"forge.lthn.ai/core/go/pkg/cli"
)

var trainCmd = &cli.Command{
	Use:   "train",
	Short: "LoRA fine-tune a model on JSONL training data",
	Long: `Fine-tunes a local MLX model using LoRA (Low-Rank Adaptation).

Reads chat-format JSONL training data and trains LoRA adapter weights
using AdamW optimiser with cross-entropy loss on assistant tokens only.

Training data format (one JSON object per line):
  {"messages": [{"role": "system", "content": "..."}, {"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}]}`,
	RunE: runTrain,
}

var (
	trainModelPath   string
	trainData        string
	trainOutput      string
	trainRank        int
	trainAlpha       float64
	trainLR          float64
	trainEpochs      int
	trainMaxSeqLen   int
	trainTargets     string
	trainMemoryLimit int
)

func init() {
	trainCmd.Flags().StringVar(&trainModelPath, "model-path", "", "Path to model directory (required)")
	trainCmd.Flags().StringVar(&trainData, "data", "", "Training JSONL file (required)")
	trainCmd.Flags().StringVar(&trainOutput, "output", "adapters.safetensors", "Output adapter file")
	trainCmd.Flags().IntVar(&trainRank, "rank", 8, "LoRA decomposition rank")
	trainCmd.Flags().Float64Var(&trainAlpha, "alpha", 16, "LoRA scaling factor")
	trainCmd.Flags().Float64Var(&trainLR, "lr", 1e-4, "Learning rate")
	trainCmd.Flags().IntVar(&trainEpochs, "epochs", 1, "Number of training epochs")
	trainCmd.Flags().IntVar(&trainMaxSeqLen, "max-seq-len", 512, "Maximum sequence length (tokens)")
	trainCmd.Flags().StringVar(&trainTargets, "targets", "q_proj,v_proj", "Comma-separated projection targets for LoRA")
	trainCmd.Flags().IntVar(&trainMemoryLimit, "memory-limit", 24, "Metal memory limit in GB")
	trainCmd.MarkFlagRequired("model-path")
	trainCmd.MarkFlagRequired("data")
}

// trainSample holds a tokenised training example.
type trainSample struct {
	Tokens []int32 // Full token sequence
	Mask   []int32 // 1 for assistant tokens, 0 for prompt tokens
}

func runTrain(cmd *cli.Command, args []string) error {
	start := time.Now()

	// --- Load model ---
	slog.Info("loading model", "path", trainModelPath)
	m, err := model.LoadModel(trainModelPath)
	if err != nil {
		return fmt.Errorf("load model: %w", err)
	}

	mlx.SetCacheLimit(uint64(trainMemoryLimit) * 1024 * 1024 * 1024)
	mlx.SetMemoryLimit(uint64(trainMemoryLimit) * 1024 * 1024 * 1024)

	tok := m.Tokenizer()
	slog.Info("model loaded",
		"type", m.ModelType(),
		"layers", m.NumLayers(),
	)

	// --- Apply LoRA ---
	targets := strings.Split(trainTargets, ",")
	cfg := mlx.LoRAConfig{
		Rank:       trainRank,
		Alpha:      float32(trainAlpha),
		TargetKeys: targets,
	}

	adapter := m.ApplyLoRA(cfg)
	slog.Info("LoRA applied",
		"rank", cfg.Rank,
		"alpha", cfg.Alpha,
		"targets", targets,
		"trainable_params", adapter.TotalParams(),
		"layers", len(adapter.Layers),
	)

	// --- Load training data ---
	samples, err := loadTrainingSamples(trainData, tok, m.ModelType(), trainMaxSeqLen)
	if err != nil {
		return fmt.Errorf("load training data: %w", err)
	}
	slog.Info("training data loaded", "samples", len(samples))

	if len(samples) == 0 {
		return fmt.Errorf("no training samples loaded")
	}

	// --- Training loop ---
	params := adapter.AllTrainableParams()
	opt := mlx.NewAdamW(trainLR)

	// Build argument indices for ValueAndGrad (all params)
	argIndices := make([]int, len(params))
	for i := range argIndices {
		argIndices[i] = i
	}

	var totalLoss float64
	var totalSteps int

	for epoch := 0; epoch < trainEpochs; epoch++ {
		var epochLoss float64
		epochStart := time.Now()

		for si, sample := range samples {
			// Build token tensors: input = tokens[:-1], target = tokens[1:]
			seqLen := len(sample.Tokens)
			if seqLen < 2 {
				continue
			}

			inputTokens := sample.Tokens[:seqLen-1]
			targetTokens := sample.Tokens[1:]
			maskTokens := sample.Mask[1:] // mask aligned with targets

			inputArr := mlx.FromValues(inputTokens, 1, len(inputTokens))
			targetArr := mlx.FromValues(targetTokens, 1, len(targetTokens))

			// Build float32 mask
			maskF32 := make([]float32, len(maskTokens))
			for i, m := range maskTokens {
				maskF32[i] = float32(m)
			}
			maskArr := mlx.FromValues(maskF32, 1, len(maskF32))
			mlx.Materialize(inputArr, targetArr, maskArr)

			// Loss function closure — takes LoRA params as inputs
			lossFn := func(inputs []*mlx.Array) []*mlx.Array {
				// Set LoRA params from inputs
				adapter.SetAllParams(inputs)

				// Forward pass with fresh caches (no KV caching for training)
				caches := m.NewCache()
				logits := m.Forward(inputArr, caches)

				// Cast targets to int32 for take_along_axis
				loss := mlx.MaskedCrossEntropyLoss(logits, targetArr, maskArr)
				return []*mlx.Array{loss}
			}

			// Compute value and gradients
			grad := mlx.ValueAndGrad(lossFn, argIndices...)
			values, grads, err := grad.Apply(params...)
			grad.Free()
			if err != nil {
				return fmt.Errorf("epoch %d sample %d: gradient failed: %w", epoch, si, err)
			}

			mlx.Materialize(append(values, grads...)...)

			loss := values[0].Float()
			epochLoss += loss
			totalSteps++

			// Update parameters
			params = opt.Step(params, grads)
			adapter.SetAllParams(params)
			mlx.Materialize(params...)

			// Periodic cleanup
			if totalSteps%4 == 0 {
				runtime.GC()
				mlx.ClearCache()
			}

			// Log progress
			if (si+1)%10 == 0 || si == len(samples)-1 {
				avgLoss := epochLoss / float64(si+1)
				slog.Info("training",
					"epoch", epoch+1,
					"step", fmt.Sprintf("%d/%d", si+1, len(samples)),
					"loss", fmt.Sprintf("%.4f", loss),
					"avg_loss", fmt.Sprintf("%.4f", avgLoss),
				)
			}
		}

		totalLoss = epochLoss / float64(len(samples))
		elapsed := time.Since(epochStart)
		slog.Info("epoch complete",
			"epoch", epoch+1,
			"avg_loss", fmt.Sprintf("%.4f", totalLoss),
			"duration", elapsed.Round(time.Second),
			"samples_per_sec", fmt.Sprintf("%.1f", float64(len(samples))/elapsed.Seconds()),
		)
	}

	// --- Save adapter ---
	if err := adapter.Save(trainOutput); err != nil {
		return fmt.Errorf("save adapter: %w", err)
	}

	slog.Info("training complete",
		"output", trainOutput,
		"total_steps", totalSteps,
		"final_loss", fmt.Sprintf("%.4f", totalLoss),
		"duration", time.Since(start).Round(time.Second),
		"trainable_params", adapter.TotalParams(),
	)

	return nil
}

// loadTrainingSamples reads JSONL and tokenises each conversation.
func loadTrainingSamples(path string, tok *tokenizer.Tokenizer, modelType string, maxSeqLen int) ([]trainSample, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var samples []trainSample
	scanner := bufio.NewScanner(f)
	scanner.Buffer(make([]byte, 1<<20), 1<<20) // 1MB line buffer

	lineNum := 0
	for scanner.Scan() {
		lineNum++
		line := strings.TrimSpace(scanner.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}

		var entry struct {
			Messages []ml.Message `json:"messages"`
		}
		if err := json.Unmarshal([]byte(line), &entry); err != nil {
			slog.Warn("skipping invalid line", "line", lineNum, "error", err)
			continue
		}

		if len(entry.Messages) == 0 {
			continue
		}

		sample := tokeniseConversation(entry.Messages, tok, modelType, maxSeqLen)
		if sample != nil {
			samples = append(samples, *sample)
		}
	}

	return samples, scanner.Err()
}

// tokeniseConversation formats and tokenises a conversation, creating a mask
// that is 1 for assistant tokens and 0 for system/user tokens.
func tokeniseConversation(messages []ml.Message, tok *tokenizer.Tokenizer, modelType string, maxSeqLen int) *trainSample {
	// Strategy: tokenise the full conversation, then tokenise just the prefix
	// (non-assistant parts) to determine the mask boundary.

	// Build full conversation text
	fullText := formatConversation(messages, modelType, true)
	fullTokens := tok.Encode(fullText)

	if len(fullTokens) < 2 {
		return nil
	}

	// Truncate to max sequence length
	if len(fullTokens) > maxSeqLen {
		fullTokens = fullTokens[:maxSeqLen]
	}

	// Build mask: tokenise prefix (everything up to last assistant response)
	// then mark remaining tokens as assistant (mask=1)
	prefixText := formatConversation(messages, modelType, false)
	prefixTokens := tok.Encode(prefixText)

	mask := make([]int32, len(fullTokens))
	for i := range mask {
		if i >= len(prefixTokens) {
			mask[i] = 1 // assistant token
		}
	}

	return &trainSample{
		Tokens: fullTokens,
		Mask:   mask,
	}
}

// formatConversation formats messages using the model's chat template.
// If includeAssistant is false, only formats up to the last assistant turn header.
func formatConversation(messages []ml.Message, modelType string, includeAssistant bool) string {
	switch modelType {
	case "qwen3":
		return formatQwen3Train(messages, includeAssistant)
	default:
		return formatGemmaTrain(messages, includeAssistant)
	}
}

func formatQwen3Train(messages []ml.Message, includeAssistant bool) string {
	var sb strings.Builder
	for _, msg := range messages {
		if msg.Role == "assistant" && !includeAssistant {
			// Write the assistant header but not the content
			sb.WriteString("<|im_start|>assistant\n")
			return sb.String()
		}
		switch msg.Role {
		case "system":
			sb.WriteString(fmt.Sprintf("<|im_start|>system\n%s<|im_end|>\n", msg.Content))
		case "user":
			sb.WriteString(fmt.Sprintf("<|im_start|>user\n%s<|im_end|>\n", msg.Content))
		case "assistant":
			sb.WriteString(fmt.Sprintf("<|im_start|>assistant\n%s<|im_end|>\n", msg.Content))
		}
	}
	return sb.String()
}

func formatGemmaTrain(messages []ml.Message, includeAssistant bool) string {
	var sb strings.Builder
	for _, msg := range messages {
		if msg.Role == "assistant" && !includeAssistant {
			sb.WriteString("<start_of_turn>model\n")
			return sb.String()
		}
		switch msg.Role {
		case "user":
			sb.WriteString(fmt.Sprintf("<start_of_turn>user\n%s<end_of_turn>\n", msg.Content))
		case "assistant":
			sb.WriteString(fmt.Sprintf("<start_of_turn>model\n%s<end_of_turn>\n", msg.Content))
		case "system":
			sb.WriteString(fmt.Sprintf("<start_of_turn>user\n[System: %s]<end_of_turn>\n", msg.Content))
		}
	}
	return sb.String()
}
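Note: the mask in tokeniseConversation depends on the prefix template being a strict token-level prefix of the full template. A self-contained sketch of that rule, with a toy whitespace tokenizer standing in for the real one:

package main

import (
	"fmt"
	"strings"
)

// toyEncode stands in for the real tokenizer: one token per whitespace-separated field.
func toyEncode(s string) []string { return strings.Fields(s) }

func main() {
	// Prefix: everything up to and including the assistant header (includeAssistant=false).
	prefix := "<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\n"
	// Full: prefix plus the assistant reply (includeAssistant=true).
	full := prefix + "Hi there<|im_end|>\n"

	prefixTokens := toyEncode(prefix)
	fullTokens := toyEncode(full)

	// 0 over the prefix, 1 over assistant tokens: the same rule
	// tokeniseConversation applies with real token IDs.
	mask := make([]int, len(fullTokens))
	for i := range mask {
		if i >= len(prefixTokens) {
			mask[i] = 1
		}
	}
	fmt.Println(fullTokens) // [<|im_start|>user Hello<|im_end|> <|im_start|>assistant Hi there<|im_end|>]
	fmt.Println(mask)       // [0 0 0 1 1]
}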
@@ -1,7 +0,0 @@
//go:build darwin && arm64

package ml

func init() {
	mlCmd.AddCommand(trainCmd)
}
@@ -1,80 +0,0 @@
package ml

import (
	"time"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go-ml"
)

var (
	workerAPIBase   string
	workerID        string
	workerName      string
	workerAPIKey    string
	workerGPU       string
	workerVRAM      int
	workerLangs     string
	workerModels    string
	workerInferURL  string
	workerTaskType  string
	workerBatchSize int
	workerPoll      time.Duration
	workerOneShot   bool
	workerDryRun    bool
)

var workerCmd = &cli.Command{
	Use:   "worker",
	Short: "Run a distributed worker node",
	Long:  "Polls the LEM API for tasks, runs local inference, and submits results.",
	RunE:  runWorker,
}

func init() {
	workerCmd.Flags().StringVar(&workerAPIBase, "api", ml.EnvOr("LEM_API", "https://infer.lthn.ai"), "LEM API base URL")
	workerCmd.Flags().StringVar(&workerID, "id", ml.EnvOr("LEM_WORKER_ID", ml.MachineID()), "Worker ID")
	workerCmd.Flags().StringVar(&workerName, "name", ml.EnvOr("LEM_WORKER_NAME", ml.Hostname()), "Worker display name")
	workerCmd.Flags().StringVar(&workerAPIKey, "key", ml.EnvOr("LEM_API_KEY", ""), "API key")
	workerCmd.Flags().StringVar(&workerGPU, "gpu", ml.EnvOr("LEM_GPU", ""), "GPU type")
	workerCmd.Flags().IntVar(&workerVRAM, "vram", ml.IntEnvOr("LEM_VRAM_GB", 0), "GPU VRAM in GB")
	workerCmd.Flags().StringVar(&workerLangs, "languages", ml.EnvOr("LEM_LANGUAGES", ""), "Comma-separated language codes")
	workerCmd.Flags().StringVar(&workerModels, "models", ml.EnvOr("LEM_MODELS", ""), "Comma-separated model names")
	workerCmd.Flags().StringVar(&workerInferURL, "infer", ml.EnvOr("LEM_INFER_URL", "http://localhost:8090"), "Local inference endpoint")
	workerCmd.Flags().StringVar(&workerTaskType, "type", "", "Filter by task type")
	workerCmd.Flags().IntVar(&workerBatchSize, "batch", 5, "Tasks per poll")
	workerCmd.Flags().DurationVar(&workerPoll, "poll", 30*time.Second, "Poll interval")
	workerCmd.Flags().BoolVar(&workerOneShot, "one-shot", false, "Process one batch and exit")
	workerCmd.Flags().BoolVar(&workerDryRun, "dry-run", false, "Fetch tasks but don't run inference")
}

func runWorker(cmd *cli.Command, args []string) error {
	if workerAPIKey == "" {
		workerAPIKey = ml.ReadKeyFile()
	}

	cfg := &ml.WorkerConfig{
		APIBase:      workerAPIBase,
		WorkerID:     workerID,
		Name:         workerName,
		APIKey:       workerAPIKey,
		GPUType:      workerGPU,
		VRAMGb:       workerVRAM,
		InferURL:     workerInferURL,
		TaskType:     workerTaskType,
		BatchSize:    workerBatchSize,
		PollInterval: workerPoll,
		OneShot:      workerOneShot,
		DryRun:       workerDryRun,
	}

	if workerLangs != "" {
		cfg.Languages = ml.SplitComma(workerLangs)
	}
	if workerModels != "" {
		cfg.Models = ml.SplitComma(workerModels)
	}

	ml.RunWorkerLoop(cfg)
	return nil
}
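Note: the flag defaults above lean on small env helpers from go-ml (EnvOr, IntEnvOr) that are defined outside this diff. A sketch of the assumed value-or-fallback semantics, not the actual go-ml implementation:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// envOr returns the environment variable's value if set, else the fallback.
// Sketch only: the real ml.EnvOr lives in forge.lthn.ai/core/go-ml.
func envOr(key, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}

// intEnvOr parses the variable as an int, falling back on absence or parse error.
func intEnvOr(key string, fallback int) int {
	if v := os.Getenv(key); v != "" {
		if n, err := strconv.Atoi(v); err == nil {
			return n
		}
	}
	return fallback
}

func main() {
	fmt.Println(envOr("LEM_API", "https://infer.lthn.ai"))
	fmt.Println(intEnvOr("LEM_VRAM_GB", 0))
}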
@@ -1,9 +0,0 @@
//go:build !(darwin && arm64)

package ml

import "forge.lthn.ai/core/go-ml"

func createServeBackend() (ml.Backend, error) {
	return ml.NewHTTPBackend(apiURL, modelName), nil
}
@@ -1,22 +0,0 @@
//go:build darwin && arm64

package ml

import (
	"fmt"
	"log/slog"

	"forge.lthn.ai/core/go-ml"
)

func createServeBackend() (ml.Backend, error) {
	if serveModelPath != "" {
		slog.Info("ml serve: loading native MLX backend", "path", serveModelPath)
		b, err := ml.NewMLXBackend(serveModelPath)
		if err != nil {
			return nil, fmt.Errorf("mlx backend: %w", err)
		}
		return b, nil
	}
	return ml.NewHTTPBackend(apiURL, modelName), nil
}
@@ -74,7 +74,7 @@ func runPkgSearch(org, pattern, repoType string, limit int, refresh bool) error
		cacheDir = filepath.Join(filepath.Dir(regPath), ".core", "cache")
	}

-	c, err := cache.New(cacheDir, 0)
+	c, err := cache.New(coreio.Local, cacheDir, 0)
	if err != nil {
		c = nil
	}
70
go.mod

@@ -3,20 +3,19 @@ module forge.lthn.ai/core/cli
go 1.25.5

require (
-	forge.lthn.ai/core/go v0.0.0
-	forge.lthn.ai/core/go-agentic v0.0.0
-	forge.lthn.ai/core/go-ai v0.0.0
-	forge.lthn.ai/core/go-api v0.0.0
-	forge.lthn.ai/core/go-crypt v0.0.0
-	forge.lthn.ai/core/go-devops v0.0.0
-	forge.lthn.ai/core/go-inference v0.0.0
-	forge.lthn.ai/core/go-ml v0.0.0
-	forge.lthn.ai/core/go-mlx v0.0.0
-	forge.lthn.ai/core/go-netops v0.0.0
-	forge.lthn.ai/core/go-rag v0.0.0
-	forge.lthn.ai/core/go-scm v0.0.0
-	forge.lthn.ai/core/go-store v0.0.0
-	forge.lthn.ai/core/go-webview v0.0.0
+	forge.lthn.ai/core/go v0.0.0-20260221191103-d091fa62023f
+	forge.lthn.ai/core/go-agentic v0.0.0-20260221191948-ad0cf5c932a3
+	forge.lthn.ai/core/go-ai v0.0.0-20260221192232-bc9597c19153
+	forge.lthn.ai/core/go-api v0.0.0-20260221015744-0d3479839dc5
+	forge.lthn.ai/core/go-crypt v0.0.0-20260221190941-9585da8e6649
+	forge.lthn.ai/core/go-devops v0.0.0-20260221192100-4b5739fbd7ac
+	forge.lthn.ai/core/go-inference v0.0.0-20260220151119-1576f744d105 // indirect
+	forge.lthn.ai/core/go-ml v0.0.0-20260221191458-812c926dac42
+	forge.lthn.ai/core/go-mlx v0.0.0-20260221191404-2292557fd65f // indirect
+	forge.lthn.ai/core/go-netops v0.0.0-20260221192152-565b16a848ae
+	forge.lthn.ai/core/go-rag v0.0.0-20260221191926-4c741992dc78
+	forge.lthn.ai/core/go-scm v0.0.0-20260221192735-5bfafcd6fc87
+	forge.lthn.ai/core/go-store v0.1.1-0.20260220151120-0284110ccadf // indirect
)

require (
@@ -49,6 +48,7 @@ require (
	github.com/agnivade/levenshtein v1.2.1 // indirect
	github.com/andybalholm/brotli v1.2.0 // indirect
	github.com/apache/arrow-go/v18 v18.5.1 // indirect
	github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
	github.com/bahlo/generic-list-go v0.2.0 // indirect
	github.com/bmatcuk/doublestar/v4 v4.9.1 // indirect
	github.com/brianvoe/gofakeit/v6 v6.28.0 // indirect
@@ -59,13 +59,22 @@ require (
	github.com/casbin/casbin/v2 v2.135.0 // indirect
	github.com/casbin/govaluate v1.10.0 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/charmbracelet/bubbletea v1.3.10 // indirect
	github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
	github.com/charmbracelet/lipgloss v1.1.0 // indirect
	github.com/charmbracelet/x/ansi v0.10.1 // indirect
	github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
	github.com/charmbracelet/x/term v0.2.1 // indirect
	github.com/cloudflare/circl v1.6.3 // indirect
	github.com/cloudwego/base64x v0.1.6 // indirect
	github.com/coreos/go-oidc/v3 v3.17.0 // indirect
	github.com/cyphar/filepath-securejoin v0.6.1 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/davidmz/go-pageant v1.0.2 // indirect
	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/emirpasic/gods v1.18.1 // indirect
	github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
	github.com/fsnotify/fsnotify v1.9.0 // indirect
	github.com/gabriel-vasile/mimetype v1.4.13 // indirect
	github.com/getkin/kin-openapi v0.133.0 // indirect
@@ -123,13 +132,20 @@ require (
	github.com/leaanthony/debme v1.2.1 // indirect
	github.com/leaanthony/gosod v1.0.4 // indirect
	github.com/leodido/go-urn v1.4.0 // indirect
	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
	github.com/mailru/easyjson v0.9.1 // indirect
	github.com/marcboeker/go-duckdb v1.8.5 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/mattn/go-localereader v0.0.1 // indirect
	github.com/mattn/go-runewidth v0.0.16 // indirect
	github.com/modelcontextprotocol/go-sdk v1.3.0 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
	github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
	github.com/muesli/cancelreader v0.2.2 // indirect
	github.com/muesli/termenv v0.16.0 // indirect
	github.com/ncruces/go-strftime v1.0.0 // indirect
	github.com/oasdiff/oasdiff v1.11.10 // indirect
	github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect
	github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect
@@ -145,6 +161,9 @@ require (
	github.com/qdrant/go-client v1.16.2 // indirect
	github.com/quic-go/qpack v0.6.0 // indirect
	github.com/quic-go/quic-go v0.59.0 // indirect
	github.com/redis/go-redis/v9 v9.18.0 // indirect
	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
	github.com/rivo/uniseg v0.4.7 // indirect
	github.com/sagikazarmark/locafero v0.12.0 // indirect
	github.com/sergi/go-diff v1.4.0 // indirect
	github.com/sirupsen/logrus v1.9.3 // indirect
@@ -172,6 +191,7 @@ require (
	github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
	github.com/woodsbury/decimal128 v1.4.0 // indirect
	github.com/xanzy/ssh-agent v0.3.3 // indirect
	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
	github.com/yargevad/filepathx v1.0.0 // indirect
	github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
	github.com/zeebo/xxh3 v1.1.0 // indirect
@@ -181,6 +201,7 @@ require (
	go.opentelemetry.io/otel/metric v1.40.0 // indirect
	go.opentelemetry.io/otel/sdk v1.40.0 // indirect
	go.opentelemetry.io/otel/trace v1.40.0 // indirect
	go.uber.org/atomic v1.11.0 // indirect
	go.yaml.in/yaml/v3 v3.0.4 // indirect
	golang.org/x/arch v0.23.0 // indirect
	golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect
@@ -194,21 +215,8 @@ require (
	google.golang.org/grpc v1.79.1 // indirect
	google.golang.org/protobuf v1.36.11 // indirect
	gopkg.in/warnings.v0 v0.1.2 // indirect
-)
-
-replace (
-	forge.lthn.ai/core/go => ../go
-	forge.lthn.ai/core/go-agentic => ../go-agentic
-	forge.lthn.ai/core/go-ai => ../go-ai
-	forge.lthn.ai/core/go-api => ../../go-api
-	forge.lthn.ai/core/go-crypt => ../go-crypt
-	forge.lthn.ai/core/go-devops => ../go-devops
-	forge.lthn.ai/core/go-inference => ../go-inference
-	forge.lthn.ai/core/go-ml => ../go-ml
-	forge.lthn.ai/core/go-mlx => ../go-mlx
-	forge.lthn.ai/core/go-netops => ../go-netops
-	forge.lthn.ai/core/go-rag => ../go-rag
-	forge.lthn.ai/core/go-scm => ../go-scm
-	forge.lthn.ai/core/go-store => ../go-store
-	forge.lthn.ai/core/go-webview => ../go-webview
-)
+	modernc.org/libc v1.67.7 // indirect
+	modernc.org/mathutil v1.7.1 // indirect
+	modernc.org/memory v1.11.0 // indirect
+	modernc.org/sqlite v1.46.1 // indirect
+)
106
go.sum
@@ -9,6 +9,32 @@ codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2 v2.2.0 h1:HTCWpzyWQOHDWt3LzI6/d2jv
codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2 v2.2.0/go.mod h1:ZglEEDj+qkxYUb+SQIeqGtFxQrbaMYqIOgahNKb7uxs=
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
forge.lthn.ai/core/go v0.0.0-20260221191103-d091fa62023f h1:CcSh/FFY93K5m0vADHLxwxKn2pTIM8HzYX1eGa4WZf4=
forge.lthn.ai/core/go v0.0.0-20260221191103-d091fa62023f/go.mod h1:WCPJVEZm/6mTcJimHV0uX8ZhnKEF3dN0rQp13ByaSPg=
forge.lthn.ai/core/go-agentic v0.0.0-20260221191948-ad0cf5c932a3 h1:6H3hjqHY0loJJe9iCofFzw6x5JDIbi6JNSL0oW2TKFE=
forge.lthn.ai/core/go-agentic v0.0.0-20260221191948-ad0cf5c932a3/go.mod h1:2WCSLupRyAeSpmFWM5+OPG0/wa4KMQCO8gA0hM9cUq8=
forge.lthn.ai/core/go-ai v0.0.0-20260221192232-bc9597c19153 h1:11XJI5RPm38l664KC9acRZz2gA+RLpmxCdg5JorimoM=
forge.lthn.ai/core/go-ai v0.0.0-20260221192232-bc9597c19153/go.mod h1:GdcXgm3jwvh4AVxrlCa0Zbw4vASeNV8JSAXfftCJVRc=
forge.lthn.ai/core/go-api v0.0.0-20260221015744-0d3479839dc5 h1:60reee4fmT4USZqEd6dyCTXsTj47eOOEc6Pp0HHJbd0=
forge.lthn.ai/core/go-api v0.0.0-20260221015744-0d3479839dc5/go.mod h1:f0hPLX+GZT/ME8Tb7c8wVDlfLqnpOKRwf2k5lpJq87g=
forge.lthn.ai/core/go-crypt v0.0.0-20260221190941-9585da8e6649 h1:Rs3bfSU8u1wkzYeL21asL7IcJIBVwOhtRidcEVj/PkA=
forge.lthn.ai/core/go-crypt v0.0.0-20260221190941-9585da8e6649/go.mod h1:RS+sz5lChrbc1AEmzzOULsTiMv3bwcwVtwbZi+c/Yjk=
forge.lthn.ai/core/go-devops v0.0.0-20260221192100-4b5739fbd7ac h1:agYaMGTUw0n/vPrv0i8mTxbKt5NItDcsXhCKQHoivy8=
forge.lthn.ai/core/go-devops v0.0.0-20260221192100-4b5739fbd7ac/go.mod h1:FSp7+jfV3QXyPzL1C8XZm6W57vjT8cbWly8vf/bPJEg=
forge.lthn.ai/core/go-inference v0.0.0-20260220151119-1576f744d105 h1:CVUVxp1BfUI8wmlEUW0Nay8w4hADR54nqBmeF+KK2Ac=
forge.lthn.ai/core/go-inference v0.0.0-20260220151119-1576f744d105/go.mod h1:hmLtynfw1yo0ByuX3pslLZMgCdqJH2r+2+wGJDhmmi0=
forge.lthn.ai/core/go-ml v0.0.0-20260221191458-812c926dac42 h1:rxhnHgWVGnQ93/mhUyLxIw/Q2l80njiGfNvv0kKISb0=
forge.lthn.ai/core/go-ml v0.0.0-20260221191458-812c926dac42/go.mod h1:lmhzv04VCP41ym7Wuhck+T1HeC5PoLtfOqXe8fW26Hc=
forge.lthn.ai/core/go-mlx v0.0.0-20260221191404-2292557fd65f h1:dlb6hFFhxfnJvD1ZYoQVsxD9NM4CV+sXkjHa6kBGzeE=
forge.lthn.ai/core/go-mlx v0.0.0-20260221191404-2292557fd65f/go.mod h1:QHspfOk9MgbuG6Wb4m+RzQyCMibtoQNZw+hUs4yclOA=
forge.lthn.ai/core/go-netops v0.0.0-20260221192152-565b16a848ae h1:1WPKohhwPCEPnKZPx80AqJS306QkKemGU0W4TKUgvqA=
forge.lthn.ai/core/go-netops v0.0.0-20260221192152-565b16a848ae/go.mod h1:YljW66VyXrWX5/kfmDlFaeFRewXA2/ss9F6shSTr5Rs=
forge.lthn.ai/core/go-rag v0.0.0-20260221191926-4c741992dc78 h1:M7ftoQ3AB87W/h4cELK+dxetzLoQi68KwnK2JhkSA8k=
forge.lthn.ai/core/go-rag v0.0.0-20260221191926-4c741992dc78/go.mod h1:f0WQYSeg3Oc7gCHTLUL0aCIzK1fS2mgMBDnBzjKgOzQ=
forge.lthn.ai/core/go-scm v0.0.0-20260221192735-5bfafcd6fc87 h1:1rkrRCVOq4hjKGkXxPmyBDVjxs82VV84ED/WnrYjptE=
forge.lthn.ai/core/go-scm v0.0.0-20260221192735-5bfafcd6fc87/go.mod h1:lK2RacccYr9Uvntbhx9sPupXlI2IvNufeil4mXVpdEM=
forge.lthn.ai/core/go-store v0.1.1-0.20260220151120-0284110ccadf h1:EDKI+OM0M+l4+VclG5XuUDoYAM8yu8uleFYReeEYwHY=
forge.lthn.ai/core/go-store v0.1.1-0.20260220151120-0284110ccadf/go.mod h1:FpUlLEX/ebyoxpk96F7ktr0vYvmFtC5Rpi9fi88UVqw=
github.com/42wim/httpsig v1.2.3 h1:xb0YyWhkYj57SPtfSttIobJUPJZB9as1nsfo7KWVcEs=
github.com/42wim/httpsig v1.2.3/go.mod h1:nZq9OlYKDrUBhptd77IHx4/sZZD+IxTBADvAPI9G/EM=
github.com/99designs/gqlgen v0.17.87 h1:pSnCIMhBQezAE8bc1GNmfdLXFmnWtWl1GRDFEE/nHP8=
@@ -54,6 +80,8 @@ github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
@@ -61,6 +89,10 @@ github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/
github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4=
github.com/brianvoe/gofakeit/v6 v6.28.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M=
@@ -76,6 +108,18 @@ github.com/casbin/govaluate v1.10.0 h1:ffGw51/hYH3w3rZcxO/KcaUIDOLP84w7nsidMVgaD
github.com/casbin/govaluate v1.10.0/go.mod h1:G/UnbIjZk/0uMNaLwZZmFQrR72tYRZWQkO70si/iR7A=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4=
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs=
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk=
github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
github.com/charmbracelet/x/ansi v0.10.1 h1:rL3Koar5XvX0pHGfovN03f5cxLbCF2YvLeyz7D2jVDQ=
github.com/charmbracelet/x/ansi v0.10.1/go.mod h1:3RQDQ6lDnROptfpWuUVIUG64bD2g2BgntdxH0Ya5TeE=
github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8=
github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8=
github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4=
github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M=
@@ -92,12 +136,18 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davidmz/go-pageant v1.0.2 h1:bPblRCh5jGU+Uptpz6LgMZGD5hJoOt7otgT454WvHn0=
github.com/davidmz/go-pageant v1.0.2/go.mod h1:P2EDDnMqIwG5Rrp05dTRITj9z2zpGcD9efWSkTNKLIE=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo=
github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o=
github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
@ -208,6 +258,8 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
|||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8=
|
||||
github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE=
|
||||
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
|
||||
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/context v1.1.2 h1:WRkNAv2uoa03QNIc1A6u4O7DAGMUVoopZhkiXWA2V1o=
|
||||
|
|
@ -258,6 +310,8 @@ github.com/leaanthony/slicer v1.6.0 h1:1RFP5uiPJvT93TAHi+ipd3NACobkW53yUiBqZheE/
|
|||
github.com/leaanthony/slicer v1.6.0/go.mod h1:o/Iz29g7LN0GqH3aMjWAe90381nyZlDNquK+mtH2Fj8=
|
||||
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
||||
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
|
|
@ -270,6 +324,10 @@ github.com/matryer/is v1.4.1 h1:55ehd8zaGABKLXQUe2awZ99BD/PTc2ls+KV/dXphgEQ=
|
|||
github.com/matryer/is v1.4.1/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
|
||||
github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
|
||||
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=
|
||||
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
|
||||
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI=
|
||||
|
|
@ -285,6 +343,14 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
|
|||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
|
||||
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
|
||||
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
|
||||
github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
|
||||
github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
|
||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/oasdiff/oasdiff v1.11.10 h1:4I9VrktUoHmwydkJqVOC7Bd6BXKu9dc4UUP3PIu1VjM=
|
||||
github.com/oasdiff/oasdiff v1.11.10/go.mod h1:GXARzmqBKN8lZHsTQD35ZM41ePbu6JdAZza4sRMeEKg=
|
||||
|
|
@ -321,6 +387,13 @@ github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
|
|||
github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII=
|
||||
github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw=
|
||||
github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU=
|
||||
github.com/redis/go-redis/v9 v9.18.0 h1:pMkxYPkEbMPwRdenAzUNyFNrDgHx9U+DrBabWNfSRQs=
|
||||
github.com/redis/go-redis/v9 v9.18.0/go.mod h1:k3ufPphLU5YXwNTUcCRXGxUoF1fqxnhFQmscfkCoDA0=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
|
|
@ -400,6 +473,8 @@ github.com/woodsbury/decimal128 v1.4.0 h1:xJATj7lLu4f2oObouMt2tgGiElE5gO6mSWUjQs
|
|||
github.com/woodsbury/decimal128 v1.4.0/go.mod h1:BP46FUrVjVhdTbKT+XuQh2xfQaGki9LMIRJSFuh6THU=
|
||||
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
|
||||
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
|
||||
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
|
||||
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
|
||||
github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc=
|
||||
|
|
@ -429,6 +504,8 @@ go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4A
|
|||
go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
|
||||
go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
|
||||
go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
|
||||
|
|
@ -476,6 +553,7 @@ golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
|
|
@ -530,3 +608,31 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
|
|||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
|
||||
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||
modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=
|
||||
modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM=
|
||||
modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
|
||||
modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
|
||||
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
||||
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
||||
modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE=
|
||||
modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
|
||||
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
||||
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
||||
modernc.org/libc v1.67.7 h1:H+gYQw2PyidyxwxQsGTwQw6+6H+xUk+plvOKW7+d3TI=
|
||||
modernc.org/libc v1.67.7/go.mod h1:UjCSJFl2sYbJbReVQeVpq/MgzlbmDM4cRHIYFelnaDk=
|
||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
||||
modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
|
||||
modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
||||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||
modernc.org/sqlite v1.46.1 h1:eFJ2ShBLIEnUWlLy12raN0Z1plqmFX9Qe3rjQTKt6sU=
|
||||
modernc.org/sqlite v1.46.1/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA=
|
||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||
|
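Note on the go.sum hunks above: every module version is pinned by a pair of lines. The bare h1: entry hashes the module's complete file tree, while the /go.mod h1: entry hashes only its go.mod file, which lets the Go toolchain verify the dependency graph without downloading each module in full. With the workspace overrides below removed, these are the hashes that builds are verified against.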
18	go.work
@@ -1,18 +0,0 @@
-go 1.25.5
-
-use (
-	.
-	../go
-	../go-agentic
-	../go-ai
-	../go-crypt
-	../go-devops
-	../go-inference
-	../go-ml
-	../go-mlx
-	../go-netops
-	../go-rag
-	../go-scm
-	../go-store
-	../go-webview
-)
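The deleted go.work above pointed every listed module path at a sibling checkout, overriding the versions pinned in go.mod. A per-module replace directive in go.mod has the same effect for a single module; a hypothetical fragment for illustration, with the path mirroring one of the deleted use entries:

// go.mod fragment (hypothetical, illustration only)
replace forge.lthn.ai/core/go-ml => ../go-ml

With no workspace file and no such overrides, go build resolves each forge.lthn.ai module at its pinned version from the forge itself rather than from the local directory tree.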
2	main.go
@@ -20,7 +20,7 @@ import (
 	_ "forge.lthn.ai/core/cli/cmd/help"
 	_ "forge.lthn.ai/core/cli/cmd/lab"
 	_ "forge.lthn.ai/core/cli/cmd/mcpcmd"
-	_ "forge.lthn.ai/core/cli/cmd/ml"
+	_ "forge.lthn.ai/core/go-ml/cmd"
 	_ "forge.lthn.ai/core/cli/cmd/module"
 	_ "forge.lthn.ai/core/cli/cmd/monitor"
 	_ "forge.lthn.ai/core/cli/cmd/pkgcmd"
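The swapped line above is a blank import, so the package is linked purely for its init() side effects. A minimal sketch of that registration pattern, with hypothetical names (the example.com paths and the Commands map stand in for the real CLI wiring, which this commit does not show):

// registry/registry.go -- shared command table (hypothetical).
package registry

var Commands = map[string]func(args []string) error{}

// cmd/cmd.go -- a command package adds itself to the table when imported.
package cmd

import "example.com/registry"

func init() {
	registry.Commands["ml"] = func(args []string) error {
		return nil // real command logic would go here
	}
}

Because registration happens in init(), relocating a command set between modules only requires swapping the blank-import path in main.go, as this hunk does.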