From 0741fba88fa6e9ee953e5bb66213932892af61b7 Mon Sep 17 00:00:00 2001
From: Snider
Date: Thu, 23 Apr 2026 12:32:49 +0100
Subject: [PATCH] fix(codex): config.toml compat with codex CLI 0.122+
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- model_reasoning_effort: "extra-high" → "xhigh" (the variant name was
  tightened in a recent codex CLI release; "extra-high" now fails config
  load)
- Remove [model_providers.ollama] and [model_providers.lmstudio]
  overrides — codex CLI 0.122+ reserves these as built-in provider IDs
  and rejects overrides. The same localhost endpoints are used by the
  built-ins, so the overrides were redundant anyway.

Profiles that reference `model_provider = "ollama"` continue to work via
the built-in.
---
 .codex/config.toml | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/.codex/config.toml b/.codex/config.toml
index 7f1c8c3..7f41003 100644
--- a/.codex/config.toml
+++ b/.codex/config.toml
@@ -2,7 +2,7 @@
 # Shared between CLI and IDE extension
 
 model = "gpt-5.4"
-model_reasoning_effort = "extra-high"
+model_reasoning_effort = "xhigh"
 approval_policy = "on-request"
 sandbox_mode = "workspace-write"
 personality = "pragmatic"
@@ -12,7 +12,7 @@
 
 [profiles.review]
 model = "gpt-5.4"
-model_reasoning_effort = "extra-high"
+model_reasoning_effort = "xhigh"
 approval_policy = "never"
 sandbox_mode = "read-only"
 
@@ -47,14 +47,9 @@
 FORGE_TOKEN = "${FORGE_TOKEN}"
 CORE_BRAIN_KEY = "${CORE_BRAIN_KEY}"
 MONITOR_INTERVAL = "15s"
-# Local model providers
-[model_providers.ollama]
-name = "Ollama"
-base_url = "http://127.0.0.1:11434/v1"
-
-[model_providers.lmstudio]
-name = "LM Studio"
-base_url = "http://127.0.0.1:1234/v1"
+# Model providers: codex CLI 0.122+ ships built-in `ollama` and `lmstudio`
+# providers pointing at the same default localhost ports, so project-level
+# overrides are both redundant and rejected ("reserved built-in provider IDs").
 
 # Agent configuration
 [agents]