fix(codex): config.toml compat with codex CLI 0.122+

- model_reasoning_effort: "extra-high" → "xhigh" (the variant name was
  tightened in a recent codex CLI release; "extra-high" now fails config
  load)
- Remove [model_providers.ollama] and [model_providers.lmstudio]
  overrides — codex CLI 0.122+ reserves these as built-in provider IDs
  and rejects overrides. The same localhost endpoints are used by the
  built-ins, so the overrides were redundant anyway. Profiles that
  reference `model_provider = "ollama"` continue to work via the
  built-in.
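  For example, a profile that selects the built-in provider keeps working
  unchanged (sketch only; the profile name and model value here are
  illustrative, not entries from this repo's config):

      [profiles.local]
      model = "llama3"
      model_provider = "ollama"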
Snider 2026-04-23 12:32:49 +01:00
parent bd060c8aa0
commit 0741fba88f

@@ -2,7 +2,7 @@
 # Shared between CLI and IDE extension
 model = "gpt-5.4"
-model_reasoning_effort = "extra-high"
+model_reasoning_effort = "xhigh"
 approval_policy = "on-request"
 sandbox_mode = "workspace-write"
 personality = "pragmatic"
@@ -12,7 +12,7 @@ personality = "pragmatic"
 [profiles.review]
 model = "gpt-5.4"
-model_reasoning_effort = "extra-high"
+model_reasoning_effort = "xhigh"
 approval_policy = "never"
 sandbox_mode = "read-only"
@@ -47,14 +47,9 @@ FORGE_TOKEN = "${FORGE_TOKEN}"
 CORE_BRAIN_KEY = "${CORE_BRAIN_KEY}"
 MONITOR_INTERVAL = "15s"
-# Local model providers
-[model_providers.ollama]
-name = "Ollama"
-base_url = "http://127.0.0.1:11434/v1"
-[model_providers.lmstudio]
-name = "LM Studio"
-base_url = "http://127.0.0.1:1234/v1"
+# Model providers: codex CLI 0.122+ ships built-in `ollama` and `lmstudio`
+# providers pointing at the same default localhost ports, so project-level
+# overrides are both redundant and rejected ("reserved built-in provider IDs").
 # Agent configuration
 [agents]