From ff6764e8084348b7945c3445015ad83b98a33187 Mon Sep 17 00:00:00 2001 From: Shaqayeq Date: Thu, 12 Mar 2026 09:22:01 -0700 Subject: [PATCH] Add Python app-server SDK (#14435) ## TL;DR Bring the Python app-server SDK from `main-with-prs-13953-and-14232` onto current `main` as a standalone SDK-only PR. - adds the new `sdk/python` and `sdk/python-runtime` package trees - keeps the scope to the SDK payload only, without the unrelated branch-history or workflow changes from the source branch - regenerates `sdk/python/src/codex_app_server/generated/v2_all.py` against current `main` schema so the extracted SDK matches today's protocol definitions ## Validation - `PYTHONPATH=sdk/python/src python3 -m pytest sdk/python/tests` Co-authored-by: Codex --- sdk/python-runtime/README.md | 9 + sdk/python-runtime/hatch_build.py | 15 + sdk/python-runtime/pyproject.toml | 45 + .../src/codex_cli_bin/__init__.py | 19 + sdk/python/README.md | 95 + sdk/python/docs/faq.md | 77 + sdk/python/docs/getting-started.md | 75 + sdk/python/pyproject.toml | 62 + sdk/python/scripts/update_sdk_artifacts.py | 998 ++ sdk/python/src/codex_app_server/__init__.py | 10 + sdk/python/src/codex_app_server/client.py | 540 ++ sdk/python/src/codex_app_server/errors.py | 125 + .../codex_app_server/generated/__init__.py | 1 + .../generated/notification_registry.py | 102 + .../src/codex_app_server/generated/v2_all.py | 8407 +++++++++++++++++ .../codex_app_server/generated/v2_types.py | 25 + sdk/python/src/codex_app_server/models.py | 97 + sdk/python/src/codex_app_server/py.typed | 0 sdk/python/src/codex_app_server/retry.py | 41 + sdk/python/tests/conftest.py | 16 + .../test_artifact_workflow_and_binaries.py | 411 + sdk/python/tests/test_client_rpc_methods.py | 95 + sdk/python/tests/test_contract_generation.py | 52 + 23 files changed, 11317 insertions(+) create mode 100644 sdk/python-runtime/README.md create mode 100644 sdk/python-runtime/hatch_build.py create mode 100644 sdk/python-runtime/pyproject.toml create 
mode 100644 sdk/python-runtime/src/codex_cli_bin/__init__.py create mode 100644 sdk/python/README.md create mode 100644 sdk/python/docs/faq.md create mode 100644 sdk/python/docs/getting-started.md create mode 100644 sdk/python/pyproject.toml create mode 100755 sdk/python/scripts/update_sdk_artifacts.py create mode 100644 sdk/python/src/codex_app_server/__init__.py create mode 100644 sdk/python/src/codex_app_server/client.py create mode 100644 sdk/python/src/codex_app_server/errors.py create mode 100644 sdk/python/src/codex_app_server/generated/__init__.py create mode 100644 sdk/python/src/codex_app_server/generated/notification_registry.py create mode 100644 sdk/python/src/codex_app_server/generated/v2_all.py create mode 100644 sdk/python/src/codex_app_server/generated/v2_types.py create mode 100644 sdk/python/src/codex_app_server/models.py create mode 100644 sdk/python/src/codex_app_server/py.typed create mode 100644 sdk/python/src/codex_app_server/retry.py create mode 100644 sdk/python/tests/conftest.py create mode 100644 sdk/python/tests/test_artifact_workflow_and_binaries.py create mode 100644 sdk/python/tests/test_client_rpc_methods.py create mode 100644 sdk/python/tests/test_contract_generation.py diff --git a/sdk/python-runtime/README.md b/sdk/python-runtime/README.md new file mode 100644 index 000000000..0e7b26d5f --- /dev/null +++ b/sdk/python-runtime/README.md @@ -0,0 +1,9 @@ +# Codex CLI Runtime for Python SDK + +Platform-specific runtime package consumed by the published `codex-app-server-sdk`. + +This package is staged during release so the SDK can pin an exact Codex CLI +version without checking platform binaries into the repo. + +`codex-cli-bin` is intentionally wheel-only. Do not build or publish an sdist +for this package. 
diff --git a/sdk/python-runtime/hatch_build.py b/sdk/python-runtime/hatch_build.py new file mode 100644 index 000000000..319e6973f --- /dev/null +++ b/sdk/python-runtime/hatch_build.py @@ -0,0 +1,15 @@ +from __future__ import annotations + +from hatchling.builders.hooks.plugin.interface import BuildHookInterface + + +class RuntimeBuildHook(BuildHookInterface): + def initialize(self, version: str, build_data: dict[str, object]) -> None: + del version + if self.target_name == "sdist": + raise RuntimeError( + "codex-cli-bin is wheel-only; build and publish platform wheels only." + ) + + build_data["pure_python"] = False + build_data["infer_tag"] = True diff --git a/sdk/python-runtime/pyproject.toml b/sdk/python-runtime/pyproject.toml new file mode 100644 index 000000000..761dccbb9 --- /dev/null +++ b/sdk/python-runtime/pyproject.toml @@ -0,0 +1,45 @@ +[build-system] +requires = ["hatchling>=1.24.0"] +build-backend = "hatchling.build" + +[project] +name = "codex-cli-bin" +version = "0.0.0-dev" +description = "Pinned Codex CLI runtime for the Python SDK" +readme = "README.md" +requires-python = ">=3.10" +license = { text = "Apache-2.0" } +authors = [{ name = "OpenAI" }] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", +] + +[project.urls] +Homepage = "https://github.com/openai/codex" +Repository = "https://github.com/openai/codex" +Issues = "https://github.com/openai/codex/issues" + +[tool.hatch.build] +exclude = [ + ".venv/**", + ".pytest_cache/**", + "dist/**", + "build/**", +] + +[tool.hatch.build.targets.wheel] +packages = ["src/codex_cli_bin"] +include = ["src/codex_cli_bin/bin/**"] + +[tool.hatch.build.targets.wheel.hooks.custom] + 
+[tool.hatch.build.targets.sdist] + +[tool.hatch.build.targets.sdist.hooks.custom] diff --git a/sdk/python-runtime/src/codex_cli_bin/__init__.py b/sdk/python-runtime/src/codex_cli_bin/__init__.py new file mode 100644 index 000000000..8059d3921 --- /dev/null +++ b/sdk/python-runtime/src/codex_cli_bin/__init__.py @@ -0,0 +1,19 @@ +from __future__ import annotations + +import os +from pathlib import Path + +PACKAGE_NAME = "codex-cli-bin" + + +def bundled_codex_path() -> Path: + exe = "codex.exe" if os.name == "nt" else "codex" + path = Path(__file__).resolve().parent / "bin" / exe + if not path.is_file(): + raise FileNotFoundError( + f"{PACKAGE_NAME} is installed but missing its packaged codex binary at {path}" + ) + return path + + +__all__ = ["PACKAGE_NAME", "bundled_codex_path"] diff --git a/sdk/python/README.md b/sdk/python/README.md new file mode 100644 index 000000000..ef3abdf63 --- /dev/null +++ b/sdk/python/README.md @@ -0,0 +1,95 @@ +# Codex App Server Python SDK (Experimental) + +Experimental Python SDK for `codex app-server` JSON-RPC v2 over stdio, with a small default surface optimized for real scripts and apps. + +The generated wire-model layer is currently sourced from the bundled v2 schema and exposed as Pydantic models with snake_case Python fields that serialize back to the app-server’s camelCase wire format. + +## Install + +```bash +cd sdk/python +python -m pip install -e . +``` + +Published SDK builds pin an exact `codex-cli-bin` runtime dependency. For local +repo development, pass `AppServerConfig(codex_bin=...)` to point at a local +build explicitly. 
+ +## Quickstart + +```python +from codex_app_server import Codex, TextInput + +with Codex() as codex: + thread = codex.thread_start(model="gpt-5") + result = thread.turn(TextInput("Say hello in one sentence.")).run() + print(result.text) +``` + +## Docs map + +- Golden path tutorial: `docs/getting-started.md` +- API reference (signatures + behavior): `docs/api-reference.md` +- Common decisions and pitfalls: `docs/faq.md` +- Runnable examples index: `examples/README.md` +- Jupyter walkthrough notebook: `notebooks/sdk_walkthrough.ipynb` + +## Examples + +Start here: + +```bash +cd sdk/python +python examples/01_quickstart_constructor/sync.py +python examples/01_quickstart_constructor/async.py +``` + +## Runtime packaging + +The repo no longer checks `codex` binaries into `sdk/python`. + +Published SDK builds are pinned to an exact `codex-cli-bin` package version, +and that runtime package carries the platform-specific binary for the target +wheel. + +For local repo development, the checked-in `sdk/python-runtime` package is only +a template for staged release artifacts. Editable installs should use an +explicit `codex_bin` override instead. 
+ +## Maintainer workflow + +```bash +cd sdk/python +python scripts/update_sdk_artifacts.py generate-types +python scripts/update_sdk_artifacts.py \ + stage-sdk \ + /tmp/codex-python-release/codex-app-server-sdk \ + --runtime-version 1.2.3 +python scripts/update_sdk_artifacts.py \ + stage-runtime \ + /tmp/codex-python-release/codex-cli-bin \ + /path/to/codex \ + --runtime-version 1.2.3 +``` + +This supports the CI release flow: + +- run `generate-types` before packaging +- stage `codex-app-server-sdk` once with an exact `codex-cli-bin==...` dependency +- stage `codex-cli-bin` on each supported platform runner with the same pinned runtime version +- build and publish `codex-cli-bin` as platform wheels only; do not publish an sdist + +## Compatibility and versioning + +- Package: `codex-app-server-sdk` +- Runtime package: `codex-cli-bin` +- Current SDK version in this repo: `0.2.0` +- Python: `>=3.10` +- Target protocol: Codex `app-server` JSON-RPC v2 +- Recommendation: keep SDK and `codex` CLI reasonably up to date together + +## Notes + +- `Codex()` is eager and performs startup + `initialize` in the constructor. +- Use context managers (`with Codex() as codex:`) to ensure shutdown. +- For transient overload, use `codex_app_server.retry.retry_on_overload`. diff --git a/sdk/python/docs/faq.md b/sdk/python/docs/faq.md new file mode 100644 index 000000000..ebfd2ddad --- /dev/null +++ b/sdk/python/docs/faq.md @@ -0,0 +1,77 @@ +# FAQ + +## Thread vs turn + +- A `Thread` is conversation state. +- A `Turn` is one model execution inside that thread. +- Multi-turn chat means multiple turns on the same `Thread`. + +## `run()` vs `stream()` + +- `Turn.run()` is the easiest path. It consumes events until completion and returns `TurnResult`. +- `Turn.stream()` yields raw notifications (`Notification`) so you can react event-by-event. + +Choose `run()` for most apps. Choose `stream()` for progress UIs, custom timeout logic, or custom parsing. 
+ +## Sync vs async clients + +- `Codex` is the minimal sync SDK and best default. +- `AsyncAppServerClient` wraps the sync transport with `asyncio.to_thread(...)` for async-friendly call sites. + +If your app is not already async, stay with `Codex`. + +## `thread(...)` vs `thread_resume(...)` + +- `codex.thread(thread_id)` only binds a local helper to an existing thread ID. +- `codex.thread_resume(thread_id, ...)` performs a `thread/resume` RPC and can apply overrides (model, instructions, sandbox, etc.). + +Use `thread(...)` for simple continuation. Use `thread_resume(...)` when you need explicit resume semantics or override fields. + +## Why does constructor fail? + +`Codex()` is eager: it starts transport and calls `initialize` in `__init__`. + +Common causes: + +- published runtime package (`codex-cli-bin`) is not installed +- local `codex_bin` override points to a missing file +- local auth/session is missing +- incompatible/old app-server + +Maintainers stage releases by building the SDK once and the runtime once per +platform with the same pinned runtime version. Publish `codex-cli-bin` as +platform wheels only; do not publish an sdist: + +```bash +cd sdk/python +python scripts/update_sdk_artifacts.py generate-types +python scripts/update_sdk_artifacts.py \ + stage-sdk \ + /tmp/codex-python-release/codex-app-server-sdk \ + --runtime-version 1.2.3 +python scripts/update_sdk_artifacts.py \ + stage-runtime \ + /tmp/codex-python-release/codex-cli-bin \ + /path/to/codex \ + --runtime-version 1.2.3 +``` + +## Why does a turn "hang"? + +A turn is complete only when `turn/completed` arrives for that turn ID. + +- `run()` waits for this automatically. +- With `stream()`, make sure you keep consuming notifications until completion. + +## How do I retry safely? + +Use `retry_on_overload(...)` for transient overload failures (`ServerBusyError`). + +Do not blindly retry all errors. 
For `InvalidParamsError` or `MethodNotFoundError`, fix inputs/version compatibility instead. + +## Common pitfalls + +- Starting a new thread for every prompt when you wanted continuity. +- Forgetting to `close()` (or not using `with Codex() as codex:`). +- Ignoring `TurnResult.status` and `TurnResult.error`. +- Mixing SDK input classes with raw dicts incorrectly in minimal API paths. diff --git a/sdk/python/docs/getting-started.md b/sdk/python/docs/getting-started.md new file mode 100644 index 000000000..9108902b3 --- /dev/null +++ b/sdk/python/docs/getting-started.md @@ -0,0 +1,75 @@ +# Getting Started + +This is the fastest path from install to a multi-turn thread using the minimal SDK surface. + +## 1) Install + +From repo root: + +```bash +cd sdk/python +python -m pip install -e . +``` + +Requirements: + +- Python `>=3.10` +- installed `codex-cli-bin` runtime package, or an explicit `codex_bin` override +- Local Codex auth/session configured + +## 2) Run your first turn + +```python +from codex_app_server import Codex, TextInput + +with Codex() as codex: + print("Server:", codex.metadata.server_name, codex.metadata.server_version) + + thread = codex.thread_start(model="gpt-5") + result = thread.turn(TextInput("Say hello in one sentence.")).run() + + print("Thread:", result.thread_id) + print("Turn:", result.turn_id) + print("Status:", result.status) + print("Text:", result.text) +``` + +What happened: + +- `Codex()` started and initialized `codex app-server`. +- `thread_start(...)` created a thread. +- `turn(...).run()` consumed events until `turn/completed` and returned a `TurnResult`. 
+ +## 3) Continue the same thread (multi-turn) + +```python +from codex_app_server import Codex, TextInput + +with Codex() as codex: + thread = codex.thread_start(model="gpt-5") + + first = thread.turn(TextInput("Summarize Rust ownership in 2 bullets.")).run() + second = thread.turn(TextInput("Now explain it to a Python developer.")).run() + + print("first:", first.text) + print("second:", second.text) +``` + +## 4) Resume an existing thread + +```python +from codex_app_server import Codex, TextInput + +THREAD_ID = "thr_123" # replace with a real id + +with Codex() as codex: + thread = codex.thread(THREAD_ID) + result = thread.turn(TextInput("Continue where we left off.")).run() + print(result.text) +``` + +## 5) Next stops + +- API surface and signatures: `docs/api-reference.md` +- Common decisions/pitfalls: `docs/faq.md` +- End-to-end runnable examples: `examples/README.md` diff --git a/sdk/python/pyproject.toml b/sdk/python/pyproject.toml new file mode 100644 index 000000000..f5129cbf9 --- /dev/null +++ b/sdk/python/pyproject.toml @@ -0,0 +1,62 @@ +[build-system] +requires = ["hatchling>=1.24.0"] +build-backend = "hatchling.build" + +[project] +name = "codex-app-server-sdk" +version = "0.2.0" +description = "Python SDK for Codex app-server v2" +readme = "README.md" +requires-python = ">=3.10" +license = { text = "Apache-2.0" } +authors = [{ name = "OpenAI" }] +keywords = ["codex", "json-rpc", "sdk", "llm", "app-server"] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Topic :: Software Development :: Libraries :: Python Modules", +] +dependencies = ["pydantic>=2.12"] + +[project.urls] +Homepage = "https://github.com/openai/codex" 
+Repository = "https://github.com/openai/codex" +Issues = "https://github.com/openai/codex/issues" + +[project.optional-dependencies] +dev = ["pytest>=8.0", "datamodel-code-generator==0.31.2", "ruff>=0.11"] + +[tool.hatch.build] +exclude = [ + ".venv/**", + ".venv2/**", + ".pytest_cache/**", + "dist/**", + "build/**", +] + +[tool.hatch.build.targets.wheel] +packages = ["src/codex_app_server"] +include = [ + "src/codex_app_server/py.typed", +] + +[tool.hatch.build.targets.sdist] +include = [ + "src/codex_app_server/**", + "README.md", + "CHANGELOG.md", + "CONTRIBUTING.md", + "RELEASE_CHECKLIST.md", + "pyproject.toml", +] + +[tool.pytest.ini_options] +addopts = "-q" +testpaths = ["tests"] diff --git a/sdk/python/scripts/update_sdk_artifacts.py b/sdk/python/scripts/update_sdk_artifacts.py new file mode 100755 index 000000000..da4cbceb1 --- /dev/null +++ b/sdk/python/scripts/update_sdk_artifacts.py @@ -0,0 +1,998 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse +import importlib +import json +import platform +import re +import shutil +import stat +import subprocess +import sys +import tempfile +import types +import typing +from dataclasses import dataclass +from pathlib import Path +from typing import Any, Callable, Sequence, get_args, get_origin + + +def repo_root() -> Path: + return Path(__file__).resolve().parents[3] + + +def sdk_root() -> Path: + return repo_root() / "sdk" / "python" + + +def python_runtime_root() -> Path: + return repo_root() / "sdk" / "python-runtime" + + +def schema_bundle_path() -> Path: + return ( + repo_root() + / "codex-rs" + / "app-server-protocol" + / "schema" + / "json" + / "codex_app_server_protocol.v2.schemas.json" + ) + + +def schema_root_dir() -> Path: + return repo_root() / "codex-rs" / "app-server-protocol" / "schema" / "json" + + +def _is_windows() -> bool: + return platform.system().lower().startswith("win") + + +def runtime_binary_name() -> str: + return "codex.exe" if _is_windows() else "codex" + 
+ +def staged_runtime_bin_path(root: Path) -> Path: + return root / "src" / "codex_cli_bin" / "bin" / runtime_binary_name() + + +def run(cmd: list[str], cwd: Path) -> None: + subprocess.run(cmd, cwd=str(cwd), check=True) + + +def run_python_module(module: str, args: list[str], cwd: Path) -> None: + run([sys.executable, "-m", module, *args], cwd) + + +def current_sdk_version() -> str: + match = re.search( + r'^version = "([^"]+)"$', + (sdk_root() / "pyproject.toml").read_text(), + flags=re.MULTILINE, + ) + if match is None: + raise RuntimeError("Could not determine Python SDK version from pyproject.toml") + return match.group(1) + + +def _copy_package_tree(src: Path, dst: Path) -> None: + if dst.exists(): + if dst.is_dir(): + shutil.rmtree(dst) + else: + dst.unlink() + shutil.copytree( + src, + dst, + ignore=shutil.ignore_patterns( + ".venv", + ".venv2", + ".pytest_cache", + "__pycache__", + "build", + "dist", + "*.pyc", + ), + ) + + +def _rewrite_project_version(pyproject_text: str, version: str) -> str: + updated, count = re.subn( + r'^version = "[^"]+"$', + f'version = "{version}"', + pyproject_text, + count=1, + flags=re.MULTILINE, + ) + if count != 1: + raise RuntimeError("Could not rewrite project version in pyproject.toml") + return updated + + +def _rewrite_sdk_runtime_dependency(pyproject_text: str, runtime_version: str) -> str: + match = re.search(r"^dependencies = \[(.*?)\]$", pyproject_text, flags=re.MULTILINE) + if match is None: + raise RuntimeError( + "Could not find dependencies array in sdk/python/pyproject.toml" + ) + + raw_items = [item.strip() for item in match.group(1).split(",") if item.strip()] + raw_items = [item for item in raw_items if "codex-cli-bin" not in item] + raw_items.append(f'"codex-cli-bin=={runtime_version}"') + replacement = "dependencies = [\n " + ",\n ".join(raw_items) + ",\n]" + return pyproject_text[: match.start()] + replacement + pyproject_text[match.end() :] + + +def stage_python_sdk_package( + staging_dir: Path, 
sdk_version: str, runtime_version: str +) -> Path: + _copy_package_tree(sdk_root(), staging_dir) + sdk_bin_dir = staging_dir / "src" / "codex_app_server" / "bin" + if sdk_bin_dir.exists(): + shutil.rmtree(sdk_bin_dir) + + pyproject_path = staging_dir / "pyproject.toml" + pyproject_text = pyproject_path.read_text() + pyproject_text = _rewrite_project_version(pyproject_text, sdk_version) + pyproject_text = _rewrite_sdk_runtime_dependency(pyproject_text, runtime_version) + pyproject_path.write_text(pyproject_text) + return staging_dir + + +def stage_python_runtime_package( + staging_dir: Path, runtime_version: str, binary_path: Path +) -> Path: + _copy_package_tree(python_runtime_root(), staging_dir) + + pyproject_path = staging_dir / "pyproject.toml" + pyproject_path.write_text( + _rewrite_project_version(pyproject_path.read_text(), runtime_version) + ) + + out_bin = staged_runtime_bin_path(staging_dir) + out_bin.parent.mkdir(parents=True, exist_ok=True) + shutil.copy2(binary_path, out_bin) + if not _is_windows(): + out_bin.chmod( + out_bin.stat().st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH + ) + return staging_dir + + +def _flatten_string_enum_one_of(definition: dict[str, Any]) -> bool: + branches = definition.get("oneOf") + if not isinstance(branches, list) or not branches: + return False + + enum_values: list[str] = [] + for branch in branches: + if not isinstance(branch, dict): + return False + if branch.get("type") != "string": + return False + + enum = branch.get("enum") + if not isinstance(enum, list) or len(enum) != 1 or not isinstance(enum[0], str): + return False + + extra_keys = set(branch) - {"type", "enum", "description", "title"} + if extra_keys: + return False + + enum_values.append(enum[0]) + + description = definition.get("description") + title = definition.get("title") + definition.clear() + definition["type"] = "string" + definition["enum"] = enum_values + if isinstance(description, str): + definition["description"] = description + if 
isinstance(title, str): + definition["title"] = title + return True + + +DISCRIMINATOR_KEYS = ("type", "method", "mode", "state", "status", "role", "reason") + + +def _to_pascal_case(value: str) -> str: + parts = re.split(r"[^0-9A-Za-z]+", value) + compact = "".join(part[:1].upper() + part[1:] for part in parts if part) + return compact or "Value" + + +def _string_literal(value: Any) -> str | None: + if not isinstance(value, dict): + return None + const = value.get("const") + if isinstance(const, str): + return const + + enum = value.get("enum") + if isinstance(enum, list) and enum and len(enum) == 1 and isinstance(enum[0], str): + return enum[0] + return None + + +def _enum_literals(value: Any) -> list[str] | None: + if not isinstance(value, dict): + return None + enum = value.get("enum") + if ( + not isinstance(enum, list) + or not enum + or not all(isinstance(item, str) for item in enum) + ): + return None + return list(enum) + + +def _literal_from_property(props: dict[str, Any], key: str) -> str | None: + return _string_literal(props.get(key)) + + +def _variant_definition_name(base: str, variant: dict[str, Any]) -> str | None: + # datamodel-code-generator invents numbered helper names for inline union + # branches unless they carry a stable, unique title up front. We derive + # those titles from the branch discriminator or other identifying shape. 
+ props = variant.get("properties") + if isinstance(props, dict): + for key in DISCRIMINATOR_KEYS: + literal = _literal_from_property(props, key) + if literal is None: + continue + pascal = _to_pascal_case(literal) + if base == "ClientRequest": + return f"{pascal}Request" + if base == "ServerRequest": + return f"{pascal}ServerRequest" + if base == "ClientNotification": + return f"{pascal}ClientNotification" + if base == "ServerNotification": + return f"{pascal}ServerNotification" + if base == "EventMsg": + return f"{pascal}EventMsg" + return f"{pascal}{base}" + + if len(props) == 1: + key = next(iter(props)) + pascal = _string_literal(props[key]) + return f"{_to_pascal_case(pascal or key)}{base}" + + required = variant.get("required") + if ( + isinstance(required, list) + and len(required) == 1 + and isinstance(required[0], str) + ): + return f"{_to_pascal_case(required[0])}{base}" + + enum_literals = _enum_literals(variant) + if enum_literals is not None: + if len(enum_literals) == 1: + return f"{_to_pascal_case(enum_literals[0])}{base}" + return f"{base}Value" + + return None + + +def _variant_collision_key( + base: str, variant: dict[str, Any], generated_name: str +) -> str: + parts = [f"base={base}", f"generated={generated_name}"] + props = variant.get("properties") + if isinstance(props, dict): + for key in DISCRIMINATOR_KEYS: + literal = _literal_from_property(props, key) + if literal is not None: + parts.append(f"{key}={literal}") + if len(props) == 1: + parts.append(f"only_property={next(iter(props))}") + + required = variant.get("required") + if ( + isinstance(required, list) + and len(required) == 1 + and isinstance(required[0], str) + ): + parts.append(f"required_only={required[0]}") + + enum_literals = _enum_literals(variant) + if enum_literals is not None: + parts.append(f"enum={'|'.join(enum_literals)}") + + return "|".join(parts) + + +def _set_discriminator_titles(props: dict[str, Any], owner: str) -> None: + for key in DISCRIMINATOR_KEYS: + prop = 
props.get(key) + if not isinstance(prop, dict): + continue + if _string_literal(prop) is None or "title" in prop: + continue + prop["title"] = f"{owner}{_to_pascal_case(key)}" + + +def _annotate_variant_list(variants: list[Any], base: str | None) -> None: + seen = { + variant["title"] + for variant in variants + if isinstance(variant, dict) and isinstance(variant.get("title"), str) + } + + for variant in variants: + if not isinstance(variant, dict): + continue + + variant_name = variant.get("title") + generated_name = _variant_definition_name(base, variant) if base else None + if generated_name is not None and ( + not isinstance(variant_name, str) + or "/" in variant_name + or variant_name != generated_name + ): + # Titles like `Thread/startedNotification` sanitize poorly in + # Python, and envelope titles like `ErrorNotification` collide + # with their payload model names. Rewrite them before codegen so + # we get `ThreadStartedServerNotification` instead of `...1`. + if generated_name in seen and variant_name != generated_name: + raise RuntimeError( + "Variant title naming collision detected: " + f"{_variant_collision_key(base or '', variant, generated_name)}" + ) + variant["title"] = generated_name + seen.add(generated_name) + variant_name = generated_name + + if isinstance(variant_name, str): + props = variant.get("properties") + if isinstance(props, dict): + _set_discriminator_titles(props, variant_name) + + _annotate_schema(variant, base) + + +def _annotate_schema(value: Any, base: str | None = None) -> None: + if isinstance(value, list): + for item in value: + _annotate_schema(item, base) + return + + if not isinstance(value, dict): + return + + owner = value.get("title") + props = value.get("properties") + if isinstance(owner, str) and isinstance(props, dict): + _set_discriminator_titles(props, owner) + + one_of = value.get("oneOf") + if isinstance(one_of, list): + # Walk nested unions recursively so every inline branch gets the same + # title normalization 
treatment before we hand the bundle to Python + # codegen. + _annotate_variant_list(one_of, base) + + any_of = value.get("anyOf") + if isinstance(any_of, list): + _annotate_variant_list(any_of, base) + + definitions = value.get("definitions") + if isinstance(definitions, dict): + for name, schema in definitions.items(): + _annotate_schema(schema, name if isinstance(name, str) else base) + + defs = value.get("$defs") + if isinstance(defs, dict): + for name, schema in defs.items(): + _annotate_schema(schema, name if isinstance(name, str) else base) + + for key, child in value.items(): + if key in {"oneOf", "anyOf", "definitions", "$defs"}: + continue + _annotate_schema(child, base) + + +def _normalized_schema_bundle_text() -> str: + schema = json.loads(schema_bundle_path().read_text()) + definitions = schema.get("definitions", {}) + if isinstance(definitions, dict): + for definition in definitions.values(): + if isinstance(definition, dict): + _flatten_string_enum_one_of(definition) + # Normalize the schema into something datamodel-code-generator can map to + # stable class names instead of anonymous numbered helpers. 
+ _annotate_schema(schema) + return json.dumps(schema, indent=2, sort_keys=True) + "\n" + + +def generate_v2_all() -> None: + out_path = sdk_root() / "src" / "codex_app_server" / "generated" / "v2_all.py" + out_dir = out_path.parent + old_package_dir = out_dir / "v2_all" + if old_package_dir.exists(): + shutil.rmtree(old_package_dir) + out_dir.mkdir(parents=True, exist_ok=True) + with tempfile.TemporaryDirectory() as td: + normalized_bundle = Path(td) / schema_bundle_path().name + normalized_bundle.write_text(_normalized_schema_bundle_text()) + run_python_module( + "datamodel_code_generator", + [ + "--input", + str(normalized_bundle), + "--input-file-type", + "jsonschema", + "--output", + str(out_path), + "--output-model-type", + "pydantic_v2.BaseModel", + "--target-python-version", + "3.11", + "--use-standard-collections", + "--enum-field-as-literal", + "one", + "--field-constraints", + "--use-default-kwarg", + "--snake-case-field", + "--allow-population-by-field-name", + # Once the schema prepass has assigned stable titles, tell the + # generator to prefer those titles as the emitted class names. + "--use-title-as-name", + "--use-annotated", + "--use-union-operator", + "--disable-timestamp", + # Keep the generated file formatted deterministically so the + # checked-in artifact only changes when the schema does. 
+ "--formatters", + "ruff-format", + ], + cwd=sdk_root(), + ) + _normalize_generated_timestamps(out_path) + + +def _notification_specs() -> list[tuple[str, str]]: + server_notifications = json.loads( + (schema_root_dir() / "ServerNotification.json").read_text() + ) + one_of = server_notifications.get("oneOf", []) + generated_source = ( + sdk_root() / "src" / "codex_app_server" / "generated" / "v2_all.py" + ).read_text() + + specs: list[tuple[str, str]] = [] + + for variant in one_of: + props = variant.get("properties", {}) + method_meta = props.get("method", {}) + params_meta = props.get("params", {}) + + methods = method_meta.get("enum", []) + if len(methods) != 1: + continue + method = methods[0] + if not isinstance(method, str): + continue + + ref = params_meta.get("$ref") + if not isinstance(ref, str) or not ref.startswith("#/definitions/"): + continue + class_name = ref.split("/")[-1] + if ( + f"class {class_name}(" not in generated_source + and f"{class_name} =" not in generated_source + ): + # Skip schema variants that are not emitted into the generated v2 surface. 
+ continue + specs.append((method, class_name)) + + specs.sort() + return specs + + +def generate_notification_registry() -> None: + out = ( + sdk_root() + / "src" + / "codex_app_server" + / "generated" + / "notification_registry.py" + ) + specs = _notification_specs() + class_names = sorted({class_name for _, class_name in specs}) + + lines = [ + "# Auto-generated by scripts/update_sdk_artifacts.py", + "# DO NOT EDIT MANUALLY.", + "", + "from __future__ import annotations", + "", + "from pydantic import BaseModel", + "", + ] + + for class_name in class_names: + lines.append(f"from .v2_all import {class_name}") + lines.extend( + [ + "", + "NOTIFICATION_MODELS: dict[str, type[BaseModel]] = {", + ] + ) + for method, class_name in specs: + lines.append(f' "{method}": {class_name},') + lines.extend(["}", ""]) + + out.write_text("\n".join(lines)) + + +def _normalize_generated_timestamps(root: Path) -> None: + timestamp_re = re.compile(r"^#\s+timestamp:\s+.+$", flags=re.MULTILINE) + py_files = [root] if root.is_file() else sorted(root.rglob("*.py")) + for py_file in py_files: + content = py_file.read_text() + normalized = timestamp_re.sub("# timestamp: ", content) + if normalized != content: + py_file.write_text(normalized) + + +FIELD_ANNOTATION_OVERRIDES: dict[str, str] = { + # Keep public API typed without falling back to `Any`. 
+ "config": "JsonObject", + "output_schema": "JsonObject", +} + + +@dataclass(slots=True) +class PublicFieldSpec: + wire_name: str + py_name: str + annotation: str + required: bool + + +@dataclass(frozen=True) +class CliOps: + generate_types: Callable[[], None] + stage_python_sdk_package: Callable[[Path, str, str], Path] + stage_python_runtime_package: Callable[[Path, str, Path], Path] + current_sdk_version: Callable[[], str] + + +def _annotation_to_source(annotation: Any) -> str: + origin = get_origin(annotation) + if origin is typing.Annotated: + return _annotation_to_source(get_args(annotation)[0]) + if origin in (typing.Union, types.UnionType): + parts: list[str] = [] + for arg in get_args(annotation): + rendered = _annotation_to_source(arg) + if rendered not in parts: + parts.append(rendered) + return " | ".join(parts) + if origin is list: + args = get_args(annotation) + item = _annotation_to_source(args[0]) if args else "Any" + return f"list[{item}]" + if origin is dict: + args = get_args(annotation) + key = _annotation_to_source(args[0]) if args else "str" + val = _annotation_to_source(args[1]) if len(args) > 1 else "Any" + return f"dict[{key}, {val}]" + if annotation is Any or annotation is typing.Any: + return "Any" + if annotation is None or annotation is type(None): + return "None" + if isinstance(annotation, type): + if annotation.__module__ == "builtins": + return annotation.__name__ + return annotation.__name__ + return repr(annotation) + + +def _camel_to_snake(name: str) -> str: + head = re.sub(r"(.)([A-Z][a-z]+)", r"\1_\2", name) + return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", head).lower() + + +def _load_public_fields( + module_name: str, class_name: str, *, exclude: set[str] | None = None +) -> list[PublicFieldSpec]: + exclude = exclude or set() + module = importlib.import_module(module_name) + model = getattr(module, class_name) + fields: list[PublicFieldSpec] = [] + for name, field in model.model_fields.items(): + if name in exclude: + continue 
def _replace_generated_block(source: str, block_name: str, body: str) -> str:
    """Swap the text between the BEGIN/END markers for *block_name* with *body*.

    Raises RuntimeError when the marker pair is missing so stale templates
    fail loudly instead of silently keeping old generated code.
    """
    begin = f"    # BEGIN GENERATED: {block_name}"
    end = f"    # END GENERATED: {block_name}"
    marker_re = re.compile(rf"(?s){re.escape(begin)}\n.*?\n{re.escape(end)}")
    new_text, replaced = marker_re.subn(
        f"{begin}\n{body.rstrip()}\n{end}", source, count=1
    )
    if replaced != 1:
        raise RuntimeError(f"Could not update generated block: {block_name}")
    return new_text
" )", + " return self._client.thread_list(params)", + "", + " def thread_resume(", + " self,", + " thread_id: str,", + " *,", + *_kw_signature_lines(resume_fields), + " ) -> Thread:", + " params = ThreadResumeParams(", + " thread_id=thread_id,", + *_model_arg_lines(resume_fields), + " )", + " resumed = self._client.thread_resume(thread_id, params)", + " return Thread(self._client, resumed.thread.id)", + "", + " def thread_fork(", + " self,", + " thread_id: str,", + " *,", + *_kw_signature_lines(fork_fields), + " ) -> Thread:", + " params = ThreadForkParams(", + " thread_id=thread_id,", + *_model_arg_lines(fork_fields), + " )", + " forked = self._client.thread_fork(thread_id, params)", + " return Thread(self._client, forked.thread.id)", + "", + " def thread_archive(self, thread_id: str) -> ThreadArchiveResponse:", + " return self._client.thread_archive(thread_id)", + "", + " def thread_unarchive(self, thread_id: str) -> Thread:", + " unarchived = self._client.thread_unarchive(thread_id)", + " return Thread(self._client, unarchived.thread.id)", + ] + return "\n".join(lines) + + +def _render_async_codex_block( + thread_start_fields: list[PublicFieldSpec], + thread_list_fields: list[PublicFieldSpec], + resume_fields: list[PublicFieldSpec], + fork_fields: list[PublicFieldSpec], +) -> str: + lines = [ + " async def thread_start(", + " self,", + " *,", + *_kw_signature_lines(thread_start_fields), + " ) -> AsyncThread:", + " await self._ensure_initialized()", + " params = ThreadStartParams(", + *_model_arg_lines(thread_start_fields), + " )", + " started = await self._client.thread_start(params)", + " return AsyncThread(self, started.thread.id)", + "", + " async def thread_list(", + " self,", + " *,", + *_kw_signature_lines(thread_list_fields), + " ) -> ThreadListResponse:", + " await self._ensure_initialized()", + " params = ThreadListParams(", + *_model_arg_lines(thread_list_fields), + " )", + " return await self._client.thread_list(params)", + "", + " async def 
def _render_thread_block(
    turn_fields: list[PublicFieldSpec],
) -> str:
    """Render the generated `Thread.turn` method body (sync public API).

    The returned string is spliced between the `Thread.flat_methods`
    BEGIN/END markers in public_api.py; `turn_fields` supplies the
    keyword-only parameters mirrored from `TurnStartParams`.
    """
    lines = [
        "    def turn(",
        "        self,",
        "        input: Input,",
        "        *,",
        # Keyword-only params generated from the TurnStartParams model fields.
        *_kw_signature_lines(turn_fields),
        "    ) -> Turn:",
        "        wire_input = _to_wire_input(input)",
        "        params = TurnStartParams(",
        "            thread_id=self.id,",
        "            input=wire_input,",
        *_model_arg_lines(turn_fields),
        "        )",
        "        turn = self._client.turn_start(self.id, wire_input, params=params)",
        "        return Turn(self._client, self.id, turn.turn.id)",
    ]
    return "\n".join(lines)
self._codex._ensure_initialized()", + " wire_input = _to_wire_input(input)", + " params = TurnStartParams(", + " thread_id=self.id,", + " input=wire_input,", + *_model_arg_lines(turn_fields), + " )", + " turn = await self._codex._client.turn_start(", + " self.id,", + " wire_input,", + " params=params,", + " )", + " return AsyncTurn(self._codex, self.id, turn.turn.id)", + ] + return "\n".join(lines) + + +def generate_public_api_flat_methods() -> None: + src_dir = sdk_root() / "src" + public_api_path = src_dir / "codex_app_server" / "public_api.py" + if not public_api_path.exists(): + # PR2 can run codegen before the ergonomic public API layer is added. + return + src_dir_str = str(src_dir) + if src_dir_str not in sys.path: + sys.path.insert(0, src_dir_str) + + thread_start_fields = _load_public_fields( + "codex_app_server.generated.v2_all", + "ThreadStartParams", + ) + thread_list_fields = _load_public_fields( + "codex_app_server.generated.v2_all", + "ThreadListParams", + ) + thread_resume_fields = _load_public_fields( + "codex_app_server.generated.v2_all", + "ThreadResumeParams", + exclude={"thread_id"}, + ) + thread_fork_fields = _load_public_fields( + "codex_app_server.generated.v2_all", + "ThreadForkParams", + exclude={"thread_id"}, + ) + turn_start_fields = _load_public_fields( + "codex_app_server.generated.v2_all", + "TurnStartParams", + exclude={"thread_id", "input"}, + ) + + source = public_api_path.read_text() + source = _replace_generated_block( + source, + "Codex.flat_methods", + _render_codex_block( + thread_start_fields, + thread_list_fields, + thread_resume_fields, + thread_fork_fields, + ), + ) + source = _replace_generated_block( + source, + "AsyncCodex.flat_methods", + _render_async_codex_block( + thread_start_fields, + thread_list_fields, + thread_resume_fields, + thread_fork_fields, + ), + ) + source = _replace_generated_block( + source, + "Thread.flat_methods", + _render_thread_block(turn_start_fields), + ) + source = _replace_generated_block( + 
def build_parser() -> argparse.ArgumentParser:
    """Build the argument parser for the SDK maintenance entrypoint.

    Commands: `generate-types`, `stage-sdk`, and `stage-runtime`.
    """
    parser = argparse.ArgumentParser(description="Single SDK maintenance entrypoint")
    commands = parser.add_subparsers(dest="command", required=True)

    # Codegen only; takes no arguments.
    commands.add_parser(
        "generate-types", help="Regenerate Python protocol-derived types"
    )

    # Stage the SDK package, pinned to a specific runtime version.
    stage_sdk = commands.add_parser(
        "stage-sdk",
        help="Stage a releasable SDK package pinned to a runtime version",
    )
    stage_sdk.add_argument(
        "staging_dir",
        type=Path,
        help="Output directory for the staged SDK package",
    )
    stage_sdk.add_argument(
        "--runtime-version",
        required=True,
        help="Pinned codex-cli-bin version for the staged SDK package",
    )
    stage_sdk.add_argument(
        "--sdk-version",
        help="Version to write into the staged SDK package (defaults to sdk/python current version)",
    )

    # Stage the platform-specific runtime package wrapping a codex binary.
    stage_runtime = commands.add_parser(
        "stage-runtime",
        help="Stage a releasable runtime package for the current platform",
    )
    stage_runtime.add_argument(
        "staging_dir",
        type=Path,
        help="Output directory for the staged runtime package",
    )
    stage_runtime.add_argument(
        "runtime_binary",
        type=Path,
        help="Path to the codex binary to package for this platform",
    )
    stage_runtime.add_argument(
        "--runtime-version",
        required=True,
        help="Version to write into the staged runtime package",
    )
    return parser
def run_command(args: argparse.Namespace, ops: CliOps) -> None:
    """Dispatch a parsed CLI invocation onto the injected operations."""
    command = args.command
    if command == "generate-types":
        ops.generate_types()
        return
    if command == "stage-sdk":
        # A staged SDK always embeds freshly generated types.
        ops.generate_types()
        sdk_version = args.sdk_version or ops.current_sdk_version()
        ops.stage_python_sdk_package(
            args.staging_dir,
            sdk_version,
            args.runtime_version,
        )
        return
    if command == "stage-runtime":
        ops.stage_python_runtime_package(
            args.staging_dir,
            args.runtime_version,
            args.runtime_binary.resolve(),
        )
def _params_dict(
    params: (
        V2ThreadStartParams
        | V2ThreadResumeParams
        | V2ThreadListParams
        | V2ThreadForkParams
        | V2TurnStartParams
        | JsonObject
        | None
    ),
) -> JsonObject:
    """Coerce request params (generated model, plain dict, or None) to a JSON dict.

    Models are serialized with camelCase aliases and without None fields so the
    payload matches the wire protocol exactly.
    """
    if params is None:
        return {}
    if hasattr(params, "model_dump"):
        payload = params.model_dump(
            by_alias=True,
            exclude_none=True,
            mode="json",
        )
        if not isinstance(payload, dict):
            raise TypeError("Expected model_dump() to return dict")
        return payload
    if isinstance(params, dict):
        return params
    raise TypeError(f"Expected generated params model or dict, got {type(params).__name__}")
def resolve_codex_bin(config: "AppServerConfig", ops: CodexBinResolverOps) -> Path:
    """Resolve the codex binary: explicit config override, else the bundled runtime.

    Raises FileNotFoundError if an explicitly configured path does not exist.
    """
    if config.codex_bin is None:
        # No override configured; defer to the pinned runtime package.
        return ops.installed_codex_path()

    explicit = Path(config.codex_bin)
    if not ops.path_exists(explicit):
        raise FileNotFoundError(
            f"Codex binary not found at {explicit}. Set AppServerConfig.codex_bin "
            "to a valid binary path."
        )
    return explicit
    def start(self) -> None:
        """Launch the `codex app-server` subprocess over stdio.

        Idempotent: a second call while the process handle is set is a no-op.
        """
        if self._proc is not None:
            return

        if self.config.launch_args_override is not None:
            # Override replaces the binary invocation prefix only; the
            # `--config` flags and `app-server` subcommand are still appended.
            args = list(self.config.launch_args_override)
        else:
            codex_bin = _resolve_codex_bin(self.config)
            args = [str(codex_bin)]
        for kv in self.config.config_overrides:
            args.extend(["--config", kv])
        args.extend(["app-server", "--listen", "stdio://"])

        # Child inherits our environment with config-provided entries on top.
        env = os.environ.copy()
        if self.config.env:
            env.update(self.config.env)

        self._proc = subprocess.Popen(
            args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            cwd=self.config.cwd,
            env=env,
            bufsize=1,  # line-buffered: JSON-RPC messages are newline-delimited
        )

        self._start_stderr_drain_thread()
    def _request_raw(self, method: str, params: JsonObject | None = None) -> JsonValue:
        """Send one JSON-RPC request and block until its response arrives.

        While waiting, server-to-client requests (e.g. approvals) are answered
        inline and notifications are queued for later consumption via
        next_notification(). Only one request is outstanding at a time.
        """
        request_id = str(uuid.uuid4())
        self._write_message({"id": request_id, "method": method, "params": params or {}})

        while True:
            msg = self._read_message()

            # Server-initiated request (both "method" and "id" present):
            # answer it synchronously and keep waiting for our response.
            if "method" in msg and "id" in msg:
                response = self._handle_server_request(msg)
                self._write_message({"id": msg["id"], "result": response})
                continue

            # Notification ("method" without "id"): buffer it for callers.
            if "method" in msg and "id" not in msg:
                self._pending_notifications.append(
                    self._coerce_notification(msg["method"], msg.get("params"))
                )
                continue

            # Response for some other request id: ignore it.
            if msg.get("id") != request_id:
                continue

            if "error" in msg:
                err = msg["error"]
                if isinstance(err, dict):
                    # Map onto the typed SDK exception hierarchy.
                    raise map_jsonrpc_error(
                        int(err.get("code", -32000)),
                        str(err.get("message", "unknown")),
                        err.get("data"),
                    )
                raise AppServerError("Malformed JSON-RPC error response")

            return msg.get("result")
True: + msg = self._read_message() + if "method" in msg and "id" in msg: + response = self._handle_server_request(msg) + self._write_message({"id": msg["id"], "result": response}) + continue + if "method" in msg and "id" not in msg: + return self._coerce_notification(msg["method"], msg.get("params")) + + def acquire_turn_consumer(self, turn_id: str) -> None: + with self._turn_consumer_lock: + if self._active_turn_consumer is not None: + raise RuntimeError( + "Concurrent turn consumers are not yet supported in the experimental SDK. " + f"Client is already streaming turn {self._active_turn_consumer!r}; " + f"cannot start turn {turn_id!r} until the active consumer finishes." + ) + self._active_turn_consumer = turn_id + + def release_turn_consumer(self, turn_id: str) -> None: + with self._turn_consumer_lock: + if self._active_turn_consumer == turn_id: + self._active_turn_consumer = None + + def thread_start(self, params: V2ThreadStartParams | JsonObject | None = None) -> ThreadStartResponse: + return self.request("thread/start", _params_dict(params), response_model=ThreadStartResponse) + + def thread_resume( + self, + thread_id: str, + params: V2ThreadResumeParams | JsonObject | None = None, + ) -> ThreadResumeResponse: + payload = {"threadId": thread_id, **_params_dict(params)} + return self.request("thread/resume", payload, response_model=ThreadResumeResponse) + + def thread_list(self, params: V2ThreadListParams | JsonObject | None = None) -> ThreadListResponse: + return self.request("thread/list", _params_dict(params), response_model=ThreadListResponse) + + def thread_read(self, thread_id: str, include_turns: bool = False) -> ThreadReadResponse: + return self.request( + "thread/read", + {"threadId": thread_id, "includeTurns": include_turns}, + response_model=ThreadReadResponse, + ) + + def thread_fork( + self, + thread_id: str, + params: V2ThreadForkParams | JsonObject | None = None, + ) -> ThreadForkResponse: + payload = {"threadId": thread_id, 
    def turn_start(
        self,
        thread_id: str,
        input_items: list[JsonObject] | JsonObject | str,
        params: V2TurnStartParams | JsonObject | None = None,
    ) -> TurnStartResponse:
        """Start a new turn on *thread_id*.

        `input_items` may be a plain string, a single item dict, or a list of
        item dicts; it is normalized to the wire `input` list. The explicit
        `threadId`/`input` entries override anything present in `params`
        because they are merged after the params dict.
        """
        payload = {
            **_params_dict(params),
            "threadId": thread_id,
            "input": self._normalize_input_items(input_items),
        }
        return self.request("turn/start", payload, response_model=TurnStartResponse)
response_model=ModelListResponse, + ) + + def request_with_retry_on_overload( + self, + method: str, + params: JsonObject | None, + *, + response_model: type[ModelT], + max_attempts: int = 3, + initial_delay_s: float = 0.25, + max_delay_s: float = 2.0, + ) -> ModelT: + return retry_on_overload( + lambda: self.request(method, params, response_model=response_model), + max_attempts=max_attempts, + initial_delay_s=initial_delay_s, + max_delay_s=max_delay_s, + ) + + def wait_for_turn_completed(self, turn_id: str) -> TurnCompletedNotification: + while True: + notification = self.next_notification() + if ( + notification.method == "turn/completed" + and isinstance(notification.payload, TurnCompletedNotification) + and notification.payload.turn.id == turn_id + ): + return notification.payload + + def stream_until_methods(self, methods: Iterable[str] | str) -> list[Notification]: + target_methods = {methods} if isinstance(methods, str) else set(methods) + out: list[Notification] = [] + while True: + notification = self.next_notification() + out.append(notification) + if notification.method in target_methods: + return out + + def stream_text( + self, + thread_id: str, + text: str, + params: V2TurnStartParams | JsonObject | None = None, + ) -> Iterator[AgentMessageDeltaNotification]: + started = self.turn_start(thread_id, text, params=params) + turn_id = started.turn.id + while True: + notification = self.next_notification() + if ( + notification.method == "item/agentMessage/delta" + and isinstance(notification.payload, AgentMessageDeltaNotification) + and notification.payload.turn_id == turn_id + ): + yield notification.payload + continue + if ( + notification.method == "turn/completed" + and isinstance(notification.payload, TurnCompletedNotification) + and notification.payload.turn.id == turn_id + ): + break + + def _coerce_notification(self, method: str, params: object) -> Notification: + params_dict = params if isinstance(params, dict) else {} + + model = 
NOTIFICATION_MODELS.get(method) + if model is None: + return Notification(method=method, payload=UnknownNotification(params=params_dict)) + + try: + payload = model.model_validate(params_dict) + except Exception: # noqa: BLE001 + return Notification(method=method, payload=UnknownNotification(params=params_dict)) + return Notification(method=method, payload=payload) + + def _normalize_input_items( + self, + input_items: list[JsonObject] | JsonObject | str, + ) -> list[JsonObject]: + if isinstance(input_items, str): + return [{"type": "text", "text": input_items}] + if isinstance(input_items, dict): + return [input_items] + return input_items + + def _default_approval_handler(self, method: str, params: JsonObject | None) -> JsonObject: + if method == "item/commandExecution/requestApproval": + return {"decision": "accept"} + if method == "item/fileChange/requestApproval": + return {"decision": "accept"} + return {} + + def _start_stderr_drain_thread(self) -> None: + if self._proc is None or self._proc.stderr is None: + return + + def _drain() -> None: + stderr = self._proc.stderr + if stderr is None: + return + for line in stderr: + self._stderr_lines.append(line.rstrip("\n")) + + self._stderr_thread = threading.Thread(target=_drain, daemon=True) + self._stderr_thread.start() + + def _stderr_tail(self, limit: int = 40) -> str: + return "\n".join(list(self._stderr_lines)[-limit:]) + + def _handle_server_request(self, msg: dict[str, JsonValue]) -> JsonObject: + method = msg["method"] + params = msg.get("params") + if not isinstance(method, str): + return {} + return self._approval_handler( + method, + params if isinstance(params, dict) else None, + ) + + def _write_message(self, payload: JsonObject) -> None: + if self._proc is None or self._proc.stdin is None: + raise TransportClosedError("app-server is not running") + with self._lock: + self._proc.stdin.write(json.dumps(payload) + "\n") + self._proc.stdin.flush() + + def _read_message(self) -> dict[str, JsonValue]: + 
def default_codex_home() -> str:
    """Return the default Codex home directory (``~/.codex``) as a string."""
    return str(Path.home().joinpath(".codex"))
def _is_server_overloaded(data: Any) -> bool:
    """Recursively detect a `server_overloaded` marker anywhere in error data.

    The marker may appear as a bare string, under any of the known error-info
    keys (snake_case or camelCase), nested one level inside an error-info
    dict, or anywhere deeper inside dict/list structures.
    """
    if data is None:
        return False

    if isinstance(data, str):
        return data.lower() == "server_overloaded"

    if isinstance(data, dict):
        marker = (
            data.get("codex_error_info")
            or data.get("codexErrorInfo")
            or data.get("errorInfo")
        )
        if isinstance(marker, str) and marker.lower() == "server_overloaded":
            return True
        if isinstance(marker, dict) and any(
            isinstance(value, str) and value.lower() == "server_overloaded"
            for value in marker.values()
        ):
            return True
        # Fall back to a deep scan of every nested value.
        return any(_is_server_overloaded(value) for value in data.values())

    if isinstance(data, list):
        return any(_is_server_overloaded(value) for value in data)

    return False
_is_server_overloaded(exc.data) + + return False diff --git a/sdk/python/src/codex_app_server/generated/__init__.py b/sdk/python/src/codex_app_server/generated/__init__.py new file mode 100644 index 000000000..d7b3f674b --- /dev/null +++ b/sdk/python/src/codex_app_server/generated/__init__.py @@ -0,0 +1 @@ +"""Auto-generated Python types derived from the app-server schemas.""" diff --git a/sdk/python/src/codex_app_server/generated/notification_registry.py b/sdk/python/src/codex_app_server/generated/notification_registry.py new file mode 100644 index 000000000..ef5b182d7 --- /dev/null +++ b/sdk/python/src/codex_app_server/generated/notification_registry.py @@ -0,0 +1,102 @@ +# Auto-generated by scripts/update_sdk_artifacts.py +# DO NOT EDIT MANUALLY. + +from __future__ import annotations + +from pydantic import BaseModel + +from .v2_all import AccountLoginCompletedNotification +from .v2_all import AccountRateLimitsUpdatedNotification +from .v2_all import AccountUpdatedNotification +from .v2_all import AgentMessageDeltaNotification +from .v2_all import AppListUpdatedNotification +from .v2_all import CommandExecOutputDeltaNotification +from .v2_all import CommandExecutionOutputDeltaNotification +from .v2_all import ConfigWarningNotification +from .v2_all import ContextCompactedNotification +from .v2_all import DeprecationNoticeNotification +from .v2_all import ErrorNotification +from .v2_all import FileChangeOutputDeltaNotification +from .v2_all import FuzzyFileSearchSessionCompletedNotification +from .v2_all import FuzzyFileSearchSessionUpdatedNotification +from .v2_all import HookCompletedNotification +from .v2_all import HookStartedNotification +from .v2_all import ItemCompletedNotification +from .v2_all import ItemStartedNotification +from .v2_all import McpServerOauthLoginCompletedNotification +from .v2_all import McpToolCallProgressNotification +from .v2_all import ModelReroutedNotification +from .v2_all import PlanDeltaNotification +from .v2_all import 
ReasoningSummaryPartAddedNotification +from .v2_all import ReasoningSummaryTextDeltaNotification +from .v2_all import ReasoningTextDeltaNotification +from .v2_all import ServerRequestResolvedNotification +from .v2_all import SkillsChangedNotification +from .v2_all import TerminalInteractionNotification +from .v2_all import ThreadArchivedNotification +from .v2_all import ThreadClosedNotification +from .v2_all import ThreadNameUpdatedNotification +from .v2_all import ThreadRealtimeClosedNotification +from .v2_all import ThreadRealtimeErrorNotification +from .v2_all import ThreadRealtimeItemAddedNotification +from .v2_all import ThreadRealtimeOutputAudioDeltaNotification +from .v2_all import ThreadRealtimeStartedNotification +from .v2_all import ThreadStartedNotification +from .v2_all import ThreadStatusChangedNotification +from .v2_all import ThreadTokenUsageUpdatedNotification +from .v2_all import ThreadUnarchivedNotification +from .v2_all import TurnCompletedNotification +from .v2_all import TurnDiffUpdatedNotification +from .v2_all import TurnPlanUpdatedNotification +from .v2_all import TurnStartedNotification +from .v2_all import WindowsSandboxSetupCompletedNotification +from .v2_all import WindowsWorldWritableWarningNotification + +NOTIFICATION_MODELS: dict[str, type[BaseModel]] = { + "account/login/completed": AccountLoginCompletedNotification, + "account/rateLimits/updated": AccountRateLimitsUpdatedNotification, + "account/updated": AccountUpdatedNotification, + "app/list/updated": AppListUpdatedNotification, + "command/exec/outputDelta": CommandExecOutputDeltaNotification, + "configWarning": ConfigWarningNotification, + "deprecationNotice": DeprecationNoticeNotification, + "error": ErrorNotification, + "fuzzyFileSearch/sessionCompleted": FuzzyFileSearchSessionCompletedNotification, + "fuzzyFileSearch/sessionUpdated": FuzzyFileSearchSessionUpdatedNotification, + "hook/completed": HookCompletedNotification, + "hook/started": HookStartedNotification, + 
"item/agentMessage/delta": AgentMessageDeltaNotification, + "item/commandExecution/outputDelta": CommandExecutionOutputDeltaNotification, + "item/commandExecution/terminalInteraction": TerminalInteractionNotification, + "item/completed": ItemCompletedNotification, + "item/fileChange/outputDelta": FileChangeOutputDeltaNotification, + "item/mcpToolCall/progress": McpToolCallProgressNotification, + "item/plan/delta": PlanDeltaNotification, + "item/reasoning/summaryPartAdded": ReasoningSummaryPartAddedNotification, + "item/reasoning/summaryTextDelta": ReasoningSummaryTextDeltaNotification, + "item/reasoning/textDelta": ReasoningTextDeltaNotification, + "item/started": ItemStartedNotification, + "mcpServer/oauthLogin/completed": McpServerOauthLoginCompletedNotification, + "model/rerouted": ModelReroutedNotification, + "serverRequest/resolved": ServerRequestResolvedNotification, + "skills/changed": SkillsChangedNotification, + "thread/archived": ThreadArchivedNotification, + "thread/closed": ThreadClosedNotification, + "thread/compacted": ContextCompactedNotification, + "thread/name/updated": ThreadNameUpdatedNotification, + "thread/realtime/closed": ThreadRealtimeClosedNotification, + "thread/realtime/error": ThreadRealtimeErrorNotification, + "thread/realtime/itemAdded": ThreadRealtimeItemAddedNotification, + "thread/realtime/outputAudio/delta": ThreadRealtimeOutputAudioDeltaNotification, + "thread/realtime/started": ThreadRealtimeStartedNotification, + "thread/started": ThreadStartedNotification, + "thread/status/changed": ThreadStatusChangedNotification, + "thread/tokenUsage/updated": ThreadTokenUsageUpdatedNotification, + "thread/unarchived": ThreadUnarchivedNotification, + "turn/completed": TurnCompletedNotification, + "turn/diff/updated": TurnDiffUpdatedNotification, + "turn/plan/updated": TurnPlanUpdatedNotification, + "turn/started": TurnStartedNotification, + "windows/worldWritableWarning": WindowsWorldWritableWarningNotification, + 
"windowsSandbox/setupCompleted": WindowsSandboxSetupCompletedNotification, +} diff --git a/sdk/python/src/codex_app_server/generated/v2_all.py b/sdk/python/src/codex_app_server/generated/v2_all.py new file mode 100644 index 000000000..f746fc771 --- /dev/null +++ b/sdk/python/src/codex_app_server/generated/v2_all.py @@ -0,0 +1,8407 @@ +# generated by datamodel-codegen: +# filename: codex_app_server_protocol.v2.schemas.json + +from __future__ import annotations +from pydantic import BaseModel, ConfigDict, Field, RootModel +from typing import Annotated, Any, Literal +from enum import Enum + + +class CodexAppServerProtocolV2(BaseModel): + pass + model_config = ConfigDict( + populate_by_name=True, + ) + + +class AbsolutePathBuf(RootModel[str]): + model_config = ConfigDict( + populate_by_name=True, + ) + root: Annotated[ + str, + Field( + description="A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. If no base path is set, the deserialization will fail unless the path being deserialized is already absolute." 
+ ), + ] + + +class ApiKeyAccount(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[Literal["apiKey"], Field(title="ApiKeyAccountType")] + + +class AccountLoginCompletedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + error: str | None = None + login_id: Annotated[str | None, Field(alias="loginId")] = None + success: bool + + +class TextAgentMessageContent(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + text: str + type: Annotated[Literal["Text"], Field(title="TextAgentMessageContentType")] + + +class AgentMessageContent(RootModel[TextAgentMessageContent]): + model_config = ConfigDict( + populate_by_name=True, + ) + root: TextAgentMessageContent + + +class AgentMessageDeltaNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + delta: str + item_id: Annotated[str, Field(alias="itemId")] + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] + + +class CompletedAgentStatus(BaseModel): + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + ) + completed: str | None = None + + +class ErroredAgentStatus(BaseModel): + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + ) + errored: str + + +class AgentStatus( + RootModel[ + Literal["pending_init"] + | Literal["running"] + | CompletedAgentStatus + | ErroredAgentStatus + | Literal["shutdown"] + | Literal["not_found"] + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: Annotated[ + Literal["pending_init"] + | Literal["running"] + | CompletedAgentStatus + | ErroredAgentStatus + | Literal["shutdown"] + | Literal["not_found"], + Field(description="Agent lifecycle status, derived from emitted events."), + ] + + +class AnalyticsConfig(BaseModel): + model_config = ConfigDict( + extra="allow", + populate_by_name=True, + ) + enabled: bool | None = None + + +class AppBranding(BaseModel): + 
model_config = ConfigDict( + populate_by_name=True, + ) + category: str | None = None + developer: str | None = None + is_discoverable_app: Annotated[bool, Field(alias="isDiscoverableApp")] + privacy_policy: Annotated[str | None, Field(alias="privacyPolicy")] = None + terms_of_service: Annotated[str | None, Field(alias="termsOfService")] = None + website: str | None = None + + +class AppReview(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + status: str + + +class AppScreenshot(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + file_id: Annotated[str | None, Field(alias="fileId")] = None + url: str | None = None + user_prompt: Annotated[str, Field(alias="userPrompt")] + + +class AppSummary(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + description: str | None = None + id: str + install_url: Annotated[str | None, Field(alias="installUrl")] = None + name: str + + +class AppToolApproval(Enum): + auto = "auto" + prompt = "prompt" + approve = "approve" + + +class AppToolConfig(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + approval_mode: AppToolApproval | None = None + enabled: bool | None = None + + +class AppToolsConfig(BaseModel): + pass + model_config = ConfigDict( + populate_by_name=True, + ) + + +class AppsDefaultConfig(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + destructive_enabled: bool | None = True + enabled: bool | None = True + open_world_enabled: bool | None = True + + +class AppsListParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + cursor: Annotated[ + str | None, + Field(description="Opaque pagination cursor returned by a previous call."), + ] = None + force_refetch: Annotated[ + bool | None, + Field( + alias="forceRefetch", + description="When true, bypass app caches and fetch the latest data from sources.", + ), + ] = None + limit: Annotated[ + int | None, + Field( + description="Optional page size; 
defaults to a reasonable server-side value.", + ge=0, + ), + ] = None + thread_id: Annotated[ + str | None, + Field( + alias="threadId", + description="Optional thread id used to evaluate app feature gating from that thread's config.", + ), + ] = None + + +class AskForApprovalValue(Enum): + untrusted = "untrusted" + on_failure = "on-failure" + on_request = "on-request" + never = "never" + + +class Reject(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + mcp_elicitations: bool + request_permissions: bool | None = False + rules: bool + sandbox_approval: bool + skill_approval: bool | None = False + + +class RejectAskForApproval(BaseModel): + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + ) + reject: Reject + + +class AskForApproval(RootModel[AskForApprovalValue | RejectAskForApproval]): + model_config = ConfigDict( + populate_by_name=True, + ) + root: AskForApprovalValue | RejectAskForApproval + + +class AuthMode(Enum): + apikey = "apikey" + chatgpt = "chatgpt" + chatgpt_auth_tokens = "chatgptAuthTokens" + + +class ByteRange(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + end: Annotated[int, Field(ge=0)] + start: Annotated[int, Field(ge=0)] + + +class CallToolResult(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + field_meta: Annotated[Any | None, Field(alias="_meta")] = None + content: list + is_error: Annotated[bool | None, Field(alias="isError")] = None + structured_content: Annotated[Any | None, Field(alias="structuredContent")] = None + + +class CancelLoginAccountParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + login_id: Annotated[str, Field(alias="loginId")] + + +class CancelLoginAccountStatus(Enum): + canceled = "canceled" + not_found = "notFound" + + +class ClientInfo(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + name: str + title: str | None = None + version: str + + +class CodexErrorInfoValue(Enum): + 
context_window_exceeded = "contextWindowExceeded" + usage_limit_exceeded = "usageLimitExceeded" + server_overloaded = "serverOverloaded" + internal_server_error = "internalServerError" + unauthorized = "unauthorized" + bad_request = "badRequest" + thread_rollback_failed = "threadRollbackFailed" + sandbox_error = "sandboxError" + other = "other" + + +class HttpConnectionFailed(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + http_status_code: Annotated[int | None, Field(alias="httpStatusCode", ge=0)] = None + + +class HttpConnectionFailedCodexErrorInfo(BaseModel): + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + ) + http_connection_failed: Annotated[ + HttpConnectionFailed, Field(alias="httpConnectionFailed") + ] + + +class ResponseStreamConnectionFailed(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + http_status_code: Annotated[int | None, Field(alias="httpStatusCode", ge=0)] = None + + +class ResponseStreamConnectionFailedCodexErrorInfo(BaseModel): + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + ) + response_stream_connection_failed: Annotated[ + ResponseStreamConnectionFailed, Field(alias="responseStreamConnectionFailed") + ] + + +class ResponseStreamDisconnected(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + http_status_code: Annotated[int | None, Field(alias="httpStatusCode", ge=0)] = None + + +class ResponseStreamDisconnectedCodexErrorInfo(BaseModel): + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + ) + response_stream_disconnected: Annotated[ + ResponseStreamDisconnected, Field(alias="responseStreamDisconnected") + ] + + +class ResponseTooManyFailedAttempts(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + http_status_code: Annotated[int | None, Field(alias="httpStatusCode", ge=0)] = None + + +class ResponseTooManyFailedAttemptsCodexErrorInfo(BaseModel): + model_config = ConfigDict( + 
extra="forbid", + populate_by_name=True, + ) + response_too_many_failed_attempts: Annotated[ + ResponseTooManyFailedAttempts, Field(alias="responseTooManyFailedAttempts") + ] + + +class CodexErrorInfo( + RootModel[ + CodexErrorInfoValue + | HttpConnectionFailedCodexErrorInfo + | ResponseStreamConnectionFailedCodexErrorInfo + | ResponseStreamDisconnectedCodexErrorInfo + | ResponseTooManyFailedAttemptsCodexErrorInfo + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: Annotated[ + CodexErrorInfoValue + | HttpConnectionFailedCodexErrorInfo + | ResponseStreamConnectionFailedCodexErrorInfo + | ResponseStreamDisconnectedCodexErrorInfo + | ResponseTooManyFailedAttemptsCodexErrorInfo, + Field( + description="This translation layer make sure that we expose codex error code in camel case.\n\nWhen an upstream HTTP status is available (for example, from the Responses API or a provider), it is forwarded in `httpStatusCode` on the relevant `codexErrorInfo` variant." + ), + ] + + +class CollabAgentStatus(Enum): + pending_init = "pendingInit" + running = "running" + completed = "completed" + errored = "errored" + shutdown = "shutdown" + not_found = "notFound" + + +class CollabAgentTool(Enum): + spawn_agent = "spawnAgent" + send_input = "sendInput" + resume_agent = "resumeAgent" + wait = "wait" + close_agent = "closeAgent" + + +class CollabAgentToolCallStatus(Enum): + in_progress = "inProgress" + completed = "completed" + failed = "failed" + + +class ReadCommandAction(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + command: str + name: str + path: str + type: Annotated[Literal["read"], Field(title="ReadCommandActionType")] + + +class ListFilesCommandAction(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + command: str + path: str | None = None + type: Annotated[Literal["listFiles"], Field(title="ListFilesCommandActionType")] + + +class SearchCommandAction(BaseModel): + model_config = ConfigDict( + 
populate_by_name=True, + ) + command: str + path: str | None = None + query: str | None = None + type: Annotated[Literal["search"], Field(title="SearchCommandActionType")] + + +class UnknownCommandAction(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + command: str + type: Annotated[Literal["unknown"], Field(title="UnknownCommandActionType")] + + +class CommandAction( + RootModel[ + ReadCommandAction + | ListFilesCommandAction + | SearchCommandAction + | UnknownCommandAction + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: ( + ReadCommandAction + | ListFilesCommandAction + | SearchCommandAction + | UnknownCommandAction + ) + + +class CommandExecOutputStream(Enum): + stdout = "stdout" + stderr = "stderr" + + +class CommandExecResizeResponse(BaseModel): + pass + model_config = ConfigDict( + populate_by_name=True, + ) + + +class CommandExecResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + exit_code: Annotated[int, Field(alias="exitCode", description="Process exit code.")] + stderr: Annotated[ + str, + Field( + description="Buffered stderr capture.\n\nEmpty when stderr was streamed via `command/exec/outputDelta`." + ), + ] + stdout: Annotated[ + str, + Field( + description="Buffered stdout capture.\n\nEmpty when stdout was streamed via `command/exec/outputDelta`." 
+ ), + ] + + +class CommandExecTerminalSize(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + cols: Annotated[int, Field(description="Terminal width in character cells.", ge=0)] + rows: Annotated[int, Field(description="Terminal height in character cells.", ge=0)] + + +class CommandExecTerminateParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + process_id: Annotated[ + str, + Field( + alias="processId", + description="Client-supplied, connection-scoped `processId` from the original `command/exec` request.", + ), + ] + + +class CommandExecTerminateResponse(BaseModel): + pass + model_config = ConfigDict( + populate_by_name=True, + ) + + +class CommandExecWriteParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + close_stdin: Annotated[ + bool | None, + Field( + alias="closeStdin", + description="Close stdin after writing `deltaBase64`, if present.", + ), + ] = None + delta_base64: Annotated[ + str | None, + Field( + alias="deltaBase64", + description="Optional base64-encoded stdin bytes to write.", + ), + ] = None + process_id: Annotated[ + str, + Field( + alias="processId", + description="Client-supplied, connection-scoped `processId` from the original `command/exec` request.", + ), + ] + + +class CommandExecWriteResponse(BaseModel): + pass + model_config = ConfigDict( + populate_by_name=True, + ) + + +class CommandExecutionOutputDeltaNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + delta: str + item_id: Annotated[str, Field(alias="itemId")] + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] + + +class CommandExecutionStatus(Enum): + in_progress = "inProgress" + completed = "completed" + failed = "failed" + declined = "declined" + + +class MdmConfigLayerSource(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + domain: str + key: str + type: Annotated[Literal["mdm"], 
Field(title="MdmConfigLayerSourceType")] + + +class SystemConfigLayerSource(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + file: Annotated[ + AbsolutePathBuf, + Field( + description="This is the path to the system config.toml file, though it is not guaranteed to exist." + ), + ] + type: Annotated[Literal["system"], Field(title="SystemConfigLayerSourceType")] + + +class UserConfigLayerSource(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + file: Annotated[ + AbsolutePathBuf, + Field( + description="This is the path to the user's config.toml file, though it is not guaranteed to exist." + ), + ] + type: Annotated[Literal["user"], Field(title="UserConfigLayerSourceType")] + + +class ProjectConfigLayerSource(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + dot_codex_folder: Annotated[AbsolutePathBuf, Field(alias="dotCodexFolder")] + type: Annotated[Literal["project"], Field(title="ProjectConfigLayerSourceType")] + + +class SessionFlagsConfigLayerSource(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[ + Literal["sessionFlags"], Field(title="SessionFlagsConfigLayerSourceType") + ] + + +class LegacyManagedConfigTomlFromFileConfigLayerSource(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + file: AbsolutePathBuf + type: Annotated[ + Literal["legacyManagedConfigTomlFromFile"], + Field(title="LegacyManagedConfigTomlFromFileConfigLayerSourceType"), + ] + + +class LegacyManagedConfigTomlFromMdmConfigLayerSource(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[ + Literal["legacyManagedConfigTomlFromMdm"], + Field(title="LegacyManagedConfigTomlFromMdmConfigLayerSourceType"), + ] + + +class ConfigLayerSource( + RootModel[ + MdmConfigLayerSource + | SystemConfigLayerSource + | UserConfigLayerSource + | ProjectConfigLayerSource + | SessionFlagsConfigLayerSource + | LegacyManagedConfigTomlFromFileConfigLayerSource 
+ | LegacyManagedConfigTomlFromMdmConfigLayerSource + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: ( + MdmConfigLayerSource + | SystemConfigLayerSource + | UserConfigLayerSource + | ProjectConfigLayerSource + | SessionFlagsConfigLayerSource + | LegacyManagedConfigTomlFromFileConfigLayerSource + | LegacyManagedConfigTomlFromMdmConfigLayerSource + ) + + +class ConfigReadParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + cwd: Annotated[ + str | None, + Field( + description="Optional working directory to resolve project config layers. If specified, return the effective config as seen from that directory (i.e., including any project layers between `cwd` and the project/repo root)." + ), + ] = None + include_layers: Annotated[bool | None, Field(alias="includeLayers")] = False + + +class InputTextContentItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + text: str + type: Annotated[Literal["input_text"], Field(title="InputTextContentItemType")] + + +class InputImageContentItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + image_url: str + type: Annotated[Literal["input_image"], Field(title="InputImageContentItemType")] + + +class OutputTextContentItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + text: str + type: Annotated[Literal["output_text"], Field(title="OutputTextContentItemType")] + + +class ContentItem( + RootModel[InputTextContentItem | InputImageContentItem | OutputTextContentItem] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: InputTextContentItem | InputImageContentItem | OutputTextContentItem + + +class ContextCompactedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] + + +class CreditsSnapshot(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + balance: 
str | None = None + has_credits: Annotated[bool, Field(alias="hasCredits")] + unlimited: bool + + +class CustomPrompt(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + argument_hint: str | None = None + content: str + description: str | None = None + name: str + path: str + + +class DeprecationNoticeNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + details: Annotated[ + str | None, + Field( + description="Optional extra guidance, such as migration steps or rationale." + ), + ] = None + summary: Annotated[str, Field(description="Concise summary of what is deprecated.")] + + +class Duration(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + nanos: Annotated[int, Field(ge=0)] + secs: Annotated[int, Field(ge=0)] + + +class InputTextDynamicToolCallOutputContentItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + text: str + type: Annotated[ + Literal["inputText"], + Field(title="InputTextDynamicToolCallOutputContentItemType"), + ] + + +class InputImageDynamicToolCallOutputContentItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + image_url: Annotated[str, Field(alias="imageUrl")] + type: Annotated[ + Literal["inputImage"], + Field(title="InputImageDynamicToolCallOutputContentItemType"), + ] + + +class DynamicToolCallOutputContentItem( + RootModel[ + InputTextDynamicToolCallOutputContentItem + | InputImageDynamicToolCallOutputContentItem + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: ( + InputTextDynamicToolCallOutputContentItem + | InputImageDynamicToolCallOutputContentItem + ) + + +class DynamicToolCallStatus(Enum): + in_progress = "inProgress" + completed = "completed" + failed = "failed" + + +class DynamicToolSpec(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + description: str + input_schema: Annotated[Any, Field(alias="inputSchema")] + name: str + + +class FormElicitationRequest(BaseModel): 
+ model_config = ConfigDict( + populate_by_name=True, + ) + field_meta: Annotated[Any | None, Field(alias="_meta")] = None + message: str + mode: Annotated[Literal["form"], Field(title="FormElicitationRequestMode")] + requested_schema: Any + + +class UrlElicitationRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + field_meta: Annotated[Any | None, Field(alias="_meta")] = None + elicitation_id: str + message: str + mode: Annotated[Literal["url"], Field(title="UrlElicitationRequestMode")] + url: str + + +class ElicitationRequest(RootModel[FormElicitationRequest | UrlElicitationRequest]): + model_config = ConfigDict( + populate_by_name=True, + ) + root: FormElicitationRequest | UrlElicitationRequest + + +class ErrorEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + codex_error_info: CodexErrorInfo | None = None + message: str + type: Annotated[Literal["error"], Field(title="ErrorEventMsgType")] + + +class WarningEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + message: str + type: Annotated[Literal["warning"], Field(title="WarningEventMsgType")] + + +class RealtimeConversationStartedEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + session_id: str | None = None + type: Annotated[ + Literal["realtime_conversation_started"], + Field(title="RealtimeConversationStartedEventMsgType"), + ] + + +class RealtimeConversationClosedEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + reason: str | None = None + type: Annotated[ + Literal["realtime_conversation_closed"], + Field(title="RealtimeConversationClosedEventMsgType"), + ] + + +class ContextCompactedEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[ + Literal["context_compacted"], Field(title="ContextCompactedEventMsgType") + ] + + +class ThreadRolledBackEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + 
) + num_turns: Annotated[ + int, + Field(description="Number of user turns that were removed from context.", ge=0), + ] + type: Annotated[ + Literal["thread_rolled_back"], Field(title="ThreadRolledBackEventMsgType") + ] + + +class TaskCompleteEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + last_agent_message: str | None = None + turn_id: str + type: Annotated[Literal["task_complete"], Field(title="TaskCompleteEventMsgType")] + + +class AgentMessageDeltaEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + delta: str + type: Annotated[ + Literal["agent_message_delta"], Field(title="AgentMessageDeltaEventMsgType") + ] + + +class AgentReasoningEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + text: str + type: Annotated[ + Literal["agent_reasoning"], Field(title="AgentReasoningEventMsgType") + ] + + +class AgentReasoningDeltaEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + delta: str + type: Annotated[ + Literal["agent_reasoning_delta"], Field(title="AgentReasoningDeltaEventMsgType") + ] + + +class AgentReasoningRawContentEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + text: str + type: Annotated[ + Literal["agent_reasoning_raw_content"], + Field(title="AgentReasoningRawContentEventMsgType"), + ] + + +class AgentReasoningRawContentDeltaEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + delta: str + type: Annotated[ + Literal["agent_reasoning_raw_content_delta"], + Field(title="AgentReasoningRawContentDeltaEventMsgType"), + ] + + +class AgentReasoningSectionBreakEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + item_id: str | None = "" + summary_index: int | None = 0 + type: Annotated[ + Literal["agent_reasoning_section_break"], + Field(title="AgentReasoningSectionBreakEventMsgType"), + ] + + +class WebSearchBeginEventMsg(BaseModel): + model_config = 
ConfigDict( + populate_by_name=True, + ) + call_id: str + type: Annotated[ + Literal["web_search_begin"], Field(title="WebSearchBeginEventMsgType") + ] + + +class ImageGenerationBeginEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: str + type: Annotated[ + Literal["image_generation_begin"], + Field(title="ImageGenerationBeginEventMsgType"), + ] + + +class ImageGenerationEndEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: str + result: str + revised_prompt: str | None = None + saved_path: str | None = None + status: str + type: Annotated[ + Literal["image_generation_end"], Field(title="ImageGenerationEndEventMsgType") + ] + + +class TerminalInteractionEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[ + str, + Field( + description="Identifier for the ExecCommandBegin that produced this chunk." + ), + ] + process_id: Annotated[ + str, Field(description="Process id associated with the running command.") + ] + stdin: Annotated[str, Field(description="Stdin sent to the running session.")] + type: Annotated[ + Literal["terminal_interaction"], Field(title="TerminalInteractionEventMsgType") + ] + + +class ViewImageToolCallEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[ + str, Field(description="Identifier for the originating tool call.") + ] + path: Annotated[ + str, Field(description="Local filesystem path provided to the tool.") + ] + type: Annotated[ + Literal["view_image_tool_call"], Field(title="ViewImageToolCallEventMsgType") + ] + + +class DynamicToolCallRequestEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + arguments: Any + call_id: Annotated[str, Field(alias="callId")] + tool: str + turn_id: Annotated[str, Field(alias="turnId")] + type: Annotated[ + Literal["dynamic_tool_call_request"], + Field(title="DynamicToolCallRequestEventMsgType"), + ] + + 
# Auto-generated pydantic v2 models for the Codex app-server v2 protocol
# (event messages, RPC params/responses, and shared enums).
# NOTE(review): generated file — do not hand-edit; regenerate via
# sdk/python/scripts/update_sdk_artifacts.py. Comments here are review
# annotations only and will be lost on regeneration.

# Response half of a dynamic tool call, paired by call_id with
# DynamicToolCallRequestEventMsg.
class DynamicToolCallResponseEventMsg(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    arguments: Annotated[Any, Field(description="Dynamic tool call arguments.")]
    call_id: Annotated[
        str,
        Field(description="Identifier for the corresponding DynamicToolCallRequest."),
    ]
    content_items: Annotated[
        list[DynamicToolCallOutputContentItem],
        Field(description="Dynamic tool response content items."),
    ]
    duration: Annotated[
        Duration, Field(description="The duration of the dynamic tool call.")
    ]
    error: Annotated[
        str | None,
        Field(
            description="Optional error text when the tool call failed before producing a response."
        ),
    ] = None
    success: Annotated[bool, Field(description="Whether the tool call succeeded.")]
    tool: Annotated[str, Field(description="Dynamic tool name.")]
    turn_id: Annotated[
        str, Field(description="Turn ID that this dynamic tool call belongs to.")
    ]
    type: Annotated[
        Literal["dynamic_tool_call_response"],
        Field(title="DynamicToolCallResponseEventMsgType"),
    ]


# Server-pushed notice that a feature the client used is deprecated.
class DeprecationNoticeEventMsg(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    details: Annotated[
        str | None,
        Field(
            description="Optional extra guidance, such as migration steps or rationale."
        ),
    ] = None
    summary: Annotated[str, Field(description="Concise summary of what is deprecated.")]
    type: Annotated[
        Literal["deprecation_notice"], Field(title="DeprecationNoticeEventMsgType")
    ]


# Free-form informational event.
class BackgroundEventEventMsg(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    message: str
    type: Annotated[
        Literal["background_event"], Field(title="BackgroundEventEventMsgType")
    ]


# An undo operation has started.
class UndoStartedEventMsg(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    message: str | None = None
    type: Annotated[Literal["undo_started"], Field(title="UndoStartedEventMsgType")]


# An undo operation finished; `success` reports the outcome.
class UndoCompletedEventMsg(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    message: str | None = None
    success: bool
    type: Annotated[Literal["undo_completed"], Field(title="UndoCompletedEventMsgType")]


# Reports a transient streaming failure (retries may follow).
class StreamErrorEventMsg(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    additional_details: Annotated[
        str | None,
        Field(
            description="Optional details about the underlying stream failure (often the same human-readable message that is surfaced as the terminal error if retries are exhausted)."
        ),
    ] = None
    codex_error_info: CodexErrorInfo | None = None
    message: str
    type: Annotated[Literal["stream_error"], Field(title="StreamErrorEventMsgType")]


# Unified diff of all file changes made during the turn.
class TurnDiffEventMsg(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    type: Annotated[Literal["turn_diff"], Field(title="TurnDiffEventMsgType")]
    unified_diff: str


# Response event carrying the list of user-defined custom prompts.
class ListCustomPromptsResponseEventMsg(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    custom_prompts: list[CustomPrompt]
    type: Annotated[
        Literal["list_custom_prompts_response"],
        Field(title="ListCustomPromptsResponseEventMsgType"),
    ]


# A remote skill was downloaded to the local path given.
class RemoteSkillDownloadedEventMsg(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    id: str
    name: str
    path: str
    type: Annotated[
        Literal["remote_skill_downloaded"],
        Field(title="RemoteSkillDownloadedEventMsgType"),
    ]


# Marker event: the skills catalog has updates available. No payload.
class SkillsUpdateAvailableEventMsg(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    type: Annotated[
        Literal["skills_update_available"],
        Field(title="SkillsUpdateAvailableEventMsgType"),
    ]


# Marker event: server shutdown has completed. No payload.
class ShutdownCompleteEventMsg(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    type: Annotated[
        Literal["shutdown_complete"], Field(title="ShutdownCompleteEventMsgType")
    ]


# Item-scoped streaming delta of agent message content.
class AgentMessageContentDeltaEventMsg(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    delta: str
    item_id: str
    thread_id: str
    turn_id: str
    type: Annotated[
        Literal["agent_message_content_delta"],
        Field(title="AgentMessageContentDeltaEventMsgType"),
    ]


# Item-scoped streaming delta of plan content.
class PlanDeltaEventMsg(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    delta: str
    item_id: str
    thread_id: str
    turn_id: str
    type: Annotated[Literal["plan_delta"], Field(title="PlanDeltaEventMsgType")]


# Item-scoped streaming delta of reasoning summary content.
class ReasoningContentDeltaEventMsg(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    delta: str
    item_id: str
    summary_index: int | None = 0
    thread_id: str
    turn_id: str
    type: Annotated[
        Literal["reasoning_content_delta"],
        Field(title="ReasoningContentDeltaEventMsgType"),
    ]


# Item-scoped streaming delta of raw reasoning content.
class ReasoningRawContentDeltaEventMsg(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    content_index: int | None = 0
    delta: str
    item_id: str
    thread_id: str
    turn_id: str
    type: Annotated[
        Literal["reasoning_raw_content_delta"],
        Field(title="ReasoningRawContentDeltaEventMsgType"),
    ]


# Skill metadata attached to an exec approval request.
class ExecApprovalRequestSkillMetadata(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    path_to_skills_md: str


# Origin of an exec command.
class ExecCommandSource(Enum):
    agent = "agent"
    user_shell = "user_shell"
    unified_exec_startup = "unified_exec_startup"
    unified_exec_interaction = "unified_exec_interaction"


# Terminal state of an exec command.
class ExecCommandStatus(Enum):
    completed = "completed"
    failed = "failed"
    declined = "declined"


# Which output stream an exec chunk came from.
class ExecOutputStream(Enum):
    stdout = "stdout"
    stderr = "stderr"


# Cursor-paginated listing request for experimental features.
class ExperimentalFeatureListParams(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    cursor: Annotated[
        str | None,
        Field(description="Opaque pagination cursor returned by a previous call."),
    ] = None
    limit: Annotated[
        int | None,
        Field(
            description="Optional page size; defaults to a reasonable server-side value.",
            ge=0,
        ),
    ] = None


# Lifecycle stage of an experimental feature.
class ExperimentalFeatureStage(Enum):
    beta = "beta"
    under_development = "underDevelopment"
    stable = "stable"
    deprecated = "deprecated"
    removed = "removed"


# Parameters for detecting external agent configs (e.g. other CLIs' dotfiles).
class ExternalAgentConfigDetectParams(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    cwds: Annotated[
        list[str] | None,
        Field(
            description="Zero or more working directories to include for repo-scoped detection."
        ),
    ] = None
    include_home: Annotated[
        bool | None,
        Field(
            alias="includeHome",
            description="If true, include detection under the user's home (~/.claude, ~/.codex, etc.).",
        ),
    ] = None


# Empty response payload.
# NOTE(review): generator emits a redundant `pass` before `model_config`;
# harmless, but could be cleaned up in the generator template.
class ExternalAgentConfigImportResponse(BaseModel):
    pass
    model_config = ConfigDict(
        populate_by_name=True,
    )


# Kinds of external-agent config items eligible for migration.
class ExternalAgentConfigMigrationItemType(Enum):
    agents_md = "AGENTS_MD"
    config = "CONFIG"
    skills = "SKILLS"
    mcp_server_config = "MCP_SERVER_CONFIG"


# Parameters for uploading user feedback, optionally attaching logs.
class FeedbackUploadParams(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    classification: str
    extra_log_files: Annotated[list[str] | None, Field(alias="extraLogFiles")] = None
    include_logs: Annotated[bool, Field(alias="includeLogs")]
    reason: str | None = None
    thread_id: Annotated[str | None, Field(alias="threadId")] = None


class FeedbackUploadResponse(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    thread_id: Annotated[str, Field(alias="threadId")]


# --- File-change variants (discriminated by `type`) ---

class AddFileChange(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    content: str
    type: Annotated[Literal["add"], Field(title="AddFileChangeType")]


class DeleteFileChange(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    content: str
    type: Annotated[Literal["delete"], Field(title="DeleteFileChangeType")]


class UpdateFileChange(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    move_path: str | None = None
    type: Annotated[Literal["update"], Field(title="UpdateFileChangeType")]
    unified_diff: str


# Union wrapper over the three file-change variants.
class FileChange(RootModel[AddFileChange | DeleteFileChange | UpdateFileChange]):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    root: AddFileChange | DeleteFileChange | UpdateFileChange


# Streaming delta of file-change output, scoped to an item/turn/thread.
class FileChangeOutputDeltaNotification(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    delta: str
    item_id: Annotated[str, Field(alias="itemId")]
    thread_id: Annotated[str, Field(alias="threadId")]
    turn_id: Annotated[str, Field(alias="turnId")]


# Read/write path allow-lists for sandboxed filesystem access.
class FileSystemPermissions(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    read: list[AbsolutePathBuf] | None = None
    write: list[AbsolutePathBuf] | None = None


class ForcedLoginMethod(Enum):
    chatgpt = "chatgpt"
    api = "api"


class InputTextFunctionCallOutputContentItem(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    text: str
    type: Annotated[
        Literal["input_text"], Field(title="InputTextFunctionCallOutputContentItemType")
    ]


# --- Fuzzy file search RPC ---

class FuzzyFileSearchParams(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    cancellation_token: Annotated[str | None, Field(alias="cancellationToken")] = None
    query: str
    roots: list[str]


# Non-negative match index into the candidate path.
class Indice(RootModel[int]):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    root: Annotated[int, Field(ge=0)]


class FuzzyFileSearchResult(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    file_name: str
    indices: list[Indice] | None = None
    path: str
    root: str
    score: Annotated[int, Field(ge=0)]


class FuzzyFileSearchSessionCompletedNotification(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    session_id: Annotated[str, Field(alias="sessionId")]


class FuzzyFileSearchSessionUpdatedNotification(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    files: list[FuzzyFileSearchResult]
    query: str
    session_id: Annotated[str, Field(alias="sessionId")]


class GetAccountParams(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    refresh_token: Annotated[
        bool | None,
        Field(
            alias="refreshToken",
            description="When `true`, requests a proactive token refresh before returning.\n\nIn managed auth mode this triggers the normal refresh-token flow. In external auth mode this flag is ignored. Clients should refresh tokens themselves and call `account/login/start` with `chatgptAuthTokens`.",
        ),
    ] = False


# Snapshot commit created for undo support, plus untracked paths present
# before the turn (so undo knows not to delete them).
class GhostCommit(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    id: str
    parent: str | None = None
    preexisting_untracked_dirs: list[str]
    preexisting_untracked_files: list[str]


class GitInfo(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    branch: str | None = None
    origin_url: Annotated[str | None, Field(alias="originUrl")] = None
    sha: str | None = None


class HazelnutScope(Enum):
    example = "example"
    workspace_shared = "workspace-shared"
    all_shared = "all-shared"
    personal = "personal"


class HistoryEntry(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    conversation_id: str
    text: str
    ts: Annotated[int, Field(ge=0)]


# --- Hook system enums ---

class HookEventName(Enum):
    session_start = "sessionStart"
    stop = "stop"


# `async` is a Python keyword, hence the generated trailing underscore.
class HookExecutionMode(Enum):
    sync = "sync"
    async_ = "async"


class HookHandlerType(Enum):
    command = "command"
    prompt = "prompt"
    agent = "agent"


class HookOutputEntryKind(Enum):
    warning = "warning"
    stop = "stop"
    feedback = "feedback"
    context = "context"
    error = "error"


class HookRunStatus(Enum):
    running = "running"
    completed = "completed"
    failed = "failed"
    blocked = "blocked"
    stopped = "stopped"


class HookScope(Enum):
    thread = "thread"
    turn = "turn"


class ImageDetail(Enum):
    auto = "auto"
    low = "low"
    high = "high"
    original = "original"


# Client capability flags sent during the initialize handshake.
class InitializeCapabilities(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    experimental_api: Annotated[
        bool | None,
        Field(
            alias="experimentalApi",
            description="Opt into receiving experimental API methods and fields.",
        ),
    ] = False
    opt_out_notification_methods: Annotated[
        list[str] | None,
        Field(
            alias="optOutNotificationMethods",
            description="Exact notification method names that should be suppressed for this connection (for example `thread/started`).",
        ),
    ] = None


class InitializeParams(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    capabilities: InitializeCapabilities | None = None
    client_info: Annotated[ClientInfo, Field(alias="clientInfo")]


class InputModality(Enum):
    text = "text"
    image = "image"


class ListMcpServerStatusParams(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    cursor: Annotated[
        str | None,
        Field(description="Opaque pagination cursor returned by a previous call."),
    ] = None
    limit: Annotated[
        int | None,
        Field(
            description="Optional page size; defaults to a server-defined value.", ge=0
        ),
    ] = None


class ExecLocalShellAction(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    command: list[str]
    env: dict[str, Any] | None = None
    timeout_ms: Annotated[int | None, Field(ge=0)] = None
    type: Annotated[Literal["exec"], Field(title="ExecLocalShellActionType")]
    user: str | None = None
    working_directory: str | None = None


# Single-variant union kept as a RootModel so the wire shape matches the schema.
class LocalShellAction(RootModel[ExecLocalShellAction]):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    root: ExecLocalShellAction


class LocalShellStatus(Enum):
    completed = "completed"
    in_progress = "in_progress"
    incomplete = "incomplete"


# --- account/login/start param variants (discriminated by `type`) ---

class ApiKeyLoginAccountParams(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    api_key: Annotated[str, Field(alias="apiKey")]
    type: Annotated[Literal["apiKey"], Field(title="ApiKeyv2::LoginAccountParamsType")]


class ChatgptLoginAccountParams(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    type: Annotated[
        Literal["chatgpt"], Field(title="Chatgptv2::LoginAccountParamsType")
    ]


class ChatgptAuthTokensLoginAccountParams(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    access_token: Annotated[
        str,
        Field(
            alias="accessToken",
            description="Access token (JWT) supplied by the client. This token is used for backend API requests and email extraction.",
        ),
    ]
    chatgpt_account_id: Annotated[
        str,
        Field(
            alias="chatgptAccountId",
            description="Workspace/account identifier supplied by the client.",
        ),
    ]
    chatgpt_plan_type: Annotated[
        str | None,
        Field(
            alias="chatgptPlanType",
            description="Optional plan type supplied by the client.\n\nWhen `null`, Codex attempts to derive the plan type from access-token claims. If unavailable, the plan defaults to `unknown`.",
        ),
    ] = None
    type: Annotated[
        Literal["chatgptAuthTokens"],
        Field(title="ChatgptAuthTokensv2::LoginAccountParamsType"),
    ]


class LoginAccountParams(
    RootModel[
        ApiKeyLoginAccountParams
        | ChatgptLoginAccountParams
        | ChatgptAuthTokensLoginAccountParams
    ]
):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    root: Annotated[
        ApiKeyLoginAccountParams
        | ChatgptLoginAccountParams
        | ChatgptAuthTokensLoginAccountParams,
        Field(title="LoginAccountParams"),
    ]


# --- account/login/start response variants ---

class ApiKeyLoginAccountResponse(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    type: Annotated[
        Literal["apiKey"], Field(title="ApiKeyv2::LoginAccountResponseType")
    ]


class ChatgptLoginAccountResponse(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    auth_url: Annotated[
        str,
        Field(
            alias="authUrl",
            description="URL the client should open in a browser to initiate the OAuth flow.",
        ),
    ]
    login_id: Annotated[str, Field(alias="loginId")]
    type: Annotated[
        Literal["chatgpt"], Field(title="Chatgptv2::LoginAccountResponseType")
    ]


class ChatgptAuthTokensLoginAccountResponse(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    type: Annotated[
        Literal["chatgptAuthTokens"],
        Field(title="ChatgptAuthTokensv2::LoginAccountResponseType"),
    ]


class LoginAccountResponse(
    RootModel[
        ApiKeyLoginAccountResponse
        | ChatgptLoginAccountResponse
        | ChatgptAuthTokensLoginAccountResponse
    ]
):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    root: Annotated[
        ApiKeyLoginAccountResponse
        | ChatgptLoginAccountResponse
        | ChatgptAuthTokensLoginAccountResponse,
        Field(title="LoginAccountResponse"),
    ]


# Empty response payload (see generator `pass` note above ExternalAgentConfigImportResponse).
class LogoutAccountResponse(BaseModel):
    pass
    model_config = ConfigDict(
        populate_by_name=True,
    )


# --- macOS seatbelt permission extensions ---

class MacOsAutomationPermissionValue(Enum):
    none = "none"
    all = "all"


class BundleIdsMacOsAutomationPermission(BaseModel):
    model_config = ConfigDict(
        extra="forbid",
        populate_by_name=True,
    )
    bundle_ids: list[str]


class MacOsAutomationPermission(
    RootModel[MacOsAutomationPermissionValue | BundleIdsMacOsAutomationPermission]
):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    root: MacOsAutomationPermissionValue | BundleIdsMacOsAutomationPermission


class MacOsContactsPermission(Enum):
    none = "none"
    read_only = "read_only"
    read_write = "read_write"


class MacOsPreferencesPermission(Enum):
    none = "none"
    read_only = "read_only"
    read_write = "read_write"


# NOTE(review): enum-typed fields here default to raw strings ("none",
# "read_only") rather than enum members; pydantic only coerces defaults when
# validate_default is enabled — presumably intentional generator output, but
# worth confirming these defaults round-trip correctly.
class MacOsSeatbeltProfileExtensions(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    macos_accessibility: bool | None = False
    macos_automation: Annotated[MacOsAutomationPermission | None, Field()] = "none"
    macos_calendar: bool | None = False
    macos_contacts: MacOsContactsPermission | None = "none"
    macos_launch_services: bool | None = False
    macos_preferences: MacOsPreferencesPermission | None = "read_only"
    macos_reminders: bool | None = False


class McpAuthStatus(Enum):
    unsupported = "unsupported"
    not_logged_in = "notLoggedIn"
    bearer_token = "bearerToken"
    o_auth = "oAuth"


class McpInvocation(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    arguments: Annotated[
        Any | None, Field(description="Arguments to the tool call.")
    ] = None
    server: Annotated[
        str, Field(description="Name of the MCP server as defined in the config.")
    ]
    tool: Annotated[
        str, Field(description="Name of the tool as given by the MCP server.")
    ]


class McpServerOauthLoginCompletedNotification(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    error: str | None = None
    name: str
    success: bool


class McpServerOauthLoginParams(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    name: str
    scopes: list[str] | None = None
    timeout_secs: Annotated[int | None, Field(alias="timeoutSecs")] = None


class McpServerOauthLoginResponse(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    authorization_url: Annotated[str, Field(alias="authorizationUrl")]


# Empty response payload (see generator `pass` note above).
class McpServerRefreshResponse(BaseModel):
    pass
    model_config = ConfigDict(
        populate_by_name=True,
    )


class McpStartupFailure(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    error: str
    server: str


# --- MCP server startup state machine (discriminated by `state`) ---

class StartingMcpStartupStatus(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    state: Annotated[Literal["starting"], Field(title="StartingMcpStartupStatusState")]


class ReadyMcpStartupStatus(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    state: Annotated[Literal["ready"], Field(title="ReadyMcpStartupStatusState")]


class FailedMcpStartupStatus(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    error: str
    state: Annotated[Literal["failed"], Field(title="FailedMcpStartupStatusState")]


class CancelledMcpStartupStatus(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    state: Annotated[
        Literal["cancelled"], Field(title="CancelledMcpStartupStatusState")
    ]


class McpStartupStatus(
    RootModel[
        StartingMcpStartupStatus
        | ReadyMcpStartupStatus
        | FailedMcpStartupStatus
        | CancelledMcpStartupStatus
    ]
):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    root: (
        StartingMcpStartupStatus
        | ReadyMcpStartupStatus
        | FailedMcpStartupStatus
        | CancelledMcpStartupStatus
    )


class McpToolCallError(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    message: str


class McpToolCallProgressNotification(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    item_id: Annotated[str, Field(alias="itemId")]
    message: str
    thread_id: Annotated[str, Field(alias="threadId")]
    turn_id: Annotated[str, Field(alias="turnId")]


# NOTE(review): `content` is an untyped `list` in the schema output — element
# shape is not constrained here.
class McpToolCallResult(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    content: list
    structured_content: Annotated[Any | None, Field(alias="structuredContent")] = None


class McpToolCallStatus(Enum):
    in_progress = "inProgress"
    completed = "completed"
    failed = "failed"


class MergeStrategy(Enum):
    replace = "replace"
    upsert = "upsert"


class MessagePhase(Enum):
    commentary = "commentary"
    final_answer = "final_answer"


class ModeKind(Enum):
    plan = "plan"
    default = "default"


class ModelAvailabilityNux(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    message: str


class ModelListParams(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    cursor: Annotated[
        str | None,
        Field(description="Opaque pagination cursor returned by a previous call."),
    ] = None
    include_hidden: Annotated[
        bool | None,
        Field(
            alias="includeHidden",
            description="When true, include models that are hidden from the default picker list.",
        ),
    ] = None
    limit: Annotated[
        int | None,
        Field(
            description="Optional page size; defaults to a reasonable server-side value.",
            ge=0,
        ),
    ] = None


# Single-value wrapper; extensible reroute-reason union in the schema.
class ModelRerouteReason(RootModel[Literal["highRiskCyberActivity"]]):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    root: Literal["highRiskCyberActivity"]


class ModelReroutedNotification(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    from_model: Annotated[str, Field(alias="fromModel")]
    reason: ModelRerouteReason
    thread_id: Annotated[str, Field(alias="threadId")]
    to_model: Annotated[str, Field(alias="toModel")]
    turn_id: Annotated[str, Field(alias="turnId")]


class ModelUpgradeInfo(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    migration_markdown: Annotated[str | None, Field(alias="migrationMarkdown")] = None
    model: str
    model_link: Annotated[str | None, Field(alias="modelLink")] = None
    upgrade_copy: Annotated[str | None, Field(alias="upgradeCopy")] = None


class NetworkAccess(Enum):
    restricted = "restricted"
    enabled = "enabled"


class NetworkApprovalProtocol(Enum):
    http = "http"
    https = "https"
    socks5_tcp = "socks5Tcp"
    socks5_udp = "socks5Udp"


class NetworkPermissions(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    enabled: bool | None = None


class NetworkPolicyRuleAction(Enum):
    allow = "allow"
    deny = "deny"


# Sandbox network configuration; the `dangerously*` flags widen access and are
# named to discourage casual use.
class NetworkRequirements(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    allow_local_binding: Annotated[bool | None, Field(alias="allowLocalBinding")] = None
    allow_unix_sockets: Annotated[list[str] | None, Field(alias="allowUnixSockets")] = (
        None
    )
    allow_upstream_proxy: Annotated[bool | None, Field(alias="allowUpstreamProxy")] = (
        None
    )
    allowed_domains: Annotated[list[str] | None, Field(alias="allowedDomains")] = None
    dangerously_allow_all_unix_sockets: Annotated[
        bool | None, Field(alias="dangerouslyAllowAllUnixSockets")
    ] = None
    dangerously_allow_non_loopback_proxy: Annotated[
        bool | None, Field(alias="dangerouslyAllowNonLoopbackProxy")
    ] = None
    denied_domains: Annotated[list[str] | None, Field(alias="deniedDomains")] = None
    enabled: bool | None = None
    http_port: Annotated[int | None, Field(alias="httpPort", ge=0)] = None
    socks_port: Annotated[int | None, Field(alias="socksPort", ge=0)] = None


# --- Parsed shell-command classification (discriminated by `type`) ---

class ReadParsedCommand(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    cmd: str
    name: str
    path: Annotated[
        str,
        Field(
            description="(Best effort) Path to the file being read by the command. When possible, this is an absolute path, though when relative, it should be resolved against the `cwd`` that will be used to run the command to derive the absolute path."
        ),
    ]
    type: Annotated[Literal["read"], Field(title="ReadParsedCommandType")]


class ListFilesParsedCommand(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    cmd: str
    path: str | None = None
    type: Annotated[Literal["list_files"], Field(title="ListFilesParsedCommandType")]


class SearchParsedCommand(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    cmd: str
    path: str | None = None
    query: str | None = None
    type: Annotated[Literal["search"], Field(title="SearchParsedCommandType")]


class UnknownParsedCommand(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    cmd: str
    type: Annotated[Literal["unknown"], Field(title="UnknownParsedCommandType")]


class ParsedCommand(
    RootModel[
        ReadParsedCommand
        | ListFilesParsedCommand
        | SearchParsedCommand
        | UnknownParsedCommand
    ]
):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    root: (
        ReadParsedCommand
        | ListFilesParsedCommand
        | SearchParsedCommand
        | UnknownParsedCommand
    )


class PatchApplyStatus(Enum):
    in_progress = "inProgress"
    completed = "completed"
    failed = "failed"
    declined = "declined"


# --- Patch change-kind variants (discriminated by `type`) ---

class AddPatchChangeKind(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    type: Annotated[Literal["add"], Field(title="AddPatchChangeKindType")]


class DeletePatchChangeKind(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    type: Annotated[Literal["delete"], Field(title="DeletePatchChangeKindType")]


class UpdatePatchChangeKind(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    move_path: str | None = None
    type: Annotated[Literal["update"], Field(title="UpdatePatchChangeKindType")]


class PatchChangeKind(
    RootModel[AddPatchChangeKind | DeletePatchChangeKind | UpdatePatchChangeKind]
):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    root: AddPatchChangeKind | DeletePatchChangeKind | UpdatePatchChangeKind


# Aggregate sandbox permission profile (filesystem + macOS + network).
class PermissionProfile(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    file_system: FileSystemPermissions | None = None
    macos: MacOsSeatbeltProfileExtensions | None = None
    network: NetworkPermissions | None = None


class Personality(Enum):
    none = "none"
    friendly = "friendly"
    pragmatic = "pragmatic"


class PlanDeltaNotification(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    delta: str
    item_id: Annotated[str, Field(alias="itemId")]
    thread_id: Annotated[str, Field(alias="threadId")]
    turn_id: Annotated[str, Field(alias="turnId")]


class PlanType(Enum):
    free = "free"
    go = "go"
    plus = "plus"
    pro = "pro"
    team = "team"
    business = "business"
    enterprise = "enterprise"
    edu = "edu"
    unknown = "unknown"


# --- Plugin marketplace RPC models ---

class PluginAuthPolicy(Enum):
    on_install = "ON_INSTALL"
    on_use = "ON_USE"


class PluginInstallParams(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    marketplace_path: Annotated[AbsolutePathBuf, Field(alias="marketplacePath")]
    plugin_name: Annotated[str, Field(alias="pluginName")]


class PluginInstallPolicy(Enum):
    not_available = "NOT_AVAILABLE"
    available = "AVAILABLE"
    installed_by_default = "INSTALLED_BY_DEFAULT"


class PluginInstallResponse(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    apps_needing_auth: Annotated[list[AppSummary], Field(alias="appsNeedingAuth")]
    auth_policy: Annotated[PluginAuthPolicy, Field(alias="authPolicy")]


# Display metadata for a plugin's store/marketplace listing.
class PluginInterface(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    brand_color: Annotated[str | None, Field(alias="brandColor")] = None
    capabilities: list[str]
    category: str | None = None
    composer_icon: Annotated[AbsolutePathBuf | None, Field(alias="composerIcon")] = None
    default_prompt: Annotated[str | None, Field(alias="defaultPrompt")] = None
    developer_name: Annotated[str | None, Field(alias="developerName")] = None
    display_name: Annotated[str | None, Field(alias="displayName")] = None
    logo: AbsolutePathBuf | None = None
    long_description: Annotated[str | None, Field(alias="longDescription")] = None
    privacy_policy_url: Annotated[str | None, Field(alias="privacyPolicyUrl")] = None
    screenshots: list[AbsolutePathBuf]
    short_description: Annotated[str | None, Field(alias="shortDescription")] = None
    terms_of_service_url: Annotated[str | None, Field(alias="termsOfServiceUrl")] = None
    website_url: Annotated[str | None, Field(alias="websiteUrl")] = None


class PluginListParams(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    cwds: Annotated[
        list[AbsolutePathBuf] | None,
        Field(
            description="Optional working directories used to discover repo marketplaces. When omitted, only home-scoped marketplaces and the official curated marketplace are considered."
        ),
    ] = None
    force_remote_sync: Annotated[
        bool | None,
        Field(
            alias="forceRemoteSync",
            description="When true, reconcile the official curated marketplace against the remote plugin state before listing marketplaces.",
        ),
    ] = None


class LocalPluginSource(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    path: AbsolutePathBuf
    type: Annotated[Literal["local"], Field(title="LocalPluginSourceType")]


# Single-variant union; more plugin source kinds may be added by the schema.
class PluginSource(RootModel[LocalPluginSource]):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    root: LocalPluginSource


class PluginSummary(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    auth_policy: Annotated[PluginAuthPolicy, Field(alias="authPolicy")]
    enabled: bool
    id: str
    install_policy: Annotated[PluginInstallPolicy, Field(alias="installPolicy")]
    installed: bool
    interface: PluginInterface | None = None
    name: str
    source: PluginSource


class PluginUninstallParams(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    plugin_id: Annotated[str, Field(alias="pluginId")]


# Empty response payload (see generator `pass` note above).
class PluginUninstallResponse(BaseModel):
    pass
    model_config = ConfigDict(
        populate_by_name=True,
    )


class ProductSurface(Enum):
    chatgpt = "chatgpt"
    codex = "codex"
    api = "api"
    atlas = "atlas"


class RateLimitWindow(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    resets_at: Annotated[int | None, Field(alias="resetsAt")] = None
    used_percent: Annotated[int, Field(alias="usedPercent")]
    window_duration_mins: Annotated[int | None, Field(alias="windowDurationMins")] = (
        None
    )


# NOTE(review): the mutable `[]` default is safe under pydantic v2, which
# deep-copies non-validated defaults per instance; it would be a bug in a
# plain Python class.
class RestrictedReadOnlyAccess(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    include_platform_defaults: Annotated[
        bool | None, Field(alias="includePlatformDefaults")
    ] = True
    readable_roots: Annotated[
        list[AbsolutePathBuf] | None, Field(alias="readableRoots")
    ] = []
    type: Annotated[Literal["restricted"], Field(title="RestrictedReadOnlyAccessType")]


class FullAccessReadOnlyAccess(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    type: Annotated[Literal["fullAccess"], Field(title="FullAccessReadOnlyAccessType")]


class ReadOnlyAccess(RootModel[RestrictedReadOnlyAccess | FullAccessReadOnlyAccess]):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    root: RestrictedReadOnlyAccess | FullAccessReadOnlyAccess


# --- Realtime (voice) session events; wrappers use PascalCase wire aliases
# and forbid extra keys so the single-key envelope stays unambiguous. ---

class RealtimeAudioFrame(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    data: str
    num_channels: Annotated[int, Field(ge=0)]
    sample_rate: Annotated[int, Field(ge=0)]
    samples_per_channel: Annotated[int | None, Field(ge=0)] = None


class SessionUpdated(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    instructions: str | None = None
    session_id: str


class SessionUpdatedRealtimeEvent(BaseModel):
    model_config = ConfigDict(
        extra="forbid",
        populate_by_name=True,
    )
    session_updated: Annotated[SessionUpdated, Field(alias="SessionUpdated")]


class AudioOutRealtimeEvent(BaseModel):
    model_config = ConfigDict(
        extra="forbid",
        populate_by_name=True,
    )
    audio_out: Annotated[RealtimeAudioFrame, Field(alias="AudioOut")]


class ConversationItemAddedRealtimeEvent(BaseModel):
    model_config = ConfigDict(
        extra="forbid",
        populate_by_name=True,
    )
    conversation_item_added: Annotated[Any, Field(alias="ConversationItemAdded")]


class ConversationItemDone(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    item_id: str


class ConversationItemDoneRealtimeEvent(BaseModel):
    model_config = ConfigDict(
        extra="forbid",
        populate_by_name=True,
    )
    conversation_item_done: Annotated[
        ConversationItemDone, Field(alias="ConversationItemDone")
    ]


class ErrorRealtimeEvent(BaseModel):
    model_config = ConfigDict(
        extra="forbid",
        populate_by_name=True,
    )
    error: Annotated[str, Field(alias="Error")]


class RealtimeTranscriptDelta(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    delta: str


class RealtimeTranscriptEntry(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    role: str
    text: str


class ReasoningEffort(Enum):
    none = "none"
    minimal = "minimal"
    low = "low"
    medium = "medium"
    high = "high"
    xhigh = "xhigh"


class ReasoningEffortOption(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    description: str
    reasoning_effort: Annotated[ReasoningEffort, Field(alias="reasoningEffort")]


# --- Reasoning item content variants (discriminated by `type`) ---

class ReasoningTextReasoningItemContent(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    text: str
    type: Annotated[
        Literal["reasoning_text"], Field(title="ReasoningTextReasoningItemContentType")
    ]


class TextReasoningItemContent(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    text: str
    type: Annotated[Literal["text"], Field(title="TextReasoningItemContentType")]


class ReasoningItemContent(
    RootModel[ReasoningTextReasoningItemContent | TextReasoningItemContent]
):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    root: ReasoningTextReasoningItemContent | TextReasoningItemContent


class SummaryTextReasoningItemReasoningSummary(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    text: str
    type: Annotated[
        Literal["summary_text"],
        Field(title="SummaryTextReasoningItemReasoningSummaryType"),
    ]


class ReasoningItemReasoningSummary(
    RootModel[SummaryTextReasoningItemReasoningSummary]
):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    root: SummaryTextReasoningItemReasoningSummary


class ReasoningSummaryValue(Enum):
    auto = "auto"
    concise = "concise"
    detailed = "detailed"


class ReasoningSummary(RootModel[ReasoningSummaryValue | Literal["none"]]):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    root: Annotated[
        ReasoningSummaryValue | Literal["none"],
        Field(
            description="A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries"
        ),
    ]


class ReasoningSummaryPartAddedNotification(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    item_id: Annotated[str, Field(alias="itemId")]
    summary_index: Annotated[int, Field(alias="summaryIndex")]
    thread_id: Annotated[str, Field(alias="threadId")]
    turn_id: Annotated[str, Field(alias="turnId")]


class ReasoningSummaryTextDeltaNotification(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    delta: str
    item_id: Annotated[str, Field(alias="itemId")]
    summary_index: Annotated[int, Field(alias="summaryIndex")]
    thread_id: Annotated[str, Field(alias="threadId")]
    turn_id: Annotated[str, Field(alias="turnId")]


class ReasoningTextDeltaNotification(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    content_index: Annotated[int, Field(alias="contentIndex")]
    delta: str
    item_id: Annotated[str, Field(alias="itemId")]
    thread_id: Annotated[str, Field(alias="threadId")]
    turn_id: Annotated[str, Field(alias="turnId")]


class RemoteSkillSummary(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    description: str
    id: str
    name: str


# JSON-RPC request id: string or integer, per the JSON-RPC 2.0 spec.
class RequestId(RootModel[str | int]):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    root: str | int


class RequestUserInputQuestionOption(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    description: str
    label: str


class ResidencyRequirement(RootModel[Literal["us"]]):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    root: Literal["us"]


# Head of a model whose remaining fields lie past this chunk; `_meta` is a
# reserved MCP metadata key, exposed as `field_meta` because pydantic forbids
# leading-underscore field names. Fragment kept as-is.
class Resource(BaseModel):
    model_config = ConfigDict(
        populate_by_name=True,
    )
    field_meta: Annotated[Any | None, Field(alias="_meta")] = None
annotations: Any | None = None + description: str | None = None + icons: list | None = None + mime_type: Annotated[str | None, Field(alias="mimeType")] = None + name: str + size: int | None = None + title: str | None = None + uri: str + + +class ResourceTemplate(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + annotations: Any | None = None + description: str | None = None + mime_type: Annotated[str | None, Field(alias="mimeType")] = None + name: str + title: str | None = None + uri_template: Annotated[str, Field(alias="uriTemplate")] + + +class MessageResponseItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + content: list[ContentItem] + end_turn: bool | None = None + id: str | None = None + phase: MessagePhase | None = None + role: str + type: Annotated[Literal["message"], Field(title="MessageResponseItemType")] + + +class ReasoningResponseItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + content: list[ReasoningItemContent] | None = None + encrypted_content: str | None = None + id: str + summary: list[ReasoningItemReasoningSummary] + type: Annotated[Literal["reasoning"], Field(title="ReasoningResponseItemType")] + + +class LocalShellCallResponseItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + action: LocalShellAction + call_id: Annotated[ + str | None, Field(description="Set when using the Responses API.") + ] = None + id: Annotated[ + str | None, + Field( + description="Legacy id field retained for compatibility with older payloads." 
+ ), + ] = None + status: LocalShellStatus + type: Annotated[ + Literal["local_shell_call"], Field(title="LocalShellCallResponseItemType") + ] + + +class FunctionCallResponseItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + arguments: str + call_id: str + id: str | None = None + name: str + namespace: str | None = None + type: Annotated[ + Literal["function_call"], Field(title="FunctionCallResponseItemType") + ] + + +class ToolSearchCallResponseItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + arguments: Any + call_id: str | None = None + execution: str + id: str | None = None + status: str | None = None + type: Annotated[ + Literal["tool_search_call"], Field(title="ToolSearchCallResponseItemType") + ] + + +class CustomToolCallResponseItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: str + id: str | None = None + input: str + name: str + status: str | None = None + type: Annotated[ + Literal["custom_tool_call"], Field(title="CustomToolCallResponseItemType") + ] + + +class ToolSearchOutputResponseItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: str | None = None + execution: str + status: str + tools: list + type: Annotated[ + Literal["tool_search_output"], Field(title="ToolSearchOutputResponseItemType") + ] + + +class ImageGenerationCallResponseItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: str + result: str + revised_prompt: str | None = None + status: str + type: Annotated[ + Literal["image_generation_call"], + Field(title="ImageGenerationCallResponseItemType"), + ] + + +class GhostSnapshotResponseItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + ghost_commit: GhostCommit + type: Annotated[ + Literal["ghost_snapshot"], Field(title="GhostSnapshotResponseItemType") + ] + + +class CompactionResponseItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + 
encrypted_content: str + type: Annotated[Literal["compaction"], Field(title="CompactionResponseItemType")] + + +class OtherResponseItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[Literal["other"], Field(title="OtherResponseItemType")] + + +class SearchResponsesApiWebSearchAction(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + queries: list[str] | None = None + query: str | None = None + type: Annotated[ + Literal["search"], Field(title="SearchResponsesApiWebSearchActionType") + ] + + +class OpenPageResponsesApiWebSearchAction(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[ + Literal["open_page"], Field(title="OpenPageResponsesApiWebSearchActionType") + ] + url: str | None = None + + +class FindInPageResponsesApiWebSearchAction(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + pattern: str | None = None + type: Annotated[ + Literal["find_in_page"], + Field(title="FindInPageResponsesApiWebSearchActionType"), + ] + url: str | None = None + + +class OtherResponsesApiWebSearchAction(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[ + Literal["other"], Field(title="OtherResponsesApiWebSearchActionType") + ] + + +class ResponsesApiWebSearchAction( + RootModel[ + SearchResponsesApiWebSearchAction + | OpenPageResponsesApiWebSearchAction + | FindInPageResponsesApiWebSearchAction + | OtherResponsesApiWebSearchAction + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: ( + SearchResponsesApiWebSearchAction + | OpenPageResponsesApiWebSearchAction + | FindInPageResponsesApiWebSearchAction + | OtherResponsesApiWebSearchAction + ) + + +class OkResultOfCallToolResultOrString(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + ok: Annotated[CallToolResult, Field(alias="Ok")] + + +class ErrResultOfCallToolResultOrString(BaseModel): + model_config = ConfigDict( + 
populate_by_name=True, + ) + err: Annotated[str, Field(alias="Err")] + + +class ResultOfCallToolResultOrString( + RootModel[OkResultOfCallToolResultOrString | ErrResultOfCallToolResultOrString] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: OkResultOfCallToolResultOrString | ErrResultOfCallToolResultOrString + + +class ApprovedExecpolicyAmendment(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + proposed_execpolicy_amendment: list[str] + + +class ApprovedExecpolicyAmendmentReviewDecision(BaseModel): + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + ) + approved_execpolicy_amendment: ApprovedExecpolicyAmendment + + +class ReviewDelivery(Enum): + inline = "inline" + detached = "detached" + + +class ReviewLineRange(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + end: Annotated[int, Field(ge=0)] + start: Annotated[int, Field(ge=0)] + + +class UncommittedChangesReviewTarget(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[ + Literal["uncommittedChanges"], Field(title="UncommittedChangesReviewTargetType") + ] + + +class BaseBranchReviewTarget(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + branch: str + type: Annotated[Literal["baseBranch"], Field(title="BaseBranchReviewTargetType")] + + +class CommitReviewTarget(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + sha: str + title: Annotated[ + str | None, + Field( + description="Optional human-readable label (e.g., commit subject) for UIs." 
+ ), + ] = None + type: Annotated[Literal["commit"], Field(title="CommitReviewTargetType")] + + +class CustomReviewTarget(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + instructions: str + type: Annotated[Literal["custom"], Field(title="CustomReviewTargetType")] + + +class ReviewTarget( + RootModel[ + UncommittedChangesReviewTarget + | BaseBranchReviewTarget + | CommitReviewTarget + | CustomReviewTarget + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: ( + UncommittedChangesReviewTarget + | BaseBranchReviewTarget + | CommitReviewTarget + | CustomReviewTarget + ) + + +class SandboxMode(Enum): + read_only = "read-only" + workspace_write = "workspace-write" + danger_full_access = "danger-full-access" + + +class DangerFullAccessSandboxPolicy(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[ + Literal["dangerFullAccess"], Field(title="DangerFullAccessSandboxPolicyType") + ] + + +class ReadOnlySandboxPolicy(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + access: Annotated[ReadOnlyAccess | None, Field()] = {"type": "fullAccess"} + network_access: Annotated[bool | None, Field(alias="networkAccess")] = False + type: Annotated[Literal["readOnly"], Field(title="ReadOnlySandboxPolicyType")] + + +class ExternalSandboxSandboxPolicy(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + network_access: Annotated[NetworkAccess | None, Field(alias="networkAccess")] = ( + "restricted" + ) + type: Annotated[ + Literal["externalSandbox"], Field(title="ExternalSandboxSandboxPolicyType") + ] + + +class WorkspaceWriteSandboxPolicy(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + exclude_slash_tmp: Annotated[bool | None, Field(alias="excludeSlashTmp")] = False + exclude_tmpdir_env_var: Annotated[ + bool | None, Field(alias="excludeTmpdirEnvVar") + ] = False + network_access: Annotated[bool | None, Field(alias="networkAccess")] = False + 
read_only_access: Annotated[ + ReadOnlyAccess | None, Field(alias="readOnlyAccess") + ] = {"type": "fullAccess"} + type: Annotated[ + Literal["workspaceWrite"], Field(title="WorkspaceWriteSandboxPolicyType") + ] + writable_roots: Annotated[ + list[AbsolutePathBuf] | None, Field(alias="writableRoots") + ] = [] + + +class SandboxPolicy( + RootModel[ + DangerFullAccessSandboxPolicy + | ReadOnlySandboxPolicy + | ExternalSandboxSandboxPolicy + | WorkspaceWriteSandboxPolicy + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: ( + DangerFullAccessSandboxPolicy + | ReadOnlySandboxPolicy + | ExternalSandboxSandboxPolicy + | WorkspaceWriteSandboxPolicy + ) + + +class SandboxWorkspaceWrite(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + exclude_slash_tmp: bool | None = False + exclude_tmpdir_env_var: bool | None = False + network_access: bool | None = False + writable_roots: list[str] | None = [] + + +class ItemAgentMessageDeltaServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["item/agentMessage/delta"], + Field(title="Item/agentMessage/deltaNotificationMethod"), + ] + params: AgentMessageDeltaNotification + + +class ItemPlanDeltaServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["item/plan/delta"], Field(title="Item/plan/deltaNotificationMethod") + ] + params: PlanDeltaNotification + + +class ItemCommandExecutionOutputDeltaServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["item/commandExecution/outputDelta"], + Field(title="Item/commandExecution/outputDeltaNotificationMethod"), + ] + params: CommandExecutionOutputDeltaNotification + + +class ItemFileChangeOutputDeltaServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["item/fileChange/outputDelta"], + 
Field(title="Item/fileChange/outputDeltaNotificationMethod"), + ] + params: FileChangeOutputDeltaNotification + + +class ItemMcpToolCallProgressServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["item/mcpToolCall/progress"], + Field(title="Item/mcpToolCall/progressNotificationMethod"), + ] + params: McpToolCallProgressNotification + + +class McpServerOauthLoginCompletedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["mcpServer/oauthLogin/completed"], + Field(title="McpServer/oauthLogin/completedNotificationMethod"), + ] + params: McpServerOauthLoginCompletedNotification + + +class ItemReasoningSummaryTextDeltaServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["item/reasoning/summaryTextDelta"], + Field(title="Item/reasoning/summaryTextDeltaNotificationMethod"), + ] + params: ReasoningSummaryTextDeltaNotification + + +class ItemReasoningSummaryPartAddedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["item/reasoning/summaryPartAdded"], + Field(title="Item/reasoning/summaryPartAddedNotificationMethod"), + ] + params: ReasoningSummaryPartAddedNotification + + +class ItemReasoningTextDeltaServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["item/reasoning/textDelta"], + Field(title="Item/reasoning/textDeltaNotificationMethod"), + ] + params: ReasoningTextDeltaNotification + + +class ThreadCompactedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["thread/compacted"], Field(title="Thread/compactedNotificationMethod") + ] + params: ContextCompactedNotification + + +class ModelReroutedServerNotification(BaseModel): + model_config = ConfigDict( + 
populate_by_name=True, + ) + method: Annotated[ + Literal["model/rerouted"], Field(title="Model/reroutedNotificationMethod") + ] + params: ModelReroutedNotification + + +class DeprecationNoticeServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["deprecationNotice"], Field(title="DeprecationNoticeNotificationMethod") + ] + params: DeprecationNoticeNotification + + +class FuzzyFileSearchSessionUpdatedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["fuzzyFileSearch/sessionUpdated"], + Field(title="FuzzyFileSearch/sessionUpdatedNotificationMethod"), + ] + params: FuzzyFileSearchSessionUpdatedNotification + + +class FuzzyFileSearchSessionCompletedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["fuzzyFileSearch/sessionCompleted"], + Field(title="FuzzyFileSearch/sessionCompletedNotificationMethod"), + ] + params: FuzzyFileSearchSessionCompletedNotification + + +class AccountLoginCompletedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["account/login/completed"], + Field(title="Account/login/completedNotificationMethod"), + ] + params: AccountLoginCompletedNotification + + +class ServerRequestResolvedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + request_id: Annotated[RequestId, Field(alias="requestId")] + thread_id: Annotated[str, Field(alias="threadId")] + + +class ServiceTier(Enum): + fast = "fast" + flex = "flex" + + +class SessionNetworkProxyRuntime(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + http_addr: str + socks_addr: str + + +class SessionSourceValue(Enum): + cli = "cli" + vscode = "vscode" + exec = "exec" + app_server = "appServer" + unknown = "unknown" + + +class Settings(BaseModel): + model_config = ConfigDict( + 
populate_by_name=True, + ) + developer_instructions: str | None = None + model: str + reasoning_effort: ReasoningEffort | None = None + + +class SkillErrorInfo(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + message: str + path: str + + +class SkillInterface(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + brand_color: Annotated[str | None, Field(alias="brandColor")] = None + default_prompt: Annotated[str | None, Field(alias="defaultPrompt")] = None + display_name: Annotated[str | None, Field(alias="displayName")] = None + icon_large: Annotated[str | None, Field(alias="iconLarge")] = None + icon_small: Annotated[str | None, Field(alias="iconSmall")] = None + short_description: Annotated[str | None, Field(alias="shortDescription")] = None + + +class SkillScope(Enum): + user = "user" + repo = "repo" + system = "system" + admin = "admin" + + +class SkillToolDependency(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + command: str | None = None + description: str | None = None + transport: str | None = None + type: str + url: str | None = None + value: str + + +class SkillsChangedNotification(BaseModel): + pass + model_config = ConfigDict( + populate_by_name=True, + ) + + +class SkillsConfigWriteParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + enabled: bool + path: str + + +class SkillsConfigWriteResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + effective_enabled: Annotated[bool, Field(alias="effectiveEnabled")] + + +class SkillsListExtraRootsForCwd(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + cwd: str + extra_user_roots: Annotated[list[str], Field(alias="extraUserRoots")] + + +class SkillsListParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + cwds: Annotated[ + list[str] | None, + Field( + description="When empty, defaults to the current session working directory." 
+ ), + ] = None + force_reload: Annotated[ + bool | None, + Field( + alias="forceReload", + description="When true, bypass the skills cache and re-scan skills from disk.", + ), + ] = None + per_cwd_extra_user_roots: Annotated[ + list[SkillsListExtraRootsForCwd] | None, + Field( + alias="perCwdExtraUserRoots", + description="Optional per-cwd extra roots to scan as user-scoped skills.", + ), + ] = None + + +class SkillsRemoteReadParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + enabled: bool | None = False + hazelnut_scope: Annotated[HazelnutScope | None, Field(alias="hazelnutScope")] = ( + "example" + ) + product_surface: Annotated[ProductSurface | None, Field(alias="productSurface")] = ( + "codex" + ) + + +class SkillsRemoteReadResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + data: list[RemoteSkillSummary] + + +class SkillsRemoteWriteParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + hazelnut_id: Annotated[str, Field(alias="hazelnutId")] + + +class SkillsRemoteWriteResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: str + path: str + + +class StepStatus(Enum): + pending = "pending" + in_progress = "in_progress" + completed = "completed" + + +class SubAgentSourceValue(Enum): + review = "review" + compact = "compact" + memory_consolidation = "memory_consolidation" + + +class OtherSubAgentSource(BaseModel): + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + ) + other: str + + +class TerminalInteractionNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + item_id: Annotated[str, Field(alias="itemId")] + process_id: Annotated[str, Field(alias="processId")] + stdin: str + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] + + +class TextElement(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + byte_range: Annotated[ + ByteRange, 
+ Field( + alias="byteRange", + description="Byte range in the parent `text` buffer that this element occupies.", + ), + ] + placeholder: Annotated[ + str | None, + Field( + description="Optional human-readable placeholder for the element, displayed in the UI." + ), + ] = None + + +class TextPosition(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + column: Annotated[ + int, + Field(description="1-based column number (in Unicode scalar values).", ge=0), + ] + line: Annotated[int, Field(description="1-based line number.", ge=0)] + + +class TextRange(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + end: TextPosition + start: TextPosition + + +class ThreadActiveFlag(Enum): + waiting_on_approval = "waitingOnApproval" + waiting_on_user_input = "waitingOnUserInput" + + +class ThreadArchiveParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadArchiveResponse(BaseModel): + pass + model_config = ConfigDict( + populate_by_name=True, + ) + + +class ThreadArchivedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadClosedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadCompactStartParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadCompactStartResponse(BaseModel): + pass + model_config = ConfigDict( + populate_by_name=True, + ) + + +class ThreadForkParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + approval_policy: Annotated[AskForApproval | None, Field(alias="approvalPolicy")] = ( + None + ) + base_instructions: Annotated[str | None, Field(alias="baseInstructions")] = None + config: dict[str, Any] | None = None + cwd: 
str | None = None + developer_instructions: Annotated[ + str | None, Field(alias="developerInstructions") + ] = None + ephemeral: bool | None = None + model: Annotated[ + str | None, + Field(description="Configuration overrides for the forked thread, if any."), + ] = None + model_provider: Annotated[str | None, Field(alias="modelProvider")] = None + sandbox: SandboxMode | None = None + service_tier: Annotated[ServiceTier | None, Field(alias="serviceTier")] = None + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadId(RootModel[str]): + model_config = ConfigDict( + populate_by_name=True, + ) + root: str + + +class AgentMessageThreadItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: str + phase: MessagePhase | None = None + text: str + type: Annotated[Literal["agentMessage"], Field(title="AgentMessageThreadItemType")] + + +class PlanThreadItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: str + text: str + type: Annotated[Literal["plan"], Field(title="PlanThreadItemType")] + + +class ReasoningThreadItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + content: list[str] | None = [] + id: str + summary: list[str] | None = [] + type: Annotated[Literal["reasoning"], Field(title="ReasoningThreadItemType")] + + +class CommandExecutionThreadItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + aggregated_output: Annotated[ + str | None, + Field( + alias="aggregatedOutput", + description="The command's output, aggregated from stdout and stderr.", + ), + ] = None + command: Annotated[str, Field(description="The command to be executed.")] + command_actions: Annotated[ + list[CommandAction], + Field( + alias="commandActions", + description="A best-effort parsing of the command to understand the action(s) it will perform. 
This returns a list of CommandAction objects because a single shell command may be composed of many commands piped together.", + ), + ] + cwd: Annotated[str, Field(description="The command's working directory.")] + duration_ms: Annotated[ + int | None, + Field( + alias="durationMs", + description="The duration of the command execution in milliseconds.", + ), + ] = None + exit_code: Annotated[ + int | None, Field(alias="exitCode", description="The command's exit code.") + ] = None + id: str + process_id: Annotated[ + str | None, + Field( + alias="processId", + description="Identifier for the underlying PTY process (when available).", + ), + ] = None + status: CommandExecutionStatus + type: Annotated[ + Literal["commandExecution"], Field(title="CommandExecutionThreadItemType") + ] + + +class McpToolCallThreadItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + arguments: Any + duration_ms: Annotated[ + int | None, + Field( + alias="durationMs", + description="The duration of the MCP tool call in milliseconds.", + ), + ] = None + error: McpToolCallError | None = None + id: str + result: McpToolCallResult | None = None + server: str + status: McpToolCallStatus + tool: str + type: Annotated[Literal["mcpToolCall"], Field(title="McpToolCallThreadItemType")] + + +class DynamicToolCallThreadItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + arguments: Any + content_items: Annotated[ + list[DynamicToolCallOutputContentItem] | None, Field(alias="contentItems") + ] = None + duration_ms: Annotated[ + int | None, + Field( + alias="durationMs", + description="The duration of the dynamic tool call in milliseconds.", + ), + ] = None + id: str + status: DynamicToolCallStatus + success: bool | None = None + tool: str + type: Annotated[ + Literal["dynamicToolCall"], Field(title="DynamicToolCallThreadItemType") + ] + + +class ImageViewThreadItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: str + path: str 
+ type: Annotated[Literal["imageView"], Field(title="ImageViewThreadItemType")] + + +class ImageGenerationThreadItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: str + result: str + revised_prompt: Annotated[str | None, Field(alias="revisedPrompt")] = None + status: str + type: Annotated[ + Literal["imageGeneration"], Field(title="ImageGenerationThreadItemType") + ] + + +class EnteredReviewModeThreadItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: str + review: str + type: Annotated[ + Literal["enteredReviewMode"], Field(title="EnteredReviewModeThreadItemType") + ] + + +class ExitedReviewModeThreadItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: str + review: str + type: Annotated[ + Literal["exitedReviewMode"], Field(title="ExitedReviewModeThreadItemType") + ] + + +class ContextCompactionThreadItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: str + type: Annotated[ + Literal["contextCompaction"], Field(title="ContextCompactionThreadItemType") + ] + + +class ThreadLoadedListParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + cursor: Annotated[ + str | None, + Field(description="Opaque pagination cursor returned by a previous call."), + ] = None + limit: Annotated[ + int | None, Field(description="Optional page size; defaults to no limit.", ge=0) + ] = None + + +class ThreadLoadedListResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + data: Annotated[ + list[str], + Field(description="Thread ids for sessions currently loaded in memory."), + ] + next_cursor: Annotated[ + str | None, + Field( + alias="nextCursor", + description="Opaque cursor to pass to the next call to continue after the last item. 
if None, there are no more items to return.", + ), + ] = None + + +class ThreadMetadataGitInfoUpdateParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + branch: Annotated[ + str | None, + Field( + description="Omit to leave the stored branch unchanged, set to `null` to clear it, or provide a non-empty string to replace it." + ), + ] = None + origin_url: Annotated[ + str | None, + Field( + alias="originUrl", + description="Omit to leave the stored origin URL unchanged, set to `null` to clear it, or provide a non-empty string to replace it.", + ), + ] = None + sha: Annotated[ + str | None, + Field( + description="Omit to leave the stored commit unchanged, set to `null` to clear it, or provide a non-empty string to replace it." + ), + ] = None + + +class ThreadMetadataUpdateParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + git_info: Annotated[ + ThreadMetadataGitInfoUpdateParams | None, + Field( + alias="gitInfo", + description="Patch the stored Git metadata for this thread. 
Omit a field to leave it unchanged, set it to `null` to clear it, or provide a string to replace the stored value.", + ), + ] = None + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadNameUpdatedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread_id: Annotated[str, Field(alias="threadId")] + thread_name: Annotated[str | None, Field(alias="threadName")] = None + + +class ThreadReadParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + include_turns: Annotated[ + bool | None, + Field( + alias="includeTurns", + description="When true, include turns and their items from rollout history.", + ), + ] = False + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadRealtimeAudioChunk(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + data: str + num_channels: Annotated[int, Field(alias="numChannels", ge=0)] + sample_rate: Annotated[int, Field(alias="sampleRate", ge=0)] + samples_per_channel: Annotated[ + int | None, Field(alias="samplesPerChannel", ge=0) + ] = None + + +class ThreadRealtimeClosedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + reason: str | None = None + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadRealtimeErrorNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + message: str + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadRealtimeItemAddedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + item: Any + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadRealtimeOutputAudioDeltaNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + audio: ThreadRealtimeAudioChunk + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadRealtimeStartedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + session_id: 
Annotated[str | None, Field(alias="sessionId")] = None + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadResumeParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + approval_policy: Annotated[AskForApproval | None, Field(alias="approvalPolicy")] = ( + None + ) + base_instructions: Annotated[str | None, Field(alias="baseInstructions")] = None + config: dict[str, Any] | None = None + cwd: str | None = None + developer_instructions: Annotated[ + str | None, Field(alias="developerInstructions") + ] = None + model: Annotated[ + str | None, + Field(description="Configuration overrides for the resumed thread, if any."), + ] = None + model_provider: Annotated[str | None, Field(alias="modelProvider")] = None + personality: Personality | None = None + sandbox: SandboxMode | None = None + service_tier: Annotated[ServiceTier | None, Field(alias="serviceTier")] = None + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadRollbackParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + num_turns: Annotated[ + int, + Field( + alias="numTurns", + description="The number of turns to drop from the end of the thread. Must be >= 1.\n\nThis only modifies the thread's history and does not revert local file changes that have been made by the agent. 
Clients are responsible for reverting these changes.", + ge=0, + ), + ] + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadSetNameParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + name: str + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadSetNameResponse(BaseModel): + pass + model_config = ConfigDict( + populate_by_name=True, + ) + + +class ThreadSortKey(Enum): + created_at = "created_at" + updated_at = "updated_at" + + +class ThreadSourceKind(Enum): + cli = "cli" + vscode = "vscode" + exec = "exec" + app_server = "appServer" + sub_agent = "subAgent" + sub_agent_review = "subAgentReview" + sub_agent_compact = "subAgentCompact" + sub_agent_thread_spawn = "subAgentThreadSpawn" + sub_agent_other = "subAgentOther" + unknown = "unknown" + + +class ThreadStartParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + approval_policy: Annotated[AskForApproval | None, Field(alias="approvalPolicy")] = ( + None + ) + base_instructions: Annotated[str | None, Field(alias="baseInstructions")] = None + config: dict[str, Any] | None = None + cwd: str | None = None + developer_instructions: Annotated[ + str | None, Field(alias="developerInstructions") + ] = None + ephemeral: bool | None = None + model: str | None = None + model_provider: Annotated[str | None, Field(alias="modelProvider")] = None + personality: Personality | None = None + sandbox: SandboxMode | None = None + service_name: Annotated[str | None, Field(alias="serviceName")] = None + service_tier: Annotated[ServiceTier | None, Field(alias="serviceTier")] = None + + +class NotLoadedThreadStatus(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[Literal["notLoaded"], Field(title="NotLoadedThreadStatusType")] + + +class IdleThreadStatus(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[Literal["idle"], Field(title="IdleThreadStatusType")] + + +class 
SystemErrorThreadStatus(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[Literal["systemError"], Field(title="SystemErrorThreadStatusType")] + + +class ActiveThreadStatus(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + active_flags: Annotated[list[ThreadActiveFlag], Field(alias="activeFlags")] + type: Annotated[Literal["active"], Field(title="ActiveThreadStatusType")] + + +class ThreadStatus( + RootModel[ + NotLoadedThreadStatus + | IdleThreadStatus + | SystemErrorThreadStatus + | ActiveThreadStatus + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: ( + NotLoadedThreadStatus + | IdleThreadStatus + | SystemErrorThreadStatus + | ActiveThreadStatus + ) + + +class ThreadStatusChangedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + status: ThreadStatus + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadUnarchiveParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadUnarchivedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadUnsubscribeParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread_id: Annotated[str, Field(alias="threadId")] + + +class ThreadUnsubscribeStatus(Enum): + not_loaded = "notLoaded" + not_subscribed = "notSubscribed" + unsubscribed = "unsubscribed" + + +class TokenUsage(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + cached_input_tokens: int + input_tokens: int + output_tokens: int + reasoning_output_tokens: int + total_tokens: int + + +class TokenUsageBreakdown(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + cached_input_tokens: Annotated[int, Field(alias="cachedInputTokens")] + input_tokens: Annotated[int, Field(alias="inputTokens")] + 
output_tokens: Annotated[int, Field(alias="outputTokens")] + reasoning_output_tokens: Annotated[int, Field(alias="reasoningOutputTokens")] + total_tokens: Annotated[int, Field(alias="totalTokens")] + + +class TokenUsageInfo(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + last_token_usage: TokenUsage + model_context_window: int | None = None + total_token_usage: TokenUsage + + +class Tool(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + field_meta: Annotated[Any | None, Field(alias="_meta")] = None + annotations: Any | None = None + description: str | None = None + icons: list | None = None + input_schema: Annotated[Any, Field(alias="inputSchema")] + name: str + output_schema: Annotated[Any | None, Field(alias="outputSchema")] = None + title: str | None = None + + +class TurnAbortReason(Enum): + interrupted = "interrupted" + replaced = "replaced" + review_ended = "review_ended" + + +class TurnDiffUpdatedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + diff: str + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] + + +class TurnError(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + additional_details: Annotated[str | None, Field(alias="additionalDetails")] = None + codex_error_info: Annotated[ + CodexErrorInfo | None, Field(alias="codexErrorInfo") + ] = None + message: str + + +class TurnInterruptParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] + + +class TurnInterruptResponse(BaseModel): + pass + model_config = ConfigDict( + populate_by_name=True, + ) + + +class AgentMessageTurnItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + content: list[AgentMessageContent] + id: str + phase: Annotated[ + MessagePhase | None, + Field( + description="Optional phase 
metadata carried through from `ResponseItem::Message`.\n\nThis is currently used by TUI rendering to distinguish mid-turn commentary from a final answer and avoid status-indicator jitter." + ), + ] = None + type: Annotated[Literal["AgentMessage"], Field(title="AgentMessageTurnItemType")] + + +class PlanTurnItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: str + text: str + type: Annotated[Literal["Plan"], Field(title="PlanTurnItemType")] + + +class ReasoningTurnItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: str + raw_content: list[str] | None = [] + summary_text: list[str] + type: Annotated[Literal["Reasoning"], Field(title="ReasoningTurnItemType")] + + +class WebSearchTurnItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + action: ResponsesApiWebSearchAction + id: str + query: str + type: Annotated[Literal["WebSearch"], Field(title="WebSearchTurnItemType")] + + +class ImageGenerationTurnItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: str + result: str + revised_prompt: str | None = None + saved_path: str | None = None + status: str + type: Annotated[ + Literal["ImageGeneration"], Field(title="ImageGenerationTurnItemType") + ] + + +class ContextCompactionTurnItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: str + type: Annotated[ + Literal["ContextCompaction"], Field(title="ContextCompactionTurnItemType") + ] + + +class TurnPlanStepStatus(Enum): + pending = "pending" + in_progress = "inProgress" + completed = "completed" + + +class TurnStatus(Enum): + completed = "completed" + interrupted = "interrupted" + failed = "failed" + in_progress = "inProgress" + + +class TurnSteerResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + turn_id: Annotated[str, Field(alias="turnId")] + + +class TextUserInput(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + text: str + 
text_elements: Annotated[ + list[TextElement] | None, + Field( + description="UI-defined spans within `text` used to render or persist special elements." + ), + ] = [] + type: Annotated[Literal["text"], Field(title="TextUserInputType")] + + +class ImageUserInput(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[Literal["image"], Field(title="ImageUserInputType")] + url: str + + +class LocalImageUserInput(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + path: str + type: Annotated[Literal["localImage"], Field(title="LocalImageUserInputType")] + + +class SkillUserInput(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + name: str + path: str + type: Annotated[Literal["skill"], Field(title="SkillUserInputType")] + + +class MentionUserInput(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + name: str + path: str + type: Annotated[Literal["mention"], Field(title="MentionUserInputType")] + + +class UserInput( + RootModel[ + TextUserInput + | ImageUserInput + | LocalImageUserInput + | SkillUserInput + | MentionUserInput + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: ( + TextUserInput + | ImageUserInput + | LocalImageUserInput + | SkillUserInput + | MentionUserInput + ) + + +class Verbosity(Enum): + low = "low" + medium = "medium" + high = "high" + + +class SearchWebSearchAction(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + queries: list[str] | None = None + query: str | None = None + type: Annotated[Literal["search"], Field(title="SearchWebSearchActionType")] + + +class OpenPageWebSearchAction(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[Literal["openPage"], Field(title="OpenPageWebSearchActionType")] + url: str | None = None + + +class FindInPageWebSearchAction(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + pattern: str | None = None + type: 
Annotated[Literal["findInPage"], Field(title="FindInPageWebSearchActionType")] + url: str | None = None + + +class OtherWebSearchAction(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[Literal["other"], Field(title="OtherWebSearchActionType")] + + +class WebSearchAction( + RootModel[ + SearchWebSearchAction + | OpenPageWebSearchAction + | FindInPageWebSearchAction + | OtherWebSearchAction + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: ( + SearchWebSearchAction + | OpenPageWebSearchAction + | FindInPageWebSearchAction + | OtherWebSearchAction + ) + + +class WebSearchContextSize(Enum): + low = "low" + medium = "medium" + high = "high" + + +class WebSearchLocation(BaseModel): + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + ) + city: str | None = None + country: str | None = None + region: str | None = None + timezone: str | None = None + + +class WebSearchMode(Enum): + disabled = "disabled" + cached = "cached" + live = "live" + + +class WebSearchToolConfig(BaseModel): + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + ) + allowed_domains: list[str] | None = None + context_size: WebSearchContextSize | None = None + location: WebSearchLocation | None = None + + +class WindowsSandboxSetupMode(Enum): + elevated = "elevated" + unelevated = "unelevated" + + +class WindowsSandboxSetupStartParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + cwd: AbsolutePathBuf | None = None + mode: WindowsSandboxSetupMode + + +class WindowsSandboxSetupStartResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + started: bool + + +class WindowsWorldWritableWarningNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + extra_count: Annotated[int, Field(alias="extraCount", ge=0)] + failed_scan: Annotated[bool, Field(alias="failedScan")] + sample_paths: Annotated[list[str], Field(alias="samplePaths")] + 
+ +class WriteStatus(Enum): + ok = "ok" + ok_overridden = "okOverridden" + + +class ChatgptAccount(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + email: str + plan_type: Annotated[PlanType, Field(alias="planType")] + type: Annotated[Literal["chatgpt"], Field(title="ChatgptAccountType")] + + +class Account(RootModel[ApiKeyAccount | ChatgptAccount]): + model_config = ConfigDict( + populate_by_name=True, + ) + root: ApiKeyAccount | ChatgptAccount + + +class AccountUpdatedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + auth_mode: Annotated[AuthMode | None, Field(alias="authMode")] = None + plan_type: Annotated[PlanType | None, Field(alias="planType")] = None + + +class AppConfig(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + default_tools_approval_mode: AppToolApproval | None = None + default_tools_enabled: bool | None = None + destructive_enabled: bool | None = None + enabled: bool | None = True + open_world_enabled: bool | None = None + tools: AppToolsConfig | None = None + + +class AppMetadata(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + categories: list[str] | None = None + developer: str | None = None + first_party_requires_install: Annotated[ + bool | None, Field(alias="firstPartyRequiresInstall") + ] = None + first_party_type: Annotated[str | None, Field(alias="firstPartyType")] = None + review: AppReview | None = None + screenshots: list[AppScreenshot] | None = None + seo_description: Annotated[str | None, Field(alias="seoDescription")] = None + show_in_composer_when_unlinked: Annotated[ + bool | None, Field(alias="showInComposerWhenUnlinked") + ] = None + sub_categories: Annotated[list[str] | None, Field(alias="subCategories")] = None + version: str | None = None + version_id: Annotated[str | None, Field(alias="versionId")] = None + version_notes: Annotated[str | None, Field(alias="versionNotes")] = None + + +class AppsConfig(BaseModel): + 
model_config = ConfigDict( + populate_by_name=True, + ) + field_default: Annotated[AppsDefaultConfig | None, Field(alias="_default")] = None + + +class CancelLoginAccountResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + status: CancelLoginAccountStatus + + +class InitializeRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[Literal["initialize"], Field(title="InitializeRequestMethod")] + params: InitializeParams + + +class ThreadStartRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[Literal["thread/start"], Field(title="Thread/startRequestMethod")] + params: ThreadStartParams + + +class ThreadResumeRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["thread/resume"], Field(title="Thread/resumeRequestMethod") + ] + params: ThreadResumeParams + + +class ThreadForkRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[Literal["thread/fork"], Field(title="Thread/forkRequestMethod")] + params: ThreadForkParams + + +class ThreadArchiveRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["thread/archive"], Field(title="Thread/archiveRequestMethod") + ] + params: ThreadArchiveParams + + +class ThreadUnsubscribeRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["thread/unsubscribe"], Field(title="Thread/unsubscribeRequestMethod") + ] + params: ThreadUnsubscribeParams + + +class ThreadNameSetRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["thread/name/set"], Field(title="Thread/name/setRequestMethod") + ] + params: ThreadSetNameParams + + +class 
ThreadMetadataUpdateRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["thread/metadata/update"], + Field(title="Thread/metadata/updateRequestMethod"), + ] + params: ThreadMetadataUpdateParams + + +class ThreadUnarchiveRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["thread/unarchive"], Field(title="Thread/unarchiveRequestMethod") + ] + params: ThreadUnarchiveParams + + +class ThreadCompactStartRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["thread/compact/start"], + Field(title="Thread/compact/startRequestMethod"), + ] + params: ThreadCompactStartParams + + +class ThreadRollbackRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["thread/rollback"], Field(title="Thread/rollbackRequestMethod") + ] + params: ThreadRollbackParams + + +class ThreadLoadedListRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["thread/loaded/list"], Field(title="Thread/loaded/listRequestMethod") + ] + params: ThreadLoadedListParams + + +class ThreadReadRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[Literal["thread/read"], Field(title="Thread/readRequestMethod")] + params: ThreadReadParams + + +class SkillsListRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[Literal["skills/list"], Field(title="Skills/listRequestMethod")] + params: SkillsListParams + + +class PluginListRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[Literal["plugin/list"], Field(title="Plugin/listRequestMethod")] + params: PluginListParams + + +class 
SkillsRemoteListRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["skills/remote/list"], Field(title="Skills/remote/listRequestMethod") + ] + params: SkillsRemoteReadParams + + +class SkillsRemoteExportRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["skills/remote/export"], + Field(title="Skills/remote/exportRequestMethod"), + ] + params: SkillsRemoteWriteParams + + +class AppListRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[Literal["app/list"], Field(title="App/listRequestMethod")] + params: AppsListParams + + +class SkillsConfigWriteRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["skills/config/write"], Field(title="Skills/config/writeRequestMethod") + ] + params: SkillsConfigWriteParams + + +class PluginInstallRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["plugin/install"], Field(title="Plugin/installRequestMethod") + ] + params: PluginInstallParams + + +class PluginUninstallRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["plugin/uninstall"], Field(title="Plugin/uninstallRequestMethod") + ] + params: PluginUninstallParams + + +class TurnInterruptRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["turn/interrupt"], Field(title="Turn/interruptRequestMethod") + ] + params: TurnInterruptParams + + +class ModelListRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[Literal["model/list"], Field(title="Model/listRequestMethod")] + params: ModelListParams + + +class 
ExperimentalFeatureListRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["experimentalFeature/list"], + Field(title="ExperimentalFeature/listRequestMethod"), + ] + params: ExperimentalFeatureListParams + + +class McpServerOauthLoginRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["mcpServer/oauth/login"], + Field(title="McpServer/oauth/loginRequestMethod"), + ] + params: McpServerOauthLoginParams + + +class ConfigMcpServerReloadRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["config/mcpServer/reload"], + Field(title="Config/mcpServer/reloadRequestMethod"), + ] + params: None = None + + +class McpServerStatusListRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["mcpServerStatus/list"], + Field(title="McpServerStatus/listRequestMethod"), + ] + params: ListMcpServerStatusParams + + +class WindowsSandboxSetupStartRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["windowsSandbox/setupStart"], + Field(title="WindowsSandbox/setupStartRequestMethod"), + ] + params: WindowsSandboxSetupStartParams + + +class AccountLoginStartRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["account/login/start"], Field(title="Account/login/startRequestMethod") + ] + params: LoginAccountParams + + +class AccountLoginCancelRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["account/login/cancel"], + Field(title="Account/login/cancelRequestMethod"), + ] + params: CancelLoginAccountParams + + +class AccountLogoutRequest(BaseModel): + model_config = ConfigDict( + 
populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["account/logout"], Field(title="Account/logoutRequestMethod") + ] + params: None = None + + +class AccountRateLimitsReadRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["account/rateLimits/read"], + Field(title="Account/rateLimits/readRequestMethod"), + ] + params: None = None + + +class FeedbackUploadRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["feedback/upload"], Field(title="Feedback/uploadRequestMethod") + ] + params: FeedbackUploadParams + + +class CommandExecWriteRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["command/exec/write"], Field(title="Command/exec/writeRequestMethod") + ] + params: CommandExecWriteParams + + +class CommandExecTerminateRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["command/exec/terminate"], + Field(title="Command/exec/terminateRequestMethod"), + ] + params: CommandExecTerminateParams + + +class ConfigReadRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[Literal["config/read"], Field(title="Config/readRequestMethod")] + params: ConfigReadParams + + +class ExternalAgentConfigDetectRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["externalAgentConfig/detect"], + Field(title="ExternalAgentConfig/detectRequestMethod"), + ] + params: ExternalAgentConfigDetectParams + + +class ConfigRequirementsReadRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["configRequirements/read"], + Field(title="ConfigRequirements/readRequestMethod"), + ] + params: None = 
None + + +class AccountReadRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[Literal["account/read"], Field(title="Account/readRequestMethod")] + params: GetAccountParams + + +class FuzzyFileSearchRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["fuzzyFileSearch"], Field(title="FuzzyFileSearchRequestMethod") + ] + params: FuzzyFileSearchParams + + +class CollabAgentRef(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + agent_nickname: Annotated[ + str | None, + Field( + description="Optional nickname assigned to an AgentControl-spawned sub-agent." + ), + ] = None + agent_role: Annotated[ + str | None, + Field( + description="Optional role (agent_role) assigned to an AgentControl-spawned sub-agent." + ), + ] = None + thread_id: Annotated[ + ThreadId, Field(description="Thread ID of the receiver/new agent.") + ] + + +class CollabAgentState(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + message: str | None = None + status: CollabAgentStatus + + +class CollabAgentStatusEntry(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + agent_nickname: Annotated[ + str | None, + Field( + description="Optional nickname assigned to an AgentControl-spawned sub-agent." + ), + ] = None + agent_role: Annotated[ + str | None, + Field( + description="Optional role (agent_role) assigned to an AgentControl-spawned sub-agent." 
+ ), + ] = None + status: Annotated[AgentStatus, Field(description="Last known status of the agent.")] + thread_id: Annotated[ + ThreadId, Field(description="Thread ID of the receiver/new agent.") + ] + + +class CollaborationMode(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + mode: ModeKind + settings: Settings + + +class CollaborationModeMask(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + mode: ModeKind | None = None + model: str | None = None + name: str + reasoning_effort: ReasoningEffort | None = None + + +class CommandExecOutputDeltaNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + cap_reached: Annotated[ + bool, + Field( + alias="capReached", + description="`true` on the final streamed chunk for a stream when `outputBytesCap` truncated later output on that stream.", + ), + ] + delta_base64: Annotated[ + str, Field(alias="deltaBase64", description="Base64-encoded output bytes.") + ] + process_id: Annotated[ + str, + Field( + alias="processId", + description="Client-supplied, connection-scoped `processId` from the original `command/exec` request.", + ), + ] + stream: Annotated[ + CommandExecOutputStream, Field(description="Output stream for this chunk.") + ] + + +class CommandExecParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + command: Annotated[ + list[str], Field(description="Command argv vector. Empty arrays are rejected.") + ] + cwd: Annotated[ + str | None, + Field(description="Optional working directory. 
Defaults to the server cwd."), + ] = None + disable_output_cap: Annotated[ + bool | None, + Field( + alias="disableOutputCap", + description="Disable stdout/stderr capture truncation for this request.\n\nCannot be combined with `outputBytesCap`.", + ), + ] = None + disable_timeout: Annotated[ + bool | None, + Field( + alias="disableTimeout", + description="Disable the timeout entirely for this request.\n\nCannot be combined with `timeoutMs`.", + ), + ] = None + env: Annotated[ + dict[str, Any] | None, + Field( + description="Optional environment overrides merged into the server-computed environment.\n\nMatching names override inherited values. Set a key to `null` to unset an inherited variable." + ), + ] = None + output_bytes_cap: Annotated[ + int | None, + Field( + alias="outputBytesCap", + description="Optional per-stream stdout/stderr capture cap in bytes.\n\nWhen omitted, the server default applies. Cannot be combined with `disableOutputCap`.", + ge=0, + ), + ] = None + process_id: Annotated[ + str | None, + Field( + alias="processId", + description="Optional client-supplied, connection-scoped process id.\n\nRequired for `tty`, `streamStdin`, `streamStdoutStderr`, and follow-up `command/exec/write`, `command/exec/resize`, and `command/exec/terminate` calls. When omitted, buffered execution gets an internal id that is not exposed to the client.", + ), + ] = None + sandbox_policy: Annotated[ + SandboxPolicy | None, + Field( + alias="sandboxPolicy", + description="Optional sandbox policy for this command.\n\nUses the same shape as thread/turn execution sandbox configuration and defaults to the user's configured policy when omitted.", + ), + ] = None + size: Annotated[ + CommandExecTerminalSize | None, + Field( + description="Optional initial PTY size in character cells. Only valid when `tty` is true." 
+ ), + ] = None + stream_stdin: Annotated[ + bool | None, + Field( + alias="streamStdin", + description="Allow follow-up `command/exec/write` requests to write stdin bytes.\n\nRequires a client-supplied `processId`.", + ), + ] = None + stream_stdout_stderr: Annotated[ + bool | None, + Field( + alias="streamStdoutStderr", + description="Stream stdout/stderr via `command/exec/outputDelta` notifications.\n\nStreamed bytes are not duplicated into the final response and require a client-supplied `processId`.", + ), + ] = None + timeout_ms: Annotated[ + int | None, + Field( + alias="timeoutMs", + description="Optional timeout in milliseconds.\n\nWhen omitted, the server default applies. Cannot be combined with `disableTimeout`.", + ), + ] = None + tty: Annotated[ + bool | None, + Field( + description="Enable PTY mode.\n\nThis implies `streamStdin` and `streamStdoutStderr`." + ), + ] = None + + +class CommandExecResizeParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + process_id: Annotated[ + str, + Field( + alias="processId", + description="Client-supplied, connection-scoped `processId` from the original `command/exec` request.", + ), + ] + size: Annotated[ + CommandExecTerminalSize, Field(description="New PTY size in character cells.") + ] + + +class ConfigEdit(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + key_path: Annotated[str, Field(alias="keyPath")] + merge_strategy: Annotated[MergeStrategy, Field(alias="mergeStrategy")] + value: Any + + +class ConfigLayer(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + config: Any + disabled_reason: Annotated[str | None, Field(alias="disabledReason")] = None + name: ConfigLayerSource + version: str + + +class ConfigLayerMetadata(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + name: ConfigLayerSource + version: str + + +class ConfigRequirements(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + 
allowed_approval_policies: Annotated[ + list[AskForApproval] | None, Field(alias="allowedApprovalPolicies") + ] = None + allowed_sandbox_modes: Annotated[ + list[SandboxMode] | None, Field(alias="allowedSandboxModes") + ] = None + allowed_web_search_modes: Annotated[ + list[WebSearchMode] | None, Field(alias="allowedWebSearchModes") + ] = None + enforce_residency: Annotated[ + ResidencyRequirement | None, Field(alias="enforceResidency") + ] = None + feature_requirements: Annotated[ + dict[str, Any] | None, Field(alias="featureRequirements") + ] = None + + +class ConfigRequirementsReadResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + requirements: Annotated[ + ConfigRequirements | None, + Field( + description="Null if no requirements are configured (e.g. no requirements.toml/MDM entries)." + ), + ] = None + + +class ConfigValueWriteParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + expected_version: Annotated[str | None, Field(alias="expectedVersion")] = None + file_path: Annotated[ + str | None, + Field( + alias="filePath", + description="Path to the config file to write; defaults to the user's `config.toml` when omitted.", + ), + ] = None + key_path: Annotated[str, Field(alias="keyPath")] + merge_strategy: Annotated[MergeStrategy, Field(alias="mergeStrategy")] + value: Any + + +class ConfigWarningNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + details: Annotated[ + str | None, Field(description="Optional extra guidance or error details.") + ] = None + path: Annotated[ + str | None, + Field( + description="Optional path to the config file that triggered the warning." + ), + ] = None + range: Annotated[ + TextRange | None, + Field( + description="Optional range for the error location inside the config file." 
+ ), + ] = None + summary: Annotated[str, Field(description="Concise summary of the warning.")] + + +class ErrorNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + error: TurnError + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] + will_retry: Annotated[bool, Field(alias="willRetry")] + + +class ModelRerouteEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + from_model: str + reason: ModelRerouteReason + to_model: str + type: Annotated[Literal["model_reroute"], Field(title="ModelRerouteEventMsgType")] + + +class TaskStartedEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + collaboration_mode_kind: ModeKind | None = "default" + model_context_window: int | None = None + turn_id: str + type: Annotated[Literal["task_started"], Field(title="TaskStartedEventMsgType")] + + +class AgentMessageEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + message: str + phase: MessagePhase | None = None + type: Annotated[Literal["agent_message"], Field(title="AgentMessageEventMsgType")] + + +class UserMessageEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + images: Annotated[ + list[str] | None, + Field( + description="Image URLs sourced from `UserInput::Image`. These are safe to replay in legacy UI history events and correspond to images sent to the model." + ), + ] = None + local_images: Annotated[ + list[str] | None, + Field( + description="Local file paths sourced from `UserInput::LocalImage`. These are kept so the UI can reattach images when editing history, and should not be sent to the model or treated as API-ready URLs." + ), + ] = [] + message: str + text_elements: Annotated[ + list[TextElement] | None, + Field( + description="UI-defined spans within `message` used to render or persist special elements." 
+ ), + ] = [] + type: Annotated[Literal["user_message"], Field(title="UserMessageEventMsgType")] + + +class ThreadNameUpdatedEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread_id: ThreadId + thread_name: str | None = None + type: Annotated[ + Literal["thread_name_updated"], Field(title="ThreadNameUpdatedEventMsgType") + ] + + +class McpStartupUpdateEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + server: Annotated[str, Field(description="Server name being started.")] + status: Annotated[McpStartupStatus, Field(description="Current startup status.")] + type: Annotated[ + Literal["mcp_startup_update"], Field(title="McpStartupUpdateEventMsgType") + ] + + +class McpStartupCompleteEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + cancelled: list[str] + failed: list[McpStartupFailure] + ready: list[str] + type: Annotated[ + Literal["mcp_startup_complete"], Field(title="McpStartupCompleteEventMsgType") + ] + + +class McpToolCallBeginEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[ + str, + Field( + description="Identifier so this can be paired with the McpToolCallEnd event." + ), + ] + invocation: McpInvocation + type: Annotated[ + Literal["mcp_tool_call_begin"], Field(title="McpToolCallBeginEventMsgType") + ] + + +class McpToolCallEndEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[ + str, + Field( + description="Identifier for the corresponding McpToolCallBegin that finished." + ), + ] + duration: Duration + invocation: McpInvocation + result: Annotated[ + ResultOfCallToolResultOrString, + Field(description="Result of the tool call. 
Note this could be an error."), + ] + type: Annotated[ + Literal["mcp_tool_call_end"], Field(title="McpToolCallEndEventMsgType") + ] + + +class WebSearchEndEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + action: ResponsesApiWebSearchAction + call_id: str + query: str + type: Annotated[Literal["web_search_end"], Field(title="WebSearchEndEventMsgType")] + + +class ExecCommandBeginEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[ + str, + Field( + description="Identifier so this can be paired with the ExecCommandEnd event." + ), + ] + command: Annotated[list[str], Field(description="The command to be executed.")] + cwd: Annotated[ + str, + Field( + description="The command's working directory if not the default cwd for the agent." + ), + ] + interaction_input: Annotated[ + str | None, + Field( + description="Raw input sent to a unified exec session (if this is an interaction event)." + ), + ] = None + parsed_cmd: list[ParsedCommand] + process_id: Annotated[ + str | None, + Field( + description="Identifier for the underlying PTY process (when available)." + ), + ] = None + source: Annotated[ + ExecCommandSource | None, + Field( + description="Where the command originated. Defaults to Agent for backward compatibility." + ), + ] = "agent" + turn_id: Annotated[str, Field(description="Turn ID that this command belongs to.")] + type: Annotated[ + Literal["exec_command_begin"], Field(title="ExecCommandBeginEventMsgType") + ] + + +class ExecCommandOutputDeltaEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[ + str, + Field( + description="Identifier for the ExecCommandBegin that produced this chunk." 
+ ), + ] + chunk: Annotated[ + str, Field(description="Raw bytes from the stream (may not be valid UTF-8).") + ] + stream: Annotated[ + ExecOutputStream, Field(description="Which stream produced this chunk.") + ] + type: Annotated[ + Literal["exec_command_output_delta"], + Field(title="ExecCommandOutputDeltaEventMsgType"), + ] + + +class ExecCommandEndEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + aggregated_output: Annotated[ + str | None, Field(description="Captured aggregated output") + ] = "" + call_id: Annotated[ + str, Field(description="Identifier for the ExecCommandBegin that finished.") + ] + command: Annotated[list[str], Field(description="The command that was executed.")] + cwd: Annotated[ + str, + Field( + description="The command's working directory if not the default cwd for the agent." + ), + ] + duration: Annotated[ + Duration, Field(description="The duration of the command execution.") + ] + exit_code: Annotated[int, Field(description="The command's exit code.")] + formatted_output: Annotated[ + str, + Field(description="Formatted output from the command, as seen by the model."), + ] + interaction_input: Annotated[ + str | None, + Field( + description="Raw input sent to a unified exec session (if this is an interaction event)." + ), + ] = None + parsed_cmd: list[ParsedCommand] + process_id: Annotated[ + str | None, + Field( + description="Identifier for the underlying PTY process (when available)." + ), + ] = None + source: Annotated[ + ExecCommandSource | None, + Field( + description="Where the command originated. Defaults to Agent for backward compatibility." 
+ ), + ] = "agent" + status: Annotated[ + ExecCommandStatus, + Field(description="Completion status for this command execution."), + ] + stderr: Annotated[str, Field(description="Captured stderr")] + stdout: Annotated[str, Field(description="Captured stdout")] + turn_id: Annotated[str, Field(description="Turn ID that this command belongs to.")] + type: Annotated[ + Literal["exec_command_end"], Field(title="ExecCommandEndEventMsgType") + ] + + +class RequestPermissionsEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[ + str, + Field( + description="Responses API call id for the associated tool call, if available." + ), + ] + permissions: PermissionProfile + reason: str | None = None + turn_id: Annotated[ + str | None, + Field( + description="Turn ID that this request belongs to. Uses `#[serde(default)]` for backwards compatibility." + ), + ] = "" + type: Annotated[ + Literal["request_permissions"], Field(title="RequestPermissionsEventMsgType") + ] + + +class ElicitationRequestEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + request: ElicitationRequest + server_name: str + turn_id: Annotated[ + str | None, + Field(description="Turn ID that this elicitation belongs to, when known."), + ] = None + type: Annotated[ + Literal["elicitation_request"], Field(title="ElicitationRequestEventMsgType") + ] + + +class ApplyPatchApprovalRequestEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[ + str, + Field( + description="Responses API call id for the associated patch apply call, if available." + ), + ] + changes: dict[str, FileChange] + grant_root: Annotated[ + str | None, + Field( + description="When set, the agent is asking the user to allow writes under this root for the remainder of the session." + ), + ] = None + reason: Annotated[ + str | None, + Field( + description="Optional explanatory reason (e.g. 
request for extra write access)." + ), + ] = None + turn_id: Annotated[ + str | None, + Field( + description="Turn ID that this patch belongs to. Uses `#[serde(default)]` for backwards compatibility with older senders." + ), + ] = "" + type: Annotated[ + Literal["apply_patch_approval_request"], + Field(title="ApplyPatchApprovalRequestEventMsgType"), + ] + + +class PatchApplyBeginEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + auto_approved: Annotated[ + bool, + Field( + description="If true, there was no ApplyPatchApprovalRequest for this patch." + ), + ] + call_id: Annotated[ + str, + Field( + description="Identifier so this can be paired with the PatchApplyEnd event." + ), + ] + changes: Annotated[ + dict[str, FileChange], Field(description="The changes to be applied.") + ] + turn_id: Annotated[ + str | None, + Field( + description="Turn ID that this patch belongs to. Uses `#[serde(default)]` for backwards compatibility." + ), + ] = "" + type: Annotated[ + Literal["patch_apply_begin"], Field(title="PatchApplyBeginEventMsgType") + ] + + +class PatchApplyEndEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[ + str, Field(description="Identifier for the PatchApplyBegin that finished.") + ] + changes: Annotated[ + dict[str, FileChange] | None, + Field( + description="The changes that were applied (mirrors PatchApplyBeginEvent::changes)." + ), + ] = {} + status: Annotated[ + PatchApplyStatus, + Field(description="Completion status for this patch application."), + ] + stderr: Annotated[ + str, Field(description="Captured stderr (parser errors, IO failures, etc.).") + ] + stdout: Annotated[ + str, Field(description="Captured stdout (summary printed by apply_patch).") + ] + success: Annotated[ + bool, Field(description="Whether the patch was applied successfully.") + ] + turn_id: Annotated[ + str | None, + Field( + description="Turn ID that this patch belongs to. 
Uses `#[serde(default)]` for backwards compatibility." + ), + ] = "" + type: Annotated[ + Literal["patch_apply_end"], Field(title="PatchApplyEndEventMsgType") + ] + + +class GetHistoryEntryResponseEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + entry: Annotated[ + HistoryEntry | None, + Field( + description="The entry at the requested offset, if available and parseable." + ), + ] = None + log_id: Annotated[int, Field(ge=0)] + offset: Annotated[int, Field(ge=0)] + type: Annotated[ + Literal["get_history_entry_response"], + Field(title="GetHistoryEntryResponseEventMsgType"), + ] + + +class McpListToolsResponseEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + auth_statuses: Annotated[ + dict[str, McpAuthStatus], + Field(description="Authentication status for each configured MCP server."), + ] + resource_templates: Annotated[ + dict[str, list[ResourceTemplate]], + Field(description="Known resource templates grouped by server name."), + ] + resources: Annotated[ + dict[str, list[Resource]], + Field(description="Known resources grouped by server name."), + ] + tools: Annotated[ + dict[str, Tool], + Field(description="Fully qualified tool name -> tool definition."), + ] + type: Annotated[ + Literal["mcp_list_tools_response"], + Field(title="McpListToolsResponseEventMsgType"), + ] + + +class ListRemoteSkillsResponseEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + skills: list[RemoteSkillSummary] + type: Annotated[ + Literal["list_remote_skills_response"], + Field(title="ListRemoteSkillsResponseEventMsgType"), + ] + + +class TurnAbortedEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + reason: TurnAbortReason + turn_id: str | None = None + type: Annotated[Literal["turn_aborted"], Field(title="TurnAbortedEventMsgType")] + + +class EnteredReviewModeEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + target: ReviewTarget + 
type: Annotated[ + Literal["entered_review_mode"], Field(title="EnteredReviewModeEventMsgType") + ] + user_facing_hint: str | None = None + + +class CollabAgentSpawnBeginEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[str, Field(description="Identifier for the collab tool call.")] + model: str + prompt: Annotated[ + str, + Field( + description="Initial prompt sent to the agent. Can be empty to prevent CoT leaking at the beginning." + ), + ] + reasoning_effort: ReasoningEffort + sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] + type: Annotated[ + Literal["collab_agent_spawn_begin"], + Field(title="CollabAgentSpawnBeginEventMsgType"), + ] + + +class CollabAgentSpawnEndEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[str, Field(description="Identifier for the collab tool call.")] + model: Annotated[str, Field(description="Model requested for the spawned agent.")] + new_agent_nickname: Annotated[ + str | None, Field(description="Optional nickname assigned to the new agent.") + ] = None + new_agent_role: Annotated[ + str | None, Field(description="Optional role assigned to the new agent.") + ] = None + new_thread_id: Annotated[ + ThreadId | None, + Field(description="Thread ID of the newly spawned agent, if it was created."), + ] = None + prompt: Annotated[ + str, + Field( + description="Initial prompt sent to the agent. Can be empty to prevent CoT leaking at the beginning." + ), + ] + reasoning_effort: Annotated[ + ReasoningEffort, + Field(description="Reasoning effort requested for the spawned agent."), + ] + sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] + status: Annotated[ + AgentStatus, + Field( + description="Last known status of the new agent reported to the sender agent." 
+ ), + ] + type: Annotated[ + Literal["collab_agent_spawn_end"], + Field(title="CollabAgentSpawnEndEventMsgType"), + ] + + +class CollabAgentInteractionBeginEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[str, Field(description="Identifier for the collab tool call.")] + prompt: Annotated[ + str, + Field( + description="Prompt sent from the sender to the receiver. Can be empty to prevent CoT leaking at the beginning." + ), + ] + receiver_thread_id: Annotated[ + ThreadId, Field(description="Thread ID of the receiver.") + ] + sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] + type: Annotated[ + Literal["collab_agent_interaction_begin"], + Field(title="CollabAgentInteractionBeginEventMsgType"), + ] + + +class CollabAgentInteractionEndEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[str, Field(description="Identifier for the collab tool call.")] + prompt: Annotated[ + str, + Field( + description="Prompt sent from the sender to the receiver. Can be empty to prevent CoT leaking at the beginning." + ), + ] + receiver_agent_nickname: Annotated[ + str | None, + Field(description="Optional nickname assigned to the receiver agent."), + ] = None + receiver_agent_role: Annotated[ + str | None, Field(description="Optional role assigned to the receiver agent.") + ] = None + receiver_thread_id: Annotated[ + ThreadId, Field(description="Thread ID of the receiver.") + ] + sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] + status: Annotated[ + AgentStatus, + Field( + description="Last known status of the receiver agent reported to the sender agent." 
+ ), + ] + type: Annotated[ + Literal["collab_agent_interaction_end"], + Field(title="CollabAgentInteractionEndEventMsgType"), + ] + + +class CollabWaitingBeginEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[str, Field(description="ID of the waiting call.")] + receiver_agents: Annotated[ + list[CollabAgentRef] | None, + Field(description="Optional nicknames/roles for receivers."), + ] = None + receiver_thread_ids: Annotated[ + list[ThreadId], Field(description="Thread ID of the receivers.") + ] + sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] + type: Annotated[ + Literal["collab_waiting_begin"], Field(title="CollabWaitingBeginEventMsgType") + ] + + +class CollabWaitingEndEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + agent_statuses: Annotated[ + list[CollabAgentStatusEntry] | None, + Field(description="Optional receiver metadata paired with final statuses."), + ] = None + call_id: Annotated[str, Field(description="ID of the waiting call.")] + sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] + statuses: Annotated[ + dict[str, AgentStatus], + Field( + description="Last known status of the receiver agents reported to the sender agent." 
+ ), + ] + type: Annotated[ + Literal["collab_waiting_end"], Field(title="CollabWaitingEndEventMsgType") + ] + + +class CollabCloseBeginEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[str, Field(description="Identifier for the collab tool call.")] + receiver_thread_id: Annotated[ + ThreadId, Field(description="Thread ID of the receiver.") + ] + sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] + type: Annotated[ + Literal["collab_close_begin"], Field(title="CollabCloseBeginEventMsgType") + ] + + +class CollabCloseEndEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[str, Field(description="Identifier for the collab tool call.")] + receiver_agent_nickname: Annotated[ + str | None, + Field(description="Optional nickname assigned to the receiver agent."), + ] = None + receiver_agent_role: Annotated[ + str | None, Field(description="Optional role assigned to the receiver agent.") + ] = None + receiver_thread_id: Annotated[ + ThreadId, Field(description="Thread ID of the receiver.") + ] + sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] + status: Annotated[ + AgentStatus, + Field( + description="Last known status of the receiver agent reported to the sender agent before the close." 
+ ), + ] + type: Annotated[ + Literal["collab_close_end"], Field(title="CollabCloseEndEventMsgType") + ] + + +class CollabResumeBeginEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[str, Field(description="Identifier for the collab tool call.")] + receiver_agent_nickname: Annotated[ + str | None, + Field(description="Optional nickname assigned to the receiver agent."), + ] = None + receiver_agent_role: Annotated[ + str | None, Field(description="Optional role assigned to the receiver agent.") + ] = None + receiver_thread_id: Annotated[ + ThreadId, Field(description="Thread ID of the receiver.") + ] + sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] + type: Annotated[ + Literal["collab_resume_begin"], Field(title="CollabResumeBeginEventMsgType") + ] + + +class CollabResumeEndEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[str, Field(description="Identifier for the collab tool call.")] + receiver_agent_nickname: Annotated[ + str | None, + Field(description="Optional nickname assigned to the receiver agent."), + ] = None + receiver_agent_role: Annotated[ + str | None, Field(description="Optional role assigned to the receiver agent.") + ] = None + receiver_thread_id: Annotated[ + ThreadId, Field(description="Thread ID of the receiver.") + ] + sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] + status: Annotated[ + AgentStatus, + Field( + description="Last known status of the receiver agent reported to the sender agent after resume." + ), + ] + type: Annotated[ + Literal["collab_resume_end"], Field(title="CollabResumeEndEventMsgType") + ] + + +class ExperimentalFeature(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + announcement: Annotated[ + str | None, + Field( + description="Announcement copy shown to users when the feature is introduced. 
Null when this feature is not in beta." + ), + ] = None + default_enabled: Annotated[ + bool, + Field( + alias="defaultEnabled", + description="Whether this feature is enabled by default.", + ), + ] + description: Annotated[ + str | None, + Field( + description="Short summary describing what the feature does. Null when this feature is not in beta." + ), + ] = None + display_name: Annotated[ + str | None, + Field( + alias="displayName", + description="User-facing display name shown in the experimental features UI. Null when this feature is not in beta.", + ), + ] = None + enabled: Annotated[ + bool, + Field( + description="Whether this feature is currently enabled in the loaded config." + ), + ] + name: Annotated[ + str, Field(description="Stable key used in config.toml and CLI flag toggles.") + ] + stage: Annotated[ + ExperimentalFeatureStage, + Field(description="Lifecycle stage of this feature flag."), + ] + + +class ExperimentalFeatureListResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + data: list[ExperimentalFeature] + next_cursor: Annotated[ + str | None, + Field( + alias="nextCursor", + description="Opaque cursor to pass to the next call to continue after the last item. If None, there are no more items to return.", + ), + ] = None + + +class ExternalAgentConfigMigrationItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + cwd: Annotated[ + str | None, + Field( + description="Null or empty means home-scoped migration; non-empty means repo-scoped migration." 
+ ), + ] = None + description: str + item_type: Annotated[ExternalAgentConfigMigrationItemType, Field(alias="itemType")] + + +class FileUpdateChange(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + diff: str + kind: PatchChangeKind + path: str + + +class InputImageFunctionCallOutputContentItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + detail: ImageDetail | None = None + image_url: str + type: Annotated[ + Literal["input_image"], + Field(title="InputImageFunctionCallOutputContentItemType"), + ] + + +class FunctionCallOutputContentItem( + RootModel[ + InputTextFunctionCallOutputContentItem | InputImageFunctionCallOutputContentItem + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: Annotated[ + InputTextFunctionCallOutputContentItem + | InputImageFunctionCallOutputContentItem, + Field( + description="Responses API compatible content items that can be returned by a tool call. This is a subset of ContentItem with the types we support as function call outputs." 
+ ), + ] + + +class GetAccountResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + account: Account | None = None + requires_openai_auth: Annotated[bool, Field(alias="requiresOpenaiAuth")] + + +class HookOutputEntry(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + kind: HookOutputEntryKind + text: str + + +class HookRunSummary(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + completed_at: Annotated[int | None, Field(alias="completedAt")] = None + display_order: Annotated[int, Field(alias="displayOrder")] + duration_ms: Annotated[int | None, Field(alias="durationMs")] = None + entries: list[HookOutputEntry] + event_name: Annotated[HookEventName, Field(alias="eventName")] + execution_mode: Annotated[HookExecutionMode, Field(alias="executionMode")] + handler_type: Annotated[HookHandlerType, Field(alias="handlerType")] + id: str + scope: HookScope + source_path: Annotated[str, Field(alias="sourcePath")] + started_at: Annotated[int, Field(alias="startedAt")] + status: HookRunStatus + status_message: Annotated[str | None, Field(alias="statusMessage")] = None + + +class HookStartedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + run: HookRunSummary + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str | None, Field(alias="turnId")] = None + + +class McpServerStatus(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + auth_status: Annotated[McpAuthStatus, Field(alias="authStatus")] + name: str + resource_templates: Annotated[ + list[ResourceTemplate], Field(alias="resourceTemplates") + ] + resources: list[Resource] + tools: dict[str, Tool] + + +class Model(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + availability_nux: Annotated[ + ModelAvailabilityNux | None, Field(alias="availabilityNux") + ] = None + default_reasoning_effort: Annotated[ + ReasoningEffort, Field(alias="defaultReasoningEffort") + 
] + description: str + display_name: Annotated[str, Field(alias="displayName")] + hidden: bool + id: str + input_modalities: Annotated[ + list[InputModality] | None, Field(alias="inputModalities") + ] = ["text", "image"] + is_default: Annotated[bool, Field(alias="isDefault")] + model: str + supported_reasoning_efforts: Annotated[ + list[ReasoningEffortOption], Field(alias="supportedReasoningEfforts") + ] + supports_personality: Annotated[bool | None, Field(alias="supportsPersonality")] = ( + False + ) + upgrade: str | None = None + upgrade_info: Annotated[ModelUpgradeInfo | None, Field(alias="upgradeInfo")] = None + + +class ModelListResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + data: list[Model] + next_cursor: Annotated[ + str | None, + Field( + alias="nextCursor", + description="Opaque cursor to pass to the next call to continue after the last item. If None, there are no more items to return.", + ), + ] = None + + +class NetworkApprovalContext(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + host: str + protocol: NetworkApprovalProtocol + + +class NetworkPolicyAmendment(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + action: NetworkPolicyRuleAction + host: str + + +class OverriddenMetadata(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + effective_value: Annotated[Any, Field(alias="effectiveValue")] + message: str + overriding_layer: Annotated[ConfigLayerMetadata, Field(alias="overridingLayer")] + + +class PlanItemArg(BaseModel): + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + ) + status: StepStatus + step: str + + +class PluginMarketplaceEntry(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + name: str + path: AbsolutePathBuf + plugins: list[PluginSummary] + + +class RateLimitSnapshot(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + credits: CreditsSnapshot | None = None + limit_id: 
Annotated[str | None, Field(alias="limitId")] = None + limit_name: Annotated[str | None, Field(alias="limitName")] = None + plan_type: Annotated[PlanType | None, Field(alias="planType")] = None + primary: RateLimitWindow | None = None + secondary: RateLimitWindow | None = None + + +class InputTranscriptDeltaRealtimeEvent(BaseModel): + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + ) + input_transcript_delta: Annotated[ + RealtimeTranscriptDelta, Field(alias="InputTranscriptDelta") + ] + + +class OutputTranscriptDeltaRealtimeEvent(BaseModel): + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + ) + output_transcript_delta: Annotated[ + RealtimeTranscriptDelta, Field(alias="OutputTranscriptDelta") + ] + + +class RealtimeHandoffRequested(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + active_transcript: list[RealtimeTranscriptEntry] + handoff_id: str + input_transcript: str + item_id: str + + +class RequestUserInputQuestion(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + header: str + id: str + is_other: Annotated[bool | None, Field(alias="isOther")] = False + is_secret: Annotated[bool | None, Field(alias="isSecret")] = False + options: list[RequestUserInputQuestionOption] | None = None + question: str + + +class WebSearchCallResponseItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + action: ResponsesApiWebSearchAction | None = None + id: str | None = None + status: str | None = None + type: Annotated[ + Literal["web_search_call"], Field(title="WebSearchCallResponseItemType") + ] + + +class ReviewCodeLocation(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + absolute_file_path: str + line_range: ReviewLineRange + + +class NetworkPolicyAmendment1(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + network_policy_amendment: NetworkPolicyAmendment + + +class NetworkPolicyAmendmentReviewDecision(BaseModel): + 
model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + ) + network_policy_amendment: NetworkPolicyAmendment1 + + +class ReviewDecision( + RootModel[ + Literal["approved"] + | ApprovedExecpolicyAmendmentReviewDecision + | Literal["approved_for_session"] + | NetworkPolicyAmendmentReviewDecision + | Literal["denied"] + | Literal["abort"] + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: Annotated[ + Literal["approved"] + | ApprovedExecpolicyAmendmentReviewDecision + | Literal["approved_for_session"] + | NetworkPolicyAmendmentReviewDecision + | Literal["denied"] + | Literal["abort"], + Field(description="User's decision in response to an ExecApprovalRequest."), + ] + + +class ReviewFinding(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + body: str + code_location: ReviewCodeLocation + confidence_score: float + priority: int + title: str + + +class ReviewOutputEvent(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + findings: list[ReviewFinding] + overall_confidence_score: float + overall_correctness: str + overall_explanation: str + + +class ReviewStartParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + delivery: Annotated[ + ReviewDelivery | None, + Field( + description="Where to run the review: inline (default) on the current thread or detached on a new thread (returned in `reviewThreadId`)." 
+ ), + ] = None + target: ReviewTarget + thread_id: Annotated[str, Field(alias="threadId")] + + +class ErrorServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[Literal["error"], Field(title="ErrorNotificationMethod")] + params: ErrorNotification + + +class ThreadStatusChangedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["thread/status/changed"], + Field(title="Thread/status/changedNotificationMethod"), + ] + params: ThreadStatusChangedNotification + + +class ThreadArchivedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["thread/archived"], Field(title="Thread/archivedNotificationMethod") + ] + params: ThreadArchivedNotification + + +class ThreadUnarchivedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["thread/unarchived"], Field(title="Thread/unarchivedNotificationMethod") + ] + params: ThreadUnarchivedNotification + + +class ThreadClosedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["thread/closed"], Field(title="Thread/closedNotificationMethod") + ] + params: ThreadClosedNotification + + +class SkillsChangedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["skills/changed"], Field(title="Skills/changedNotificationMethod") + ] + params: SkillsChangedNotification + + +class ThreadNameUpdatedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["thread/name/updated"], + Field(title="Thread/name/updatedNotificationMethod"), + ] + params: ThreadNameUpdatedNotification + + +class HookStartedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: 
Annotated[ + Literal["hook/started"], Field(title="Hook/startedNotificationMethod") + ] + params: HookStartedNotification + + +class TurnDiffUpdatedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["turn/diff/updated"], Field(title="Turn/diff/updatedNotificationMethod") + ] + params: TurnDiffUpdatedNotification + + +class CommandExecOutputDeltaServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["command/exec/outputDelta"], + Field(title="Command/exec/outputDeltaNotificationMethod"), + ] + params: CommandExecOutputDeltaNotification + + +class ItemCommandExecutionTerminalInteractionServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["item/commandExecution/terminalInteraction"], + Field(title="Item/commandExecution/terminalInteractionNotificationMethod"), + ] + params: TerminalInteractionNotification + + +class ServerRequestResolvedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["serverRequest/resolved"], + Field(title="ServerRequest/resolvedNotificationMethod"), + ] + params: ServerRequestResolvedNotification + + +class AccountUpdatedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["account/updated"], Field(title="Account/updatedNotificationMethod") + ] + params: AccountUpdatedNotification + + +class ConfigWarningServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["configWarning"], Field(title="ConfigWarningNotificationMethod") + ] + params: ConfigWarningNotification + + +class ThreadRealtimeStartedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["thread/realtime/started"], + 
Field(title="Thread/realtime/startedNotificationMethod"), + ] + params: ThreadRealtimeStartedNotification + + +class ThreadRealtimeItemAddedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["thread/realtime/itemAdded"], + Field(title="Thread/realtime/itemAddedNotificationMethod"), + ] + params: ThreadRealtimeItemAddedNotification + + +class ThreadRealtimeOutputAudioDeltaServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["thread/realtime/outputAudio/delta"], + Field(title="Thread/realtime/outputAudio/deltaNotificationMethod"), + ] + params: ThreadRealtimeOutputAudioDeltaNotification + + +class ThreadRealtimeErrorServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["thread/realtime/error"], + Field(title="Thread/realtime/errorNotificationMethod"), + ] + params: ThreadRealtimeErrorNotification + + +class ThreadRealtimeClosedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["thread/realtime/closed"], + Field(title="Thread/realtime/closedNotificationMethod"), + ] + params: ThreadRealtimeClosedNotification + + +class WindowsWorldWritableWarningServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["windows/worldWritableWarning"], + Field(title="Windows/worldWritableWarningNotificationMethod"), + ] + params: WindowsWorldWritableWarningNotification + + +class SkillDependencies(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + tools: list[SkillToolDependency] + + +class SkillMetadata(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + dependencies: SkillDependencies | None = None + description: str + enabled: bool + interface: SkillInterface | None = None + name: str + path: str + scope: SkillScope 
+ short_description: Annotated[ + str | None, + Field( + alias="shortDescription", + description="Legacy short_description from SKILL.md. Prefer SKILL.json interface.short_description.", + ), + ] = None + + +class SkillsListEntry(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + cwd: str + errors: list[SkillErrorInfo] + skills: list[SkillMetadata] + + +class SkillsListResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + data: list[SkillsListEntry] + + +class ThreadSpawn(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + agent_nickname: str | None = None + agent_role: str | None = None + depth: int + parent_thread_id: ThreadId + + +class ThreadSpawnSubAgentSource(BaseModel): + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + ) + thread_spawn: ThreadSpawn + + +class SubAgentSource( + RootModel[SubAgentSourceValue | ThreadSpawnSubAgentSource | OtherSubAgentSource] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: SubAgentSourceValue | ThreadSpawnSubAgentSource | OtherSubAgentSource + + +class UserMessageThreadItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + content: list[UserInput] + id: str + type: Annotated[Literal["userMessage"], Field(title="UserMessageThreadItemType")] + + +class FileChangeThreadItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + changes: list[FileUpdateChange] + id: str + status: PatchApplyStatus + type: Annotated[Literal["fileChange"], Field(title="FileChangeThreadItemType")] + + +class CollabAgentToolCallThreadItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + agents_states: Annotated[ + dict[str, CollabAgentState], + Field( + alias="agentsStates", + description="Last known status of the target agents, when available.", + ), + ] + id: Annotated[ + str, Field(description="Unique identifier for this collab tool call.") + ] + model: Annotated[ + str | None, 
+ Field(description="Model requested for the spawned agent, when applicable."), + ] = None + prompt: Annotated[ + str | None, + Field( + description="Prompt text sent as part of the collab tool call, when available." + ), + ] = None + reasoning_effort: Annotated[ + ReasoningEffort | None, + Field( + alias="reasoningEffort", + description="Reasoning effort requested for the spawned agent, when applicable.", + ), + ] = None + receiver_thread_ids: Annotated[ + list[str], + Field( + alias="receiverThreadIds", + description="Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.", + ), + ] + sender_thread_id: Annotated[ + str, + Field( + alias="senderThreadId", + description="Thread ID of the agent issuing the collab request.", + ), + ] + status: Annotated[ + CollabAgentToolCallStatus, + Field(description="Current status of the collab tool call."), + ] + tool: Annotated[ + CollabAgentTool, Field(description="Name of the collab tool that was invoked.") + ] + type: Annotated[ + Literal["collabAgentToolCall"], Field(title="CollabAgentToolCallThreadItemType") + ] + + +class WebSearchThreadItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + action: WebSearchAction | None = None + id: str + query: str + type: Annotated[Literal["webSearch"], Field(title="WebSearchThreadItemType")] + + +class ThreadItem( + RootModel[ + UserMessageThreadItem + | AgentMessageThreadItem + | PlanThreadItem + | ReasoningThreadItem + | CommandExecutionThreadItem + | FileChangeThreadItem + | McpToolCallThreadItem + | DynamicToolCallThreadItem + | CollabAgentToolCallThreadItem + | WebSearchThreadItem + | ImageViewThreadItem + | ImageGenerationThreadItem + | EnteredReviewModeThreadItem + | ExitedReviewModeThreadItem + | ContextCompactionThreadItem + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: ( + UserMessageThreadItem + | AgentMessageThreadItem + | PlanThreadItem + | 
ReasoningThreadItem + | CommandExecutionThreadItem + | FileChangeThreadItem + | McpToolCallThreadItem + | DynamicToolCallThreadItem + | CollabAgentToolCallThreadItem + | WebSearchThreadItem + | ImageViewThreadItem + | ImageGenerationThreadItem + | EnteredReviewModeThreadItem + | ExitedReviewModeThreadItem + | ContextCompactionThreadItem + ) + + +class ThreadListParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + archived: Annotated[ + bool | None, + Field( + description="Optional archived filter; when set to true, only archived threads are returned. If false or null, only non-archived threads are returned." + ), + ] = None + cursor: Annotated[ + str | None, + Field(description="Opaque pagination cursor returned by a previous call."), + ] = None + cwd: Annotated[ + str | None, + Field( + description="Optional cwd filter; when set, only threads whose session cwd exactly matches this path are returned." + ), + ] = None + limit: Annotated[ + int | None, + Field( + description="Optional page size; defaults to a reasonable server-side value.", + ge=0, + ), + ] = None + model_providers: Annotated[ + list[str] | None, + Field( + alias="modelProviders", + description="Optional provider filter; when set, only sessions recorded under these providers are returned. When present but empty, includes all providers.", + ), + ] = None + search_term: Annotated[ + str | None, + Field( + alias="searchTerm", + description="Optional substring filter for the extracted thread title.", + ), + ] = None + sort_key: Annotated[ + ThreadSortKey | None, + Field( + alias="sortKey", description="Optional sort key; defaults to created_at." + ), + ] = None + source_kinds: Annotated[ + list[ThreadSourceKind] | None, + Field( + alias="sourceKinds", + description="Optional source filter; when set, only sessions from these source kinds are returned. 
When omitted or empty, defaults to interactive sources.", + ), + ] = None + + +class ThreadTokenUsage(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + last: TokenUsageBreakdown + model_context_window: Annotated[int | None, Field(alias="modelContextWindow")] = ( + None + ) + total: TokenUsageBreakdown + + +class ThreadTokenUsageUpdatedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread_id: Annotated[str, Field(alias="threadId")] + token_usage: Annotated[ThreadTokenUsage, Field(alias="tokenUsage")] + turn_id: Annotated[str, Field(alias="turnId")] + + +class ThreadUnsubscribeResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + status: ThreadUnsubscribeStatus + + +class ToolsV2(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + view_image: bool | None = None + web_search: WebSearchToolConfig | None = None + + +class Turn(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + error: Annotated[ + TurnError | None, + Field(description="Only populated when the Turn's status is failed."), + ] = None + id: str + items: Annotated[ + list[ThreadItem], + Field( + description="Only populated on a `thread/resume` or `thread/fork` response. For all other responses and notifications returning a Turn, the items field will be an empty list." 
+ ), + ] + status: TurnStatus + + +class TurnCompletedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread_id: Annotated[str, Field(alias="threadId")] + turn: Turn + + +class UserMessageTurnItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + content: list[UserInput] + id: str + type: Annotated[Literal["UserMessage"], Field(title="UserMessageTurnItemType")] + + +class TurnItem( + RootModel[ + UserMessageTurnItem + | AgentMessageTurnItem + | PlanTurnItem + | ReasoningTurnItem + | WebSearchTurnItem + | ImageGenerationTurnItem + | ContextCompactionTurnItem + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: ( + UserMessageTurnItem + | AgentMessageTurnItem + | PlanTurnItem + | ReasoningTurnItem + | WebSearchTurnItem + | ImageGenerationTurnItem + | ContextCompactionTurnItem + ) + + +class TurnPlanStep(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + status: TurnPlanStepStatus + step: str + + +class TurnPlanUpdatedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + explanation: str | None = None + plan: list[TurnPlanStep] + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] + + +class TurnStartParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + approval_policy: Annotated[ + AskForApproval | None, + Field( + alias="approvalPolicy", + description="Override the approval policy for this turn and subsequent turns.", + ), + ] = None + cwd: Annotated[ + str | None, + Field( + description="Override the working directory for this turn and subsequent turns." + ), + ] = None + effort: Annotated[ + ReasoningEffort | None, + Field( + description="Override the reasoning effort for this turn and subsequent turns." 
+ ), + ] = None + input: list[UserInput] + model: Annotated[ + str | None, + Field(description="Override the model for this turn and subsequent turns."), + ] = None + output_schema: Annotated[ + Any | None, + Field( + alias="outputSchema", + description="Optional JSON Schema used to constrain the final assistant message for this turn.", + ), + ] = None + personality: Annotated[ + Personality | None, + Field( + description="Override the personality for this turn and subsequent turns." + ), + ] = None + sandbox_policy: Annotated[ + SandboxPolicy | None, + Field( + alias="sandboxPolicy", + description="Override the sandbox policy for this turn and subsequent turns.", + ), + ] = None + service_tier: Annotated[ + ServiceTier | None, + Field( + alias="serviceTier", + description="Override the service tier for this turn and subsequent turns.", + ), + ] = None + summary: Annotated[ + ReasoningSummary | None, + Field( + description="Override the reasoning summary for this turn and subsequent turns." + ), + ] = None + thread_id: Annotated[str, Field(alias="threadId")] + + +class TurnStartResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + turn: Turn + + +class TurnStartedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread_id: Annotated[str, Field(alias="threadId")] + turn: Turn + + +class TurnSteerParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + expected_turn_id: Annotated[ + str, + Field( + alias="expectedTurnId", + description="Required active turn id precondition. 
The request fails when it does not match the currently active turn.", + ), + ] + input: list[UserInput] + thread_id: Annotated[str, Field(alias="threadId")] + + +class WindowsSandboxSetupCompletedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + error: str | None = None + mode: WindowsSandboxSetupMode + success: bool + + +class AccountRateLimitsUpdatedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + rate_limits: Annotated[RateLimitSnapshot, Field(alias="rateLimits")] + + +class AppInfo(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + app_metadata: Annotated[AppMetadata | None, Field(alias="appMetadata")] = None + branding: AppBranding | None = None + description: str | None = None + distribution_channel: Annotated[str | None, Field(alias="distributionChannel")] = ( + None + ) + id: str + install_url: Annotated[str | None, Field(alias="installUrl")] = None + is_accessible: Annotated[bool | None, Field(alias="isAccessible")] = False + is_enabled: Annotated[ + bool | None, + Field( + alias="isEnabled", + description="Whether this app is enabled in config.toml. Example: ```toml [apps.bad_app] enabled = false ```", + ), + ] = True + labels: dict[str, Any] | None = None + logo_url: Annotated[str | None, Field(alias="logoUrl")] = None + logo_url_dark: Annotated[str | None, Field(alias="logoUrlDark")] = None + name: str + plugin_display_names: Annotated[ + list[str] | None, Field(alias="pluginDisplayNames") + ] = [] + + +class AppListUpdatedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + data: list[AppInfo] + + +class AppsListResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + data: list[AppInfo] + next_cursor: Annotated[ + str | None, + Field( + alias="nextCursor", + description="Opaque cursor to pass to the next call to continue after the last item. 
If None, there are no more items to return.", + ), + ] = None + + +class ThreadListRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[Literal["thread/list"], Field(title="Thread/listRequestMethod")] + params: ThreadListParams + + +class TurnStartRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[Literal["turn/start"], Field(title="Turn/startRequestMethod")] + params: TurnStartParams + + +class TurnSteerRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[Literal["turn/steer"], Field(title="Turn/steerRequestMethod")] + params: TurnSteerParams + + +class ReviewStartRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[Literal["review/start"], Field(title="Review/startRequestMethod")] + params: ReviewStartParams + + +class CommandExecRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[Literal["command/exec"], Field(title="Command/execRequestMethod")] + params: CommandExecParams + + +class CommandExecResizeRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["command/exec/resize"], Field(title="Command/exec/resizeRequestMethod") + ] + params: CommandExecResizeParams + + +class ConfigValueWriteRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["config/value/write"], Field(title="Config/value/writeRequestMethod") + ] + params: ConfigValueWriteParams + + +class ConfigBatchWriteParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + edits: list[ConfigEdit] + expected_version: Annotated[str | None, Field(alias="expectedVersion")] = None + file_path: Annotated[ + str | None, + Field( + alias="filePath", + 
description="Path to the config file to write; defaults to the user's `config.toml` when omitted.", + ), + ] = None + reload_user_config: Annotated[ + bool | None, + Field( + alias="reloadUserConfig", + description="When true, hot-reload the updated user config into all loaded threads after writing.", + ), + ] = None + + +class ConfigWriteResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + file_path: Annotated[ + AbsolutePathBuf, + Field( + alias="filePath", + description="Canonical path to the config file that was written.", + ), + ] + overridden_metadata: Annotated[ + OverriddenMetadata | None, Field(alias="overriddenMetadata") + ] = None + status: WriteStatus + version: str + + +class TokenCountEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + info: TokenUsageInfo | None = None + rate_limits: RateLimitSnapshot | None = None + type: Annotated[Literal["token_count"], Field(title="TokenCountEventMsgType")] + + +class ExecApprovalRequestEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + additional_permissions: Annotated[ + PermissionProfile | None, + Field( + description="Optional additional filesystem permissions requested for this command." + ), + ] = None + approval_id: Annotated[ + str | None, + Field( + description="Identifier for this specific approval callback.\n\nWhen absent, the approval is for the command item itself (`call_id`). This is present for subcommand approvals (via execve intercept)." + ), + ] = None + available_decisions: Annotated[ + list[ReviewDecision] | None, + Field( + description="Ordered list of decisions the client may present for this prompt.\n\nWhen absent, clients should derive the legacy default set from the other fields on this request." 
+ ), + ] = None + call_id: Annotated[ + str, Field(description="Identifier for the associated command execution item.") + ] + command: Annotated[list[str], Field(description="The command to be executed.")] + cwd: Annotated[str, Field(description="The command's working directory.")] + network_approval_context: Annotated[ + NetworkApprovalContext | None, + Field( + description="Optional network context for a blocked request that can be approved." + ), + ] = None + parsed_cmd: list[ParsedCommand] + proposed_execpolicy_amendment: Annotated[ + list[str] | None, + Field( + description="Proposed execpolicy amendment that can be applied to allow future runs." + ), + ] = None + proposed_network_policy_amendments: Annotated[ + list[NetworkPolicyAmendment] | None, + Field( + description="Proposed network policy amendments (for example allow/deny this host in future)." + ), + ] = None + reason: Annotated[ + str | None, + Field( + description="Optional human-readable reason for the approval (e.g. retry without sandbox)." + ), + ] = None + skill_metadata: Annotated[ + ExecApprovalRequestSkillMetadata | None, + Field( + description="Optional skill metadata when the approval was triggered by a skill script." + ), + ] = None + turn_id: Annotated[ + str | None, + Field( + description="Turn ID that this command belongs to. Uses `#[serde(default)]` for backwards compatibility." + ), + ] = "" + type: Annotated[ + Literal["exec_approval_request"], Field(title="ExecApprovalRequestEventMsgType") + ] + + +class RequestUserInputEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: Annotated[ + str, + Field( + description="Responses API call id for the associated tool call, if available." + ), + ] + questions: list[RequestUserInputQuestion] + turn_id: Annotated[ + str | None, + Field( + description="Turn ID that this request belongs to. Uses `#[serde(default)]` for backwards compatibility." 
+ ), + ] = "" + type: Annotated[ + Literal["request_user_input"], Field(title="RequestUserInputEventMsgType") + ] + + +class ListSkillsResponseEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + skills: list[SkillsListEntry] + type: Annotated[ + Literal["list_skills_response"], Field(title="ListSkillsResponseEventMsgType") + ] + + +class PlanUpdateEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + explanation: Annotated[ + str | None, + Field( + description="Arguments for the `update_plan` todo/checklist tool (not plan mode)." + ), + ] = None + plan: list[PlanItemArg] + type: Annotated[Literal["plan_update"], Field(title="PlanUpdateEventMsgType")] + + +class ExitedReviewModeEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + review_output: ReviewOutputEvent | None = None + type: Annotated[ + Literal["exited_review_mode"], Field(title="ExitedReviewModeEventMsgType") + ] + + +class ItemStartedEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + item: TurnItem + thread_id: ThreadId + turn_id: str + type: Annotated[Literal["item_started"], Field(title="ItemStartedEventMsgType")] + + +class ItemCompletedEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + item: TurnItem + thread_id: ThreadId + turn_id: str + type: Annotated[Literal["item_completed"], Field(title="ItemCompletedEventMsgType")] + + +class HookStartedEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + run: HookRunSummary + turn_id: str | None = None + type: Annotated[Literal["hook_started"], Field(title="HookStartedEventMsgType")] + + +class HookCompletedEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + run: HookRunSummary + turn_id: str | None = None + type: Annotated[Literal["hook_completed"], Field(title="HookCompletedEventMsgType")] + + +class ExternalAgentConfigDetectResponse(BaseModel): + model_config 
= ConfigDict( + populate_by_name=True, + ) + items: list[ExternalAgentConfigMigrationItem] + + +class ExternalAgentConfigImportParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + migration_items: Annotated[ + list[ExternalAgentConfigMigrationItem], Field(alias="migrationItems") + ] + + +class FunctionCallOutputBody(RootModel[str | list[FunctionCallOutputContentItem]]): + model_config = ConfigDict( + populate_by_name=True, + ) + root: str | list[FunctionCallOutputContentItem] + + +class FunctionCallOutputPayload(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + body: FunctionCallOutputBody + success: bool | None = None + + +class GetAccountRateLimitsResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + rate_limits: Annotated[ + RateLimitSnapshot, + Field( + alias="rateLimits", + description="Backward-compatible single-bucket view; mirrors the historical payload.", + ), + ] + rate_limits_by_limit_id: Annotated[ + dict[str, Any] | None, + Field( + alias="rateLimitsByLimitId", + description="Multi-bucket view keyed by metered `limit_id` (for example, `codex`).", + ), + ] = None + + +class HookCompletedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + run: HookRunSummary + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str | None, Field(alias="turnId")] = None + + +class ItemCompletedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + item: ThreadItem + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] + + +class ItemStartedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + item: ThreadItem + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] + + +class ListMcpServerStatusResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + data: 
list[McpServerStatus] + next_cursor: Annotated[ + str | None, + Field( + alias="nextCursor", + description="Opaque cursor to pass to the next call to continue after the last item. If None, there are no more items to return.", + ), + ] = None + + +class PluginListResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + marketplaces: list[PluginMarketplaceEntry] + remote_sync_error: Annotated[str | None, Field(alias="remoteSyncError")] = None + + +class ProfileV2(BaseModel): + model_config = ConfigDict( + extra="allow", + populate_by_name=True, + ) + approval_policy: AskForApproval | None = None + chatgpt_base_url: str | None = None + model: str | None = None + model_provider: str | None = None + model_reasoning_effort: ReasoningEffort | None = None + model_reasoning_summary: ReasoningSummary | None = None + model_verbosity: Verbosity | None = None + service_tier: ServiceTier | None = None + tools: ToolsV2 | None = None + web_search: WebSearchMode | None = None + + +class HandoffRequestedRealtimeEvent(BaseModel): + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + ) + handoff_requested: Annotated[ + RealtimeHandoffRequested, Field(alias="HandoffRequested") + ] + + +class RealtimeEvent( + RootModel[ + SessionUpdatedRealtimeEvent + | InputTranscriptDeltaRealtimeEvent + | OutputTranscriptDeltaRealtimeEvent + | AudioOutRealtimeEvent + | ConversationItemAddedRealtimeEvent + | ConversationItemDoneRealtimeEvent + | HandoffRequestedRealtimeEvent + | ErrorRealtimeEvent + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: ( + SessionUpdatedRealtimeEvent + | InputTranscriptDeltaRealtimeEvent + | OutputTranscriptDeltaRealtimeEvent + | AudioOutRealtimeEvent + | ConversationItemAddedRealtimeEvent + | ConversationItemDoneRealtimeEvent + | HandoffRequestedRealtimeEvent + | ErrorRealtimeEvent + ) + + +class FunctionCallOutputResponseItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: 
str + output: FunctionCallOutputPayload + type: Annotated[ + Literal["function_call_output"], + Field(title="FunctionCallOutputResponseItemType"), + ] + + +class CustomToolCallOutputResponseItem(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + call_id: str + output: FunctionCallOutputPayload + type: Annotated[ + Literal["custom_tool_call_output"], + Field(title="CustomToolCallOutputResponseItemType"), + ] + + +class ResponseItem( + RootModel[ + MessageResponseItem + | ReasoningResponseItem + | LocalShellCallResponseItem + | FunctionCallResponseItem + | ToolSearchCallResponseItem + | FunctionCallOutputResponseItem + | CustomToolCallResponseItem + | CustomToolCallOutputResponseItem + | ToolSearchOutputResponseItem + | WebSearchCallResponseItem + | ImageGenerationCallResponseItem + | GhostSnapshotResponseItem + | CompactionResponseItem + | OtherResponseItem + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: ( + MessageResponseItem + | ReasoningResponseItem + | LocalShellCallResponseItem + | FunctionCallResponseItem + | ToolSearchCallResponseItem + | FunctionCallOutputResponseItem + | CustomToolCallResponseItem + | CustomToolCallOutputResponseItem + | ToolSearchOutputResponseItem + | WebSearchCallResponseItem + | ImageGenerationCallResponseItem + | GhostSnapshotResponseItem + | CompactionResponseItem + | OtherResponseItem + ) + + +class ReviewStartResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + review_thread_id: Annotated[ + str, + Field( + alias="reviewThreadId", + description="Identifies the thread where the review runs.\n\nFor inline reviews, this is the original thread id. 
For detached reviews, this is the id of the new review thread.", + ), + ] + turn: Turn + + +class ThreadTokenUsageUpdatedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["thread/tokenUsage/updated"], + Field(title="Thread/tokenUsage/updatedNotificationMethod"), + ] + params: ThreadTokenUsageUpdatedNotification + + +class TurnStartedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["turn/started"], Field(title="Turn/startedNotificationMethod") + ] + params: TurnStartedNotification + + +class TurnCompletedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["turn/completed"], Field(title="Turn/completedNotificationMethod") + ] + params: TurnCompletedNotification + + +class HookCompletedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["hook/completed"], Field(title="Hook/completedNotificationMethod") + ] + params: HookCompletedNotification + + +class TurnPlanUpdatedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["turn/plan/updated"], Field(title="Turn/plan/updatedNotificationMethod") + ] + params: TurnPlanUpdatedNotification + + +class ItemStartedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["item/started"], Field(title="Item/startedNotificationMethod") + ] + params: ItemStartedNotification + + +class ItemCompletedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["item/completed"], Field(title="Item/completedNotificationMethod") + ] + params: ItemCompletedNotification + + +class AccountRateLimitsUpdatedServerNotification(BaseModel): + model_config = ConfigDict( + 
populate_by_name=True, + ) + method: Annotated[ + Literal["account/rateLimits/updated"], + Field(title="Account/rateLimits/updatedNotificationMethod"), + ] + params: AccountRateLimitsUpdatedNotification + + +class AppListUpdatedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["app/list/updated"], Field(title="App/list/updatedNotificationMethod") + ] + params: AppListUpdatedNotification + + +class WindowsSandboxSetupCompletedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["windowsSandbox/setupCompleted"], + Field(title="WindowsSandbox/setupCompletedNotificationMethod"), + ] + params: WindowsSandboxSetupCompletedNotification + + +class SubAgentSessionSource(BaseModel): + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + ) + sub_agent: Annotated[SubAgentSource, Field(alias="subAgent")] + + +class SessionSource(RootModel[SessionSourceValue | SubAgentSessionSource]): + model_config = ConfigDict( + populate_by_name=True, + ) + root: SessionSourceValue | SubAgentSessionSource + + +class Thread(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + agent_nickname: Annotated[ + str | None, + Field( + alias="agentNickname", + description="Optional random unique nickname assigned to an AgentControl-spawned sub-agent.", + ), + ] = None + agent_role: Annotated[ + str | None, + Field( + alias="agentRole", + description="Optional role (agent_role) assigned to an AgentControl-spawned sub-agent.", + ), + ] = None + cli_version: Annotated[ + str, + Field( + alias="cliVersion", + description="Version of the CLI that created the thread.", + ), + ] + created_at: Annotated[ + int, + Field( + alias="createdAt", + description="Unix timestamp (in seconds) when the thread was created.", + ), + ] + cwd: Annotated[str, Field(description="Working directory captured for the thread.")] + ephemeral: Annotated[ + bool, 
+ Field( + description="Whether the thread is ephemeral and should not be materialized on disk." + ), + ] + git_info: Annotated[ + GitInfo | None, + Field( + alias="gitInfo", + description="Optional Git metadata captured when the thread was created.", + ), + ] = None + id: str + model_provider: Annotated[ + str, + Field( + alias="modelProvider", + description="Model provider used for this thread (for example, 'openai').", + ), + ] + name: Annotated[ + str | None, Field(description="Optional user-facing thread title.") + ] = None + path: Annotated[ + str | None, Field(description="[UNSTABLE] Path to the thread on disk.") + ] = None + preview: Annotated[ + str, + Field( + description="Usually the first user message in the thread, if available." + ), + ] + source: Annotated[ + SessionSource, + Field( + description="Origin of the thread (CLI, VSCode, codex exec, codex app-server, etc.)." + ), + ] + status: Annotated[ + ThreadStatus, Field(description="Current runtime status for the thread.") + ] + turns: Annotated[ + list[Turn], + Field( + description="Only populated on `thread/resume`, `thread/rollback`, `thread/fork`, and `thread/read` (when `includeTurns` is true) responses. For all other responses and notifications returning a Thread, the turns field will be an empty list." 
+ ), + ] + updated_at: Annotated[ + int, + Field( + alias="updatedAt", + description="Unix timestamp (in seconds) when the thread was last updated.", + ), + ] + + +class ThreadForkResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + approval_policy: Annotated[AskForApproval, Field(alias="approvalPolicy")] + cwd: str + model: str + model_provider: Annotated[str, Field(alias="modelProvider")] + reasoning_effort: Annotated[ + ReasoningEffort | None, Field(alias="reasoningEffort") + ] = None + sandbox: SandboxPolicy + service_tier: Annotated[ServiceTier | None, Field(alias="serviceTier")] = None + thread: Thread + + +class ThreadListResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + data: list[Thread] + next_cursor: Annotated[ + str | None, + Field( + alias="nextCursor", + description="Opaque cursor to pass to the next call to continue after the last item. if None, there are no more items to return.", + ), + ] = None + + +class ThreadMetadataUpdateResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread: Thread + + +class ThreadReadResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread: Thread + + +class ThreadResumeResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + approval_policy: Annotated[AskForApproval, Field(alias="approvalPolicy")] + cwd: str + model: str + model_provider: Annotated[str, Field(alias="modelProvider")] + reasoning_effort: Annotated[ + ReasoningEffort | None, Field(alias="reasoningEffort") + ] = None + sandbox: SandboxPolicy + service_tier: Annotated[ServiceTier | None, Field(alias="serviceTier")] = None + thread: Thread + + +class ThreadRollbackResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread: Annotated[ + Thread, + Field( + description="The updated thread after applying the rollback, with `turns` populated.\n\nThe ThreadItems stored in each Turn are lossy 
since we explicitly do not persist all agent interactions, such as command executions. This is the same behavior as `thread/resume`." + ), + ] + + +class ThreadStartResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + approval_policy: Annotated[AskForApproval, Field(alias="approvalPolicy")] + cwd: str + model: str + model_provider: Annotated[str, Field(alias="modelProvider")] + reasoning_effort: Annotated[ + ReasoningEffort | None, Field(alias="reasoningEffort") + ] = None + sandbox: SandboxPolicy + service_tier: Annotated[ServiceTier | None, Field(alias="serviceTier")] = None + thread: Thread + + +class ThreadStartedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread: Thread + + +class ThreadUnarchiveResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + thread: Thread + + +class ExternalAgentConfigImportRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["externalAgentConfig/import"], + Field(title="ExternalAgentConfig/importRequestMethod"), + ] + params: ExternalAgentConfigImportParams + + +class ConfigBatchWriteRequest(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + id: RequestId + method: Annotated[ + Literal["config/batchWrite"], Field(title="Config/batchWriteRequestMethod") + ] + params: ConfigBatchWriteParams + + +class ClientRequest( + RootModel[ + InitializeRequest + | ThreadStartRequest + | ThreadResumeRequest + | ThreadForkRequest + | ThreadArchiveRequest + | ThreadUnsubscribeRequest + | ThreadNameSetRequest + | ThreadMetadataUpdateRequest + | ThreadUnarchiveRequest + | ThreadCompactStartRequest + | ThreadRollbackRequest + | ThreadListRequest + | ThreadLoadedListRequest + | ThreadReadRequest + | SkillsListRequest + | PluginListRequest + | SkillsRemoteListRequest + | SkillsRemoteExportRequest + | AppListRequest + | SkillsConfigWriteRequest + | PluginInstallRequest + 
| PluginUninstallRequest + | TurnStartRequest + | TurnSteerRequest + | TurnInterruptRequest + | ReviewStartRequest + | ModelListRequest + | ExperimentalFeatureListRequest + | McpServerOauthLoginRequest + | ConfigMcpServerReloadRequest + | McpServerStatusListRequest + | WindowsSandboxSetupStartRequest + | AccountLoginStartRequest + | AccountLoginCancelRequest + | AccountLogoutRequest + | AccountRateLimitsReadRequest + | FeedbackUploadRequest + | CommandExecRequest + | CommandExecWriteRequest + | CommandExecTerminateRequest + | CommandExecResizeRequest + | ConfigReadRequest + | ExternalAgentConfigDetectRequest + | ExternalAgentConfigImportRequest + | ConfigValueWriteRequest + | ConfigBatchWriteRequest + | ConfigRequirementsReadRequest + | AccountReadRequest + | FuzzyFileSearchRequest + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: Annotated[ + InitializeRequest + | ThreadStartRequest + | ThreadResumeRequest + | ThreadForkRequest + | ThreadArchiveRequest + | ThreadUnsubscribeRequest + | ThreadNameSetRequest + | ThreadMetadataUpdateRequest + | ThreadUnarchiveRequest + | ThreadCompactStartRequest + | ThreadRollbackRequest + | ThreadListRequest + | ThreadLoadedListRequest + | ThreadReadRequest + | SkillsListRequest + | PluginListRequest + | SkillsRemoteListRequest + | SkillsRemoteExportRequest + | AppListRequest + | SkillsConfigWriteRequest + | PluginInstallRequest + | PluginUninstallRequest + | TurnStartRequest + | TurnSteerRequest + | TurnInterruptRequest + | ReviewStartRequest + | ModelListRequest + | ExperimentalFeatureListRequest + | McpServerOauthLoginRequest + | ConfigMcpServerReloadRequest + | McpServerStatusListRequest + | WindowsSandboxSetupStartRequest + | AccountLoginStartRequest + | AccountLoginCancelRequest + | AccountLogoutRequest + | AccountRateLimitsReadRequest + | FeedbackUploadRequest + | CommandExecRequest + | CommandExecWriteRequest + | CommandExecTerminateRequest + | CommandExecResizeRequest + | ConfigReadRequest + | 
ExternalAgentConfigDetectRequest + | ExternalAgentConfigImportRequest + | ConfigValueWriteRequest + | ConfigBatchWriteRequest + | ConfigRequirementsReadRequest + | AccountReadRequest + | FuzzyFileSearchRequest, + Field( + description="Request from the client to the server.", title="ClientRequest" + ), + ] + + +class Config(BaseModel): + model_config = ConfigDict( + extra="allow", + populate_by_name=True, + ) + analytics: AnalyticsConfig | None = None + approval_policy: AskForApproval | None = None + compact_prompt: str | None = None + developer_instructions: str | None = None + forced_chatgpt_workspace_id: str | None = None + forced_login_method: ForcedLoginMethod | None = None + instructions: str | None = None + model: str | None = None + model_auto_compact_token_limit: int | None = None + model_context_window: int | None = None + model_provider: str | None = None + model_reasoning_effort: ReasoningEffort | None = None + model_reasoning_summary: ReasoningSummary | None = None + model_verbosity: Verbosity | None = None + profile: str | None = None + profiles: dict[str, ProfileV2] | None = {} + review_model: str | None = None + sandbox_mode: SandboxMode | None = None + sandbox_workspace_write: SandboxWorkspaceWrite | None = None + service_tier: ServiceTier | None = None + tools: ToolsV2 | None = None + web_search: WebSearchMode | None = None + + +class ConfigReadResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + config: Config + layers: list[ConfigLayer] | None = None + origins: dict[str, ConfigLayerMetadata] + + +class RealtimeConversationRealtimeEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + payload: RealtimeEvent + type: Annotated[ + Literal["realtime_conversation_realtime"], + Field(title="RealtimeConversationRealtimeEventMsgType"), + ] + + +class RawResponseItemEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + item: ResponseItem + type: Annotated[ + 
Literal["raw_response_item"], Field(title="RawResponseItemEventMsgType") + ] + + +class RawResponseItemCompletedNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + item: ResponseItem + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] + + +class ThreadStartedServerNotification(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + method: Annotated[ + Literal["thread/started"], Field(title="Thread/startedNotificationMethod") + ] + params: ThreadStartedNotification + + +class ServerNotification( + RootModel[ + ErrorServerNotification + | ThreadStartedServerNotification + | ThreadStatusChangedServerNotification + | ThreadArchivedServerNotification + | ThreadUnarchivedServerNotification + | ThreadClosedServerNotification + | SkillsChangedServerNotification + | ThreadNameUpdatedServerNotification + | ThreadTokenUsageUpdatedServerNotification + | TurnStartedServerNotification + | HookStartedServerNotification + | TurnCompletedServerNotification + | HookCompletedServerNotification + | TurnDiffUpdatedServerNotification + | TurnPlanUpdatedServerNotification + | ItemStartedServerNotification + | ItemCompletedServerNotification + | ItemAgentMessageDeltaServerNotification + | ItemPlanDeltaServerNotification + | CommandExecOutputDeltaServerNotification + | ItemCommandExecutionOutputDeltaServerNotification + | ItemCommandExecutionTerminalInteractionServerNotification + | ItemFileChangeOutputDeltaServerNotification + | ServerRequestResolvedServerNotification + | ItemMcpToolCallProgressServerNotification + | McpServerOauthLoginCompletedServerNotification + | AccountUpdatedServerNotification + | AccountRateLimitsUpdatedServerNotification + | AppListUpdatedServerNotification + | ItemReasoningSummaryTextDeltaServerNotification + | ItemReasoningSummaryPartAddedServerNotification + | ItemReasoningTextDeltaServerNotification + | ThreadCompactedServerNotification + | 
ModelReroutedServerNotification + | DeprecationNoticeServerNotification + | ConfigWarningServerNotification + | FuzzyFileSearchSessionUpdatedServerNotification + | FuzzyFileSearchSessionCompletedServerNotification + | ThreadRealtimeStartedServerNotification + | ThreadRealtimeItemAddedServerNotification + | ThreadRealtimeOutputAudioDeltaServerNotification + | ThreadRealtimeErrorServerNotification + | ThreadRealtimeClosedServerNotification + | WindowsWorldWritableWarningServerNotification + | WindowsSandboxSetupCompletedServerNotification + | AccountLoginCompletedServerNotification + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: Annotated[ + ErrorServerNotification + | ThreadStartedServerNotification + | ThreadStatusChangedServerNotification + | ThreadArchivedServerNotification + | ThreadUnarchivedServerNotification + | ThreadClosedServerNotification + | SkillsChangedServerNotification + | ThreadNameUpdatedServerNotification + | ThreadTokenUsageUpdatedServerNotification + | TurnStartedServerNotification + | HookStartedServerNotification + | TurnCompletedServerNotification + | HookCompletedServerNotification + | TurnDiffUpdatedServerNotification + | TurnPlanUpdatedServerNotification + | ItemStartedServerNotification + | ItemCompletedServerNotification + | ItemAgentMessageDeltaServerNotification + | ItemPlanDeltaServerNotification + | CommandExecOutputDeltaServerNotification + | ItemCommandExecutionOutputDeltaServerNotification + | ItemCommandExecutionTerminalInteractionServerNotification + | ItemFileChangeOutputDeltaServerNotification + | ServerRequestResolvedServerNotification + | ItemMcpToolCallProgressServerNotification + | McpServerOauthLoginCompletedServerNotification + | AccountUpdatedServerNotification + | AccountRateLimitsUpdatedServerNotification + | AppListUpdatedServerNotification + | ItemReasoningSummaryTextDeltaServerNotification + | ItemReasoningSummaryPartAddedServerNotification + | ItemReasoningTextDeltaServerNotification + | 
ThreadCompactedServerNotification + | ModelReroutedServerNotification + | DeprecationNoticeServerNotification + | ConfigWarningServerNotification + | FuzzyFileSearchSessionUpdatedServerNotification + | FuzzyFileSearchSessionCompletedServerNotification + | ThreadRealtimeStartedServerNotification + | ThreadRealtimeItemAddedServerNotification + | ThreadRealtimeOutputAudioDeltaServerNotification + | ThreadRealtimeErrorServerNotification + | ThreadRealtimeClosedServerNotification + | WindowsWorldWritableWarningServerNotification + | WindowsSandboxSetupCompletedServerNotification + | AccountLoginCompletedServerNotification, + Field( + description="Notification sent from the server to the client.", + title="ServerNotification", + ), + ] + + +class SessionConfiguredEventMsg(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + approval_policy: Annotated[ + AskForApproval, Field(description="When to escalate for approval for execution") + ] + cwd: Annotated[ + str, + Field( + description="Working directory that should be treated as the *root* of the session." + ), + ] + forked_from_id: ThreadId | None = None + history_entry_count: Annotated[ + int, Field(description="Current number of entries in the history log.", ge=0) + ] + history_log_id: Annotated[ + int, + Field( + description="Identifier of the history log file (inode on Unix, 0 otherwise).", + ge=0, + ), + ] + initial_messages: Annotated[ + list[EventMsg] | None, + Field( + description="Optional initial messages (as events) for resumed sessions. When present, UIs can use these to seed the history." + ), + ] = None + model: Annotated[ + str, Field(description="Tell the client what model is being queried.") + ] + model_provider_id: str + network_proxy: Annotated[ + SessionNetworkProxyRuntime | None, + Field( + description="Runtime proxy bind addresses, when the managed proxy was started for this session." 
+ ), + ] = None + reasoning_effort: Annotated[ + ReasoningEffort | None, + Field( + description="The effort the model is putting into reasoning about the user's request." + ), + ] = None + rollout_path: Annotated[ + str | None, + Field( + description="Path in which the rollout is stored. Can be `None` for ephemeral threads" + ), + ] = None + sandbox_policy: Annotated[ + SandboxPolicy, + Field(description="How to sandbox commands executed in the system"), + ] + service_tier: ServiceTier | None = None + session_id: ThreadId + thread_name: Annotated[ + str | None, + Field(description="Optional user-facing thread name (may be unset)."), + ] = None + type: Annotated[ + Literal["session_configured"], Field(title="SessionConfiguredEventMsgType") + ] + + +class EventMsg( + RootModel[ + ErrorEventMsg + | WarningEventMsg + | RealtimeConversationStartedEventMsg + | RealtimeConversationRealtimeEventMsg + | RealtimeConversationClosedEventMsg + | ModelRerouteEventMsg + | ContextCompactedEventMsg + | ThreadRolledBackEventMsg + | TaskStartedEventMsg + | TaskCompleteEventMsg + | TokenCountEventMsg + | AgentMessageEventMsg + | UserMessageEventMsg + | AgentMessageDeltaEventMsg + | AgentReasoningEventMsg + | AgentReasoningDeltaEventMsg + | AgentReasoningRawContentEventMsg + | AgentReasoningRawContentDeltaEventMsg + | AgentReasoningSectionBreakEventMsg + | SessionConfiguredEventMsg + | ThreadNameUpdatedEventMsg + | McpStartupUpdateEventMsg + | McpStartupCompleteEventMsg + | McpToolCallBeginEventMsg + | McpToolCallEndEventMsg + | WebSearchBeginEventMsg + | WebSearchEndEventMsg + | ImageGenerationBeginEventMsg + | ImageGenerationEndEventMsg + | ExecCommandBeginEventMsg + | ExecCommandOutputDeltaEventMsg + | TerminalInteractionEventMsg + | ExecCommandEndEventMsg + | ViewImageToolCallEventMsg + | ExecApprovalRequestEventMsg + | RequestPermissionsEventMsg + | RequestUserInputEventMsg + | DynamicToolCallRequestEventMsg + | DynamicToolCallResponseEventMsg + | ElicitationRequestEventMsg + | 
ApplyPatchApprovalRequestEventMsg + | DeprecationNoticeEventMsg + | BackgroundEventEventMsg + | UndoStartedEventMsg + | UndoCompletedEventMsg + | StreamErrorEventMsg + | PatchApplyBeginEventMsg + | PatchApplyEndEventMsg + | TurnDiffEventMsg + | GetHistoryEntryResponseEventMsg + | McpListToolsResponseEventMsg + | ListCustomPromptsResponseEventMsg + | ListSkillsResponseEventMsg + | ListRemoteSkillsResponseEventMsg + | RemoteSkillDownloadedEventMsg + | SkillsUpdateAvailableEventMsg + | PlanUpdateEventMsg + | TurnAbortedEventMsg + | ShutdownCompleteEventMsg + | EnteredReviewModeEventMsg + | ExitedReviewModeEventMsg + | RawResponseItemEventMsg + | ItemStartedEventMsg + | ItemCompletedEventMsg + | HookStartedEventMsg + | HookCompletedEventMsg + | AgentMessageContentDeltaEventMsg + | PlanDeltaEventMsg + | ReasoningContentDeltaEventMsg + | ReasoningRawContentDeltaEventMsg + | CollabAgentSpawnBeginEventMsg + | CollabAgentSpawnEndEventMsg + | CollabAgentInteractionBeginEventMsg + | CollabAgentInteractionEndEventMsg + | CollabWaitingBeginEventMsg + | CollabWaitingEndEventMsg + | CollabCloseBeginEventMsg + | CollabCloseEndEventMsg + | CollabResumeBeginEventMsg + | CollabResumeEndEventMsg + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: Annotated[ + ErrorEventMsg + | WarningEventMsg + | RealtimeConversationStartedEventMsg + | RealtimeConversationRealtimeEventMsg + | RealtimeConversationClosedEventMsg + | ModelRerouteEventMsg + | ContextCompactedEventMsg + | ThreadRolledBackEventMsg + | TaskStartedEventMsg + | TaskCompleteEventMsg + | TokenCountEventMsg + | AgentMessageEventMsg + | UserMessageEventMsg + | AgentMessageDeltaEventMsg + | AgentReasoningEventMsg + | AgentReasoningDeltaEventMsg + | AgentReasoningRawContentEventMsg + | AgentReasoningRawContentDeltaEventMsg + | AgentReasoningSectionBreakEventMsg + | SessionConfiguredEventMsg + | ThreadNameUpdatedEventMsg + | McpStartupUpdateEventMsg + | McpStartupCompleteEventMsg + | McpToolCallBeginEventMsg + | 
McpToolCallEndEventMsg + | WebSearchBeginEventMsg + | WebSearchEndEventMsg + | ImageGenerationBeginEventMsg + | ImageGenerationEndEventMsg + | ExecCommandBeginEventMsg + | ExecCommandOutputDeltaEventMsg + | TerminalInteractionEventMsg + | ExecCommandEndEventMsg + | ViewImageToolCallEventMsg + | ExecApprovalRequestEventMsg + | RequestPermissionsEventMsg + | RequestUserInputEventMsg + | DynamicToolCallRequestEventMsg + | DynamicToolCallResponseEventMsg + | ElicitationRequestEventMsg + | ApplyPatchApprovalRequestEventMsg + | DeprecationNoticeEventMsg + | BackgroundEventEventMsg + | UndoStartedEventMsg + | UndoCompletedEventMsg + | StreamErrorEventMsg + | PatchApplyBeginEventMsg + | PatchApplyEndEventMsg + | TurnDiffEventMsg + | GetHistoryEntryResponseEventMsg + | McpListToolsResponseEventMsg + | ListCustomPromptsResponseEventMsg + | ListSkillsResponseEventMsg + | ListRemoteSkillsResponseEventMsg + | RemoteSkillDownloadedEventMsg + | SkillsUpdateAvailableEventMsg + | PlanUpdateEventMsg + | TurnAbortedEventMsg + | ShutdownCompleteEventMsg + | EnteredReviewModeEventMsg + | ExitedReviewModeEventMsg + | RawResponseItemEventMsg + | ItemStartedEventMsg + | ItemCompletedEventMsg + | HookStartedEventMsg + | HookCompletedEventMsg + | AgentMessageContentDeltaEventMsg + | PlanDeltaEventMsg + | ReasoningContentDeltaEventMsg + | ReasoningRawContentDeltaEventMsg + | CollabAgentSpawnBeginEventMsg + | CollabAgentSpawnEndEventMsg + | CollabAgentInteractionBeginEventMsg + | CollabAgentInteractionEndEventMsg + | CollabWaitingBeginEventMsg + | CollabWaitingEndEventMsg + | CollabCloseBeginEventMsg + | CollabCloseEndEventMsg + | CollabResumeBeginEventMsg + | CollabResumeEndEventMsg, + Field( + description="Response event from the agent NOTE: Make sure none of these values have optional types, as it will mess up the extension code-gen.", + title="EventMsg", + ), + ] + + +SessionConfiguredEventMsg.model_rebuild() diff --git a/sdk/python/src/codex_app_server/generated/v2_types.py 
b/sdk/python/src/codex_app_server/generated/v2_types.py new file mode 100644 index 000000000..932ab438d --- /dev/null +++ b/sdk/python/src/codex_app_server/generated/v2_types.py @@ -0,0 +1,25 @@ +"""Stable aliases over full v2 autogenerated models (datamodel-code-generator).""" + +from .v2_all.ModelListResponse import ModelListResponse +from .v2_all.ThreadCompactStartResponse import ThreadCompactStartResponse +from .v2_all.ThreadListResponse import ThreadListResponse +from .v2_all.ThreadReadResponse import ThreadReadResponse +from .v2_all.ThreadTokenUsageUpdatedNotification import ( + ThreadTokenUsageUpdatedNotification, +) +from .v2_all.TurnCompletedNotification import ThreadItem153 as ThreadItem +from .v2_all.TurnCompletedNotification import ( + TurnCompletedNotification as TurnCompletedNotificationPayload, +) +from .v2_all.TurnSteerResponse import TurnSteerResponse + +__all__ = [ + "ModelListResponse", + "ThreadCompactStartResponse", + "ThreadListResponse", + "ThreadReadResponse", + "ThreadTokenUsageUpdatedNotification", + "TurnCompletedNotificationPayload", + "TurnSteerResponse", + "ThreadItem", +] diff --git a/sdk/python/src/codex_app_server/models.py b/sdk/python/src/codex_app_server/models.py new file mode 100644 index 000000000..7c5bb34de --- /dev/null +++ b/sdk/python/src/codex_app_server/models.py @@ -0,0 +1,97 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import TypeAlias + +from pydantic import BaseModel + +from .generated.v2_all import ( + AccountLoginCompletedNotification, + AccountRateLimitsUpdatedNotification, + AccountUpdatedNotification, + AgentMessageDeltaNotification, + AppListUpdatedNotification, + CommandExecutionOutputDeltaNotification, + ConfigWarningNotification, + ContextCompactedNotification, + DeprecationNoticeNotification, + ErrorNotification, + FileChangeOutputDeltaNotification, + ItemCompletedNotification, + ItemStartedNotification, + McpServerOauthLoginCompletedNotification, + 
McpToolCallProgressNotification, + PlanDeltaNotification, + RawResponseItemCompletedNotification, + ReasoningSummaryPartAddedNotification, + ReasoningSummaryTextDeltaNotification, + ReasoningTextDeltaNotification, + TerminalInteractionNotification, + ThreadNameUpdatedNotification, + ThreadStartedNotification, + ThreadTokenUsageUpdatedNotification, + TurnCompletedNotification, + TurnDiffUpdatedNotification, + TurnPlanUpdatedNotification, + TurnStartedNotification, + WindowsWorldWritableWarningNotification, +) + +JsonScalar: TypeAlias = str | int | float | bool | None +JsonValue: TypeAlias = JsonScalar | dict[str, "JsonValue"] | list["JsonValue"] +JsonObject: TypeAlias = dict[str, JsonValue] + + +@dataclass(slots=True) +class UnknownNotification: + params: JsonObject + + +NotificationPayload: TypeAlias = ( + AccountLoginCompletedNotification + | AccountRateLimitsUpdatedNotification + | AccountUpdatedNotification + | AgentMessageDeltaNotification + | AppListUpdatedNotification + | CommandExecutionOutputDeltaNotification + | ConfigWarningNotification + | ContextCompactedNotification + | DeprecationNoticeNotification + | ErrorNotification + | FileChangeOutputDeltaNotification + | ItemCompletedNotification + | ItemStartedNotification + | McpServerOauthLoginCompletedNotification + | McpToolCallProgressNotification + | PlanDeltaNotification + | RawResponseItemCompletedNotification + | ReasoningSummaryPartAddedNotification + | ReasoningSummaryTextDeltaNotification + | ReasoningTextDeltaNotification + | TerminalInteractionNotification + | ThreadNameUpdatedNotification + | ThreadStartedNotification + | ThreadTokenUsageUpdatedNotification + | TurnCompletedNotification + | TurnDiffUpdatedNotification + | TurnPlanUpdatedNotification + | TurnStartedNotification + | WindowsWorldWritableWarningNotification + | UnknownNotification +) + + +@dataclass(slots=True) +class Notification: + method: str + payload: NotificationPayload + + +class ServerInfo(BaseModel): + name: str | None = 
None + version: str | None = None + + +class InitializeResponse(BaseModel): + serverInfo: ServerInfo | None = None + userAgent: str | None = None diff --git a/sdk/python/src/codex_app_server/py.typed b/sdk/python/src/codex_app_server/py.typed new file mode 100644 index 000000000..e69de29bb diff --git a/sdk/python/src/codex_app_server/retry.py b/sdk/python/src/codex_app_server/retry.py new file mode 100644 index 000000000..b7e4f7740 --- /dev/null +++ b/sdk/python/src/codex_app_server/retry.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +import random +import time +from typing import Callable, TypeVar + +from .errors import is_retryable_error + +T = TypeVar("T") + + +def retry_on_overload( + op: Callable[[], T], + *, + max_attempts: int = 3, + initial_delay_s: float = 0.25, + max_delay_s: float = 2.0, + jitter_ratio: float = 0.2, +) -> T: + """Retry helper for transient server-overload errors.""" + + if max_attempts < 1: + raise ValueError("max_attempts must be >= 1") + + delay = initial_delay_s + attempt = 0 + while True: + attempt += 1 + try: + return op() + except Exception as exc: + if attempt >= max_attempts: + raise + if not is_retryable_error(exc): + raise + + jitter = delay * jitter_ratio + sleep_for = min(max_delay_s, delay) + random.uniform(-jitter, jitter) + if sleep_for > 0: + time.sleep(sleep_for) + delay = min(max_delay_s, delay * 2) diff --git a/sdk/python/tests/conftest.py b/sdk/python/tests/conftest.py new file mode 100644 index 000000000..f23f55e4d --- /dev/null +++ b/sdk/python/tests/conftest.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +import sys +from pathlib import Path + +ROOT = Path(__file__).resolve().parents[1] +SRC = ROOT / "src" + +src_str = str(SRC) +if src_str in sys.path: + sys.path.remove(src_str) +sys.path.insert(0, src_str) + +for module_name in list(sys.modules): + if module_name == "codex_app_server" or module_name.startswith("codex_app_server."): + sys.modules.pop(module_name) diff --git 
a/sdk/python/tests/test_artifact_workflow_and_binaries.py b/sdk/python/tests/test_artifact_workflow_and_binaries.py new file mode 100644 index 000000000..90446451d --- /dev/null +++ b/sdk/python/tests/test_artifact_workflow_and_binaries.py @@ -0,0 +1,411 @@ +from __future__ import annotations + +import ast +import importlib.util +import json +import sys +import tomllib +from pathlib import Path + +import pytest + +ROOT = Path(__file__).resolve().parents[1] + + +def _load_update_script_module(): + script_path = ROOT / "scripts" / "update_sdk_artifacts.py" + spec = importlib.util.spec_from_file_location("update_sdk_artifacts", script_path) + if spec is None or spec.loader is None: + raise AssertionError(f"Failed to load script module: {script_path}") + module = importlib.util.module_from_spec(spec) + sys.modules[spec.name] = module + spec.loader.exec_module(module) + return module + + +def test_generation_has_single_maintenance_entrypoint_script() -> None: + scripts = sorted(p.name for p in (ROOT / "scripts").glob("*.py")) + assert scripts == ["update_sdk_artifacts.py"] + + +def test_generate_types_wires_all_generation_steps() -> None: + source = (ROOT / "scripts" / "update_sdk_artifacts.py").read_text() + tree = ast.parse(source) + + generate_types_fn = next( + ( + node + for node in tree.body + if isinstance(node, ast.FunctionDef) and node.name == "generate_types" + ), + None, + ) + assert generate_types_fn is not None + + calls: list[str] = [] + for node in generate_types_fn.body: + if isinstance(node, ast.Expr) and isinstance(node.value, ast.Call): + fn = node.value.func + if isinstance(fn, ast.Name): + calls.append(fn.id) + + assert calls == [ + "generate_v2_all", + "generate_notification_registry", + "generate_public_api_flat_methods", + ] + + +def test_schema_normalization_only_flattens_string_literal_oneofs() -> None: + script = _load_update_script_module() + schema = json.loads( + ( + ROOT.parent.parent + / "codex-rs" + / "app-server-protocol" + / "schema" + 
/ "json" + / "codex_app_server_protocol.v2.schemas.json" + ).read_text() + ) + + definitions = schema["definitions"] + flattened = [ + name + for name, definition in definitions.items() + if isinstance(definition, dict) + and script._flatten_string_enum_one_of(definition.copy()) + ] + + assert flattened == [ + "AuthMode", + "CommandExecOutputStream", + "ExperimentalFeatureStage", + "InputModality", + "MessagePhase", + ] + + +def test_python_codegen_schema_annotation_adds_stable_variant_titles() -> None: + script = _load_update_script_module() + schema = json.loads( + ( + ROOT.parent.parent + / "codex-rs" + / "app-server-protocol" + / "schema" + / "json" + / "codex_app_server_protocol.v2.schemas.json" + ).read_text() + ) + + script._annotate_schema(schema) + definitions = schema["definitions"] + + server_notification_titles = { + variant.get("title") + for variant in definitions["ServerNotification"]["oneOf"] + if isinstance(variant, dict) + } + assert "ErrorServerNotification" in server_notification_titles + assert "ThreadStartedServerNotification" in server_notification_titles + assert "ErrorNotification" not in server_notification_titles + assert "Thread/startedNotification" not in server_notification_titles + + ask_for_approval_titles = [ + variant.get("title") for variant in definitions["AskForApproval"]["oneOf"] + ] + assert ask_for_approval_titles == [ + "AskForApprovalValue", + "RejectAskForApproval", + ] + + reasoning_summary_titles = [ + variant.get("title") for variant in definitions["ReasoningSummary"]["oneOf"] + ] + assert reasoning_summary_titles == [ + "ReasoningSummaryValue", + "NoneReasoningSummary", + ] + + +def test_generate_v2_all_uses_titles_for_generated_names() -> None: + source = (ROOT / "scripts" / "update_sdk_artifacts.py").read_text() + assert "--use-title-as-name" in source + assert "--use-annotated" in source + assert "--formatters" in source + assert "ruff-format" in source + + +def 
test_runtime_package_template_has_no_checked_in_binaries() -> None: + runtime_root = ROOT.parent / "python-runtime" / "src" / "codex_cli_bin" + assert sorted( + path.name + for path in runtime_root.rglob("*") + if path.is_file() and "__pycache__" not in path.parts + ) == ["__init__.py"] + + +def test_runtime_package_is_wheel_only_and_builds_platform_specific_wheels() -> None: + pyproject = tomllib.loads( + (ROOT.parent / "python-runtime" / "pyproject.toml").read_text() + ) + hook_source = (ROOT.parent / "python-runtime" / "hatch_build.py").read_text() + hook_tree = ast.parse(hook_source) + initialize_fn = next( + node + for node in ast.walk(hook_tree) + if isinstance(node, ast.FunctionDef) and node.name == "initialize" + ) + + sdist_guard = next( + ( + node + for node in initialize_fn.body + if isinstance(node, ast.If) + and isinstance(node.test, ast.Compare) + and isinstance(node.test.left, ast.Attribute) + and isinstance(node.test.left.value, ast.Name) + and node.test.left.value.id == "self" + and node.test.left.attr == "target_name" + and len(node.test.ops) == 1 + and isinstance(node.test.ops[0], ast.Eq) + and len(node.test.comparators) == 1 + and isinstance(node.test.comparators[0], ast.Constant) + and node.test.comparators[0].value == "sdist" + ), + None, + ) + build_data_assignments = { + node.targets[0].slice.value: node.value.value + for node in initialize_fn.body + if isinstance(node, ast.Assign) + and len(node.targets) == 1 + and isinstance(node.targets[0], ast.Subscript) + and isinstance(node.targets[0].value, ast.Name) + and node.targets[0].value.id == "build_data" + and isinstance(node.targets[0].slice, ast.Constant) + and isinstance(node.targets[0].slice.value, str) + and isinstance(node.value, ast.Constant) + } + + assert pyproject["tool"]["hatch"]["build"]["targets"]["wheel"] == { + "packages": ["src/codex_cli_bin"], + "include": ["src/codex_cli_bin/bin/**"], + "hooks": {"custom": {}}, + } + assert 
pyproject["tool"]["hatch"]["build"]["targets"]["sdist"] == { + "hooks": {"custom": {}}, + } + assert sdist_guard is not None + assert build_data_assignments == {"pure_python": False, "infer_tag": True} + + +def test_stage_runtime_release_copies_binary_and_sets_version(tmp_path: Path) -> None: + script = _load_update_script_module() + fake_binary = tmp_path / script.runtime_binary_name() + fake_binary.write_text("fake codex\n") + + staged = script.stage_python_runtime_package( + tmp_path / "runtime-stage", + "1.2.3", + fake_binary, + ) + + assert staged == tmp_path / "runtime-stage" + assert script.staged_runtime_bin_path(staged).read_text() == "fake codex\n" + assert 'version = "1.2.3"' in (staged / "pyproject.toml").read_text() + + +def test_stage_runtime_release_replaces_existing_staging_dir(tmp_path: Path) -> None: + script = _load_update_script_module() + staging_dir = tmp_path / "runtime-stage" + old_file = staging_dir / "stale.txt" + old_file.parent.mkdir(parents=True) + old_file.write_text("stale") + + fake_binary = tmp_path / script.runtime_binary_name() + fake_binary.write_text("fake codex\n") + + staged = script.stage_python_runtime_package( + staging_dir, + "1.2.3", + fake_binary, + ) + + assert staged == staging_dir + assert not old_file.exists() + assert script.staged_runtime_bin_path(staged).read_text() == "fake codex\n" + + +def test_stage_sdk_release_injects_exact_runtime_pin(tmp_path: Path) -> None: + script = _load_update_script_module() + staged = script.stage_python_sdk_package(tmp_path / "sdk-stage", "0.2.1", "1.2.3") + + pyproject = (staged / "pyproject.toml").read_text() + assert 'version = "0.2.1"' in pyproject + assert '"codex-cli-bin==1.2.3"' in pyproject + assert not any((staged / "src" / "codex_app_server").glob("bin/**")) + + +def test_stage_sdk_release_replaces_existing_staging_dir(tmp_path: Path) -> None: + script = _load_update_script_module() + staging_dir = tmp_path / "sdk-stage" + old_file = staging_dir / "stale.txt" + 
old_file.parent.mkdir(parents=True) + old_file.write_text("stale") + + staged = script.stage_python_sdk_package(staging_dir, "0.2.1", "1.2.3") + + assert staged == staging_dir + assert not old_file.exists() + + +def test_stage_sdk_runs_type_generation_before_staging(tmp_path: Path) -> None: + script = _load_update_script_module() + calls: list[str] = [] + args = script.parse_args( + [ + "stage-sdk", + str(tmp_path / "sdk-stage"), + "--runtime-version", + "1.2.3", + ] + ) + + def fake_generate_types() -> None: + calls.append("generate_types") + + def fake_stage_sdk_package( + _staging_dir: Path, _sdk_version: str, _runtime_version: str + ) -> Path: + calls.append("stage_sdk") + return tmp_path / "sdk-stage" + + def fake_stage_runtime_package( + _staging_dir: Path, _runtime_version: str, _runtime_binary: Path + ) -> Path: + raise AssertionError("runtime staging should not run for stage-sdk") + + def fake_current_sdk_version() -> str: + return "0.2.0" + + ops = script.CliOps( + generate_types=fake_generate_types, + stage_python_sdk_package=fake_stage_sdk_package, + stage_python_runtime_package=fake_stage_runtime_package, + current_sdk_version=fake_current_sdk_version, + ) + + script.run_command(args, ops) + + assert calls == ["generate_types", "stage_sdk"] + + +def test_stage_runtime_stages_binary_without_type_generation(tmp_path: Path) -> None: + script = _load_update_script_module() + fake_binary = tmp_path / script.runtime_binary_name() + fake_binary.write_text("fake codex\n") + calls: list[str] = [] + args = script.parse_args( + [ + "stage-runtime", + str(tmp_path / "runtime-stage"), + str(fake_binary), + "--runtime-version", + "1.2.3", + ] + ) + + def fake_generate_types() -> None: + calls.append("generate_types") + + def fake_stage_sdk_package( + _staging_dir: Path, _sdk_version: str, _runtime_version: str + ) -> Path: + raise AssertionError("sdk staging should not run for stage-runtime") + + def fake_stage_runtime_package( + _staging_dir: Path, 
_runtime_version: str, _runtime_binary: Path + ) -> Path: + calls.append("stage_runtime") + return tmp_path / "runtime-stage" + + def fake_current_sdk_version() -> str: + return "0.2.0" + + ops = script.CliOps( + generate_types=fake_generate_types, + stage_python_sdk_package=fake_stage_sdk_package, + stage_python_runtime_package=fake_stage_runtime_package, + current_sdk_version=fake_current_sdk_version, + ) + + script.run_command(args, ops) + + assert calls == ["stage_runtime"] + + +def test_default_runtime_is_resolved_from_installed_runtime_package( + tmp_path: Path, +) -> None: + from codex_app_server import client as client_module + + fake_binary = tmp_path / ("codex.exe" if client_module.os.name == "nt" else "codex") + fake_binary.write_text("") + ops = client_module.CodexBinResolverOps( + installed_codex_path=lambda: fake_binary, + path_exists=lambda path: path == fake_binary, + ) + + config = client_module.AppServerConfig() + assert config.codex_bin is None + assert client_module.resolve_codex_bin(config, ops) == fake_binary + + +def test_explicit_codex_bin_override_takes_priority(tmp_path: Path) -> None: + from codex_app_server import client as client_module + + explicit_binary = tmp_path / ( + "custom-codex.exe" if client_module.os.name == "nt" else "custom-codex" + ) + explicit_binary.write_text("") + ops = client_module.CodexBinResolverOps( + installed_codex_path=lambda: (_ for _ in ()).throw( + AssertionError("packaged runtime should not be used") + ), + path_exists=lambda path: path == explicit_binary, + ) + + config = client_module.AppServerConfig(codex_bin=str(explicit_binary)) + assert client_module.resolve_codex_bin(config, ops) == explicit_binary + + +def test_missing_runtime_package_requires_explicit_codex_bin() -> None: + from codex_app_server import client as client_module + + ops = client_module.CodexBinResolverOps( + installed_codex_path=lambda: (_ for _ in ()).throw( + FileNotFoundError("missing packaged runtime") + ), + path_exists=lambda 
_path: False, + ) + + with pytest.raises(FileNotFoundError, match="missing packaged runtime"): + client_module.resolve_codex_bin(client_module.AppServerConfig(), ops) + + +def test_broken_runtime_package_does_not_fall_back() -> None: + from codex_app_server import client as client_module + + ops = client_module.CodexBinResolverOps( + installed_codex_path=lambda: (_ for _ in ()).throw( + FileNotFoundError("missing packaged binary") + ), + path_exists=lambda _path: False, + ) + + with pytest.raises(FileNotFoundError) as exc_info: + client_module.resolve_codex_bin(client_module.AppServerConfig(), ops) + + assert str(exc_info.value) == ("missing packaged binary") diff --git a/sdk/python/tests/test_client_rpc_methods.py b/sdk/python/tests/test_client_rpc_methods.py new file mode 100644 index 000000000..274218056 --- /dev/null +++ b/sdk/python/tests/test_client_rpc_methods.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +from pathlib import Path +from typing import Any + +from codex_app_server.client import AppServerClient, _params_dict +from codex_app_server.generated.v2_all import ThreadListParams, ThreadTokenUsageUpdatedNotification +from codex_app_server.models import UnknownNotification + +ROOT = Path(__file__).resolve().parents[1] + + +def test_thread_set_name_and_compact_use_current_rpc_methods() -> None: + client = AppServerClient() + calls: list[tuple[str, dict[str, Any] | None]] = [] + + def fake_request(method: str, params, *, response_model): # type: ignore[no-untyped-def] + calls.append((method, params)) + return response_model.model_validate({}) + + client.request = fake_request # type: ignore[method-assign] + + client.thread_set_name("thread-1", "sdk-name") + client.thread_compact("thread-1") + + assert calls[0][0] == "thread/name/set" + assert calls[1][0] == "thread/compact/start" + + +def test_generated_params_models_are_snake_case_and_dump_by_alias() -> None: + params = ThreadListParams(search_term="needle", limit=5) + + assert "search_term" 
in ThreadListParams.model_fields + dumped = _params_dict(params) + assert dumped == {"searchTerm": "needle", "limit": 5} + + +def test_generated_v2_bundle_has_single_shared_plan_type_definition() -> None: + source = (ROOT / "src" / "codex_app_server" / "generated" / "v2_all.py").read_text() + assert source.count("class PlanType(") == 1 + + +def test_notifications_are_typed_with_canonical_v2_methods() -> None: + client = AppServerClient() + event = client._coerce_notification( + "thread/tokenUsage/updated", + { + "threadId": "thread-1", + "turnId": "turn-1", + "tokenUsage": { + "last": { + "cachedInputTokens": 0, + "inputTokens": 1, + "outputTokens": 2, + "reasoningOutputTokens": 0, + "totalTokens": 3, + }, + "total": { + "cachedInputTokens": 0, + "inputTokens": 1, + "outputTokens": 2, + "reasoningOutputTokens": 0, + "totalTokens": 3, + }, + }, + }, + ) + + assert event.method == "thread/tokenUsage/updated" + assert isinstance(event.payload, ThreadTokenUsageUpdatedNotification) + assert event.payload.turn_id == "turn-1" + + +def test_unknown_notifications_fall_back_to_unknown_payloads() -> None: + client = AppServerClient() + event = client._coerce_notification( + "unknown/notification", + { + "id": "evt-1", + "conversationId": "thread-1", + "msg": {"type": "turn_aborted"}, + }, + ) + + assert event.method == "unknown/notification" + assert isinstance(event.payload, UnknownNotification) + assert event.payload.params["msg"] == {"type": "turn_aborted"} + + +def test_invalid_notification_payload_falls_back_to_unknown() -> None: + client = AppServerClient() + event = client._coerce_notification("thread/tokenUsage/updated", {"threadId": "missing"}) + + assert event.method == "thread/tokenUsage/updated" + assert isinstance(event.payload, UnknownNotification) diff --git a/sdk/python/tests/test_contract_generation.py b/sdk/python/tests/test_contract_generation.py new file mode 100644 index 000000000..ae926e481 --- /dev/null +++ b/sdk/python/tests/test_contract_generation.py 
@@ -0,0 +1,52 @@ +from __future__ import annotations + +import os +import subprocess +import sys +from pathlib import Path + +ROOT = Path(__file__).resolve().parents[1] +GENERATED_TARGETS = [ + Path("src/codex_app_server/generated/notification_registry.py"), + Path("src/codex_app_server/generated/v2_all.py"), + Path("src/codex_app_server/public_api.py"), +] + + +def _snapshot_target(root: Path, rel_path: Path) -> dict[str, bytes] | bytes | None: + target = root / rel_path + if not target.exists(): + return None + if target.is_file(): + return target.read_bytes() + + snapshot: dict[str, bytes] = {} + for path in sorted(target.rglob("*")): + if path.is_file() and "__pycache__" not in path.parts: + snapshot[str(path.relative_to(target))] = path.read_bytes() + return snapshot + + +def _snapshot_targets(root: Path) -> dict[str, dict[str, bytes] | bytes | None]: + return { + str(rel_path): _snapshot_target(root, rel_path) for rel_path in GENERATED_TARGETS + } + + +def test_generated_files_are_up_to_date(): + before = _snapshot_targets(ROOT) + + # Regenerate contract artifacts via single maintenance entrypoint. + env = os.environ.copy() + python_bin = str(Path(sys.executable).parent) + env["PATH"] = f"{python_bin}{os.pathsep}{env.get('PATH', '')}" + + subprocess.run( + [sys.executable, "scripts/update_sdk_artifacts.py", "generate-types"], + cwd=ROOT, + check=True, + env=env, + ) + + after = _snapshot_targets(ROOT) + assert before == after, "Generated files drifted after regeneration"