{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Codex Python SDK Walkthrough\n", "\n", "Public SDK surface only (`codex_app_server` root exports)." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Cell 1: bootstrap local SDK imports + pinned runtime package\n", "import os\n", "import sys\n", "from pathlib import Path\n", "\n", "if sys.version_info < (3, 10):\n", " raise RuntimeError(\n", " f'Notebook requires Python 3.10+; current interpreter is {sys.version.split()[0]}.'\n", " )\n", "\n", "try:\n", " _ = os.getcwd()\n", "except FileNotFoundError:\n", " os.chdir(str(Path.home()))\n", "\n", "\n", "def _is_sdk_python_dir(path: Path) -> bool:\n", " return (path / 'pyproject.toml').exists() and (path / 'src' / 'codex_app_server').exists()\n", "\n", "\n", "def _iter_home_fallback_candidates(home: Path):\n", " # bounded depth scan under home to support launching notebooks from unrelated cwd values\n", " patterns = ('sdk/python', '*/sdk/python', '*/*/sdk/python', '*/*/*/sdk/python')\n", " for pattern in patterns:\n", " yield from home.glob(pattern)\n", "\n", "\n", "def _find_sdk_python_dir(start: Path) -> Path | None:\n", " checked = set()\n", "\n", " def _consider(candidate: Path) -> Path | None:\n", " resolved = candidate.resolve()\n", " if resolved in checked:\n", " return None\n", " checked.add(resolved)\n", " if _is_sdk_python_dir(resolved):\n", " return resolved\n", " return None\n", "\n", " for candidate in [start, *start.parents]:\n", " found = _consider(candidate)\n", " if found is not None:\n", " return found\n", "\n", " for candidate in [start / 'sdk' / 'python', *(parent / 'sdk' / 'python' for parent in start.parents)]:\n", " found = _consider(candidate)\n", " if found is not None:\n", " return found\n", "\n", " env_dir = os.environ.get('CODEX_PYTHON_SDK_DIR')\n", " if env_dir:\n", " found = _consider(Path(env_dir).expanduser())\n", " if found is not None:\n", " return found\n", "\n", " 
for entry in sys.path:\n", "        if not entry:\n", "            continue\n", "        entry_path = Path(entry).expanduser()\n", "        for candidate in (entry_path, entry_path / 'sdk' / 'python'):\n", "            found = _consider(candidate)\n", "            if found is not None:\n", "                return found\n", "\n", "    home = Path.home()\n", "    for candidate in _iter_home_fallback_candidates(home):\n", "        found = _consider(candidate)\n", "        if found is not None:\n", "            return found\n", "\n", "    return None\n", "\n", "\n", "repo_python_dir = _find_sdk_python_dir(Path.cwd())\n", "if repo_python_dir is None:\n", "    raise RuntimeError('Could not locate sdk/python. Set CODEX_PYTHON_SDK_DIR to your sdk/python path.')\n", "\n", "repo_python_str = str(repo_python_dir)\n", "if repo_python_str not in sys.path:\n", "    sys.path.insert(0, repo_python_str)\n", "\n", "from _runtime_setup import ensure_runtime_package_installed\n", "\n", "runtime_version = ensure_runtime_package_installed(\n", "    sys.executable,\n", "    repo_python_dir,\n", ")\n", "\n", "src_dir = repo_python_dir / 'src'\n", "examples_dir = repo_python_dir / 'examples'\n", "src_str = str(src_dir)\n", "examples_str = str(examples_dir)\n", "if src_str not in sys.path:\n", "    sys.path.insert(0, src_str)\n", "if examples_str not in sys.path:\n", "    sys.path.insert(0, examples_str)\n", "\n", "# Force fresh imports after SDK upgrades in the same notebook kernel.\n", "for module_name in list(sys.modules):\n", "    if module_name == 'codex_app_server' or module_name.startswith('codex_app_server.'):\n", "        sys.modules.pop(module_name, None)\n", "\n", "print('Kernel:', sys.executable)\n", "print('SDK source:', src_dir)\n", "print('Runtime package:', runtime_version)\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Cell 2: imports (public only)\n", "# NOTE(review): temporary_sample_image_path is required by Cell 7's context manager;\n", "# it was previously never imported, so Cell 7 raised NameError on Restart & Run All.\n", "# Presumed to live in examples/_bootstrap alongside the other helpers — confirm.\n", "from _bootstrap import assistant_text_from_turn, find_turn_by_id, server_label, temporary_sample_image_path\n", "from codex_app_server import (\n", "    AsyncCodex,\n", "    Codex,\n", "    ImageInput,\n", "    LocalImageInput,\n", "    
TextInput,\n", " retry_on_overload,\n", ")\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Cell 3: simple sync conversation\n", "with Codex() as codex:\n", " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " turn = thread.turn(TextInput('Explain gradient descent in 3 bullets.'))\n", " result = turn.run()\n", " persisted = thread.read(include_turns=True)\n", " persisted_turn = find_turn_by_id(persisted.thread.turns, result.id)\n", "\n", " print('server:', server_label(codex.metadata))\n", " print('status:', result.status)\n", " print(assistant_text_from_turn(persisted_turn))\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Cell 4: multi-turn continuity in same thread\n", "with Codex() as codex:\n", " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", "\n", " first = thread.turn(TextInput('Give a short summary of transformers.')).run()\n", " second = thread.turn(TextInput('Now explain that to a high-school student.')).run()\n", " persisted = thread.read(include_turns=True)\n", " second_turn = find_turn_by_id(persisted.thread.turns, second.id)\n", "\n", " print('first status:', first.status)\n", " print('second status:', second.status)\n", " print('second text:', assistant_text_from_turn(second_turn))\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Cell 5: full thread lifecycle and branching (sync)\n", "with Codex() as codex:\n", " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " first = thread.turn(TextInput('One sentence about structured planning.')).run()\n", " second = thread.turn(TextInput('Now restate it for a junior engineer.')).run()\n", "\n", " reopened = codex.thread_resume(thread.id)\n", " listing_active = codex.thread_list(limit=20, archived=False)\n", " reading = 
reopened.read(include_turns=True)\n", "\n", " _ = reopened.set_name('sdk-lifecycle-demo')\n", " _ = codex.thread_archive(reopened.id)\n", " listing_archived = codex.thread_list(limit=20, archived=True)\n", " unarchived = codex.thread_unarchive(reopened.id)\n", "\n", " resumed_info = 'n/a'\n", " try:\n", " resumed = codex.thread_resume(\n", " unarchived.id,\n", " model='gpt-5.4',\n", " config={'model_reasoning_effort': 'high'},\n", " )\n", " resumed_result = resumed.turn(TextInput('Continue in one short sentence.')).run()\n", " resumed_info = f'{resumed_result.id} {resumed_result.status}'\n", " except Exception as e:\n", " resumed_info = f'skipped({type(e).__name__})'\n", "\n", " forked_info = 'n/a'\n", " try:\n", " forked = codex.thread_fork(unarchived.id, model='gpt-5.4')\n", " forked_result = forked.turn(TextInput('Take a different angle in one short sentence.')).run()\n", " forked_info = f'{forked_result.id} {forked_result.status}'\n", " except Exception as e:\n", " forked_info = f'skipped({type(e).__name__})'\n", "\n", " compact_info = 'sent'\n", " try:\n", " _ = unarchived.compact()\n", " except Exception as e:\n", " compact_info = f'skipped({type(e).__name__})'\n", "\n", " print('Lifecycle OK:', thread.id)\n", " print('first:', first.id, first.status)\n", " print('second:', second.id, second.status)\n", " print('read.turns:', len(reading.thread.turns or []))\n", " print('list.active:', len(listing_active.data))\n", " print('list.archived:', len(listing_archived.data))\n", " print('resumed:', resumed_info)\n", " print('forked:', forked_info)\n", " print('compact:', compact_info)\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Cell 5b: one turn with most optional turn params\n", "from pathlib import Path\n", "from codex_app_server import (\n", " AskForApproval,\n", " Personality,\n", " ReasoningEffort,\n", " ReasoningSummary,\n", " SandboxPolicy,\n", ")\n", "\n", "output_schema = {\n", " 'type': 
'object',\n", " 'properties': {\n", " 'summary': {'type': 'string'},\n", " 'actions': {'type': 'array', 'items': {'type': 'string'}},\n", " },\n", " 'required': ['summary', 'actions'],\n", " 'additionalProperties': False,\n", "}\n", "\n", "sandbox_policy = SandboxPolicy.model_validate({'type': 'readOnly', 'access': {'type': 'fullAccess'}})\n", "summary = ReasoningSummary.model_validate('concise')\n", "\n", "with Codex() as codex:\n", " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " turn = thread.turn(\n", " TextInput('Propose a safe production feature-flag rollout. Return JSON matching the schema.'),\n", " approval_policy=AskForApproval.model_validate('never'),\n", " cwd=str(Path.cwd()),\n", " effort=ReasoningEffort.medium,\n", " model='gpt-5.4',\n", " output_schema=output_schema,\n", " personality=Personality.pragmatic,\n", " sandbox_policy=sandbox_policy,\n", " summary=summary,\n", " )\n", " result = turn.run()\n", " persisted = thread.read(include_turns=True)\n", " persisted_turn = find_turn_by_id(persisted.thread.turns, result.id)\n", "\n", " print('status:', result.status)\n", " print(assistant_text_from_turn(persisted_turn))\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Cell 5c: choose highest model + highest supported reasoning, then run turns\n", "from pathlib import Path\n", "from codex_app_server import (\n", " AskForApproval,\n", " Personality,\n", " ReasoningEffort,\n", " ReasoningSummary,\n", " SandboxPolicy,\n", ")\n", "\n", "reasoning_rank = {\n", " 'none': 0,\n", " 'minimal': 1,\n", " 'low': 2,\n", " 'medium': 3,\n", " 'high': 4,\n", " 'xhigh': 5,\n", "}\n", "\n", "\n", "def pick_highest_model(models):\n", " visible = [m for m in models if not m.hidden] or models\n", " known_names = {m.id for m in visible} | {m.model for m in visible}\n", " top_candidates = [m for m in visible if not (m.upgrade and m.upgrade in known_names)]\n", " pool = 
top_candidates or visible\n", " return max(pool, key=lambda m: (m.model, m.id))\n", "\n", "\n", "def pick_highest_turn_effort(model) -> ReasoningEffort:\n", " if not model.supported_reasoning_efforts:\n", " return ReasoningEffort.medium\n", " best = max(model.supported_reasoning_efforts, key=lambda opt: reasoning_rank.get(opt.reasoning_effort.value, -1))\n", " return ReasoningEffort(best.reasoning_effort.value)\n", "\n", "\n", "output_schema = {\n", " 'type': 'object',\n", " 'properties': {\n", " 'summary': {'type': 'string'},\n", " 'actions': {'type': 'array', 'items': {'type': 'string'}},\n", " },\n", " 'required': ['summary', 'actions'],\n", " 'additionalProperties': False,\n", "}\n", "sandbox_policy = SandboxPolicy.model_validate({'type': 'readOnly', 'access': {'type': 'fullAccess'}})\n", "\n", "with Codex() as codex:\n", " models = codex.models(include_hidden=True)\n", " selected_model = pick_highest_model(models.data)\n", " selected_effort = pick_highest_turn_effort(selected_model)\n", "\n", " print('selected.model:', selected_model.model)\n", " print('selected.effort:', selected_effort.value)\n", "\n", " thread = codex.thread_start(model=selected_model.model, config={'model_reasoning_effort': selected_effort.value})\n", "\n", " first = thread.turn(\n", " TextInput('Give one short sentence about reliable production releases.'),\n", " model=selected_model.model,\n", " effort=selected_effort,\n", " ).run()\n", " persisted = thread.read(include_turns=True)\n", " first_turn = find_turn_by_id(persisted.thread.turns, first.id)\n", " print('agent.message:', assistant_text_from_turn(first_turn))\n", " print('items:', 0 if first_turn is None else len(first_turn.items or []))\n", "\n", " second = thread.turn(\n", " TextInput('Return JSON for a safe feature-flag rollout plan.'),\n", " approval_policy=AskForApproval.model_validate('never'),\n", " cwd=str(Path.cwd()),\n", " effort=selected_effort,\n", " model=selected_model.model,\n", " output_schema=output_schema,\n", " 
personality=Personality.pragmatic,\n", " sandbox_policy=sandbox_policy,\n", " summary=ReasoningSummary.model_validate('concise'),\n", " ).run()\n", " persisted = thread.read(include_turns=True)\n", " second_turn = find_turn_by_id(persisted.thread.turns, second.id)\n", " print('agent.message.params:', assistant_text_from_turn(second_turn))\n", " print('items.params:', 0 if second_turn is None else len(second_turn.items or []))\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Cell 6: multimodal with remote image\n", "remote_image_url = 'https://raw.githubusercontent.com/github/explore/main/topics/python/python.png'\n", "\n", "with Codex() as codex:\n", " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " result = thread.turn([\n", " TextInput('What do you see in this image? 3 bullets.'),\n", " ImageInput(remote_image_url),\n", " ]).run()\n", " persisted = thread.read(include_turns=True)\n", " persisted_turn = find_turn_by_id(persisted.thread.turns, result.id)\n", "\n", " print('status:', result.status)\n", " print(assistant_text_from_turn(persisted_turn))\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Cell 7: multimodal with local image (generated temporary file)\n", "with temporary_sample_image_path() as local_image_path:\n", " with Codex() as codex:\n", " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " result = thread.turn([\n", " TextInput('Describe the colors and layout in this generated local image in 2 bullets.'),\n", " LocalImageInput(str(local_image_path.resolve())),\n", " ]).run()\n", " persisted = thread.read(include_turns=True)\n", " persisted_turn = find_turn_by_id(persisted.thread.turns, result.id)\n", "\n", " print('status:', result.status)\n", " print(assistant_text_from_turn(persisted_turn))\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": 
{}, "outputs": [], "source": [ "# Cell 8: retry-on-overload pattern\n", "with Codex() as codex:\n", " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", "\n", " result = retry_on_overload(\n", " lambda: thread.turn(TextInput('List 5 failure modes in distributed systems.')).run(),\n", " max_attempts=3,\n", " initial_delay_s=0.25,\n", " max_delay_s=2.0,\n", " )\n", " persisted = thread.read(include_turns=True)\n", " persisted_turn = find_turn_by_id(persisted.thread.turns, result.id)\n", "\n", " print('status:', result.status)\n", " print(assistant_text_from_turn(persisted_turn))\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Cell 9: full thread lifecycle and branching (async)\n", "import asyncio\n", "\n", "\n", "async def async_lifecycle_demo():\n", " async with AsyncCodex() as codex:\n", " thread = await codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " first = await (await thread.turn(TextInput('One sentence about structured planning.'))).run()\n", " second = await (await thread.turn(TextInput('Now restate it for a junior engineer.'))).run()\n", "\n", " reopened = await codex.thread_resume(thread.id)\n", " listing_active = await codex.thread_list(limit=20, archived=False)\n", " reading = await reopened.read(include_turns=True)\n", "\n", " _ = await reopened.set_name('sdk-lifecycle-demo')\n", " _ = await codex.thread_archive(reopened.id)\n", " listing_archived = await codex.thread_list(limit=20, archived=True)\n", " unarchived = await codex.thread_unarchive(reopened.id)\n", "\n", " resumed_info = 'n/a'\n", " try:\n", " resumed = await codex.thread_resume(\n", " unarchived.id,\n", " model='gpt-5.4',\n", " config={'model_reasoning_effort': 'high'},\n", " )\n", " resumed_result = await (await resumed.turn(TextInput('Continue in one short sentence.'))).run()\n", " resumed_info = f'{resumed_result.id} {resumed_result.status}'\n", " except 
Exception as e:\n", " resumed_info = f'skipped({type(e).__name__})'\n", "\n", " forked_info = 'n/a'\n", " try:\n", " forked = await codex.thread_fork(unarchived.id, model='gpt-5.4')\n", " forked_result = await (await forked.turn(TextInput('Take a different angle in one short sentence.'))).run()\n", " forked_info = f'{forked_result.id} {forked_result.status}'\n", " except Exception as e:\n", " forked_info = f'skipped({type(e).__name__})'\n", "\n", " compact_info = 'sent'\n", " try:\n", " _ = await unarchived.compact()\n", " except Exception as e:\n", " compact_info = f'skipped({type(e).__name__})'\n", "\n", " print('Lifecycle OK:', thread.id)\n", " print('first:', first.id, first.status)\n", " print('second:', second.id, second.status)\n", " print('read.turns:', len(reading.thread.turns or []))\n", " print('list.active:', len(listing_active.data))\n", " print('list.archived:', len(listing_archived.data))\n", " print('resumed:', resumed_info)\n", " print('forked:', forked_info)\n", " print('compact:', compact_info)\n", "\n", "\n", "await async_lifecycle_demo()\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Cell 10: async turn controls (best effort steer + interrupt)\n", "import asyncio\n", "\n", "\n", "async def async_stream_demo():\n", " async with AsyncCodex() as codex:\n", " thread = await codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " steer_turn = await thread.turn(TextInput('Count from 1 to 40 with commas, then one summary sentence.'))\n", "\n", " steer_result = 'sent'\n", " try:\n", " _ = await steer_turn.steer(TextInput('Keep it brief and stop after 10 numbers.'))\n", " except Exception as e:\n", " steer_result = f'skipped {type(e).__name__}'\n", "\n", " steer_event_count = 0\n", " steer_completed_status = 'unknown'\n", " steer_completed_turn = None\n", " async for event in steer_turn.stream():\n", " steer_event_count += 1\n", " if event.method == 'turn/completed':\n", 
" steer_completed_turn = event.payload.turn\n", " steer_completed_status = getattr(event.payload.turn.status, 'value', str(event.payload.turn.status))\n", "\n", " steer_preview = assistant_text_from_turn(steer_completed_turn).strip() or '[no assistant text]'\n", "\n", " interrupt_turn = await thread.turn(TextInput('Count from 1 to 200 with commas, then one summary sentence.'))\n", " interrupt_result = 'sent'\n", " try:\n", " _ = await interrupt_turn.interrupt()\n", " except Exception as e:\n", " interrupt_result = f'skipped {type(e).__name__}'\n", "\n", " interrupt_event_count = 0\n", " interrupt_completed_status = 'unknown'\n", " interrupt_completed_turn = None\n", " async for event in interrupt_turn.stream():\n", " interrupt_event_count += 1\n", " if event.method == 'turn/completed':\n", " interrupt_completed_turn = event.payload.turn\n", " interrupt_completed_status = getattr(event.payload.turn.status, 'value', str(event.payload.turn.status))\n", "\n", " interrupt_preview = assistant_text_from_turn(interrupt_completed_turn).strip() or '[no assistant text]'\n", "\n", " print('steer.result:', steer_result)\n", " print('steer.final.status:', steer_completed_status)\n", " print('steer.events.count:', steer_event_count)\n", " print('steer.assistant.preview:', steer_preview)\n", " print('interrupt.result:', interrupt_result)\n", " print('interrupt.final.status:', interrupt_completed_status)\n", " print('interrupt.events.count:', interrupt_event_count)\n", " print('interrupt.assistant.preview:', interrupt_preview)\n", "\n", "\n", "await async_stream_demo()\n" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "name": "python", "version": "3.10+" } }, "nbformat": 4, "nbformat_minor": 5 }