//! Defines the protocol for a Codex session between a client and an agent.
//!
//! Uses a SQ (Submission Queue) / EQ (Event Queue) pattern to asynchronously communicate
//! between user and agent.
use std::collections::HashMap;
use std::collections::HashSet;
use std::ffi::OsStr;
use std::fmt;
use std::path::Path;
use std::path::PathBuf;
use std::str::FromStr;
use std::time::Duration;
use crate::ThreadId;
use crate::approvals::ElicitationRequestEvent;
use crate::config_types::ApprovalsReviewer;
use crate::config_types::CollaborationMode;
use crate::config_types::ModeKind;
use crate::config_types::Personality;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::config_types::ServiceTier;
use crate::config_types::WindowsSandboxLevel;
use crate::custom_prompts::CustomPrompt;
use crate::dynamic_tools::DynamicToolCallOutputContentItem;
use crate::dynamic_tools::DynamicToolCallRequest;
use crate::dynamic_tools::DynamicToolResponse;
use crate::dynamic_tools::DynamicToolSpec;
use crate::items::TurnItem;
use crate::mcp::CallToolResult;
use crate::mcp::RequestId;
use crate::mcp::Resource as McpResource;
use crate::mcp::ResourceTemplate as McpResourceTemplate;
use crate::mcp::Tool as McpTool;
use crate::memory_citation::MemoryCitation;
use crate::message_history::HistoryEntry;
use crate::models::BaseInstructions;
use crate::models::ContentItem;
use crate::models::MessagePhase;
use crate::models::ResponseItem;
use crate::models::WebSearchAction;
use crate::num_format::format_with_separators;
use crate::openai_models::ReasoningEffort as ReasoningEffortConfig;
use crate::parse_command::ParsedCommand;
use crate::plan_tool::UpdatePlanArgs;
use crate::request_permissions::RequestPermissionsEvent;
use crate::request_permissions::RequestPermissionsResponse;
use crate::request_user_input::RequestUserInputResponse;
use crate::user_input::UserInput;
use codex_utils_absolute_path::AbsolutePathBuf;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use serde_json::Value;
use serde_with::serde_as;
use strum_macros::Display;
use tracing::error;
use ts_rs::TS;
pub use crate::approvals::ApplyPatchApprovalRequestEvent;
pub use crate::approvals::ElicitationAction;
pub use crate::approvals::ExecApprovalRequestEvent;
pub use crate::approvals::ExecApprovalRequestSkillMetadata;
pub use crate::approvals::ExecPolicyAmendment;
pub use crate::approvals::GuardianAssessmentEvent;
pub use crate::approvals::GuardianAssessmentStatus;
pub use crate::approvals::GuardianRiskLevel;
pub use crate::approvals::NetworkApprovalContext;
pub use crate::approvals::NetworkApprovalProtocol;
pub use crate::approvals::NetworkPolicyAmendment;
pub use crate::approvals::NetworkPolicyRuleAction;
pub use crate::permissions::FileSystemAccessMode;
pub use crate::permissions::FileSystemPath;
pub use crate::permissions::FileSystemSandboxEntry;
pub use crate::permissions::FileSystemSandboxKind;
pub use crate::permissions::FileSystemSandboxPolicy;
pub use crate::permissions::FileSystemSpecialPath;
pub use crate::permissions::NetworkSandboxPolicy;
pub use crate::request_permissions::RequestPermissionsArgs;
pub use crate::request_user_input::RequestUserInputEvent;
/// Open/close tags for special user-input blocks. Used across crates to avoid
/// duplicated hardcoded strings.
pub const USER_INSTRUCTIONS_OPEN_TAG: &str = "<user_instructions>";
pub const USER_INSTRUCTIONS_CLOSE_TAG: &str = "</user_instructions>";
pub const ENVIRONMENT_CONTEXT_OPEN_TAG: &str = "<environment_context>";
pub const ENVIRONMENT_CONTEXT_CLOSE_TAG: &str = "</environment_context>";
pub const APPS_INSTRUCTIONS_OPEN_TAG: &str = "<apps_instructions>";
pub const APPS_INSTRUCTIONS_CLOSE_TAG: &str = "</apps_instructions>";
pub const SKILLS_INSTRUCTIONS_OPEN_TAG: &str = "<skills_instructions>";
pub const SKILLS_INSTRUCTIONS_CLOSE_TAG: &str = "</skills_instructions>";
pub const PLUGINS_INSTRUCTIONS_OPEN_TAG: &str = "<plugins_instructions>";
pub const PLUGINS_INSTRUCTIONS_CLOSE_TAG: &str = "</plugins_instructions>";
pub const COLLABORATION_MODE_OPEN_TAG: &str = "<collaboration_mode>";
pub const COLLABORATION_MODE_CLOSE_TAG: &str = "</collaboration_mode>";
pub const REALTIME_CONVERSATION_OPEN_TAG: &str = "<realtime_conversation>";
pub const REALTIME_CONVERSATION_CLOSE_TAG: &str = "</realtime_conversation>";
/// Marker that introduces the user's actual request within a composed prompt.
pub const USER_MESSAGE_BEGIN: &str = "## My request for Codex:";
/// Submission Queue Entry - requests from user
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema)]
pub struct Submission {
/// Unique id for this Submission to correlate with Events
pub id: String,
/// Payload
pub op: Op,
/// Optional W3C trace carrier propagated across async submission handoffs.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub trace: Option,
}
/// W3C Trace Context carrier (`traceparent` / `tracestate` headers) used to
/// propagate distributed-tracing metadata across submission handoffs.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
pub struct W3cTraceContext {
    /// The W3C `traceparent` header value, when present.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub traceparent: Option<String>,
    /// The W3C `tracestate` header value, when present.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub tracestate: Option<String>,
}
/// Config payload for refreshing MCP servers.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema)]
pub struct McpServerRefreshConfig {
    // Opaque JSON describing the MCP server configuration; parsed by core.
    pub mcp_servers: Value,
    // Opaque JSON selecting how MCP OAuth credentials are stored.
    pub mcp_oauth_credentials_store_mode: Value,
}
/// Parameters for starting a realtime conversation stream.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct ConversationStartParams {
    /// Initial prompt/instructions for the realtime session.
    pub prompt: String,
    /// Existing realtime session to resume, when present.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
}
/// A single frame of audio exchanged with the realtime stream.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
pub struct RealtimeAudioFrame {
    /// Encoded audio payload (string-encoded; e.g. base64).
    pub data: String,
    /// Sample rate of the audio in Hz.
    pub sample_rate: u32,
    /// Number of interleaved channels in `data`.
    pub num_channels: u16,
    /// Number of samples per channel in this frame, when known.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub samples_per_channel: Option<u32>,
    /// Conversation item this frame belongs to, when known.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub item_id: Option<String>,
}
/// Incremental transcript text emitted by the realtime stream.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
pub struct RealtimeTranscriptDelta {
    // Newly produced transcript fragment.
    pub delta: String,
}
/// One entry of a realtime conversation transcript.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
pub struct RealtimeTranscriptEntry {
    // Speaker role for this entry (free-form string).
    pub role: String,
    // Transcript text for this entry.
    pub text: String,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
pub struct RealtimeHandoffRequested {
pub handoff_id: String,
pub item_id: String,
pub input_transcript: String,
pub active_transcript: Vec,
}
/// Signals that speech was detected in the realtime input audio.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
pub struct RealtimeInputAudioSpeechStarted {
    /// Conversation item associated with the detected speech, when known.
    pub item_id: Option<String>,
}
/// Signals that an in-flight realtime response was cancelled.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
pub struct RealtimeResponseCancelled {
    /// The cancelled response, when known.
    pub response_id: Option<String>,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
pub enum RealtimeEvent {
SessionUpdated {
session_id: String,
instructions: Option,
},
InputAudioSpeechStarted(RealtimeInputAudioSpeechStarted),
InputTranscriptDelta(RealtimeTranscriptDelta),
OutputTranscriptDelta(RealtimeTranscriptDelta),
AudioOut(RealtimeAudioFrame),
ResponseCancelled(RealtimeResponseCancelled),
ConversationItemAdded(Value),
ConversationItemDone {
item_id: String,
},
HandoffRequested(RealtimeHandoffRequested),
Error(String),
}
/// Parameters for sending an audio frame into the realtime stream.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct ConversationAudioParams {
    // The audio frame to feed into the realtime conversation.
    pub frame: RealtimeAudioFrame,
}
/// Parameters for sending text input into the realtime stream.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct ConversationTextParams {
    // The text to feed into the realtime conversation.
    pub text: String,
}
/// Submission operation
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema)]
#[serde(tag = "type", rename_all = "snake_case")]
#[allow(clippy::large_enum_variant)]
#[non_exhaustive]
pub enum Op {
/// Abort current task without terminating background terminal processes.
/// This server sends [`EventMsg::TurnAborted`] in response.
Interrupt,
/// Terminate all running background terminal processes for this thread.
/// Use this when callers intentionally want to stop long-lived background shells.
CleanBackgroundTerminals,
/// Start a realtime conversation stream.
RealtimeConversationStart(ConversationStartParams),
/// Send audio input to the running realtime conversation stream.
RealtimeConversationAudio(ConversationAudioParams),
/// Send text input to the running realtime conversation stream.
RealtimeConversationText(ConversationTextParams),
/// Close the running realtime conversation stream.
RealtimeConversationClose,
/// Legacy user input.
///
/// Prefer [`Op::UserTurn`] so the caller provides full turn context
/// (cwd/approval/sandbox/model/etc.) for each turn.
UserInput {
/// User input items, see `InputItem`
items: Vec,
/// Optional JSON Schema used to constrain the final assistant message for this turn.
#[serde(skip_serializing_if = "Option::is_none")]
final_output_json_schema: Option,
},
/// Similar to [`Op::UserInput`], but contains additional context required
/// for a turn of a [`crate::codex_thread::CodexThread`].
UserTurn {
/// User input items, see `InputItem`
items: Vec,
/// `cwd` to use with the [`SandboxPolicy`] and potentially tool calls
/// such as `local_shell`.
cwd: PathBuf,
/// Policy to use for command approval.
approval_policy: AskForApproval,
/// Policy to use for tool calls such as `local_shell`.
sandbox_policy: SandboxPolicy,
/// Must be a valid model slug for the configured client session
/// associated with this conversation.
model: String,
/// Will only be honored if the model is configured to use reasoning.
#[serde(skip_serializing_if = "Option::is_none")]
effort: Option,
/// Will only be honored if the model is configured to use reasoning.
///
/// When omitted, the session keeps the current setting (which allows core to
/// fall back to the selected model's default on new sessions).
#[serde(default, skip_serializing_if = "Option::is_none")]
summary: Option,
/// Optional service tier override for this turn.
///
/// Use `Some(Some(_))` to set a specific tier for this turn, `Some(None)` to
/// explicitly clear the tier for this turn, or `None` to keep the existing
/// session preference.
#[serde(default, skip_serializing_if = "Option::is_none")]
service_tier: Option>,
// The JSON schema to use for the final assistant message
final_output_json_schema: Option,
/// EXPERIMENTAL - set a pre-set collaboration mode.
/// Takes precedence over model, effort, and developer instructions if set.
#[serde(skip_serializing_if = "Option::is_none")]
collaboration_mode: Option,
/// Optional personality override for this turn.
#[serde(skip_serializing_if = "Option::is_none")]
personality: Option,
},
/// Override parts of the persistent turn context for subsequent turns.
///
/// All fields are optional; when omitted, the existing value is preserved.
/// This does not enqueue any input – it only updates defaults used for
/// turns that rely on persistent session-level context (for example,
/// [`Op::UserInput`]).
OverrideTurnContext {
/// Updated `cwd` for sandbox/tool calls.
#[serde(skip_serializing_if = "Option::is_none")]
cwd: Option,
/// Updated command approval policy.
#[serde(skip_serializing_if = "Option::is_none")]
approval_policy: Option,
/// Updated approval reviewer for future approval prompts.
#[serde(skip_serializing_if = "Option::is_none")]
approvals_reviewer: Option,
/// Updated sandbox policy for tool calls.
#[serde(skip_serializing_if = "Option::is_none")]
sandbox_policy: Option,
/// Updated Windows sandbox mode for tool execution.
#[serde(skip_serializing_if = "Option::is_none")]
windows_sandbox_level: Option,
/// Updated model slug. When set, the model info is derived
/// automatically.
#[serde(skip_serializing_if = "Option::is_none")]
model: Option,
/// Updated reasoning effort (honored only for reasoning-capable models).
///
/// Use `Some(Some(_))` to set a specific effort, `Some(None)` to clear
/// the effort, or `None` to leave the existing value unchanged.
#[serde(skip_serializing_if = "Option::is_none")]
effort: Option>,
/// Updated reasoning summary preference (honored only for reasoning-capable models).
#[serde(skip_serializing_if = "Option::is_none")]
summary: Option,
/// Updated service tier preference for future turns.
///
/// Use `Some(Some(_))` to set a specific tier, `Some(None)` to clear the
/// preference, or `None` to leave the existing value unchanged.
#[serde(skip_serializing_if = "Option::is_none")]
service_tier: Option>,
/// EXPERIMENTAL - set a pre-set collaboration mode.
/// Takes precedence over model, effort, and developer instructions if set.
#[serde(skip_serializing_if = "Option::is_none")]
collaboration_mode: Option,
/// Updated personality preference.
#[serde(skip_serializing_if = "Option::is_none")]
personality: Option,
},
/// Approve a command execution
ExecApproval {
/// The id of the submission we are approving
id: String,
/// Turn id associated with the approval event, when available.
#[serde(default, skip_serializing_if = "Option::is_none")]
turn_id: Option,
/// The user's decision in response to the request.
decision: ReviewDecision,
},
/// Approve a code patch
PatchApproval {
/// The id of the submission we are approving
id: String,
/// The user's decision in response to the request.
decision: ReviewDecision,
},
/// Resolve an MCP elicitation request.
ResolveElicitation {
/// Name of the MCP server that issued the request.
server_name: String,
/// Request identifier from the MCP server.
request_id: RequestId,
/// User's decision for the request.
decision: ElicitationAction,
/// Structured user input supplied for accepted elicitations.
#[serde(default, skip_serializing_if = "Option::is_none")]
content: Option,
/// Optional client metadata associated with the elicitation response.
#[serde(default, skip_serializing_if = "Option::is_none")]
meta: Option,
},
/// Resolve a request_user_input tool call.
#[serde(rename = "user_input_answer", alias = "request_user_input_response")]
UserInputAnswer {
/// Turn id for the in-flight request.
id: String,
/// User-provided answers.
response: RequestUserInputResponse,
},
/// Resolve a request_permissions tool call.
RequestPermissionsResponse {
/// Call id for the in-flight request.
id: String,
/// User-granted permissions.
response: RequestPermissionsResponse,
},
/// Resolve a dynamic tool call request.
DynamicToolResponse {
/// Call id for the in-flight request.
id: String,
/// Tool output payload.
response: DynamicToolResponse,
},
/// Append an entry to the persistent cross-session message history.
///
/// Note the entry is not guaranteed to be logged if the user has
/// history disabled, it matches the list of "sensitive" patterns, etc.
AddToHistory {
/// The message text to be stored.
text: String,
},
/// Request a single history entry identified by `log_id` + `offset`.
GetHistoryEntryRequest { offset: usize, log_id: u64 },
/// Request the list of MCP tools available across all configured servers.
/// Reply is delivered via `EventMsg::McpListToolsResponse`.
ListMcpTools,
/// Request MCP servers to reinitialize and refresh cached tool lists.
RefreshMcpServers { config: McpServerRefreshConfig },
/// Reload user config layer overrides for the active session.
///
/// This updates runtime config-derived behavior (for example app
/// enable/disable state) without restarting the thread.
ReloadUserConfig,
/// Request the list of available custom prompts.
ListCustomPrompts,
/// Request the list of skills for the provided `cwd` values or the session default.
ListSkills {
/// Working directories to scope repo skills discovery.
///
/// When empty, the session default working directory is used.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
cwds: Vec,
/// When true, recompute skills even if a cached result exists.
#[serde(default, skip_serializing_if = "std::ops::Not::not")]
force_reload: bool,
},
/// Request the agent to summarize the current conversation context.
/// The agent will use its existing context (either conversation history or previous response id)
/// to generate a summary which will be returned as an AgentMessage event.
Compact,
/// Drop all persisted memory artifacts and memory-tracking DB rows.
DropMemories,
/// Trigger a single pass of the startup memory pipeline.
UpdateMemories,
/// Set a user-facing thread name in the persisted rollout metadata.
/// This is a local-only operation handled by codex-core; it does not
/// involve the model.
SetThreadName { name: String },
/// Request Codex to undo a turn (turn are stacked so it is the same effect as CMD + Z).
Undo,
/// Request Codex to drop the last N user turns from in-memory context.
///
/// This does not attempt to revert local filesystem changes. Clients are
/// responsible for undoing any edits on disk.
ThreadRollback { num_turns: u32 },
/// Request a code review from the agent.
Review { review_request: ReviewRequest },
/// Request to shut down codex instance.
Shutdown,
/// Execute a user-initiated one-off shell command (triggered by "!cmd").
///
/// The command string is executed using the user's default shell and may
/// include shell syntax (pipes, redirects, etc.). Output is streamed via
/// `ExecCommand*` events and the UI regains control upon `TurnComplete`.
RunUserShellCommand {
/// The raw command string after '!'
command: String,
},
/// Request the list of available models.
ListModels,
}
impl Op {
    /// Returns the stable snake_case identifier for this operation.
    ///
    /// The strings intentionally mirror the `#[serde(tag = "type",
    /// rename_all = "snake_case")]` wire representation, so callers can
    /// label/log an `Op` without serializing the whole payload.
    pub fn kind(&self) -> &'static str {
        match self {
            Self::Interrupt => "interrupt",
            Self::CleanBackgroundTerminals => "clean_background_terminals",
            Self::RealtimeConversationStart(_) => "realtime_conversation_start",
            Self::RealtimeConversationAudio(_) => "realtime_conversation_audio",
            Self::RealtimeConversationText(_) => "realtime_conversation_text",
            Self::RealtimeConversationClose => "realtime_conversation_close",
            Self::UserInput { .. } => "user_input",
            Self::UserTurn { .. } => "user_turn",
            Self::OverrideTurnContext { .. } => "override_turn_context",
            Self::ExecApproval { .. } => "exec_approval",
            Self::PatchApproval { .. } => "patch_approval",
            Self::ResolveElicitation { .. } => "resolve_elicitation",
            Self::UserInputAnswer { .. } => "user_input_answer",
            Self::RequestPermissionsResponse { .. } => "request_permissions_response",
            Self::DynamicToolResponse { .. } => "dynamic_tool_response",
            Self::AddToHistory { .. } => "add_to_history",
            Self::GetHistoryEntryRequest { .. } => "get_history_entry_request",
            Self::ListMcpTools => "list_mcp_tools",
            Self::RefreshMcpServers { .. } => "refresh_mcp_servers",
            Self::ReloadUserConfig => "reload_user_config",
            Self::ListCustomPrompts => "list_custom_prompts",
            Self::ListSkills { .. } => "list_skills",
            Self::Compact => "compact",
            Self::DropMemories => "drop_memories",
            Self::UpdateMemories => "update_memories",
            Self::SetThreadName { .. } => "set_thread_name",
            Self::Undo => "undo",
            Self::ThreadRollback { .. } => "thread_rollback",
            Self::Review { .. } => "review",
            Self::Shutdown => "shutdown",
            Self::RunUserShellCommand { .. } => "run_user_shell_command",
            Self::ListModels => "list_models",
        }
    }
}
/// Determines the conditions under which the user is consulted to approve
/// running the command proposed by Codex.
#[derive(
    Debug,
    Clone,
    Copy,
    Default,
    PartialEq,
    Eq,
    Hash,
    Serialize,
    Deserialize,
    Display,
    JsonSchema,
    TS,
)]
#[serde(rename_all = "kebab-case")]
#[strum(serialize_all = "kebab-case")]
pub enum AskForApproval {
    /// Under this policy, only "known safe" commands—as determined by
    /// `is_safe_command()`—that **only read files** are auto‑approved.
    /// Everything else will ask the user to approve.
    // Note: wire/display form is "untrusted" (see serde/strum renames below),
    // not the kebab-cased variant name.
    #[serde(rename = "untrusted")]
    #[strum(serialize = "untrusted")]
    UnlessTrusted,
    /// DEPRECATED: *All* commands are auto‑approved, but they are expected to
    /// run inside a sandbox where network access is disabled and writes are
    /// confined to a specific set of paths. If the command fails, it will be
    /// escalated to the user to approve execution without a sandbox.
    /// Prefer `OnRequest` for interactive runs or `Never` for non-interactive
    /// runs.
    OnFailure,
    /// The model decides when to ask the user for approval.
    #[default]
    OnRequest,
    /// Fine-grained controls for individual approval flows.
    ///
    /// When a field is `true`, commands in that category are allowed. When it
    /// is `false`, those requests are automatically rejected instead of shown
    /// to the user.
    // Note: strum display form is "granular"; serde kebab-cases the variant.
    #[strum(serialize = "granular")]
    Granular(GranularApprovalConfig),
    /// Never ask the user to approve commands. Failures are immediately returned
    /// to the model, and never escalated to the user for approval.
    Never,
}
/// Per-category switches used by [`AskForApproval::Granular`].
///
/// Fields marked `#[serde(default)]` deserialize as `false` when omitted.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema, TS)]
pub struct GranularApprovalConfig {
    /// Whether to allow shell command approval requests, including inline
    /// `with_additional_permissions` and `require_escalated` requests.
    pub sandbox_approval: bool,
    /// Whether to allow prompts triggered by execpolicy `prompt` rules.
    pub rules: bool,
    /// Whether to allow approval prompts triggered by skill script execution.
    #[serde(default)]
    pub skill_approval: bool,
    /// Whether to allow prompts triggered by the `request_permissions` tool.
    #[serde(default)]
    pub request_permissions: bool,
    /// Whether to allow MCP elicitation prompts.
    pub mcp_elicitations: bool,
}
impl GranularApprovalConfig {
    // Const accessors mirroring each field; `self` is taken by value since the
    // struct is `Copy`.
    /// Whether shell-command sandbox approval prompts are allowed.
    pub const fn allows_sandbox_approval(self) -> bool {
        self.sandbox_approval
    }
    /// Whether execpolicy `prompt`-rule prompts are allowed.
    pub const fn allows_rules_approval(self) -> bool {
        self.rules
    }
    /// Whether skill-script approval prompts are allowed.
    pub const fn allows_skill_approval(self) -> bool {
        self.skill_approval
    }
    /// Whether `request_permissions` tool prompts are allowed.
    pub const fn allows_request_permissions(self) -> bool {
        self.request_permissions
    }
    /// Whether MCP elicitation prompts are allowed.
    pub const fn allows_mcp_elicitations(self) -> bool {
        self.mcp_elicitations
    }
}
/// Represents whether outbound network access is available to the agent.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Display, Default, JsonSchema, TS,
)]
#[serde(rename_all = "kebab-case")]
#[strum(serialize_all = "kebab-case")]
pub enum NetworkAccess {
    /// Outbound network access is blocked. This is the default.
    #[default]
    Restricted,
    /// Outbound network access is allowed.
    Enabled,
}
impl NetworkAccess {
    /// Returns `true` when outbound network access is permitted.
    pub fn is_enabled(self) -> bool {
        match self {
            NetworkAccess::Enabled => true,
            NetworkAccess::Restricted => false,
        }
    }
}
/// Serde default for `ReadOnlyAccess::Restricted::include_platform_defaults`:
/// platform read roots are included unless explicitly disabled.
fn default_include_platform_defaults() -> bool {
    true
}
/// Determines how read-only file access is granted inside a restricted
/// sandbox.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Display, Default, JsonSchema, TS)]
#[strum(serialize_all = "kebab-case")]
#[serde(tag = "type", rename_all = "kebab-case")]
#[ts(tag = "type")]
pub enum ReadOnlyAccess {
/// Restrict reads to an explicit set of roots.
///
/// When `include_platform_defaults` is `true`, platform defaults required
/// for basic execution are included in addition to `readable_roots`.
Restricted {
/// Include built-in platform read roots required for basic process
/// execution.
#[serde(default = "default_include_platform_defaults")]
include_platform_defaults: bool,
/// Additional absolute roots that should be readable.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
readable_roots: Vec,
},
/// Allow unrestricted file reads.
#[default]
FullAccess,
}
impl ReadOnlyAccess {
pub fn has_full_disk_read_access(&self) -> bool {
matches!(self, ReadOnlyAccess::FullAccess)
}
/// Returns true if platform defaults should be included for restricted read access.
pub fn include_platform_defaults(&self) -> bool {
matches!(
self,
ReadOnlyAccess::Restricted {
include_platform_defaults: true,
..
}
)
}
/// Returns the readable roots for restricted read access.
///
/// For [`ReadOnlyAccess::FullAccess`], returns an empty list because
/// callers should grant blanket read access instead.
pub fn get_readable_roots_with_cwd(&self, cwd: &Path) -> Vec {
let mut roots: Vec = match self {
ReadOnlyAccess::FullAccess => return Vec::new(),
ReadOnlyAccess::Restricted { readable_roots, .. } => {
let mut roots = readable_roots.clone();
match AbsolutePathBuf::from_absolute_path(cwd) {
Ok(cwd_root) => roots.push(cwd_root),
Err(err) => {
error!("Ignoring invalid cwd {cwd:?} for sandbox readable root: {err}");
}
}
roots
}
};
let mut seen = HashSet::new();
roots.retain(|root| seen.insert(root.to_path_buf()));
roots
}
}
/// Determines execution restrictions for model shell commands.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Display, JsonSchema, TS)]
#[strum(serialize_all = "kebab-case")]
#[serde(tag = "type", rename_all = "kebab-case")]
pub enum SandboxPolicy {
/// No restrictions whatsoever. Use with caution.
#[serde(rename = "danger-full-access")]
DangerFullAccess,
/// Read-only access configuration.
#[serde(rename = "read-only")]
ReadOnly {
/// Read access granted while running under this policy.
#[serde(
default,
skip_serializing_if = "ReadOnlyAccess::has_full_disk_read_access"
)]
access: ReadOnlyAccess,
/// When set to `true`, outbound network access is allowed. `false` by
/// default.
#[serde(default, skip_serializing_if = "std::ops::Not::not")]
network_access: bool,
},
/// Indicates the process is already in an external sandbox. Allows full
/// disk access while honoring the provided network setting.
#[serde(rename = "external-sandbox")]
ExternalSandbox {
/// Whether the external sandbox permits outbound network traffic.
#[serde(default)]
network_access: NetworkAccess,
},
/// Same as `ReadOnly` but additionally grants write access to the current
/// working directory ("workspace").
#[serde(rename = "workspace-write")]
WorkspaceWrite {
/// Additional folders (beyond cwd and possibly TMPDIR) that should be
/// writable from within the sandbox.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
writable_roots: Vec,
/// Read access granted while running under this policy.
#[serde(
default,
skip_serializing_if = "ReadOnlyAccess::has_full_disk_read_access"
)]
read_only_access: ReadOnlyAccess,
/// When set to `true`, outbound network access is allowed. `false` by
/// default.
#[serde(default)]
network_access: bool,
/// When set to `true`, will NOT include the per-user `TMPDIR`
/// environment variable among the default writable roots. Defaults to
/// `false`.
#[serde(default)]
exclude_tmpdir_env_var: bool,
/// When set to `true`, will NOT include the `/tmp` among the default
/// writable roots on UNIX. Defaults to `false`.
#[serde(default)]
exclude_slash_tmp: bool,
},
}
/// A writable root path accompanied by a list of subpaths that should remain
/// read‑only even when the root is writable. This is primarily used to ensure
/// that folders containing files that could be modified to escalate the
/// privileges of the agent (e.g. `.codex`, `.git`, notably `.git/hooks`) under
/// a writable root are not modified by the agent.
#[derive(Debug, Clone, PartialEq, Eq, JsonSchema)]
pub struct WritableRoot {
pub root: AbsolutePathBuf,
/// By construction, these subpaths are all under `root`.
pub read_only_subpaths: Vec,
}
impl WritableRoot {
    /// Returns true when `path` lies under this writable root and is not
    /// covered by any of its read-only subpaths.
    pub fn is_path_writable(&self, path: &Path) -> bool {
        // Must be under the root, and must not fall under a read-only subpath.
        path.starts_with(&self.root)
            && !self
                .read_only_subpaths
                .iter()
                .any(|subpath| path.starts_with(subpath))
    }
}
impl FromStr for SandboxPolicy {
type Err = serde_json::Error;
fn from_str(s: &str) -> Result {
serde_json::from_str(s)
}
}
impl FromStr for FileSystemSandboxPolicy {
type Err = serde_json::Error;
fn from_str(s: &str) -> Result {
serde_json::from_str(s)
}
}
impl FromStr for NetworkSandboxPolicy {
type Err = serde_json::Error;
fn from_str(s: &str) -> Result {
serde_json::from_str(s)
}
}
impl SandboxPolicy {
/// Returns a policy with read-only disk access and no network.
pub fn new_read_only_policy() -> Self {
SandboxPolicy::ReadOnly {
access: ReadOnlyAccess::FullAccess,
network_access: false,
}
}
/// Returns a policy that can read the entire disk, but can only write to
/// the current working directory and the per-user tmp dir on macOS. It does
/// not allow network access.
pub fn new_workspace_write_policy() -> Self {
SandboxPolicy::WorkspaceWrite {
writable_roots: vec![],
read_only_access: ReadOnlyAccess::FullAccess,
network_access: false,
exclude_tmpdir_env_var: false,
exclude_slash_tmp: false,
}
}
pub fn has_full_disk_read_access(&self) -> bool {
match self {
SandboxPolicy::DangerFullAccess => true,
SandboxPolicy::ExternalSandbox { .. } => true,
SandboxPolicy::ReadOnly { access, .. } => access.has_full_disk_read_access(),
SandboxPolicy::WorkspaceWrite {
read_only_access, ..
} => read_only_access.has_full_disk_read_access(),
}
}
pub fn has_full_disk_write_access(&self) -> bool {
match self {
SandboxPolicy::DangerFullAccess => true,
SandboxPolicy::ExternalSandbox { .. } => true,
SandboxPolicy::ReadOnly { .. } => false,
SandboxPolicy::WorkspaceWrite { .. } => false,
}
}
pub fn has_full_network_access(&self) -> bool {
match self {
SandboxPolicy::DangerFullAccess => true,
SandboxPolicy::ExternalSandbox { network_access } => network_access.is_enabled(),
SandboxPolicy::ReadOnly { network_access, .. } => *network_access,
SandboxPolicy::WorkspaceWrite { network_access, .. } => *network_access,
}
}
/// Returns true if platform defaults should be included for restricted read access.
pub fn include_platform_defaults(&self) -> bool {
if self.has_full_disk_read_access() {
return false;
}
match self {
SandboxPolicy::ReadOnly { access, .. } => access.include_platform_defaults(),
SandboxPolicy::WorkspaceWrite {
read_only_access, ..
} => read_only_access.include_platform_defaults(),
SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. } => false,
}
}
/// Returns the list of readable roots (tailored to the current working
/// directory) when read access is restricted.
///
/// For policies with full read access, this returns an empty list because
/// callers should grant blanket reads.
pub fn get_readable_roots_with_cwd(&self, cwd: &Path) -> Vec {
let mut roots = match self {
SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. } => Vec::new(),
SandboxPolicy::ReadOnly { access, .. } => access.get_readable_roots_with_cwd(cwd),
SandboxPolicy::WorkspaceWrite {
read_only_access, ..
} => {
let mut roots = read_only_access.get_readable_roots_with_cwd(cwd);
roots.extend(
self.get_writable_roots_with_cwd(cwd)
.into_iter()
.map(|root| root.root),
);
roots
}
};
let mut seen = HashSet::new();
roots.retain(|root| seen.insert(root.to_path_buf()));
roots
}
/// Returns the list of writable roots (tailored to the current working
/// directory) together with subpaths that should remain read‑only under
/// each writable root.
pub fn get_writable_roots_with_cwd(&self, cwd: &Path) -> Vec {
match self {
SandboxPolicy::DangerFullAccess => Vec::new(),
SandboxPolicy::ExternalSandbox { .. } => Vec::new(),
SandboxPolicy::ReadOnly { .. } => Vec::new(),
SandboxPolicy::WorkspaceWrite {
writable_roots,
read_only_access: _,
exclude_tmpdir_env_var,
exclude_slash_tmp,
network_access: _,
} => {
// Start from explicitly configured writable roots.
let mut roots: Vec = writable_roots.clone();
// Always include defaults: cwd, /tmp (if present on Unix), and
// on macOS, the per-user TMPDIR unless explicitly excluded.
// TODO(mbolin): cwd param should be AbsolutePathBuf.
let cwd_absolute = AbsolutePathBuf::from_absolute_path(cwd);
match cwd_absolute {
Ok(cwd) => {
roots.push(cwd);
}
Err(e) => {
error!(
"Ignoring invalid cwd {:?} for sandbox writable root: {}",
cwd, e
);
}
}
// Include /tmp on Unix unless explicitly excluded.
if cfg!(unix) && !exclude_slash_tmp {
#[allow(clippy::expect_used)]
let slash_tmp =
AbsolutePathBuf::from_absolute_path("/tmp").expect("/tmp is absolute");
if slash_tmp.as_path().is_dir() {
roots.push(slash_tmp);
}
}
// Include $TMPDIR unless explicitly excluded. On macOS, TMPDIR
// is per-user, so writes to TMPDIR should not be readable by
// other users on the system.
//
// By comparison, TMPDIR is not guaranteed to be defined on
// Linux or Windows, but supporting it here gives users a way to
// provide the model with their own temporary directory without
// having to hardcode it in the config.
if !exclude_tmpdir_env_var
&& let Some(tmpdir) = std::env::var_os("TMPDIR")
&& !tmpdir.is_empty()
{
match AbsolutePathBuf::from_absolute_path(PathBuf::from(&tmpdir)) {
Ok(tmpdir_path) => {
roots.push(tmpdir_path);
}
Err(e) => {
error!(
"Ignoring invalid TMPDIR value {tmpdir:?} for sandbox writable root: {e}",
);
}
}
}
// For each root, compute subpaths that should remain read-only.
roots
.into_iter()
.map(|writable_root| WritableRoot {
read_only_subpaths: default_read_only_subpaths_for_writable_root(
&writable_root,
),
root: writable_root,
})
.collect()
}
}
}
}
fn default_read_only_subpaths_for_writable_root(
writable_root: &AbsolutePathBuf,
) -> Vec {
let mut subpaths: Vec = Vec::new();
#[allow(clippy::expect_used)]
let top_level_git = writable_root
.join(".git")
.expect(".git is a valid relative path");
// This applies to typical repos (directory .git), worktrees/submodules
// (file .git with gitdir pointer), and bare repos when the gitdir is the
// writable root itself.
let top_level_git_is_file = top_level_git.as_path().is_file();
let top_level_git_is_dir = top_level_git.as_path().is_dir();
if top_level_git_is_dir || top_level_git_is_file {
if top_level_git_is_file
&& is_git_pointer_file(&top_level_git)
&& let Some(gitdir) = resolve_gitdir_from_file(&top_level_git)
{
subpaths.push(gitdir);
}
subpaths.push(top_level_git);
}
// Make .agents/skills and .codex/config.toml and related files read-only
// to the agent, by default.
for subdir in &[".agents", ".codex"] {
#[allow(clippy::expect_used)]
let top_level_codex = writable_root.join(subdir).expect("valid relative path");
if top_level_codex.as_path().is_dir() {
subpaths.push(top_level_codex);
}
}
let mut deduped = Vec::with_capacity(subpaths.len());
let mut seen = HashSet::new();
for path in subpaths {
if seen.insert(path.to_path_buf()) {
deduped.push(path);
}
}
deduped
}
/// True when `path` is an existing regular file whose name is exactly `.git`
/// (the worktree/submodule "gitdir pointer" layout).
fn is_git_pointer_file(path: &AbsolutePathBuf) -> bool {
    let p = path.as_path();
    p.is_file() && p.file_name() == Some(OsStr::new(".git"))
}
fn resolve_gitdir_from_file(dot_git: &AbsolutePathBuf) -> Option {
let contents = match std::fs::read_to_string(dot_git.as_path()) {
Ok(contents) => contents,
Err(err) => {
error!(
"Failed to read {path} for gitdir pointer: {err}",
path = dot_git.as_path().display()
);
return None;
}
};
let trimmed = contents.trim();
let (_, gitdir_raw) = match trimmed.split_once(':') {
Some(parts) => parts,
None => {
error!(
"Expected {path} to contain a gitdir pointer, but it did not match `gitdir: `.",
path = dot_git.as_path().display()
);
return None;
}
};
let gitdir_raw = gitdir_raw.trim();
if gitdir_raw.is_empty() {
error!(
"Expected {path} to contain a gitdir pointer, but it was empty.",
path = dot_git.as_path().display()
);
return None;
}
let base = match dot_git.as_path().parent() {
Some(base) => base,
None => {
error!(
"Unable to resolve parent directory for {path}.",
path = dot_git.as_path().display()
);
return None;
}
};
let gitdir_path = match AbsolutePathBuf::resolve_path_against_base(gitdir_raw, base) {
Ok(path) => path,
Err(err) => {
error!(
"Failed to resolve gitdir path {gitdir_raw} from {path}: {err}",
path = dot_git.as_path().display()
);
return None;
}
};
if !gitdir_path.as_path().exists() {
error!(
"Resolved gitdir path {path} does not exist.",
path = gitdir_path.as_path().display()
);
return None;
}
Some(gitdir_path)
}
/// Event Queue Entry - events from agent.
///
/// Pairs a payload with the submission that triggered it.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct Event {
    /// Submission `id` that this event is correlated with.
    pub id: String,
    /// Payload
    pub msg: EventMsg,
}
/// Response event from the agent
/// NOTE: Make sure none of these values have optional types, as it will mess up the extension code-gen.
#[derive(Debug, Clone, Deserialize, Serialize, Display, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "snake_case")]
#[ts(tag = "type")]
#[strum(serialize_all = "snake_case")]
pub enum EventMsg {
    /// Error while executing a submission
    Error(ErrorEvent),
    /// Warning issued while processing a submission. Unlike `Error`, this
    /// indicates the turn continued but the user should still be notified.
    Warning(WarningEvent),
    /// Realtime conversation lifecycle start event.
    RealtimeConversationStarted(RealtimeConversationStartedEvent),
    /// Realtime conversation streaming payload event.
    RealtimeConversationRealtime(RealtimeConversationRealtimeEvent),
    /// Realtime conversation lifecycle close event.
    RealtimeConversationClosed(RealtimeConversationClosedEvent),
    /// Model routing changed from the requested model to a different model.
    ModelReroute(ModelRerouteEvent),
    /// Conversation history was compacted (either automatically or manually).
    ContextCompacted(ContextCompactedEvent),
    /// Conversation history was rolled back by dropping the last N user turns.
    ThreadRolledBack(ThreadRolledBackEvent),
    /// Agent has started a turn.
    /// v1 wire format uses `task_started`; accept `turn_started` for v2 interop.
    #[serde(rename = "task_started", alias = "turn_started")]
    TurnStarted(TurnStartedEvent),
    /// Agent has completed all actions.
    /// v1 wire format uses `task_complete`; accept `turn_complete` for v2 interop.
    #[serde(rename = "task_complete", alias = "turn_complete")]
    TurnComplete(TurnCompleteEvent),
    /// Usage update for the current session, including totals and last turn.
    /// Optional means unknown — UIs should not display when `None`.
    TokenCount(TokenCountEvent),
    /// Agent text output message
    AgentMessage(AgentMessageEvent),
    /// User/system input message (what was sent to the model)
    UserMessage(UserMessageEvent),
    /// Agent text output delta message
    AgentMessageDelta(AgentMessageDeltaEvent),
    /// Reasoning event from agent.
    AgentReasoning(AgentReasoningEvent),
    /// Agent reasoning delta event from agent.
    AgentReasoningDelta(AgentReasoningDeltaEvent),
    /// Raw chain-of-thought from agent.
    AgentReasoningRawContent(AgentReasoningRawContentEvent),
    /// Agent reasoning content delta event from agent.
    AgentReasoningRawContentDelta(AgentReasoningRawContentDeltaEvent),
    /// Signaled when the model begins a new reasoning summary section (e.g., a new titled block).
    AgentReasoningSectionBreak(AgentReasoningSectionBreakEvent),
    /// Ack the client's configure message.
    SessionConfigured(SessionConfiguredEvent),
    /// Updated session metadata (e.g., thread name changes).
    ThreadNameUpdated(ThreadNameUpdatedEvent),
    /// Incremental MCP startup progress updates.
    McpStartupUpdate(McpStartupUpdateEvent),
    /// Aggregate MCP startup completion summary.
    McpStartupComplete(McpStartupCompleteEvent),
    /// MCP tool invocation started; paired with `McpToolCallEnd` via `call_id`.
    McpToolCallBegin(McpToolCallBeginEvent),
    /// MCP tool invocation finished, carrying its result (possibly an error).
    McpToolCallEnd(McpToolCallEndEvent),
    /// Web search started; paired with `WebSearchEnd` via `call_id`.
    WebSearchBegin(WebSearchBeginEvent),
    /// Web search finished, carrying the query and the action taken.
    WebSearchEnd(WebSearchEndEvent),
    /// Image generation started; paired with `ImageGenerationEnd` via `call_id`.
    ImageGenerationBegin(ImageGenerationBeginEvent),
    /// Image generation finished.
    ImageGenerationEnd(ImageGenerationEndEvent),
    /// Notification that the server is about to execute a command.
    ExecCommandBegin(ExecCommandBeginEvent),
    /// Incremental chunk of output from a running command.
    ExecCommandOutputDelta(ExecCommandOutputDeltaEvent),
    /// Terminal interaction for an in-progress command (stdin sent and stdout observed).
    TerminalInteraction(TerminalInteractionEvent),
    /// Counterpart of `ExecCommandBegin`: the command finished.
    ExecCommandEnd(ExecCommandEndEvent),
    /// Notification that the agent attached a local image via the view_image tool.
    ViewImageToolCall(ViewImageToolCallEvent),
    ExecApprovalRequest(ExecApprovalRequestEvent),
    RequestPermissions(RequestPermissionsEvent),
    RequestUserInput(RequestUserInputEvent),
    /// Request to invoke a client-provided dynamic tool.
    DynamicToolCallRequest(DynamicToolCallRequest),
    /// Result of a dynamic tool call; see `DynamicToolCallResponseEvent`.
    DynamicToolCallResponse(DynamicToolCallResponseEvent),
    ElicitationRequest(ElicitationRequestEvent),
    ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent),
    /// Structured lifecycle event for a guardian-reviewed approval request.
    GuardianAssessment(GuardianAssessmentEvent),
    /// Notification advising the user that something they are using has been
    /// deprecated and should be phased out.
    DeprecationNotice(DeprecationNoticeEvent),
    BackgroundEvent(BackgroundEventEvent),
    UndoStarted(UndoStartedEvent),
    UndoCompleted(UndoCompletedEvent),
    /// Notification that a model stream experienced an error or disconnect
    /// and the system is handling it (e.g., retrying with backoff).
    StreamError(StreamErrorEvent),
    /// Notification that the agent is about to apply a code patch. Mirrors
    /// `ExecCommandBegin` so front‑ends can show progress indicators.
    PatchApplyBegin(PatchApplyBeginEvent),
    /// Notification that a patch application has finished.
    PatchApplyEnd(PatchApplyEndEvent),
    TurnDiff(TurnDiffEvent),
    /// Response to GetHistoryEntryRequest.
    GetHistoryEntryResponse(GetHistoryEntryResponseEvent),
    /// List of MCP tools available to the agent.
    McpListToolsResponse(McpListToolsResponseEvent),
    /// List of custom prompts available to the agent.
    ListCustomPromptsResponse(ListCustomPromptsResponseEvent),
    /// List of skills available to the agent.
    ListSkillsResponse(ListSkillsResponseEvent),
    /// Notification that skill data may have been updated and clients may want to reload.
    SkillsUpdateAvailable,
    /// Updated plan from the plan tool.
    PlanUpdate(UpdatePlanArgs),
    TurnAborted(TurnAbortedEvent),
    /// Notification that the agent is shutting down.
    ShutdownComplete,
    /// Entered review mode.
    EnteredReviewMode(ReviewRequest),
    /// Exited review mode with an optional final result to apply.
    ExitedReviewMode(ExitedReviewModeEvent),
    /// Raw model `ResponseItem` passthrough.
    RawResponseItem(RawResponseItemEvent),
    /// A turn item began (v2 item stream); see `ItemStartedEvent`.
    ItemStarted(ItemStartedEvent),
    /// A turn item finished (v2 item stream); see `ItemCompletedEvent`.
    ItemCompleted(ItemCompletedEvent),
    HookStarted(HookStartedEvent),
    HookCompleted(HookCompletedEvent),
    /// Per-item agent message delta (v2); maps to `AgentMessageDelta` for v1 clients.
    AgentMessageContentDelta(AgentMessageContentDeltaEvent),
    /// Per-item plan text delta (v2).
    PlanDelta(PlanDeltaEvent),
    /// Per-item reasoning summary delta (v2); maps to `AgentReasoningDelta` for v1 clients.
    ReasoningContentDelta(ReasoningContentDeltaEvent),
    /// Per-item raw reasoning delta (v2); maps to `AgentReasoningRawContentDelta` for v1 clients.
    ReasoningRawContentDelta(ReasoningRawContentDeltaEvent),
    /// Collab interaction: agent spawn begin.
    CollabAgentSpawnBegin(CollabAgentSpawnBeginEvent),
    /// Collab interaction: agent spawn end.
    CollabAgentSpawnEnd(CollabAgentSpawnEndEvent),
    /// Collab interaction: agent interaction begin.
    CollabAgentInteractionBegin(CollabAgentInteractionBeginEvent),
    /// Collab interaction: agent interaction end.
    CollabAgentInteractionEnd(CollabAgentInteractionEndEvent),
    /// Collab interaction: waiting begin.
    CollabWaitingBegin(CollabWaitingBeginEvent),
    /// Collab interaction: waiting end.
    CollabWaitingEnd(CollabWaitingEndEvent),
    /// Collab interaction: close begin.
    CollabCloseBegin(CollabCloseBeginEvent),
    /// Collab interaction: close end.
    CollabCloseEnd(CollabCloseEndEvent),
    /// Collab interaction: resume begin.
    CollabResumeBegin(CollabResumeBeginEvent),
    /// Collab interaction: resume end.
    CollabResumeEnd(CollabResumeEndEvent),
}
/// Names of lifecycle events that can trigger hooks.
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub enum HookEventName {
    SessionStart,
    UserPromptSubmit,
    Stop,
}
/// Kind of handler that implements a hook.
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub enum HookHandlerType {
    Command,
    Prompt,
    Agent,
}
/// Synchronous vs. asynchronous hook execution.
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub enum HookExecutionMode {
    Sync,
    Async,
}
/// Scope a hook run is associated with (whole thread or a single turn).
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub enum HookScope {
    Thread,
    Turn,
}
/// Lifecycle status of a hook run.
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub enum HookRunStatus {
    Running,
    Completed,
    Failed,
    Blocked,
    Stopped,
}
/// Category of a single entry of hook output.
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub enum HookOutputEntryKind {
    Warning,
    Stop,
    Feedback,
    Context,
    Error,
}
/// One categorized piece of text produced by a hook run.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub struct HookOutputEntry {
    pub kind: HookOutputEntryKind,
    pub text: String,
}
/// Snapshot of a single hook run, carried by `HookStarted`/`HookCompleted`.
// NOTE(review): several field types below lost their generic parameters in
// this copy (`Option<…>` / `Vec<…>`). The `#[ts(type = "number | null")]`
// attributes suggest integer timestamps/durations — restore exact types from VCS.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub struct HookRunSummary {
    pub id: String,
    pub event_name: HookEventName,
    pub handler_type: HookHandlerType,
    pub execution_mode: HookExecutionMode,
    pub scope: HookScope,
    pub source_path: PathBuf,
    pub display_order: i64,
    pub status: HookRunStatus,
    pub status_message: Option,
    // presumably an epoch-based timestamp — confirm against the producer
    #[ts(type = "number")]
    pub started_at: i64,
    #[ts(type = "number | null")]
    pub completed_at: Option,
    #[ts(type = "number | null")]
    pub duration_ms: Option,
    pub entries: Vec,
}
/// Payload for `EventMsg::HookStarted`.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub struct HookStartedEvent {
    // NOTE(review): `Option` lost its type parameter in this copy — restore from VCS.
    pub turn_id: Option,
    pub run: HookRunSummary,
}
/// Payload for `EventMsg::HookCompleted`.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub struct HookCompletedEvent {
    // NOTE(review): `Option` lost its type parameter in this copy — restore from VCS.
    pub turn_id: Option,
    pub run: HookRunSummary,
}
/// Protocol version of a realtime conversation.
#[derive(Debug, Clone, Copy, Default, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub enum RealtimeConversationVersion {
    #[default]
    V1,
    V2,
}
/// Payload for `EventMsg::RealtimeConversationStarted`.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct RealtimeConversationStartedEvent {
    // NOTE(review): `Option` lost its type parameter in this copy — restore from VCS.
    pub session_id: Option,
    pub version: RealtimeConversationVersion,
}
/// Payload for `EventMsg::RealtimeConversationRealtime` (streaming data).
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct RealtimeConversationRealtimeEvent {
    pub payload: RealtimeEvent,
}
/// Payload for `EventMsg::RealtimeConversationClosed`.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct RealtimeConversationClosedEvent {
    /// Close reason; omitted from the JSON encoding when absent.
    // NOTE(review): `Option` lost its type parameter in this copy — restore from VCS.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reason: Option,
}
impl From for EventMsg {
fn from(event: CollabAgentSpawnBeginEvent) -> Self {
EventMsg::CollabAgentSpawnBegin(event)
}
}
impl From for EventMsg {
fn from(event: CollabAgentSpawnEndEvent) -> Self {
EventMsg::CollabAgentSpawnEnd(event)
}
}
impl From for EventMsg {
fn from(event: CollabAgentInteractionBeginEvent) -> Self {
EventMsg::CollabAgentInteractionBegin(event)
}
}
impl From for EventMsg {
fn from(event: CollabAgentInteractionEndEvent) -> Self {
EventMsg::CollabAgentInteractionEnd(event)
}
}
impl From for EventMsg {
fn from(event: CollabWaitingBeginEvent) -> Self {
EventMsg::CollabWaitingBegin(event)
}
}
impl From for EventMsg {
fn from(event: CollabWaitingEndEvent) -> Self {
EventMsg::CollabWaitingEnd(event)
}
}
impl From for EventMsg {
fn from(event: CollabCloseBeginEvent) -> Self {
EventMsg::CollabCloseBegin(event)
}
}
impl From for EventMsg {
fn from(event: CollabCloseEndEvent) -> Self {
EventMsg::CollabCloseEnd(event)
}
}
impl From for EventMsg {
fn from(event: CollabResumeBeginEvent) -> Self {
EventMsg::CollabResumeBegin(event)
}
}
impl From for EventMsg {
fn from(event: CollabResumeEndEvent) -> Self {
EventMsg::CollabResumeEnd(event)
}
}
/// Agent lifecycle status, derived from emitted events.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS, Default)]
#[serde(rename_all = "snake_case")]
#[ts(rename_all = "snake_case")]
pub enum AgentStatus {
    /// Agent is waiting for initialization.
    #[default]
    PendingInit,
    /// Agent is currently running.
    Running,
    /// Agent's current turn was interrupted and it may receive more input.
    Interrupted,
    /// Agent is done. Contains the final assistant message.
    // NOTE(review): `Option` lost its type parameter in this copy (presumably
    // the message `String`) — restore from VCS.
    Completed(Option),
    /// Agent encountered an error.
    Errored(String),
    /// Agent has been shutdown.
    Shutdown,
    /// Agent is not found.
    NotFound,
}
/// Codex errors that we expose to clients.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(rename_all = "snake_case")]
pub enum CodexErrorInfo {
    ContextWindowExceeded,
    UsageLimitExceeded,
    ServerOverloaded,
    // NOTE(review): the `Option` payloads below lost their type parameter in
    // this copy (presumably an HTTP status integer) — restore from VCS.
    HttpConnectionFailed {
        http_status_code: Option,
    },
    /// Failed to connect to the response SSE stream.
    ResponseStreamConnectionFailed {
        http_status_code: Option,
    },
    InternalServerError,
    Unauthorized,
    BadRequest,
    SandboxError,
    /// The response SSE stream disconnected in the middle of a turn before completion.
    ResponseStreamDisconnected {
        http_status_code: Option,
    },
    /// Reached the retry limit for responses.
    ResponseTooManyFailedAttempts {
        http_status_code: Option,
    },
    ThreadRollbackFailed,
    Other,
}
impl CodexErrorInfo {
    /// Whether this error should mark the current turn as failed when replaying history.
    pub fn affects_turn_status(&self) -> bool {
        // Exhaustive on purpose (no `_` arm): adding a new variant forces an
        // explicit decision here at compile time.
        match self {
            Self::ThreadRollbackFailed => false,
            Self::ContextWindowExceeded
            | Self::UsageLimitExceeded
            | Self::ServerOverloaded
            | Self::HttpConnectionFailed { .. }
            | Self::ResponseStreamConnectionFailed { .. }
            | Self::InternalServerError
            | Self::Unauthorized
            | Self::BadRequest
            | Self::SandboxError
            | Self::ResponseStreamDisconnected { .. }
            | Self::ResponseTooManyFailedAttempts { .. }
            | Self::Other => true,
        }
    }
}
/// Payload for `EventMsg::RawResponseItem`: a raw model `ResponseItem` passthrough.
#[derive(Debug, Clone, Deserialize, Serialize, TS, JsonSchema)]
pub struct RawResponseItemEvent {
    pub item: ResponseItem,
}
/// Payload for `EventMsg::ItemStarted`: a turn item began, addressed by thread and turn.
#[derive(Debug, Clone, Deserialize, Serialize, TS, JsonSchema)]
pub struct ItemStartedEvent {
    pub thread_id: ThreadId,
    pub turn_id: String,
    pub item: TurnItem,
}
impl HasLegacyEvent for ItemStartedEvent {
fn as_legacy_events(&self, _: bool) -> Vec {
match &self.item {
TurnItem::WebSearch(item) => vec![EventMsg::WebSearchBegin(WebSearchBeginEvent {
call_id: item.id.clone(),
})],
TurnItem::ImageGeneration(item) => {
vec![EventMsg::ImageGenerationBegin(ImageGenerationBeginEvent {
call_id: item.id.clone(),
})]
}
_ => Vec::new(),
}
}
}
/// Payload for `EventMsg::ItemCompleted`: a turn item finished, addressed by thread and turn.
#[derive(Debug, Clone, Deserialize, Serialize, TS, JsonSchema)]
pub struct ItemCompletedEvent {
    pub thread_id: ThreadId,
    pub turn_id: String,
    pub item: TurnItem,
}
pub trait HasLegacyEvent {
fn as_legacy_events(&self, show_raw_agent_reasoning: bool) -> Vec;
}
impl HasLegacyEvent for ItemCompletedEvent {
fn as_legacy_events(&self, show_raw_agent_reasoning: bool) -> Vec {
self.item.as_legacy_events(show_raw_agent_reasoning)
}
}
/// Payload for `EventMsg::AgentMessageContentDelta`: an incremental chunk of
/// agent message text addressed to a specific thread/turn/item.
#[derive(Debug, Clone, Deserialize, Serialize, TS, JsonSchema)]
pub struct AgentMessageContentDeltaEvent {
    pub thread_id: String,
    pub turn_id: String,
    pub item_id: String,
    pub delta: String,
}
impl HasLegacyEvent for AgentMessageContentDeltaEvent {
fn as_legacy_events(&self, _: bool) -> Vec {
vec![EventMsg::AgentMessageDelta(AgentMessageDeltaEvent {
delta: self.delta.clone(),
})]
}
}
/// Payload for `EventMsg::PlanDelta`: an incremental chunk of plan text
/// addressed to a specific thread/turn/item.
#[derive(Debug, Clone, Deserialize, Serialize, TS, JsonSchema)]
pub struct PlanDeltaEvent {
    pub thread_id: String,
    pub turn_id: String,
    pub item_id: String,
    pub delta: String,
}
/// Payload for `EventMsg::ReasoningContentDelta`: an incremental chunk of
/// reasoning-summary text addressed to a specific thread/turn/item.
#[derive(Debug, Clone, Deserialize, Serialize, TS, JsonSchema)]
pub struct ReasoningContentDeltaEvent {
    pub thread_id: String,
    pub turn_id: String,
    pub item_id: String,
    pub delta: String,
    // load with default value so it's backward compatible with the old format.
    #[serde(default)]
    pub summary_index: i64,
}
impl HasLegacyEvent for ReasoningContentDeltaEvent {
fn as_legacy_events(&self, _: bool) -> Vec {
vec![EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent {
delta: self.delta.clone(),
})]
}
}
/// Payload for `EventMsg::ReasoningRawContentDelta`: an incremental chunk of
/// raw reasoning content addressed to a specific thread/turn/item.
#[derive(Debug, Clone, Deserialize, Serialize, TS, JsonSchema)]
pub struct ReasoningRawContentDeltaEvent {
    pub thread_id: String,
    pub turn_id: String,
    pub item_id: String,
    pub delta: String,
    // load with default value so it's backward compatible with the old format.
    #[serde(default)]
    pub content_index: i64,
}
impl HasLegacyEvent for ReasoningRawContentDeltaEvent {
fn as_legacy_events(&self, _: bool) -> Vec {
vec![EventMsg::AgentReasoningRawContentDelta(
AgentReasoningRawContentDeltaEvent {
delta: self.delta.clone(),
},
)]
}
}
impl HasLegacyEvent for EventMsg {
    /// Maps the v2 item/delta events to their v1 equivalents; every other
    /// event is already v1-shaped and yields nothing.
    fn as_legacy_events(&self, show_raw_agent_reasoning: bool) -> Vec<EventMsg> {
        match self {
            EventMsg::ItemStarted(event) => event.as_legacy_events(show_raw_agent_reasoning),
            EventMsg::ItemCompleted(event) => event.as_legacy_events(show_raw_agent_reasoning),
            EventMsg::AgentMessageContentDelta(event) => {
                event.as_legacy_events(show_raw_agent_reasoning)
            }
            EventMsg::ReasoningContentDelta(event) => {
                event.as_legacy_events(show_raw_agent_reasoning)
            }
            EventMsg::ReasoningRawContentDelta(event) => {
                event.as_legacy_events(show_raw_agent_reasoning)
            }
            _ => Vec::new(),
        }
    }
}
/// Payload for `EventMsg::ExitedReviewMode`.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct ExitedReviewModeEvent {
    /// Final review result to apply, when one was produced.
    // NOTE(review): `Option` lost its type parameter in this copy — restore from VCS.
    pub review_output: Option,
}
// Individual event payload types matching each `EventMsg` variant.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct ErrorEvent {
pub message: String,
#[serde(default)]
pub codex_error_info: Option,
}
impl ErrorEvent {
/// Whether this error should mark the current turn as failed when replaying history.
pub fn affects_turn_status(&self) -> bool {
self.codex_error_info
.as_ref()
.is_none_or(CodexErrorInfo::affects_turn_status)
}
}
/// Payload for `EventMsg::Warning`: the turn continues, but the user should see this.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct WarningEvent {
    pub message: String,
}
/// Why the model was rerouted; see `ModelRerouteEvent`.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(rename_all = "snake_case")]
pub enum ModelRerouteReason {
    HighRiskCyberActivity,
}
/// Payload for `EventMsg::ModelReroute`: routing moved the request to a different model.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
pub struct ModelRerouteEvent {
    pub from_model: String,
    pub to_model: String,
    pub reason: ModelRerouteReason,
}
/// Payload for `EventMsg::ContextCompacted` (carries no data).
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct ContextCompactedEvent;
/// Payload for `EventMsg::TurnComplete`.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct TurnCompleteEvent {
    pub turn_id: String,
    // NOTE(review): `Option` lost its type parameter in this copy (presumably
    // the message `String`) — restore from VCS.
    pub last_agent_message: Option,
}
/// Payload for `EventMsg::TurnStarted`.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct TurnStartedEvent {
    pub turn_id: String,
    // TODO(aibrahim): make this not optional
    // NOTE(review): `Option` lost its type parameter in this copy — restore from VCS.
    pub model_context_window: Option,
    #[serde(default)]
    pub collaboration_mode_kind: ModeKind,
}
/// Token counts for model usage; all fields serialize as JS numbers.
#[derive(Debug, Clone, Deserialize, Serialize, Default, PartialEq, Eq, JsonSchema, TS)]
pub struct TokenUsage {
    #[ts(type = "number")]
    pub input_tokens: i64,
    /// Portion of `input_tokens` that was served from the prompt cache.
    #[ts(type = "number")]
    pub cached_input_tokens: i64,
    #[ts(type = "number")]
    pub output_tokens: i64,
    #[ts(type = "number")]
    pub reasoning_output_tokens: i64,
    #[ts(type = "number")]
    pub total_tokens: i64,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
pub struct TokenUsageInfo {
pub total_token_usage: TokenUsage,
pub last_token_usage: TokenUsage,
// TODO(aibrahim): make this not optional
#[ts(type = "number | null")]
pub model_context_window: Option,
}
impl TokenUsageInfo {
pub fn new_or_append(
info: &Option,
last: &Option,
model_context_window: Option,
) -> Option {
if info.is_none() && last.is_none() {
return None;
}
let mut info = match info {
Some(info) => info.clone(),
None => Self {
total_token_usage: TokenUsage::default(),
last_token_usage: TokenUsage::default(),
model_context_window,
},
};
if let Some(last) = last {
info.append_last_usage(last);
}
if let Some(model_context_window) = model_context_window {
info.model_context_window = Some(model_context_window);
}
Some(info)
}
pub fn append_last_usage(&mut self, last: &TokenUsage) {
self.total_token_usage.add_assign(last);
self.last_token_usage = last.clone();
}
pub fn fill_to_context_window(&mut self, context_window: i64) {
let previous_total = self.total_token_usage.total_tokens;
let delta = (context_window - previous_total).max(0);
self.model_context_window = Some(context_window);
self.total_token_usage = TokenUsage {
total_tokens: context_window,
..TokenUsage::default()
};
self.last_token_usage = TokenUsage {
total_tokens: delta,
..TokenUsage::default()
};
}
pub fn full_context_window(context_window: i64) -> Self {
let mut info = Self {
total_token_usage: TokenUsage::default(),
last_token_usage: TokenUsage::default(),
model_context_window: Some(context_window),
};
info.fill_to_context_window(context_window);
info
}
}
/// Payload for `EventMsg::TokenCount`.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct TokenCountEvent {
    // NOTE(review): the `Option` fields below lost their type parameters in
    // this copy (presumably `TokenUsageInfo` / `RateLimitSnapshot`) — restore from VCS.
    pub info: Option,
    pub rate_limits: Option,
}
/// Snapshot of the account's rate-limit state.
// NOTE(review): `Option` parameters below were stripped in this copy — restore from VCS.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct RateLimitSnapshot {
    pub limit_id: Option,
    pub limit_name: Option,
    pub primary: Option,
    pub secondary: Option,
    pub credits: Option,
    pub plan_type: Option,
}
/// One rolling rate-limit window.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct RateLimitWindow {
    /// Percentage (0-100) of the window that has been consumed.
    pub used_percent: f64,
    /// Rolling window duration, in minutes.
    #[ts(type = "number | null")]
    pub window_minutes: Option,
    /// Unix timestamp (seconds since epoch) when the window resets.
    #[ts(type = "number | null")]
    pub resets_at: Option,
}
/// Credit balance information attached to a rate-limit snapshot.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct CreditsSnapshot {
    pub has_credits: bool,
    pub unlimited: bool,
    // NOTE(review): `Option` lost its type parameter in this copy — restore from VCS.
    pub balance: Option,
}
// Includes prompts, tools and space to call compact.
// Used to normalize `percent_of_context_window_remaining`.
const BASELINE_TOKENS: i64 = 12000;
impl TokenUsage {
    /// True when no tokens have been recorded at all.
    pub fn is_zero(&self) -> bool {
        self.total_tokens == 0
    }

    /// Cached input tokens, clamped to be non-negative.
    pub fn cached_input(&self) -> i64 {
        self.cached_input_tokens.max(0)
    }

    /// Input tokens that were not served from the cache (never negative).
    pub fn non_cached_input(&self) -> i64 {
        let cached = self.cached_input();
        (self.input_tokens - cached).max(0)
    }

    /// Primary count for display as a single absolute value: non-cached input + output.
    pub fn blended_total(&self) -> i64 {
        let output = self.output_tokens.max(0);
        (self.non_cached_input() + output).max(0)
    }

    /// Tokens currently occupying the context window.
    pub fn tokens_in_context_window(&self) -> i64 {
        self.total_tokens
    }

    /// Estimate the remaining user-controllable percentage of the model's
    /// context window.
    ///
    /// Both numerator and denominator are normalized by `BASELINE_TOKENS`
    /// (tokens always present in the context: system prompt and fixed tool
    /// instructions), so immediately after the first prompt the UI shows
    /// 100% left and trends toward 0% as the effective window fills.
    pub fn percent_of_context_window_remaining(&self, context_window: i64) -> i64 {
        if context_window <= BASELINE_TOKENS {
            return 0;
        }
        let effective_window = context_window - BASELINE_TOKENS;
        let used = (self.tokens_in_context_window() - BASELINE_TOKENS).max(0);
        let remaining = (effective_window - used).max(0);
        let fraction = remaining as f64 / effective_window as f64;
        (fraction * 100.0).clamp(0.0, 100.0).round() as i64
    }

    /// In-place element-wise sum of token counts.
    pub fn add_assign(&mut self, other: &TokenUsage) {
        self.input_tokens += other.input_tokens;
        self.cached_input_tokens += other.cached_input_tokens;
        self.output_tokens += other.output_tokens;
        self.reasoning_output_tokens += other.reasoning_output_tokens;
        self.total_tokens += other.total_tokens;
    }
}
/// Final token-usage summary; see the `Display` impl for its rendering.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema)]
pub struct FinalOutput {
    pub token_usage: TokenUsage,
}
impl From for FinalOutput {
fn from(token_usage: TokenUsage) -> Self {
Self { token_usage }
}
}
impl fmt::Display for FinalOutput {
    /// Renders e.g. `Token usage: total=… input=… (+ … cached) output=… (reasoning …)`,
    /// omitting the cached/reasoning suffixes when those counts are zero.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let usage = &self.token_usage;
        let cached_suffix = if usage.cached_input() > 0 {
            format!(" (+ {} cached)", format_with_separators(usage.cached_input()))
        } else {
            String::new()
        };
        let reasoning_suffix = if usage.reasoning_output_tokens > 0 {
            format!(
                " (reasoning {})",
                format_with_separators(usage.reasoning_output_tokens)
            )
        } else {
            String::new()
        };
        write!(
            f,
            "Token usage: total={} input={}{} output={}{}",
            format_with_separators(usage.blended_total()),
            format_with_separators(usage.non_cached_input()),
            cached_suffix,
            format_with_separators(usage.output_tokens),
            reasoning_suffix
        )
    }
}
/// Payload for `EventMsg::AgentMessage`.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct AgentMessageEvent {
    pub message: String,
    // NOTE(review): the `Option` fields below lost their type parameters in
    // this copy (the imports suggest `MessagePhase` / `MemoryCitation`) — restore from VCS.
    #[serde(default)]
    pub phase: Option,
    #[serde(default)]
    pub memory_citation: Option,
}
/// Payload for `EventMsg::UserMessage`.
// NOTE(review): the `Option<…>`/`Vec<…>` field parameters below were stripped
// in this copy — restore from VCS.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct UserMessageEvent {
    pub message: String,
    /// Image URLs sourced from `UserInput::Image`. These are safe
    /// to replay in legacy UI history events and correspond to images sent to
    /// the model.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub images: Option>,
    /// Local file paths sourced from `UserInput::LocalImage`. These are kept so
    /// the UI can reattach images when editing history, and should not be sent
    /// to the model or treated as API-ready URLs.
    #[serde(default)]
    pub local_images: Vec,
    /// UI-defined spans within `message` used to render or persist special elements.
    #[serde(default)]
    pub text_elements: Vec,
}
/// Payload for `EventMsg::AgentMessageDelta` (v1 text delta).
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct AgentMessageDeltaEvent {
    pub delta: String,
}
/// Payload for `EventMsg::AgentReasoning`.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct AgentReasoningEvent {
    pub text: String,
}
/// Payload for `EventMsg::AgentReasoningRawContent` (raw chain-of-thought).
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct AgentReasoningRawContentEvent {
    pub text: String,
}
/// Payload for `EventMsg::AgentReasoningRawContentDelta`.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct AgentReasoningRawContentDeltaEvent {
    pub delta: String,
}
/// Payload for `EventMsg::AgentReasoningSectionBreak`.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct AgentReasoningSectionBreakEvent {
    // load with default value so it's backward compatible with the old format.
    #[serde(default)]
    pub item_id: String,
    #[serde(default)]
    pub summary_index: i64,
}
/// Payload for `EventMsg::AgentReasoningDelta`.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct AgentReasoningDeltaEvent {
    pub delta: String,
}
/// Identifies a specific MCP tool invocation.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS, PartialEq)]
pub struct McpInvocation {
    /// Name of the MCP server as defined in the config.
    pub server: String,
    /// Name of the tool as given by the MCP server.
    pub tool: String,
    /// Arguments to the tool call.
    // NOTE(review): `Option` lost its type parameter in this copy (likely a
    // JSON value) — restore from VCS.
    pub arguments: Option,
}
/// Payload for `EventMsg::McpToolCallBegin`.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS, PartialEq)]
pub struct McpToolCallBeginEvent {
    /// Identifier so this can be paired with the McpToolCallEnd event.
    pub call_id: String,
    pub invocation: McpInvocation,
}
/// Payload for `EventMsg::McpToolCallEnd`.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS, PartialEq)]
pub struct McpToolCallEndEvent {
    /// Identifier for the corresponding McpToolCallBegin that finished.
    pub call_id: String,
    pub invocation: McpInvocation,
    /// Wall-clock time the call took; serialized as a string for TS.
    #[ts(type = "string")]
    pub duration: Duration,
    /// Result of the tool call. Note this could be an error.
    // NOTE(review): `Result` lost its type parameters in this copy;
    // `is_success` reads `is_error` on the Ok value, which suggests
    // `CallToolResult` — restore from VCS.
    pub result: Result,
}
/// Payload for `EventMsg::DynamicToolCallResponse`.
// NOTE(review): the `Vec`/`Option` parameters below were stripped in this copy
// (likely `DynamicToolCallOutputContentItem` / `String`) — restore from VCS.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS, PartialEq)]
pub struct DynamicToolCallResponseEvent {
    /// Identifier for the corresponding DynamicToolCallRequest.
    pub call_id: String,
    /// Turn ID that this dynamic tool call belongs to.
    pub turn_id: String,
    /// Dynamic tool name.
    pub tool: String,
    /// Dynamic tool call arguments.
    pub arguments: serde_json::Value,
    /// Dynamic tool response content items.
    pub content_items: Vec,
    /// Whether the tool call succeeded.
    pub success: bool,
    /// Optional error text when the tool call failed before producing a response.
    pub error: Option,
    /// The duration of the dynamic tool call.
    #[ts(type = "string")]
    pub duration: Duration,
}
impl McpToolCallEndEvent {
    /// True when the call returned `Ok` and the tool did not flag `is_error`.
    pub fn is_success(&self) -> bool {
        self.result
            .as_ref()
            .is_ok_and(|result| !result.is_error.unwrap_or(false))
    }
}
/// Payload for `EventMsg::WebSearchBegin`.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct WebSearchBeginEvent {
    /// Pairs this begin with the matching `WebSearchEnd`.
    pub call_id: String,
}
/// Payload for `EventMsg::WebSearchEnd`.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct WebSearchEndEvent {
    pub call_id: String,
    pub query: String,
    pub action: WebSearchAction,
}
/// Payload for `EventMsg::ImageGenerationBegin`.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct ImageGenerationBeginEvent {
    /// Pairs this begin with the matching `ImageGenerationEnd`.
    pub call_id: String,
}
/// Payload for `EventMsg::ImageGenerationEnd`.
// NOTE(review): the `Option` field parameters below were stripped in this copy — restore from VCS.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct ImageGenerationEndEvent {
    pub call_id: String,
    pub status: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub revised_prompt: Option,
    pub result: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub saved_path: Option,
}
// Conversation kept for backward compatibility.
/// Response payload for `Op::GetHistory` containing the current session's
/// in-memory transcript.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct ConversationPathResponseEvent {
    pub conversation_id: ThreadId,
    // presumably the on-disk rollout/transcript file — confirm against producer
    pub path: PathBuf,
}
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct ResumedHistory {
pub conversation_id: ThreadId,
pub history: Vec,
pub rollout_path: PathBuf,
}
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub enum InitialHistory {
New,
Resumed(ResumedHistory),
Forked(Vec),
}
impl InitialHistory {
pub fn forked_from_id(&self) -> Option {
match self {
InitialHistory::New => None,
InitialHistory::Resumed(resumed) => {
resumed.history.iter().find_map(|item| match item {
RolloutItem::SessionMeta(meta_line) => meta_line.meta.forked_from_id,
_ => None,
})
}
InitialHistory::Forked(items) => items.iter().find_map(|item| match item {
RolloutItem::SessionMeta(meta_line) => Some(meta_line.meta.id),
_ => None,
}),
}
}
pub fn session_cwd(&self) -> Option {
match self {
InitialHistory::New => None,
InitialHistory::Resumed(resumed) => session_cwd_from_items(&resumed.history),
InitialHistory::Forked(items) => session_cwd_from_items(items),
}
}
pub fn get_rollout_items(&self) -> Vec {
match self {
InitialHistory::New => Vec::new(),
InitialHistory::Resumed(resumed) => resumed.history.clone(),
InitialHistory::Forked(items) => items.clone(),
}
}
pub fn get_event_msgs(&self) -> Option> {
match self {
InitialHistory::New => None,
InitialHistory::Resumed(resumed) => Some(
resumed
.history
.iter()
.filter_map(|ri| match ri {
RolloutItem::EventMsg(ev) => Some(ev.clone()),
_ => None,
})
.collect(),
),
InitialHistory::Forked(items) => Some(
items
.iter()
.filter_map(|ri| match ri {
RolloutItem::EventMsg(ev) => Some(ev.clone()),
_ => None,
})
.collect(),
),
}
}
pub fn get_base_instructions(&self) -> Option {
// TODO: SessionMeta should (in theory) always be first in the history, so we can probably only check the first item?
match self {
InitialHistory::New => None,
InitialHistory::Resumed(resumed) => {
resumed.history.iter().find_map(|item| match item {
RolloutItem::SessionMeta(meta_line) => meta_line.meta.base_instructions.clone(),
_ => None,
})
}
InitialHistory::Forked(items) => items.iter().find_map(|item| match item {
RolloutItem::SessionMeta(meta_line) => meta_line.meta.base_instructions.clone(),
_ => None,
}),
}
}
pub fn get_dynamic_tools(&self) -> Option> {
match self {
InitialHistory::New => None,
InitialHistory::Resumed(resumed) => {
resumed.history.iter().find_map(|item| match item {
RolloutItem::SessionMeta(meta_line) => meta_line.meta.dynamic_tools.clone(),
_ => None,
})
}
InitialHistory::Forked(items) => items.iter().find_map(|item| match item {
RolloutItem::SessionMeta(meta_line) => meta_line.meta.dynamic_tools.clone(),
_ => None,
}),
}
}
}
fn session_cwd_from_items(items: &[RolloutItem]) -> Option {
items.iter().find_map(|item| match item {
RolloutItem::SessionMeta(meta_line) => Some(meta_line.meta.cwd.clone()),
_ => None,
})
}
/// Which product surface created a session.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema, TS, Default)]
#[serde(rename_all = "lowercase")]
#[ts(rename_all = "lowercase")]
pub enum SessionSource {
    Cli,
    #[default]
    VSCode,
    Exec,
    Mcp,
    /// Free-form source name supplied at startup.
    Custom(String),
    /// Session spawned internally on behalf of another session.
    SubAgent(SubAgentSource),
    /// Fallback for source values this build does not recognize.
    #[serde(other)]
    Unknown,
}
/// Why an internally spawned (sub-agent) session was created.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(rename_all = "snake_case")]
pub enum SubAgentSource {
    /// Child session running a code review.
    Review,
    /// Child session performing history compaction.
    Compact,
    /// Agent spawned via the collaboration thread-spawn tool.
    ThreadSpawn {
        /// Thread that requested the spawn.
        parent_thread_id: ThreadId,
        /// Spawn nesting depth below the root session.
        depth: i32,
        // NOTE(review): the `Option` fields below lost their type parameters
        // (presumably `Option<String>`) — restore from version control.
        #[serde(default)]
        agent_nickname: Option,
        #[serde(default, alias = "agent_type")]
        agent_role: Option,
    },
    /// Background memory consolidation pass.
    MemoryConsolidation,
    /// Any other internally defined reason.
    Other(String),
}
impl fmt::Display for SessionSource {
    /// Renders the stable lowercase identifier for this source.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            SessionSource::Custom(source) => write!(f, "{source}"),
            SessionSource::SubAgent(sub_source) => write!(f, "subagent_{sub_source}"),
            SessionSource::Cli => write!(f, "cli"),
            SessionSource::VSCode => write!(f, "vscode"),
            SessionSource::Exec => write!(f, "exec"),
            SessionSource::Mcp => write!(f, "mcp"),
            SessionSource::Unknown => write!(f, "unknown"),
        }
    }
}
impl SessionSource {
pub fn from_startup_arg(value: &str) -> Result {
let trimmed = value.trim();
if trimmed.is_empty() {
return Err("session source must not be empty");
}
let normalized = trimmed.to_ascii_lowercase();
Ok(match normalized.as_str() {
"cli" => SessionSource::Cli,
"vscode" => SessionSource::VSCode,
"exec" => SessionSource::Exec,
"mcp" | "appserver" | "app-server" | "app_server" => SessionSource::Mcp,
"unknown" => SessionSource::Unknown,
_ => SessionSource::Custom(normalized),
})
}
pub fn get_nickname(&self) -> Option {
match self {
SessionSource::SubAgent(SubAgentSource::ThreadSpawn { agent_nickname, .. }) => {
agent_nickname.clone()
}
SessionSource::SubAgent(SubAgentSource::MemoryConsolidation) => {
Some("Morpheus".to_string())
}
_ => None,
}
}
pub fn get_agent_role(&self) -> Option {
match self {
SessionSource::SubAgent(SubAgentSource::ThreadSpawn { agent_role, .. }) => {
agent_role.clone()
}
SessionSource::SubAgent(SubAgentSource::MemoryConsolidation) => {
Some("memory builder".to_string())
}
_ => None,
}
}
pub fn restriction_product(&self) -> Option {
match self {
SessionSource::Custom(source) => Product::from_session_source_name(source),
SessionSource::Cli
| SessionSource::VSCode
| SessionSource::Exec
| SessionSource::Mcp
| SessionSource::Unknown => Some(Product::Codex),
SessionSource::SubAgent(_) => None,
}
}
pub fn matches_product_restriction(&self, products: &[Product]) -> bool {
products.is_empty()
|| self
.restriction_product()
.is_some_and(|product| product.matches_product_restriction(products))
}
}
impl fmt::Display for SubAgentSource {
    /// Renders the stable snake_case identifier for this sub-agent source.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Holds the one dynamically-built variant so every arm can yield &str.
        let rendered: String;
        let text = match self {
            SubAgentSource::Review => "review",
            SubAgentSource::Compact => "compact",
            SubAgentSource::MemoryConsolidation => "memory_consolidation",
            SubAgentSource::ThreadSpawn {
                parent_thread_id,
                depth,
                ..
            } => {
                rendered = format!("thread_spawn_{parent_thread_id}_d{depth}");
                &rendered
            }
            SubAgentSource::Other(other) => other,
        };
        f.write_str(text)
    }
}
/// SessionMeta contains session-level data that doesn't correspond to a specific turn.
///
/// NOTE: There used to be an `instructions` field here, which stored user_instructions, but we
/// now save that on TurnContext. base_instructions stores the base instructions for the session,
/// and should be used when there is no config override.
#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema, TS)]
pub struct SessionMeta {
    /// Unique identifier of this session's thread.
    pub id: ThreadId,
    // NOTE(review): the `Option` fields in this struct lost their type
    // parameters (e.g. `Option<ThreadId>`, `Option<String>`) — restore from VCS.
    /// Thread this session was forked from, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub forked_from_id: Option,
    /// Session timestamp stored as a string — presumably creation time; confirm with writer.
    pub timestamp: String,
    /// Working directory the session was started in.
    pub cwd: PathBuf,
    /// Client that originated the session.
    pub originator: String,
    /// CLI version that recorded the session.
    pub cli_version: String,
    /// Surface that created the session; `#[serde(default)]` keeps older
    /// recordings (which lack this field) deserializable.
    #[serde(default)]
    pub source: SessionSource,
    /// Optional random unique nickname assigned to an AgentControl-spawned sub-agent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub agent_nickname: Option,
    /// Optional role (agent_role) assigned to an AgentControl-spawned sub-agent.
    #[serde(default, alias = "agent_type", skip_serializing_if = "Option::is_none")]
    pub agent_role: Option,
    /// Model provider in use, when recorded.
    pub model_provider: Option,
    /// base_instructions for the session. This *should* always be present when creating a new session,
    /// but may be missing for older sessions. If not present, fall back to rendering the base_instructions
    /// from ModelsManager.
    pub base_instructions: Option,
    /// Dynamic tool specs registered for the session, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dynamic_tools: Option>,
    /// Memory subsystem mode, if configured.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_mode: Option,
}
impl Default for SessionMeta {
    /// Empty placeholder metadata: every field takes its own default value.
    fn default() -> Self {
        Self {
            id: ThreadId::default(),
            forked_from_id: None,
            timestamp: String::default(),
            cwd: PathBuf::default(),
            originator: String::default(),
            cli_version: String::default(),
            source: SessionSource::default(),
            agent_nickname: None,
            agent_role: None,
            model_provider: None,
            base_instructions: None,
            dynamic_tools: None,
            memory_mode: None,
        }
    }
}
/// A `SessionMeta` record as written to the rollout file, optionally
/// annotated with git repository information.
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
pub struct SessionMetaLine {
    // Flattened so the meta fields serialize at the top level of the line.
    #[serde(flatten)]
    pub meta: SessionMeta,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub git: Option,
}
/// One record in a session rollout file, tagged by `type` with the payload
/// under `payload`.
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(tag = "type", content = "payload", rename_all = "snake_case")]
pub enum RolloutItem {
    SessionMeta(SessionMetaLine),
    ResponseItem(ResponseItem),
    Compacted(CompactedItem),
    TurnContext(TurnContextItem),
    EventMsg(EventMsg),
}
/// Result of compacting history: a summary message and, optionally, the
/// replacement history that supersedes the compacted items.
#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema, TS)]
pub struct CompactedItem {
    /// Summary text produced by compaction.
    pub message: String,
    // NOTE(review): element type stripped — likely `Option<Vec<ResponseItem>>`; restore from VCS.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub replacement_history: Option>,
}
impl From for ResponseItem {
fn from(value: CompactedItem) -> Self {
ResponseItem::Message {
id: None,
role: "assistant".to_string(),
content: vec![ContentItem::OutputText {
text: value.message,
}],
end_turn: None,
phase: None,
}
}
}
/// Network policy snapshot recorded with a turn context.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema, TS)]
pub struct TurnContextNetworkItem {
    // NOTE(review): element types stripped — likely `Vec<String>`; restore from VCS.
    pub allowed_domains: Vec,
    pub denied_domains: Vec,
}
/// Persist once per real user turn after computing that turn's model-visible
/// context updates, and again after mid-turn compaction when replacement
/// history re-establishes full context, so resume/fork replay can recover the
/// latest durable baseline.
#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema, TS)]
pub struct TurnContextItem {
    // NOTE(review): several `Option` fields below lost their type parameters — restore from VCS.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub turn_id: Option,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub trace_id: Option,
    /// Working directory in effect for the turn.
    pub cwd: PathBuf,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub current_date: Option,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub timezone: Option,
    /// Approval policy in effect for the turn.
    pub approval_policy: AskForApproval,
    /// Sandbox policy in effect for the turn.
    pub sandbox_policy: SandboxPolicy,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub network: Option,
    /// Model queried during the turn.
    pub model: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub personality: Option,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub collaboration_mode: Option,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub realtime_active: Option,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub effort: Option,
    /// Reasoning summary configuration in effect for the turn.
    pub summary: ReasoningSummaryConfig,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user_instructions: Option,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub developer_instructions: Option,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub final_output_json_schema: Option,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation_policy: Option,
}
/// How tool output is truncated before being shown to the model, tagged by
/// `mode` with the size under `limit`.
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
#[serde(tag = "mode", content = "limit", rename_all = "snake_case")]
pub enum TruncationPolicy {
    /// Limit measured in bytes.
    Bytes(usize),
    /// Limit measured in tokens.
    Tokens(usize),
}
/// One timestamped line of a rollout file; the item fields are flattened
/// alongside the timestamp.
#[derive(Serialize, Deserialize, Clone, JsonSchema)]
pub struct RolloutLine {
    /// Timestamp stored as a string; format not evident from this file.
    pub timestamp: String,
    #[serde(flatten)]
    pub item: RolloutItem,
}
/// Snapshot of the git repository state captured alongside session metadata.
#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema, TS)]
pub struct GitInfo {
    // NOTE(review): `Option` fields lost their type parameters (presumably
    // `Option<String>`) — restore from version control.
    /// Current commit hash (SHA)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub commit_hash: Option,
    /// Current branch name
    #[serde(skip_serializing_if = "Option::is_none")]
    pub branch: Option,
    /// Repository URL (if available from remote)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub repository_url: Option,
}
/// How review results are delivered back to the requester.
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub enum ReviewDelivery {
    /// Delivered within the requesting session.
    Inline,
    /// Delivered separately from the requesting session.
    Detached,
}
/// What a review session should examine, tagged by `type`.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
pub enum ReviewTarget {
    /// Review the working tree: staged, unstaged, and untracked files.
    UncommittedChanges,
    /// Review changes between the current branch and the given base branch.
    #[serde(rename_all = "camelCase")]
    #[ts(rename_all = "camelCase")]
    BaseBranch { branch: String },
    /// Review the changes introduced by a specific commit.
    #[serde(rename_all = "camelCase")]
    #[ts(rename_all = "camelCase")]
    Commit {
        sha: String,
        // NOTE(review): type parameter stripped — likely `Option<String>`; restore from VCS.
        /// Optional human-readable label (e.g., commit subject) for UIs.
        title: Option,
    },
    /// Arbitrary instructions provided by the user.
    #[serde(rename_all = "camelCase")]
    #[ts(rename_all = "camelCase")]
    Custom { instructions: String },
}
/// Review request sent to the review session.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct ReviewRequest {
    /// What to review.
    pub target: ReviewTarget,
    /// Optional hint shown to the user describing the review.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub user_facing_hint: Option,
}
/// Structured review result produced by a child review session.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct ReviewOutputEvent {
    /// Individual findings reported by the reviewer.
    pub findings: Vec,
    /// Reviewer's overall verdict on correctness, as free text.
    pub overall_correctness: String,
    /// Reviewer's explanation of the overall verdict.
    pub overall_explanation: String,
    /// Reviewer's confidence in the overall verdict.
    pub overall_confidence_score: f32,
}
impl Default for ReviewOutputEvent {
fn default() -> Self {
Self {
findings: Vec::new(),
overall_correctness: String::default(),
overall_explanation: String::default(),
overall_confidence_score: 0.0,
}
}
}
/// A single review finding describing an observed issue or recommendation.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct ReviewFinding {
    /// Short headline for the finding.
    pub title: String,
    /// Full description of the finding.
    pub body: String,
    /// Reviewer's confidence in this finding.
    pub confidence_score: f32,
    /// Relative priority; semantics (ordering/scale) not evident from this file.
    pub priority: i32,
    /// Where in the code the finding applies.
    pub code_location: ReviewCodeLocation,
}
/// Location of the code related to a review finding.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct ReviewCodeLocation {
    pub absolute_file_path: PathBuf,
    pub line_range: ReviewLineRange,
}
/// Inclusive line range in a file associated with the finding.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct ReviewLineRange {
    pub start: u32,
    pub end: u32,
}
/// Who or what initiated an executed command.
#[derive(
    Debug, Clone, Copy, Display, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS, Default,
)]
#[serde(rename_all = "snake_case")]
pub enum ExecCommandSource {
    /// Issued by the agent (the default for older recordings).
    #[default]
    Agent,
    /// Typed by the user into their shell.
    UserShell,
    /// Startup command of a unified exec session.
    UnifiedExecStartup,
    /// Interaction with an already-running unified exec session.
    UnifiedExecInteraction,
}
/// Terminal state of a command execution.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub enum ExecCommandStatus {
    Completed,
    Failed,
    /// The user declined to approve the command.
    Declined,
}
/// Notification that a command execution has started.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct ExecCommandBeginEvent {
    /// Identifier so this can be paired with the ExecCommandEnd event.
    pub call_id: String,
    // NOTE(review): `Option` type parameters stripped throughout this struct — restore from VCS.
    /// Identifier for the underlying PTY process (when available).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub process_id: Option,
    /// Turn ID that this command belongs to.
    pub turn_id: String,
    /// The command to be executed.
    pub command: Vec,
    /// The command's working directory if not the default cwd for the agent.
    pub cwd: PathBuf,
    /// Structured parse of the command for display purposes.
    pub parsed_cmd: Vec,
    /// Where the command originated. Defaults to Agent for backward compatibility.
    #[serde(default)]
    pub source: ExecCommandSource,
    /// Raw input sent to a unified exec session (if this is an interaction event).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub interaction_input: Option,
}
/// Notification that a command execution has finished, with its output.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct ExecCommandEndEvent {
    /// Identifier for the ExecCommandBegin that finished.
    pub call_id: String,
    /// Identifier for the underlying PTY process (when available).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub process_id: Option,
    /// Turn ID that this command belongs to.
    pub turn_id: String,
    /// The command that was executed.
    pub command: Vec,
    /// The command's working directory if not the default cwd for the agent.
    pub cwd: PathBuf,
    pub parsed_cmd: Vec,
    /// Where the command originated. Defaults to Agent for backward compatibility.
    #[serde(default)]
    pub source: ExecCommandSource,
    /// Raw input sent to a unified exec session (if this is an interaction event).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub interaction_input: Option,
    /// Captured stdout
    pub stdout: String,
    /// Captured stderr
    pub stderr: String,
    /// Captured aggregated output
    #[serde(default)]
    pub aggregated_output: String,
    /// The command's exit code.
    pub exit_code: i32,
    /// The duration of the command execution.
    #[ts(type = "string")]
    pub duration: Duration,
    /// Formatted output from the command, as seen by the model.
    pub formatted_output: String,
    /// Completion status for this command execution.
    pub status: ExecCommandStatus,
}
/// Notification that the view-image tool was invoked.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct ViewImageToolCallEvent {
    /// Identifier for the originating tool call.
    pub call_id: String,
    /// Local filesystem path provided to the tool.
    pub path: PathBuf,
}
/// Which output stream a chunk of command output came from.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub enum ExecOutputStream {
    Stdout,
    Stderr,
}
/// Incremental chunk of output from a running command.
#[serde_as]
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct ExecCommandOutputDeltaEvent {
    /// Identifier for the ExecCommandBegin that produced this chunk.
    pub call_id: String,
    /// Which stream produced this chunk.
    pub stream: ExecOutputStream,
    // NOTE(review): element type stripped — likely `Vec<u8>` given the
    // base64 (de)serialization below; restore from version control.
    /// Raw bytes from the stream (may not be valid UTF-8).
    #[serde_as(as = "serde_with::base64::Base64")]
    #[schemars(with = "String")]
    #[ts(type = "string")]
    pub chunk: Vec,
}
/// Stdin forwarded to a running terminal session.
// NOTE(review): `#[serde_as]` appears unused here (no `#[serde_as(...)]`
// field attributes) — possibly left over; confirm before removing.
#[serde_as]
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct TerminalInteractionEvent {
    /// Identifier for the ExecCommandBegin that produced this chunk.
    pub call_id: String,
    /// Process id associated with the running command.
    pub process_id: String,
    /// Stdin sent to the running session.
    pub stdin: String,
}
/// Free-form informational message surfaced to the UI.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct BackgroundEventEvent {
    pub message: String,
}
/// Warns the client that a feature it used is deprecated.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct DeprecationNoticeEvent {
    /// Concise summary of what is deprecated.
    pub summary: String,
    // NOTE(review): `Option` type parameters stripped in this block — restore from VCS.
    /// Optional extra guidance, such as migration steps or rationale.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub details: Option,
}
/// Notification that an undo operation has started.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct UndoStartedEvent {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option,
}
/// Notification that an undo operation finished, successfully or not.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct UndoCompletedEvent {
    pub success: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option,
}
/// Notification that the thread was rolled back.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct ThreadRolledBackEvent {
    /// Number of user turns that were removed from context.
    pub num_turns: u32,
}
/// A recoverable error in the model response stream (retries may follow).
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct StreamErrorEvent {
    pub message: String,
    #[serde(default)]
    pub codex_error_info: Option,
    /// Optional details about the underlying stream failure (often the same
    /// human-readable message that is surfaced as the terminal error if retries
    /// are exhausted).
    #[serde(default)]
    pub additional_details: Option,
}
/// Informational (non-error) message about the response stream.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct StreamInfoEvent {
    pub message: String,
}
/// Notification that a patch application has started.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct PatchApplyBeginEvent {
    /// Identifier so this can be paired with the PatchApplyEnd event.
    pub call_id: String,
    /// Turn ID that this patch belongs to.
    /// Uses `#[serde(default)]` for backwards compatibility.
    #[serde(default)]
    pub turn_id: String,
    /// If true, there was no ApplyPatchApprovalRequest for this patch.
    pub auto_approved: bool,
    // NOTE(review): key/value types stripped — likely `HashMap<PathBuf, FileChange>`; restore from VCS.
    /// The changes to be applied.
    pub changes: HashMap,
}
/// Notification that a patch application has finished, with its outcome.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct PatchApplyEndEvent {
    /// Identifier for the PatchApplyBegin that finished.
    pub call_id: String,
    /// Turn ID that this patch belongs to.
    /// Uses `#[serde(default)]` for backwards compatibility.
    #[serde(default)]
    pub turn_id: String,
    /// Captured stdout (summary printed by apply_patch).
    pub stdout: String,
    /// Captured stderr (parser errors, IO failures, etc.).
    pub stderr: String,
    /// Whether the patch was applied successfully.
    pub success: bool,
    /// The changes that were applied (mirrors PatchApplyBeginEvent::changes).
    #[serde(default)]
    pub changes: HashMap,
    /// Completion status for this patch application.
    pub status: PatchApplyStatus,
}
/// Terminal state of a patch application.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub enum PatchApplyStatus {
    Completed,
    Failed,
    /// The user declined to approve the patch.
    Declined,
}
/// Unified diff of everything the turn changed so far.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct TurnDiffEvent {
    pub unified_diff: String,
}
/// Response payload for a single message-history lookup.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct GetHistoryEntryResponseEvent {
    /// Offset that was requested.
    pub offset: usize,
    /// Identifier of the history log the entry came from.
    pub log_id: u64,
    // NOTE(review): type parameter stripped — likely `Option<HistoryEntry>`; restore from VCS.
    /// The entry at the requested offset, if available and parseable.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub entry: Option,
}
/// Response payload listing everything known about configured MCP servers.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct McpListToolsResponseEvent {
    // NOTE(review): all `HashMap` key/value types in this struct were stripped — restore from VCS.
    /// Fully qualified tool name -> tool definition.
    pub tools: std::collections::HashMap,
    /// Known resources grouped by server name.
    pub resources: std::collections::HashMap>,
    /// Known resource templates grouped by server name.
    pub resource_templates: std::collections::HashMap>,
    /// Authentication status for each configured MCP server.
    pub auth_statuses: std::collections::HashMap,
}
/// Progress update while an MCP server is starting.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct McpStartupUpdateEvent {
    /// Server name being started.
    pub server: String,
    /// Current startup status.
    pub status: McpStartupStatus,
}
/// Startup state of a single MCP server, tagged by `state`.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
#[serde(rename_all = "snake_case", tag = "state")]
#[ts(rename_all = "snake_case", tag = "state")]
pub enum McpStartupStatus {
    Starting,
    Ready,
    /// Startup failed with the given error text.
    Failed { error: String },
    Cancelled,
}
/// Summary emitted once all MCP servers have finished starting (or not).
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS, Default)]
pub struct McpStartupCompleteEvent {
    // NOTE(review): element types stripped — likely `Vec<String>` /
    // `Vec<McpStartupFailure>`; restore from version control.
    pub ready: Vec,
    pub failed: Vec,
    pub cancelled: Vec,
}
/// A server that failed to start, with its error text.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct McpStartupFailure {
    pub server: String,
    pub error: String,
}
/// Authentication state of an MCP server connection.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(rename_all = "snake_case")]
pub enum McpAuthStatus {
    /// The server does not support authentication.
    Unsupported,
    /// Authentication is supported but no credentials are configured.
    NotLoggedIn,
    /// Authenticated with a static bearer token.
    BearerToken,
    /// Authenticated via an OAuth flow.
    OAuth,
}
impl fmt::Display for McpAuthStatus {
    /// Human-readable label for UI display.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            McpAuthStatus::Unsupported => "Unsupported",
            McpAuthStatus::NotLoggedIn => "Not logged in",
            McpAuthStatus::BearerToken => "Bearer token",
            McpAuthStatus::OAuth => "OAuth",
        })
    }
}
/// Response payload for `Op::ListCustomPrompts`.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct ListCustomPromptsResponseEvent {
    // NOTE(review): element type stripped — likely `Vec<CustomPrompt>`; restore from VCS.
    pub custom_prompts: Vec,
}
/// Response payload for `Op::ListSkills`.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct ListSkillsResponseEvent {
    // NOTE(review): element type stripped — likely `Vec<SkillsListEntry>`; restore from VCS.
    pub skills: Vec,
}
/// Known products used for product-restriction checks.
/// Uppercase serde aliases keep legacy payloads deserializable.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema, TS)]
#[serde(rename_all = "lowercase")]
#[ts(rename_all = "lowercase")]
pub enum Product {
    #[serde(alias = "CHATGPT")]
    Chatgpt,
    #[serde(alias = "CODEX")]
    Codex,
    #[serde(alias = "ATLAS")]
    Atlas,
}
impl Product {
pub fn from_session_source_name(value: &str) -> Option {
let normalized = value.trim().to_ascii_lowercase();
match normalized.as_str() {
"chatgpt" => Some(Self::Chatgpt),
"codex" => Some(Self::Codex),
"atlas" => Some(Self::Atlas),
_ => None,
}
}
pub fn matches_product_restriction(&self, products: &[Product]) -> bool {
products.is_empty() || products.contains(self)
}
}
/// Where a skill definition was discovered.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(rename_all = "snake_case")]
pub enum SkillScope {
    /// Installed for the current user.
    User,
    /// Defined inside the repository being worked on.
    Repo,
    /// Shipped with the system.
    System,
    /// Provisioned by an administrator.
    Admin,
}
/// Metadata describing a single discovered skill.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct SkillMetadata {
    pub name: String,
    pub description: String,
    // NOTE(review): `Option` type parameters stripped throughout this struct — restore from VCS.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    /// Legacy short_description from SKILL.md. Prefer SKILL.json interface.short_description.
    pub short_description: Option,
    /// UI-facing presentation details, when provided.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub interface: Option,
    /// Tools the skill depends on, when declared.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub dependencies: Option,
    /// Filesystem location the skill was loaded from.
    pub path: PathBuf,
    /// Where the skill definition was discovered.
    pub scope: SkillScope,
    /// Whether the skill is currently enabled.
    pub enabled: bool,
}
/// UI presentation details for a skill (from SKILL.json `interface`).
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS, PartialEq, Eq)]
pub struct SkillInterface {
    #[ts(optional)]
    pub display_name: Option,
    #[ts(optional)]
    pub short_description: Option,
    #[ts(optional)]
    pub icon_small: Option,
    #[ts(optional)]
    pub icon_large: Option,
    #[ts(optional)]
    pub brand_color: Option,
    #[ts(optional)]
    pub default_prompt: Option,
}
/// Tool dependencies declared by a skill.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS, PartialEq, Eq)]
pub struct SkillDependencies {
    // NOTE(review): element type stripped — likely `Vec<SkillToolDependency>`; restore from VCS.
    pub tools: Vec,
}
/// One tool a skill depends on; `type` discriminates the dependency kind.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS, PartialEq, Eq)]
pub struct SkillToolDependency {
    // `type` is a Rust keyword, hence the raw identifier plus renames.
    #[serde(rename = "type")]
    #[ts(rename = "type")]
    pub r#type: String,
    pub value: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub description: Option,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub transport: Option,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub command: Option,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub url: Option,
}
/// A skill that failed to load, with the reason.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct SkillErrorInfo {
    pub path: PathBuf,
    pub message: String,
}
/// Skills (and load errors) discovered for one working directory.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct SkillsListEntry {
    pub cwd: PathBuf,
    // NOTE(review): element types stripped — likely `Vec<SkillMetadata>` /
    // `Vec<SkillErrorInfo>`; restore from version control.
    pub skills: Vec,
    pub errors: Vec,
}
/// Bind addresses of the managed network proxy started for a session.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS, PartialEq, Eq)]
pub struct SessionNetworkProxyRuntime {
    pub http_addr: String,
    pub socks_addr: String,
}
/// First event sent for a session: reports the effective configuration the
/// agent is running with.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct SessionConfiguredEvent {
    /// Unique identifier of the configured session's thread.
    pub session_id: ThreadId,
    // NOTE(review): `Option` type parameters stripped throughout this struct — restore from VCS.
    /// Thread this session was forked from, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub forked_from_id: Option,
    /// Optional user-facing thread name (may be unset).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub thread_name: Option,
    /// Tell the client what model is being queried.
    pub model: String,
    pub model_provider_id: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub service_tier: Option,
    /// When to escalate for approval for execution
    pub approval_policy: AskForApproval,
    /// Configures who approval requests are routed to for review once they have
    /// been escalated. This does not disable separate safety checks such as
    /// ARC.
    #[serde(default)]
    pub approvals_reviewer: ApprovalsReviewer,
    /// How to sandbox commands executed in the system
    pub sandbox_policy: SandboxPolicy,
    /// Working directory that should be treated as the *root* of the
    /// session.
    pub cwd: PathBuf,
    /// The effort the model is putting into reasoning about the user's request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning_effort: Option,
    /// Identifier of the history log file (inode on Unix, 0 otherwise).
    pub history_log_id: u64,
    /// Current number of entries in the history log.
    pub history_entry_count: usize,
    /// Optional initial messages (as events) for resumed sessions.
    /// When present, UIs can use these to seed the history.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub initial_messages: Option>,
    /// Runtime proxy bind addresses, when the managed proxy was started for this session.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub network_proxy: Option,
    /// Path in which the rollout is stored. Can be `None` for ephemeral threads
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rollout_path: Option,
}
/// Notification that a thread's user-facing name changed.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct ThreadNameUpdatedEvent {
    pub thread_id: ThreadId,
    /// New name; `None` clears the name.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub thread_name: Option,
}
/// User's decision in response to an ExecApprovalRequest.
#[derive(Debug, Default, Clone, Deserialize, Serialize, PartialEq, Eq, Display, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub enum ReviewDecision {
    /// User has approved this command and the agent should execute it.
    Approved,
    /// User has approved this command and wants to apply the proposed execpolicy
    /// amendment so future matching commands are permitted.
    ApprovedExecpolicyAmendment {
        proposed_execpolicy_amendment: ExecPolicyAmendment,
    },
    /// User has approved this request and wants future prompts in the same
    /// session-scoped approval cache to be automatically approved for the
    /// remainder of the session.
    ApprovedForSession,
    /// User chose to persist a network policy rule (allow/deny) for future
    /// requests to the same host.
    NetworkPolicyAmendment {
        network_policy_amendment: NetworkPolicyAmendment,
    },
    /// User has denied this command and the agent should not execute it, but
    /// it should continue the session and try something else.
    #[default]
    Denied,
    /// User has denied this command and the agent should not do anything until
    /// the user's next command.
    Abort,
}
impl ReviewDecision {
    /// Returns an opaque version of the decision without PII. We can't use an ignored flag
    /// on `serde` because the serialization is required by some surfaces.
    pub fn to_opaque_string(&self) -> &'static str {
        match self {
            Self::Approved => "approved",
            Self::ApprovedExecpolicyAmendment { .. } => "approved_with_amendment",
            Self::ApprovedForSession => "approved_for_session",
            // Network-policy amendments split by whether the rule allows or denies.
            Self::NetworkPolicyAmendment {
                network_policy_amendment,
            } => match network_policy_amendment.action {
                NetworkPolicyRuleAction::Allow => "approved_with_network_policy_allow",
                NetworkPolicyRuleAction::Deny => "denied_with_network_policy_deny",
            },
            Self::Denied => "denied",
            Self::Abort => "abort",
        }
    }
}
/// One file-level change in a patch, tagged by `type`.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "snake_case")]
#[ts(tag = "type")]
pub enum FileChange {
    /// Create a new file with the given content.
    Add {
        content: String,
    },
    /// Delete a file; `content` carries what was removed.
    Delete {
        content: String,
    },
    /// Modify (and optionally move/rename) an existing file.
    Update {
        unified_diff: String,
        // NOTE(review): type parameter stripped — likely `Option<PathBuf>`; restore from VCS.
        move_path: Option,
    },
}
/// A contiguous hunk of line edits within a file.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct Chunk {
    /// 1-based line index of the first line in the original file
    pub orig_index: u32,
    // NOTE(review): element types stripped — likely `Vec<String>`; restore from VCS.
    pub deleted_lines: Vec,
    pub inserted_lines: Vec,
}
/// Notification that a turn was aborted before completion.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct TurnAbortedEvent {
    // NOTE(review): type parameter stripped — likely `Option<String>`; restore from VCS.
    pub turn_id: Option,
    pub reason: TurnAbortReason,
}
/// Why a turn was aborted.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub enum TurnAbortReason {
    /// The user interrupted the turn.
    Interrupted,
    /// A newer submission replaced the turn.
    Replaced,
    /// The enclosing review session ended.
    ReviewEnded,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct CollabAgentSpawnBeginEvent {
/// Identifier for the collab tool call.
pub call_id: String,
/// Thread ID of the sender.
pub sender_thread_id: ThreadId,
/// Initial prompt sent to the agent. Can be empty to prevent CoT leaking at the
/// beginning.
pub prompt: String,
pub model: String,
pub reasoning_effort: ReasoningEffortConfig,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
pub struct CollabAgentRef {
/// Thread ID of the receiver/new agent.
pub thread_id: ThreadId,
/// Optional nickname assigned to an AgentControl-spawned sub-agent.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub agent_nickname: Option,
/// Optional role (agent_role) assigned to an AgentControl-spawned sub-agent.
#[serde(default, alias = "agent_type", skip_serializing_if = "Option::is_none")]
pub agent_role: Option,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
pub struct CollabAgentStatusEntry {
/// Thread ID of the receiver/new agent.
pub thread_id: ThreadId,
/// Optional nickname assigned to an AgentControl-spawned sub-agent.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub agent_nickname: Option,
/// Optional role (agent_role) assigned to an AgentControl-spawned sub-agent.
#[serde(default, alias = "agent_type", skip_serializing_if = "Option::is_none")]
pub agent_role: Option,
/// Last known status of the agent.
pub status: AgentStatus,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct CollabAgentSpawnEndEvent {
/// Identifier for the collab tool call.
pub call_id: String,
/// Thread ID of the sender.
pub sender_thread_id: ThreadId,
/// Thread ID of the newly spawned agent, if it was created.
pub new_thread_id: Option,
/// Optional nickname assigned to the new agent.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub new_agent_nickname: Option,
/// Optional role assigned to the new agent.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub new_agent_role: Option,
/// Initial prompt sent to the agent. Can be empty to prevent CoT leaking at the
/// beginning.
pub prompt: String,
/// Effective model used by the spawned agent after inheritance and role overrides.
pub model: String,
/// Effective reasoning effort used by the spawned agent after inheritance and role overrides.
pub reasoning_effort: ReasoningEffortConfig,
/// Last known status of the new agent reported to the sender agent.
pub status: AgentStatus,
}
/// Emitted when one collab agent starts sending a prompt to another.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct CollabAgentInteractionBeginEvent {
    /// Identifier for the collab tool call.
    pub call_id: String,
    /// Thread ID of the sender.
    pub sender_thread_id: ThreadId,
    /// Thread ID of the receiver.
    pub receiver_thread_id: ThreadId,
    /// Prompt sent from the sender to the receiver. Can be empty to prevent CoT
    /// leaking at the beginning.
    pub prompt: String,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct CollabAgentInteractionEndEvent {
/// Identifier for the collab tool call.
pub call_id: String,
/// Thread ID of the sender.
pub sender_thread_id: ThreadId,
/// Thread ID of the receiver.
pub receiver_thread_id: ThreadId,
/// Optional nickname assigned to the receiver agent.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub receiver_agent_nickname: Option,
/// Optional role assigned to the receiver agent.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub receiver_agent_role: Option,
/// Prompt sent from the sender to the receiver. Can be empty to prevent CoT
/// leaking at the beginning.
pub prompt: String,
/// Last known status of the receiver agent reported to the sender agent.
pub status: AgentStatus,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct CollabWaitingBeginEvent {
/// Thread ID of the sender.
pub sender_thread_id: ThreadId,
/// Thread ID of the receivers.
pub receiver_thread_ids: Vec,
/// Optional nicknames/roles for receivers.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub receiver_agents: Vec,
/// ID of the waiting call.
pub call_id: String,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct CollabWaitingEndEvent {
/// Thread ID of the sender.
pub sender_thread_id: ThreadId,
/// ID of the waiting call.
pub call_id: String,
/// Optional receiver metadata paired with final statuses.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub agent_statuses: Vec,
/// Last known status of the receiver agents reported to the sender agent.
pub statuses: HashMap,
}
/// Emitted when a collab tool call starts closing another agent.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct CollabCloseBeginEvent {
    /// Identifier for the collab tool call.
    pub call_id: String,
    /// Thread ID of the sender.
    pub sender_thread_id: ThreadId,
    /// Thread ID of the receiver.
    pub receiver_thread_id: ThreadId,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct CollabCloseEndEvent {
/// Identifier for the collab tool call.
pub call_id: String,
/// Thread ID of the sender.
pub sender_thread_id: ThreadId,
/// Thread ID of the receiver.
pub receiver_thread_id: ThreadId,
/// Optional nickname assigned to the receiver agent.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub receiver_agent_nickname: Option,
/// Optional role assigned to the receiver agent.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub receiver_agent_role: Option,
/// Last known status of the receiver agent reported to the sender agent before
/// the close.
pub status: AgentStatus,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct CollabResumeBeginEvent {
/// Identifier for the collab tool call.
pub call_id: String,
/// Thread ID of the sender.
pub sender_thread_id: ThreadId,
/// Thread ID of the receiver.
pub receiver_thread_id: ThreadId,
/// Optional nickname assigned to the receiver agent.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub receiver_agent_nickname: Option,
/// Optional role assigned to the receiver agent.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub receiver_agent_role: Option,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)]
pub struct CollabResumeEndEvent {
/// Identifier for the collab tool call.
pub call_id: String,
/// Thread ID of the sender.
pub sender_thread_id: ThreadId,
/// Thread ID of the receiver.
pub receiver_thread_id: ThreadId,
/// Optional nickname assigned to the receiver agent.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub receiver_agent_nickname: Option,
/// Optional role assigned to the receiver agent.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub receiver_agent_role: Option,
/// Last known status of the receiver agent reported to the sender agent after
/// resume.
pub status: AgentStatus,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::items::ImageGenerationItem;
use crate::items::UserMessageItem;
use crate::items::WebSearchItem;
use crate::permissions::FileSystemAccessMode;
use crate::permissions::FileSystemPath;
use crate::permissions::FileSystemSandboxEntry;
use crate::permissions::FileSystemSandboxPolicy;
use crate::permissions::FileSystemSpecialPath;
use crate::permissions::NetworkSandboxPolicy;
use anyhow::Result;
use codex_utils_absolute_path::AbsolutePathBuf;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::path::PathBuf;
use tempfile::NamedTempFile;
use tempfile::TempDir;
fn sorted_writable_roots(roots: Vec) -> Vec<(PathBuf, Vec)> {
let mut sorted_roots: Vec<(PathBuf, Vec)> = roots
.into_iter()
.map(|root| {
let mut read_only_subpaths: Vec = root
.read_only_subpaths
.into_iter()
.map(|path| path.to_path_buf())
.collect();
read_only_subpaths.sort();
(root.root.to_path_buf(), read_only_subpaths)
})
.collect();
sorted_roots.sort_by(|left, right| left.0.cmp(&right.0));
sorted_roots
}
/// Test helper: whether `policy` permits reading `path`, resolved against `cwd`.
fn sandbox_policy_allows_read(policy: &SandboxPolicy, path: &Path, cwd: &Path) -> bool {
    // Full-disk read access short-circuits every per-root check.
    if policy.has_full_disk_read_access() {
        return true;
    }
    // Readable if the path falls under any explicitly readable root...
    if policy
        .get_readable_roots_with_cwd(cwd)
        .iter()
        .any(|readable| path.starts_with(readable.as_path()))
    {
        return true;
    }
    // ...or under a writable root (writable implies readable).
    policy
        .get_writable_roots_with_cwd(cwd)
        .iter()
        .any(|writable| path.starts_with(writable.root.as_path()))
}
/// Test helper: whether `policy` permits writing `path`, resolved against `cwd`.
fn sandbox_policy_allows_write(policy: &SandboxPolicy, path: &Path, cwd: &Path) -> bool {
    // Full-disk write access bypasses per-root checks; otherwise the path must
    // be writable under at least one writable root.
    policy.has_full_disk_write_access()
        || policy
            .get_writable_roots_with_cwd(cwd)
            .iter()
            .any(|writable| writable.is_path_writable(path))
}
// Known startup identifiers map onto their dedicated SessionSource variants.
#[test]
fn session_source_from_startup_arg_maps_known_values() {
    let cases = [
        ("vscode", SessionSource::VSCode),
        ("app-server", SessionSource::Mcp),
    ];
    for (arg, expected) in cases {
        assert_eq!(SessionSource::from_startup_arg(arg).unwrap(), expected);
    }
}
// Unrecognized startup values become Custom, normalized (trimmed/lowercased).
#[test]
fn session_source_from_startup_arg_normalizes_custom_values() {
    for arg in ["atlas", " Atlas "] {
        assert_eq!(
            SessionSource::from_startup_arg(arg).unwrap(),
            SessionSource::Custom("atlas".to_string())
        );
    }
}
// All first-party, non-subagent sources default to the Codex product.
#[test]
fn session_source_restriction_product_defaults_non_subagent_sources_to_codex() {
    let sources = [
        SessionSource::Cli,
        SessionSource::VSCode,
        SessionSource::Exec,
        SessionSource::Mcp,
        SessionSource::Unknown,
    ];
    for source in sources {
        assert_eq!(source.restriction_product(), Some(Product::Codex));
    }
}
// Sub-agent sources are never assigned a product implicitly.
#[test]
fn session_source_restriction_product_does_not_guess_subagent_products() {
    let source = SessionSource::SubAgent(SubAgentSource::Review);
    assert!(source.restriction_product().is_none());
}
// Known custom source names map to products case-insensitively; unknown
// names map to no product at all.
#[test]
fn session_source_restriction_product_maps_custom_sources_to_products() {
    let cases = [
        ("chatgpt", Some(Product::Chatgpt)),
        ("ATLAS", Some(Product::Atlas)),
        ("codex", Some(Product::Codex)),
        ("atlas-dev", None),
    ];
    for (name, expected) in cases {
        assert_eq!(
            SessionSource::Custom(name.to_string()).restriction_product(),
            expected
        );
    }
}
// Product restrictions match by mapped product; an empty restriction list
// matches every source, including unmapped custom ones.
#[test]
fn session_source_matches_product_restriction() {
    assert!(
        SessionSource::Custom("chatgpt".to_string())
            .matches_product_restriction(&[Product::Chatgpt])
    );
    assert!(
        !SessionSource::Custom("chatgpt".to_string())
            .matches_product_restriction(&[Product::Codex])
    );
    assert!(SessionSource::VSCode.matches_product_restriction(&[Product::Codex]));
    assert!(
        !SessionSource::Custom("atlas-dev".to_string())
            .matches_product_restriction(&[Product::Atlas])
    );
    assert!(SessionSource::Custom("atlas-dev".to_string()).matches_product_restriction(&[]));
}
fn sandbox_policy_probe_paths(policy: &SandboxPolicy, cwd: &Path) -> Vec {
let mut paths = vec![cwd.to_path_buf()];
paths.extend(
policy
.get_readable_roots_with_cwd(cwd)
.into_iter()
.map(|path| path.to_path_buf()),
);
for root in policy.get_writable_roots_with_cwd(cwd) {
paths.push(root.root.to_path_buf());
paths.extend(
root.read_only_subpaths
.into_iter()
.map(|path| path.to_path_buf()),
);
}
paths.sort();
paths.dedup();
paths
}
// Asserts two policies are semantically equivalent: same global access flags,
// and identical read/write decisions for every probe path drawn from both.
fn assert_same_sandbox_policy_semantics(
    expected: &SandboxPolicy,
    actual: &SandboxPolicy,
    cwd: &Path,
) {
    assert_eq!(
        actual.has_full_disk_read_access(),
        expected.has_full_disk_read_access()
    );
    assert_eq!(
        actual.has_full_disk_write_access(),
        expected.has_full_disk_write_access()
    );
    assert_eq!(
        actual.has_full_network_access(),
        expected.has_full_network_access()
    );
    assert_eq!(
        actual.include_platform_defaults(),
        expected.include_platform_defaults()
    );
    // Probe the union of both policies' interesting paths so a root present in
    // only one side still gets compared.
    let mut probe_paths = sandbox_policy_probe_paths(expected, cwd);
    probe_paths.extend(sandbox_policy_probe_paths(actual, cwd));
    probe_paths.sort();
    probe_paths.dedup();
    for path in probe_paths {
        assert_eq!(
            sandbox_policy_allows_read(actual, &path, cwd),
            sandbox_policy_allows_read(expected, &path, cwd),
            "read access mismatch for {}",
            path.display()
        );
        assert_eq!(
            sandbox_policy_allows_write(actual, &path, cwd),
            sandbox_policy_allows_write(expected, &path, cwd),
            "write access mismatch for {}",
            path.display()
        );
    }
}
// An external sandbox always reports full disk write access; the network
// flag tracks the configured NetworkAccess value.
#[test]
fn external_sandbox_reports_full_access_flags() {
    for (network_access, expect_network) in [
        (NetworkAccess::Restricted, false),
        (NetworkAccess::Enabled, true),
    ] {
        let policy = SandboxPolicy::ExternalSandbox { network_access };
        assert!(policy.has_full_disk_write_access());
        assert_eq!(policy.has_full_network_access(), expect_network);
    }
}
// The default read-only policy denies network; an explicit opt-in grants it.
#[test]
fn read_only_reports_network_access_flags() {
    assert!(!SandboxPolicy::new_read_only_policy().has_full_network_access());
    let with_network = SandboxPolicy::ReadOnly {
        access: ReadOnlyAccess::FullAccess,
        network_access: true,
    };
    assert!(with_network.has_full_network_access());
}
// allows_mcp_elicitations() must reflect only the `mcp_elicitations` field.
#[test]
fn granular_approval_config_mcp_elicitation_flag_is_field_driven() {
    assert!(
        GranularApprovalConfig {
            sandbox_approval: false,
            rules: false,
            skill_approval: false,
            request_permissions: false,
            mcp_elicitations: true,
        }
        .allows_mcp_elicitations()
    );
    assert!(
        !GranularApprovalConfig {
            sandbox_approval: false,
            rules: false,
            skill_approval: false,
            request_permissions: false,
            mcp_elicitations: false,
        }
        .allows_mcp_elicitations()
    );
}
// allows_skill_approval() must reflect only the `skill_approval` field.
#[test]
fn granular_approval_config_skill_approval_flag_is_field_driven() {
    assert!(
        GranularApprovalConfig {
            sandbox_approval: false,
            rules: false,
            skill_approval: true,
            request_permissions: false,
            mcp_elicitations: false,
        }
        .allows_skill_approval()
    );
    assert!(
        !GranularApprovalConfig {
            sandbox_approval: false,
            rules: false,
            skill_approval: false,
            request_permissions: false,
            mcp_elicitations: false,
        }
        .allows_skill_approval()
    );
}
// allows_request_permissions() must reflect only the `request_permissions` field.
#[test]
fn granular_approval_config_request_permissions_flag_is_field_driven() {
    assert!(
        GranularApprovalConfig {
            sandbox_approval: false,
            rules: false,
            skill_approval: false,
            request_permissions: true,
            mcp_elicitations: false,
        }
        .allows_request_permissions()
    );
    assert!(
        !GranularApprovalConfig {
            sandbox_approval: false,
            rules: false,
            skill_approval: false,
            request_permissions: false,
            mcp_elicitations: false,
        }
        .allows_request_permissions()
    );
}
// Missing optional flags in a serialized config must default to false on
// deserialization (backward compatibility with older payloads).
#[test]
fn granular_approval_config_defaults_missing_optional_flags_to_false() {
    // The turbofish target type was lost in the original; restored here so the
    // untyped JSON value deserializes into GranularApprovalConfig.
    let decoded = serde_json::from_value::<GranularApprovalConfig>(serde_json::json!({
        "sandbox_approval": true,
        "rules": false,
        "mcp_elicitations": true,
    }))
    .expect("granular approval config should deserialize");
    assert_eq!(
        decoded,
        GranularApprovalConfig {
            sandbox_approval: true,
            rules: false,
            skill_approval: false,
            request_permissions: false,
            mcp_elicitations: true,
        }
    );
}
// Even with an empty restricted readable-roots list, every effective writable
// root (cwd, defaults) must also be reported as readable.
#[test]
fn workspace_write_restricted_read_access_includes_effective_writable_roots() {
    // Pick a platform-appropriate absolute cwd; nothing is touched on disk.
    let cwd = if cfg!(windows) {
        Path::new(r"C:\workspace")
    } else {
        Path::new("/tmp/workspace")
    };
    let policy = SandboxPolicy::WorkspaceWrite {
        writable_roots: vec![],
        read_only_access: ReadOnlyAccess::Restricted {
            include_platform_defaults: false,
            readable_roots: vec![],
        },
        network_access: false,
        exclude_tmpdir_env_var: true,
        exclude_slash_tmp: false,
    };
    let readable_roots = policy.get_readable_roots_with_cwd(cwd);
    let writable_roots = policy.get_writable_roots_with_cwd(cwd);
    for writable_root in writable_roots {
        assert!(
            readable_roots
                .iter()
                .any(|root| root.as_path() == writable_root.root.as_path()),
            "expected writable root {} to also be readable",
            writable_root.root.as_path().display()
        );
    }
}
// A single Root entry grants full-disk access: Read grants read-only,
// Write grants both read and write; platform defaults stay excluded.
#[test]
fn restricted_file_system_policy_reports_full_access_from_root_entries() {
    let read_only = FileSystemSandboxPolicy::restricted(vec![FileSystemSandboxEntry {
        path: FileSystemPath::Special {
            value: FileSystemSpecialPath::Root,
        },
        access: FileSystemAccessMode::Read,
    }]);
    assert!(read_only.has_full_disk_read_access());
    assert!(!read_only.has_full_disk_write_access());
    assert!(!read_only.include_platform_defaults());
    let writable = FileSystemSandboxPolicy::restricted(vec![FileSystemSandboxEntry {
        path: FileSystemPath::Special {
            value: FileSystemSpecialPath::Root,
        },
        access: FileSystemAccessMode::Write,
    }]);
    assert!(writable.has_full_disk_read_access());
    assert!(writable.has_full_disk_write_access());
}
// Root marked writable plus a `None`-access carveout must downgrade the policy
// from "full disk access" to scoped roots with read-only/unreadable subpaths.
#[test]
fn restricted_file_system_policy_treats_root_with_carveouts_as_scoped_access() {
    let cwd = TempDir::new().expect("tempdir");
    // Canonicalize so comparisons survive symlinked temp dirs (e.g. macOS /tmp).
    let canonical_cwd = cwd.path().canonicalize().expect("canonicalize cwd");
    // Walk ancestors to the filesystem root ("/" or the drive root).
    let root = AbsolutePathBuf::from_absolute_path(&canonical_cwd)
        .expect("absolute canonical tempdir")
        .as_path()
        .ancestors()
        .last()
        .and_then(|path| AbsolutePathBuf::from_absolute_path(path).ok())
        .expect("filesystem root");
    let blocked = AbsolutePathBuf::resolve_path_against_base("blocked", cwd.path())
        .expect("resolve blocked");
    let expected_blocked = AbsolutePathBuf::from_absolute_path(
        cwd.path()
            .canonicalize()
            .expect("canonicalize cwd")
            .join("blocked"),
    )
    .expect("canonical blocked");
    let policy = FileSystemSandboxPolicy::restricted(vec![
        FileSystemSandboxEntry {
            path: FileSystemPath::Special {
                value: FileSystemSpecialPath::Root,
            },
            access: FileSystemAccessMode::Write,
        },
        FileSystemSandboxEntry {
            path: FileSystemPath::Path { path: blocked },
            access: FileSystemAccessMode::None,
        },
    ]);
    assert!(!policy.has_full_disk_read_access());
    assert!(!policy.has_full_disk_write_access());
    assert_eq!(
        policy.get_readable_roots_with_cwd(cwd.path()),
        vec![root.clone()]
    );
    assert_eq!(
        policy.get_unreadable_roots_with_cwd(cwd.path()),
        vec![expected_blocked.clone()]
    );
    let writable_roots = policy.get_writable_roots_with_cwd(cwd.path());
    assert_eq!(writable_roots.len(), 1);
    assert_eq!(writable_roots[0].root, root);
    // The blocked carveout must surface as a read-only subpath of the root.
    assert!(
        writable_roots[0]
            .read_only_subpaths
            .iter()
            .any(|path| path.as_path() == expected_blocked.as_path())
    );
}
// Minimal read + writable cwd + a `None` carveout: the policy should derive
// scoped roots, and .agents/.codex inside cwd become read-only subpaths too.
#[test]
fn restricted_file_system_policy_derives_effective_paths() {
    let cwd = TempDir::new().expect("tempdir");
    std::fs::create_dir_all(cwd.path().join(".agents")).expect("create .agents");
    std::fs::create_dir_all(cwd.path().join(".codex")).expect("create .codex");
    // Canonicalize so comparisons survive symlinked temp dirs.
    let canonical_cwd = cwd.path().canonicalize().expect("canonicalize cwd");
    let cwd_absolute =
        AbsolutePathBuf::from_absolute_path(&canonical_cwd).expect("absolute tempdir");
    let secret = AbsolutePathBuf::resolve_path_against_base("secret", cwd.path())
        .expect("resolve unreadable path");
    let expected_secret = AbsolutePathBuf::from_absolute_path(canonical_cwd.join("secret"))
        .expect("canonical secret");
    let expected_agents = AbsolutePathBuf::from_absolute_path(canonical_cwd.join(".agents"))
        .expect("canonical .agents");
    let expected_codex = AbsolutePathBuf::from_absolute_path(canonical_cwd.join(".codex"))
        .expect("canonical .codex");
    let policy = FileSystemSandboxPolicy::restricted(vec![
        FileSystemSandboxEntry {
            path: FileSystemPath::Special {
                value: FileSystemSpecialPath::Minimal,
            },
            access: FileSystemAccessMode::Read,
        },
        FileSystemSandboxEntry {
            path: FileSystemPath::Special {
                value: FileSystemSpecialPath::CurrentWorkingDirectory,
            },
            access: FileSystemAccessMode::Write,
        },
        FileSystemSandboxEntry {
            path: FileSystemPath::Path { path: secret },
            access: FileSystemAccessMode::None,
        },
    ]);
    assert!(!policy.has_full_disk_read_access());
    assert!(!policy.has_full_disk_write_access());
    assert!(policy.include_platform_defaults());
    assert_eq!(
        policy.get_readable_roots_with_cwd(cwd.path()),
        vec![cwd_absolute.clone()]
    );
    assert_eq!(
        policy.get_unreadable_roots_with_cwd(cwd.path()),
        vec![expected_secret.clone()]
    );
    let writable_roots = policy.get_writable_roots_with_cwd(cwd.path());
    assert_eq!(writable_roots.len(), 1);
    assert_eq!(writable_roots[0].root, cwd_absolute);
    assert!(
        writable_roots[0]
            .read_only_subpaths
            .iter()
            .any(|path| path.as_path() == expected_secret.as_path())
    );
    assert!(
        writable_roots[0]
            .read_only_subpaths
            .iter()
            .any(|path| path.as_path() == expected_agents.as_path())
    );
    assert!(
        writable_roots[0]
            .read_only_subpaths
            .iter()
            .any(|path| path.as_path() == expected_codex.as_path())
    );
}
// A Read entry nested inside a writable root becomes a read-only subpath of
// that root, while a deeper Write entry re-opens its own writable root.
#[test]
fn restricted_file_system_policy_treats_read_entries_as_read_only_subpaths() {
    let cwd = TempDir::new().expect("tempdir");
    let canonical_cwd = cwd.path().canonicalize().expect("canonicalize cwd");
    let docs =
        AbsolutePathBuf::resolve_path_against_base("docs", cwd.path()).expect("resolve docs");
    let docs_public = AbsolutePathBuf::resolve_path_against_base("docs/public", cwd.path())
        .expect("resolve docs/public");
    let expected_docs = AbsolutePathBuf::from_absolute_path(canonical_cwd.join("docs"))
        .expect("canonical docs");
    let expected_docs_public =
        AbsolutePathBuf::from_absolute_path(canonical_cwd.join("docs/public"))
            .expect("canonical docs/public");
    let policy = FileSystemSandboxPolicy::restricted(vec![
        FileSystemSandboxEntry {
            path: FileSystemPath::Special {
                value: FileSystemSpecialPath::CurrentWorkingDirectory,
            },
            access: FileSystemAccessMode::Write,
        },
        FileSystemSandboxEntry {
            path: FileSystemPath::Path { path: docs },
            access: FileSystemAccessMode::Read,
        },
        FileSystemSandboxEntry {
            path: FileSystemPath::Path { path: docs_public },
            access: FileSystemAccessMode::Write,
        },
    ]);
    assert!(!policy.has_full_disk_write_access());
    assert_eq!(
        sorted_writable_roots(policy.get_writable_roots_with_cwd(cwd.path())),
        vec![
            (canonical_cwd, vec![expected_docs.to_path_buf()]),
            (expected_docs_public.to_path_buf(), Vec::new()),
        ]
    );
}
// Bridging a legacy WorkspaceWrite policy: a readable root nested inside the
// writable cwd must not demote the cwd to read-only.
#[test]
fn legacy_workspace_write_nested_readable_root_stays_writable() {
    let cwd = TempDir::new().expect("tempdir");
    let docs =
        AbsolutePathBuf::resolve_path_against_base("docs", cwd.path()).expect("resolve docs");
    let canonical_cwd = cwd.path().canonicalize().expect("canonicalize cwd");
    let policy = SandboxPolicy::WorkspaceWrite {
        writable_roots: vec![],
        read_only_access: ReadOnlyAccess::Restricted {
            include_platform_defaults: true,
            readable_roots: vec![docs],
        },
        network_access: false,
        exclude_tmpdir_env_var: true,
        exclude_slash_tmp: true,
    };
    assert_eq!(
        sorted_writable_roots(
            FileSystemSandboxPolicy::from_legacy_sandbox_policy(&policy, cwd.path())
                .get_writable_roots_with_cwd(cwd.path())
        ),
        vec![(canonical_cwd, Vec::new())]
    );
}
// Converting to the legacy policy shape must fail when a Write entry falls
// outside the workspace root, since legacy policies cannot express that.
#[test]
fn file_system_policy_rejects_legacy_bridge_for_non_workspace_writes() {
    // Absolute paths only; nothing is created on disk.
    let cwd = if cfg!(windows) {
        Path::new(r"C:\workspace")
    } else {
        Path::new("/tmp/workspace")
    };
    let external_write_path = if cfg!(windows) {
        AbsolutePathBuf::from_absolute_path(r"C:\temp").expect("absolute windows temp path")
    } else {
        AbsolutePathBuf::from_absolute_path("/tmp").expect("absolute tmp path")
    };
    let policy = FileSystemSandboxPolicy::restricted(vec![FileSystemSandboxEntry {
        path: FileSystemPath::Path {
            path: external_write_path,
        },
        access: FileSystemAccessMode::Write,
    }]);
    let err = policy
        .to_legacy_sandbox_policy(NetworkSandboxPolicy::Restricted, cwd)
        .expect_err("non-workspace writes should be rejected");
    assert!(
        err.to_string()
            .contains("filesystem writes outside the workspace root"),
        "{err}"
    );
}
// Round-trips a spread of legacy SandboxPolicy shapes through the split
// filesystem/network bridge and asserts semantic equivalence each time.
#[test]
fn legacy_sandbox_policy_semantics_survive_split_bridge() {
    let cwd = TempDir::new().expect("tempdir");
    let readable_root = AbsolutePathBuf::resolve_path_against_base("readable", cwd.path())
        .expect("resolve readable root");
    let writable_root = AbsolutePathBuf::resolve_path_against_base("writable", cwd.path())
        .expect("resolve writable root");
    let nested_readable_root = AbsolutePathBuf::resolve_path_against_base("docs", cwd.path())
        .expect("resolve nested readable root");
    let policies = [
        SandboxPolicy::DangerFullAccess,
        SandboxPolicy::ExternalSandbox {
            network_access: NetworkAccess::Restricted,
        },
        SandboxPolicy::ExternalSandbox {
            network_access: NetworkAccess::Enabled,
        },
        SandboxPolicy::ReadOnly {
            access: ReadOnlyAccess::FullAccess,
            network_access: false,
        },
        SandboxPolicy::ReadOnly {
            access: ReadOnlyAccess::Restricted {
                include_platform_defaults: true,
                readable_roots: vec![readable_root.clone()],
            },
            network_access: true,
        },
        SandboxPolicy::WorkspaceWrite {
            writable_roots: vec![],
            read_only_access: ReadOnlyAccess::FullAccess,
            network_access: false,
            exclude_tmpdir_env_var: true,
            exclude_slash_tmp: true,
        },
        SandboxPolicy::WorkspaceWrite {
            writable_roots: vec![writable_root],
            read_only_access: ReadOnlyAccess::Restricted {
                include_platform_defaults: true,
                readable_roots: vec![readable_root],
            },
            network_access: true,
            exclude_tmpdir_env_var: false,
            exclude_slash_tmp: true,
        },
        SandboxPolicy::WorkspaceWrite {
            writable_roots: vec![],
            read_only_access: ReadOnlyAccess::Restricted {
                include_platform_defaults: true,
                readable_roots: vec![nested_readable_root],
            },
            network_access: false,
            exclude_tmpdir_env_var: true,
            exclude_slash_tmp: true,
        },
    ];
    for expected in policies {
        // legacy -> split -> legacy must preserve observable semantics.
        let actual = FileSystemSandboxPolicy::from_legacy_sandbox_policy(&expected, cwd.path())
            .to_legacy_sandbox_policy(NetworkSandboxPolicy::from(&expected), cwd.path())
            .expect("legacy bridge should preserve legacy policy semantics");
        assert_same_sandbox_policy_semantics(&expected, &actual, cwd.path());
    }
}
// Starting a web-search item maps to exactly one legacy WebSearchBegin event
// carrying the item id as the call id.
#[test]
fn item_started_event_from_web_search_emits_begin_event() {
    let event = ItemStartedEvent {
        thread_id: ThreadId::new(),
        turn_id: "turn-1".into(),
        item: TurnItem::WebSearch(WebSearchItem {
            id: "search-1".into(),
            query: "find docs".into(),
            action: WebSearchAction::Search {
                query: Some("find docs".into()),
                queries: None,
            },
        }),
    };
    let legacy_events = event.as_legacy_events(false);
    assert_eq!(legacy_events.len(), 1);
    match &legacy_events[0] {
        EventMsg::WebSearchBegin(event) => assert_eq!(event.call_id, "search-1"),
        _ => panic!("expected WebSearchBegin event"),
    }
}
// A plain user-message item has no legacy begin-event counterpart.
#[test]
fn item_started_event_from_non_web_search_emits_no_legacy_events() {
    let started = ItemStartedEvent {
        thread_id: ThreadId::new(),
        turn_id: "turn-1".into(),
        item: TurnItem::UserMessage(UserMessageItem::new(&[])),
    };
    let legacy = started.as_legacy_events(false);
    assert!(legacy.is_empty());
}
// Starting an image-generation item maps to exactly one legacy
// ImageGenerationBegin event carrying the item id as the call id.
#[test]
fn item_started_event_from_image_generation_emits_begin_event() {
    let event = ItemStartedEvent {
        thread_id: ThreadId::new(),
        turn_id: "turn-1".into(),
        item: TurnItem::ImageGeneration(ImageGenerationItem {
            id: "ig-1".into(),
            status: "in_progress".into(),
            revised_prompt: None,
            result: String::new(),
            saved_path: None,
        }),
    };
    let legacy_events = event.as_legacy_events(false);
    assert_eq!(legacy_events.len(), 1);
    match &legacy_events[0] {
        EventMsg::ImageGenerationBegin(event) => assert_eq!(event.call_id, "ig-1"),
        _ => panic!("expected ImageGenerationBegin event"),
    }
}
// Completing an image-generation item maps to a legacy ImageGenerationEnd
// event that carries all of the item's fields through unchanged.
#[test]
fn item_completed_event_from_image_generation_emits_end_event() {
    let event = ItemCompletedEvent {
        thread_id: ThreadId::new(),
        turn_id: "turn-1".into(),
        item: TurnItem::ImageGeneration(ImageGenerationItem {
            id: "ig-1".into(),
            status: "completed".into(),
            revised_prompt: Some("A tiny blue square".into()),
            result: "Zm9v".into(),
            saved_path: Some("/tmp/ig-1.png".into()),
        }),
    };
    let legacy_events = event.as_legacy_events(false);
    assert_eq!(legacy_events.len(), 1);
    match &legacy_events[0] {
        EventMsg::ImageGenerationEnd(event) => {
            assert_eq!(event.call_id, "ig-1");
            assert_eq!(event.status, "completed");
            assert_eq!(event.revised_prompt.as_deref(), Some("A tiny blue square"));
            assert_eq!(event.result, "Zm9v");
            assert_eq!(event.saved_path.as_deref(), Some("/tmp/ig-1.png"));
        }
        _ => panic!("expected ImageGenerationEnd event"),
    }
}
// A thread-rollback failure is informational and must not flip the turn status.
#[test]
fn rollback_failed_error_does_not_affect_turn_status() {
    let error = ErrorEvent {
        codex_error_info: Some(CodexErrorInfo::ThreadRollbackFailed),
        message: "rollback failed".into(),
    };
    assert!(!error.affects_turn_status());
}
// A generic error does affect the turn status.
#[test]
fn generic_error_affects_turn_status() {
    let error = ErrorEvent {
        codex_error_info: Some(CodexErrorInfo::Other),
        message: "generic".into(),
    };
    assert!(error.affects_turn_status());
}
// Realtime-conversation Op variants serialize as flat internally tagged JSON
// (params inlined next to "type"), and round-trip through deserialization.
#[test]
fn conversation_op_serializes_as_unnested_variants() {
    let audio = Op::RealtimeConversationAudio(ConversationAudioParams {
        frame: RealtimeAudioFrame {
            data: "AQID".to_string(),
            sample_rate: 24_000,
            num_channels: 1,
            samples_per_channel: Some(480),
            item_id: None,
        },
    });
    let start = Op::RealtimeConversationStart(ConversationStartParams {
        prompt: "be helpful".to_string(),
        session_id: Some("conv_1".to_string()),
    });
    let text = Op::RealtimeConversationText(ConversationTextParams {
        text: "hello".to_string(),
    });
    let close = Op::RealtimeConversationClose;
    assert_eq!(
        serde_json::to_value(&start).unwrap(),
        json!({
            "type": "realtime_conversation_start",
            "prompt": "be helpful",
            "session_id": "conv_1"
        })
    );
    assert_eq!(
        serde_json::to_value(&audio).unwrap(),
        json!({
            "type": "realtime_conversation_audio",
            "frame": {
                "data": "AQID",
                "sample_rate": 24000,
                "num_channels": 1,
                "samples_per_channel": 480
            }
        })
    );
    // Round-trip checks: the turbofish target (`Op`) was lost in the original
    // text and is restored here.
    assert_eq!(
        serde_json::from_value::<Op>(serde_json::to_value(&text).unwrap()).unwrap(),
        text
    );
    assert_eq!(
        serde_json::to_value(&close).unwrap(),
        json!({
            "type": "realtime_conversation_close"
        })
    );
    assert_eq!(
        serde_json::from_value::<Op>(serde_json::to_value(&close).unwrap()).unwrap(),
        close
    );
}
// `final_output_json_schema: None` must be omitted from the wire format.
#[test]
fn user_input_serialization_omits_final_output_json_schema_when_none() -> Result<()> {
    let serialized = serde_json::to_value(Op::UserInput {
        items: Vec::new(),
        final_output_json_schema: None,
    })?;
    assert_eq!(serialized, json!({ "type": "user_input", "items": [] }));
    Ok(())
}
// Older payloads without the schema field still decode, defaulting to None.
#[test]
fn user_input_deserializes_without_final_output_json_schema_field() -> Result<()> {
    let payload = json!({ "type": "user_input", "items": [] });
    let decoded: Op = serde_json::from_value(payload)?;
    let expected = Op::UserInput {
        items: Vec::new(),
        final_output_json_schema: None,
    };
    assert_eq!(decoded, expected);
    Ok(())
}
// A present `final_output_json_schema` is serialized verbatim alongside items.
#[test]
fn user_input_serialization_includes_final_output_json_schema_when_some() -> Result<()> {
    let schema = json!({
        "type": "object",
        "properties": {
            "answer": { "type": "string" }
        },
        "required": ["answer"],
        "additionalProperties": false
    });
    let op = Op::UserInput {
        items: Vec::new(),
        final_output_json_schema: Some(schema.clone()),
    };
    let json_op = serde_json::to_value(op)?;
    assert_eq!(
        json_op,
        json!({
            "type": "user_input",
            "items": [],
            "final_output_json_schema": schema,
        })
    );
    Ok(())
}
// An empty `text_elements` vector is serialized explicitly, not skipped.
#[test]
fn user_input_text_serializes_empty_text_elements() -> Result<()> {
    let serialized = serde_json::to_value(UserInput::Text {
        text: "hello".to_string(),
        text_elements: Vec::new(),
    })?;
    let expected = json!({
        "type": "text",
        "text": "hello",
        "text_elements": [],
    });
    assert_eq!(serialized, expected);
    Ok(())
}
// Empty metadata vectors are serialized explicitly, while `images: None`
// is omitted from the output.
#[test]
fn user_message_event_serializes_empty_metadata_vectors() -> Result<()> {
    let event = UserMessageEvent {
        message: "hello".to_string(),
        images: None,
        local_images: Vec::new(),
        text_elements: Vec::new(),
    };
    let json_event = serde_json::to_value(event)?;
    assert_eq!(
        json_event,
        json!({
            "message": "hello",
            "local_images": [],
            "text_elements": [],
        })
    );
    Ok(())
}
// Payloads that predate `turn_id` must still decode, leaving it as None.
#[test]
fn turn_aborted_event_deserializes_without_turn_id() -> Result<()> {
    let event: EventMsg = serde_json::from_value(json!({
        "type": "turn_aborted",
        "reason": "interrupted",
    }))?;
    match event {
        EventMsg::TurnAborted(TurnAbortedEvent { turn_id, reason }) => {
            assert_eq!(turn_id, None);
            assert_eq!(reason, TurnAbortReason::Interrupted);
        }
        _ => panic!("expected turn_aborted event"),
    }
    Ok(())
}
// Payloads predating the trace/network fields decode with those set to None.
#[test]
fn turn_context_item_deserializes_without_network() -> Result<()> {
    let payload = json!({
        "cwd": "/tmp",
        "approval_policy": "never",
        "sandbox_policy": { "type": "danger-full-access" },
        "model": "gpt-5",
        "summary": "auto",
    });
    let item: TurnContextItem = serde_json::from_value(payload)?;
    assert!(item.trace_id.is_none());
    assert!(item.network.is_none());
    Ok(())
}
// A populated `network` field serializes its allow/deny domain lists.
#[test]
fn turn_context_item_serializes_network_when_present() -> Result<()> {
    let item = TurnContextItem {
        turn_id: None,
        trace_id: None,
        cwd: PathBuf::from("/tmp"),
        current_date: None,
        timezone: None,
        approval_policy: AskForApproval::Never,
        sandbox_policy: SandboxPolicy::DangerFullAccess,
        network: Some(TurnContextNetworkItem {
            allowed_domains: vec!["api.example.com".to_string()],
            denied_domains: vec!["blocked.example.com".to_string()],
        }),
        model: "gpt-5".to_string(),
        personality: None,
        collaboration_mode: None,
        realtime_active: None,
        effort: None,
        summary: ReasoningSummaryConfig::Auto,
        user_instructions: None,
        developer_instructions: None,
        final_output_json_schema: None,
        truncation_policy: None,
    };
    let value = serde_json::to_value(item)?;
    assert_eq!(
        value["network"],
        json!({
            "allowed_domains": ["api.example.com"],
            "denied_domains": ["blocked.example.com"],
        })
    );
    Ok(())
}
/// Serialize Event to verify that its JSON representation has the expected
/// amount of nesting.
#[test]
fn serialize_event() -> Result<()> {
    let conversation_id = ThreadId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?;
    let rollout_file = NamedTempFile::new()?;
    let event = Event {
        id: "1234".to_string(),
        msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
            session_id: conversation_id,
            forked_from_id: None,
            thread_name: None,
            model: "codex-mini-latest".to_string(),
            model_provider_id: "openai".to_string(),
            service_tier: None,
            approval_policy: AskForApproval::Never,
            approvals_reviewer: ApprovalsReviewer::User,
            sandbox_policy: SandboxPolicy::new_read_only_policy(),
            cwd: PathBuf::from("/home/user/project"),
            reasoning_effort: Some(ReasoningEffortConfig::default()),
            history_log_id: 0,
            history_entry_count: 0,
            initial_messages: None,
            network_proxy: None,
            rollout_path: Some(rollout_file.path().to_path_buf()),
        }),
    };
    // None-valued optional fields must be absent from the expected JSON; the
    // event payload is flattened into "msg" next to its "type" tag.
    let expected = json!({
        "id": "1234",
        "msg": {
            "type": "session_configured",
            "session_id": "67e55044-10b1-426f-9247-bb680e5fe0c8",
            "model": "codex-mini-latest",
            "model_provider_id": "openai",
            "approval_policy": "never",
            "approvals_reviewer": "user",
            "sandbox_policy": {
                "type": "read-only"
            },
            "cwd": "/home/user/project",
            "reasoning_effort": "medium",
            "history_log_id": 0,
            "history_entry_count": 0,
            "rollout_path": format!("{}", rollout_file.path().display()),
        }
    });
    assert_eq!(expected, serde_json::to_value(&event)?);
    Ok(())
}
// Output chunks are raw bytes in Rust but base64-encoded strings on the wire,
// and must round-trip losslessly.
#[test]
fn vec_u8_as_base64_serialization_and_deserialization() -> Result<()> {
    let event = ExecCommandOutputDeltaEvent {
        call_id: "call21".to_string(),
        stream: ExecOutputStream::Stdout,
        chunk: vec![1, 2, 3, 4, 5],
    };
    let json_text = serde_json::to_string(&event)?;
    assert_eq!(
        json_text,
        r#"{"call_id":"call21","stream":"stdout","chunk":"AQIDBAU="}"#,
    );
    let round_tripped: ExecCommandOutputDeltaEvent = serde_json::from_str(&json_text)?;
    assert_eq!(event, round_tripped);
    Ok(())
}
#[test]
fn serialize_mcp_startup_update_event() -> Result<()> {
let event = Event {
id: "init".to_string(),
msg: EventMsg::McpStartupUpdate(McpStartupUpdateEvent {
server: "srv".to_string(),
status: McpStartupStatus::Failed {
error: "boom".to_string(),
},
}),
};
let value = serde_json::to_value(&event)?;
assert_eq!(value["msg"]["type"], "mcp_startup_update");
assert_eq!(value["msg"]["server"], "srv");
assert_eq!(value["msg"]["status"]["state"], "failed");
assert_eq!(value["msg"]["status"]["error"], "boom");
Ok(())
}
#[test]
fn serialize_mcp_startup_complete_event() -> Result<()> {
let event = Event {
id: "init".to_string(),
msg: EventMsg::McpStartupComplete(McpStartupCompleteEvent {
ready: vec!["a".to_string()],
failed: vec![McpStartupFailure {
server: "b".to_string(),
error: "bad".to_string(),
}],
cancelled: vec!["c".to_string()],
}),
};
let value = serde_json::to_value(&event)?;
assert_eq!(value["msg"]["type"], "mcp_startup_complete");
assert_eq!(value["msg"]["ready"][0], "a");
assert_eq!(value["msg"]["failed"][0]["server"], "b");
assert_eq!(value["msg"]["failed"][0]["error"], "bad");
assert_eq!(value["msg"]["cancelled"][0], "c");
Ok(())
}
#[test]
fn token_usage_info_new_or_append_updates_context_window_when_provided() {
let initial = Some(TokenUsageInfo {
total_token_usage: TokenUsage::default(),
last_token_usage: TokenUsage::default(),
model_context_window: Some(258_400),
});
let last = Some(TokenUsage {
input_tokens: 10,
cached_input_tokens: 0,
output_tokens: 0,
reasoning_output_tokens: 0,
total_tokens: 10,
});
let info = TokenUsageInfo::new_or_append(&initial, &last, Some(128_000))
.expect("new_or_append should return info");
assert_eq!(info.model_context_window, Some(128_000));
}
#[test]
fn token_usage_info_new_or_append_preserves_context_window_when_not_provided() {
let initial = Some(TokenUsageInfo {
total_token_usage: TokenUsage::default(),
last_token_usage: TokenUsage::default(),
model_context_window: Some(258_400),
});
let last = Some(TokenUsage {
input_tokens: 10,
cached_input_tokens: 0,
output_tokens: 0,
reasoning_output_tokens: 0,
total_tokens: 10,
});
let info = TokenUsageInfo::new_or_append(&initial, &last, None)
.expect("new_or_append should return info");
assert_eq!(info.model_context_window, Some(258_400));
}
}