Full JSON Schema for the AtomRequest, AgentConfig, and CacheDirective objects.

### AtomRequest
{
"type": "object",
"required": ["atoms", "instructions"],
"properties": {
"atoms": {
"type": "object",
"description": "Schema definitions for each atom type. Keys are atom type names. Values are JSON Schema objects.",
"additionalProperties": {
"type": "object",
"additionalProperties": true
}
},
"instructions": {
"type": "string",
"description": "Free-form text instructions for the agent. Can include markdown, code blocks, structured examples."
},
"context": {
"type": "object",
"description": "Product-defined context for cache granularity. Platform injects org_id and user_id from session.",
"additionalProperties": true
},
"cache": {
"$ref": "#/$defs/CacheDirective",
"description": "Client-controlled caching directives"
},
"tools": {
"type": "array",
"items": { "type": "string" },
"description": "DEPRECATED: Use agent.tools instead."
},
"agent": {
"$ref": "#/$defs/AgentConfig",
"description": "Agent configuration overrides"
}
}
}

### AgentConfig

{
"type": "object",
"properties": {
"model": {
"anyOf": [
{ "type": "string" },
{ "type": "array", "items": { "type": "string" } },
{ "type": "null" }
],
"default": null,
"description": "LLM model name or ordered list of model names. First is primary, rest are fallbacks. Provider derived from name. Default: 'claude-sonnet-4-5'."
},
"temperature": {
"anyOf": [{ "type": "number" }, { "type": "null" }],
"default": null,
"description": "LLM temperature. Default: 0.3."
},
"max_tokens": {
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"default": null,
"description": "Maximum output tokens per LLM call. Default: 32000."
},
"tools": {
"anyOf": [
{ "type": "array", "items": { "type": "string" } },
{ "type": "null" }
],
"default": null,
"description": "Tool names to enable for the agent. Default: []."
}
}
}

### CacheDirective

{
"type": "object",
"properties": {
"read": {
"type": "boolean",
"default": true,
"description": "Read from cache if available and fresh"
},
"write": {
"type": "boolean",
"default": true,
"description": "Write generated atoms to cache"
},
"maxStale": {
"type": "integer",
"default": 86400,
"description": "Maximum staleness in seconds (default: 86400 = 24h)"
},
"revalidate": {
"type": "boolean",
"default": false,
"description": "Serve cached then stream refresh (SWR mode)"
}
}
}

Model provider is derived from the model name:
| Model Prefix | Provider |
|---|---|
| claude-* | Anthropic |
| gpt-* | OpenAI |
| gemini-* | Google |
Default values:

| Field | Default |
|---|---|
| model | "claude-sonnet-4-5" |
| temperature | 0.3 |
| max_tokens | 32000 |
| tools | [] |
| cache.read | true |
| cache.write | true |
| cache.revalidate | false |
| cache.maxStale | 86400 (24 hours) |