Documentation
¶
Index ¶
Constants ¶
This section is empty.
Variables ¶
View Source
var (
	// ErrInterrupted signals that the chat was interrupted before the
	// run completed — presumably returned (or wrapped) by the chat
	// loop so callers can detect interruption with errors.Is; confirm
	// against callers.
	ErrInterrupted = xerrors.New("chat interrupted")
)
Functions ¶
Types ¶
type CompactionOptions ¶
// CompactionOptions configures automatic compaction (summarization)
// of a chat conversation.
// NOTE(review): several field semantics below are inferred from
// names — confirm against the compaction implementation.
type CompactionOptions struct {
	// ThresholdPercent is presumably the percentage of the context
	// limit at which compaction triggers — TODO confirm.
	ThresholdPercent int32
	// ContextLimit is presumably the model's context window size
	// (in tokens) that ThresholdPercent applies to — verify against
	// callers.
	ContextLimit int64
	// SummaryPrompt is presumably the prompt sent to the model to
	// produce the conversation summary — confirm.
	SummaryPrompt string
	// SystemSummaryPrefix is presumably prepended to the summary
	// when it is injected back into the conversation — TODO confirm.
	SystemSummaryPrefix string
	// Timeout bounds the compaction operation; whether zero means
	// "no timeout" or "use a default" is not visible here — confirm.
	Timeout time.Duration
	// Persist stores the CompactionResult (e.g. to the database).
	Persist func(context.Context, CompactionResult) error
	// ToolCallID and ToolName identify the synthetic tool call
	// used to represent compaction in the message stream.
	ToolCallID string
	ToolName   string
	// PublishMessagePart publishes streaming parts to connected
	// clients so they see "Summarizing..." / "Summarized" UI
	// transitions during compaction.
	PublishMessagePart func(codersdk.ChatMessageRole, codersdk.ChatMessagePart)
	// OnError is invoked when compaction fails — presumably
	// best-effort error reporting rather than a fatal path; confirm.
	OnError func(error)
}
type CompactionResult ¶
type PersistedStep ¶
// PersistedStep contains the full content of a completed or
// interrupted agent step. Content includes both assistant blocks
// (text, reasoning, tool calls) and tool result blocks. The
// persistence layer is responsible for splitting these into
// separate database messages by role.
type PersistedStep struct {
	// Content holds the step's message blocks — both assistant
	// blocks and tool result blocks — to be split by role at
	// persistence time.
	Content []fantasy.Content
	// Usage records token usage for this step — presumably as
	// reported by the provider; confirm.
	Usage fantasy.Usage
	// ContextLimit is the context window in effect for this step;
	// a NULL value presumably means the limit was unknown — TODO
	// confirm against the persistence layer.
	ContextLimit sql.NullInt64
	// ProviderResponseID is presumably the provider-assigned ID of
	// the response that produced this step — verify against callers.
	ProviderResponseID string
	// Runtime is the wall-clock duration of this step,
	// covering LLM streaming, tool execution, and retries.
	// Zero indicates the duration was not measured (e.g.
	// interrupted steps).
	Runtime time.Duration
}
PersistedStep contains the full content of a completed or interrupted agent step. Content includes both assistant blocks (text, reasoning, tool calls) and tool result blocks. The persistence layer is responsible for splitting these into separate database messages by role.
type ProviderTool ¶
ProviderTool pairs a provider-native tool definition with an optional local executor. When Runner is nil the tool is fully provider-executed (e.g. web search). When Runner is non-nil the definition is sent to the API but execution is handled locally (e.g. computer use).
type RunOptions ¶
// RunOptions configures a single streaming chat loop run.
type RunOptions struct {
	// Model is the language model used for the run.
	Model fantasy.LanguageModel
	// Messages is the conversation history sent to the model.
	Messages []fantasy.Message
	// Tools are the agent tools available during the run.
	Tools []fantasy.AgentTool
	// MaxSteps is presumably an upper bound on agent steps to
	// prevent unbounded tool-call loops; zero semantics not visible
	// here — TODO confirm.
	MaxSteps int
	// StartupTimeout bounds how long each model attempt may
	// spend opening the provider stream and waiting for its
	// first stream part before the attempt is canceled and
	// retried. Zero uses the production default.
	StartupTimeout time.Duration
	// ActiveTools is presumably a filter naming which of Tools are
	// enabled for this run — confirm against the loop implementation.
	ActiveTools []string
	// ContextLimitFallback is presumably the context limit assumed
	// when the provider does not report one — TODO confirm.
	ContextLimitFallback int64
	// ModelConfig holds per-call LLM parameters (temperature,
	// max tokens, etc.) read from the chat model configuration.
	ModelConfig codersdk.ChatModelCallConfig
	// ProviderOptions are provider-specific call options
	// converted from ModelConfig.ProviderOptions. This is a
	// separate field because the conversion requires knowledge
	// of the provider, which lives in chatd, not chatloop.
	ProviderOptions fantasy.ProviderOptions
	// ProviderTools are provider-native tools (like web search
	// and computer use) whose definitions are passed directly
	// to the provider API. When a ProviderTool has a non-nil
	// Runner, tool calls are executed locally; otherwise the
	// provider handles execution (e.g. web search).
	ProviderTools []ProviderTool
	// PersistStep stores each step's PersistedStep — presumably
	// invoked once per completed or interrupted step; confirm.
	PersistStep func(context.Context, PersistedStep) error
	// PublishMessagePart streams message parts to connected clients
	// as they are produced — presumably analogous to the compaction
	// publisher; confirm.
	PublishMessagePart func(
		role codersdk.ChatMessageRole,
		part codersdk.ChatMessagePart,
	)
	// Compaction, when non-nil, presumably enables conversation
	// compaction with the given options — confirm nil handling.
	Compaction *CompactionOptions
	// ReloadMessages re-reads the conversation history — presumably
	// called after compaction rewrites stored messages; TODO confirm.
	ReloadMessages func(context.Context) ([]fantasy.Message, error)
	// DisableChainMode — semantics not visible here; presumably
	// turns off a "chain mode" feature for the chat. Confirm against
	// callers.
	DisableChainMode func()
	// OnRetry is called before each retry attempt when the LLM
	// stream fails with a retryable error. It provides the attempt
	// number, raw error, normalized classification, and backoff
	// delay so callers can publish status events to connected
	// clients. Callers should also clear any buffered stream state
	// from the failed attempt in this callback to avoid sending
	// duplicated content.
	OnRetry chatretry.OnRetryFn
	// OnInterruptedPersistError is invoked when persisting an
	// interrupted step fails — presumably best-effort reporting;
	// confirm.
	OnInterruptedPersistError func(error)
}
RunOptions configures a single streaming chat loop run.
Click to show internal directories.
Click to hide internal directories.