-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy path: evaluatee_response.py
More file actions
60 lines (46 loc) · 2.18 KB
/
evaluatee_response.py
File metadata and controls
60 lines (46 loc) · 2.18 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
import datetime as dt
import typing
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
from ..core.unchecked_base_model import UncheckedBaseModel
from .run_version_response import RunVersionResponse
class EvaluateeResponse(UncheckedBaseModel):
    """
    Version of the Evaluatee being evaluated.

    Auto-generated API response model. Per the config at the bottom of the
    class, unknown fields returned by the server are retained rather than
    rejected (``extra = allow``) and instances are frozen (immutable) once
    constructed.
    """

    # The Run version under evaluation; None when absent from the response.
    version: typing.Optional[RunVersionResponse] = None

    batch_id: typing.Optional[str] = pydantic.Field(default=None)
    """
    Unique identifier for the batch of Logs to include in the Evaluation.
    """

    # NOTE(review): declared required (no default) even though the docstring
    # below mentions a default of `True` — presumably that default is applied
    # server-side, not by this client model; confirm against the API schema.
    orchestrated: bool = pydantic.Field()
    """
    Whether the Prompt/Tool is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Prompt/Tool should be submitted by the user via the API.
    """

    pinned: bool = pydantic.Field()
    """
    Pinned Evaluatees are shown in Humanloop's Overview, allowing you to use them as baselines for comparison.
    """

    added_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
    """
    When the Evaluatee was added to the Evaluation.
    """

    # Equivalent model configuration for Pydantic v2 vs v1: accept and keep
    # extra fields from the API, and forbid mutation after construction.
    if IS_PYDANTIC_V2:
        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
    else:

        class Config:
            frozen = True
            smart_union = True
            extra = pydantic.Extra.allow
# Deferred imports, placed after the class definition — presumably to avoid
# circular imports among the generated response models while still making
# these names available for forward-reference resolution below (standard
# pattern in Fern-generated code; the F401 suppressions show the imports are
# for side effects/name registration, not direct use). TODO confirm which of
# these RunVersionResponse's forward references actually require.
from .agent_linked_file_response import AgentLinkedFileResponse  # noqa: E402, F401, I001
from .agent_response import AgentResponse  # noqa: E402, F401, I001
from .evaluator_response import EvaluatorResponse  # noqa: E402, F401, I001
from .flow_response import FlowResponse  # noqa: E402, F401, I001
from .monitoring_evaluator_response import MonitoringEvaluatorResponse  # noqa: E402, F401, I001
from .prompt_response import PromptResponse  # noqa: E402, F401, I001
from .tool_response import ToolResponse  # noqa: E402, F401, I001
from .version_deployment_response import VersionDeploymentResponse  # noqa: E402, F401, I001
from .version_id_response import VersionIdResponse  # noqa: E402, F401, I001

# Resolve EvaluateeResponse's outstanding forward references now that all of
# the referenced model types have been imported.
update_forward_refs(EvaluateeResponse)