import datetime
import os
import tempfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, AsyncIterator, Callable, Dict, List, Optional, Union

import structlog
from fastapi import APIRouter
from litellm import ModelResponse
from litellm.types.llms.openai import ChatCompletionRequest

from codegate.clients.clients import ClientType
from codegate.codegate_logging import setup_logging
from codegate.db.connection import DbRecorder
from codegate.pipeline.base import (
    PipelineContext,
    PipelineResult,
)
from codegate.pipeline.factory import PipelineFactory
from codegate.pipeline.output import OutputPipelineInstance
from codegate.providers.completion.base import BaseCompletionHandler
from codegate.providers.formatting.input_pipeline import PipelineResponseFormatter
from codegate.providers.normalizer.base import ModelInputNormalizer, ModelOutputNormalizer
from codegate.providers.normalizer.completion import CompletionNormalizer

setup_logging()
logger = structlog.get_logger("codegate")

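# When CODEGATE_DUMP_DIR is set, incoming and normalized payloads are dumped into a fresh
# temporary directory under that path for debugging (see _dump_request_response below),
# e.g. export CODEGATE_DUMP_DIR=/tmp/codegate-dumps before starting the server.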
TEMPDIR = None
if os.getenv("CODEGATE_DUMP_DIR"):
    basedir = os.getenv("CODEGATE_DUMP_DIR")
    TEMPDIR = tempfile.TemporaryDirectory(prefix="codegate-", dir=basedir, delete=False)

StreamGenerator = Callable[[AsyncIterator[Any]], AsyncIterator[str]]


class ModelFetchError(Exception):
    pass


class BaseProvider(ABC):
    """
    The provider class is responsible for defining the API routes and
    calling the completion method using the completion handler.
    """

    def __init__(
        self,
        input_normalizer: ModelInputNormalizer,
        output_normalizer: ModelOutputNormalizer,
        completion_handler: BaseCompletionHandler,
        pipeline_factory: PipelineFactory,
    ):
        self.router = APIRouter()
        self._completion_handler = completion_handler
        self._input_normalizer = input_normalizer
        self._output_normalizer = output_normalizer
        self._pipeline_factory = pipeline_factory
        self._db_recorder = DbRecorder()
        self._pipeline_response_formatter = PipelineResponseFormatter(
            output_normalizer, self._db_recorder
        )
        self._fim_normalizer = CompletionNormalizer()

        self._setup_routes()

    @abstractmethod
    def _setup_routes(self) -> None:
        pass

    @abstractmethod
    def models(self, endpoint: str = None, api_key: str = None) -> List[str]:
        pass

    @abstractmethod
    async def process_request(
        self,
        data: dict,
        api_key: str,
        request_url_path: str,
        client_type: ClientType,
    ):
        pass

    @property
    @abstractmethod
    def provider_route_name(self) -> str:
        pass

    async def _run_output_stream_pipeline(
        self,
        input_context: PipelineContext,
        model_stream: AsyncIterator[ModelResponse],
        is_fim_request: bool,
    ) -> AsyncIterator[ModelResponse]:
        # Decide which pipeline processor to use
        out_pipeline_processor = None
        if is_fim_request:
            out_pipeline_processor = self._pipeline_factory.create_fim_output_pipeline()
            logger.info("FIM pipeline selected for output.")
        else:
            out_pipeline_processor = self._pipeline_factory.create_output_pipeline()
            logger.info("Chat completion pipeline selected for output.")
        if out_pipeline_processor is None:
            logger.info("No output pipeline processor found, passing through")
            return model_stream

        # HACK! for anthropic we always need to run the output FIM pipeline even
        # if empty to run the normalizers
        if (
            len(out_pipeline_processor.pipeline_steps) == 0
            and self.provider_route_name != "anthropic"
        ):
            logger.info("No output pipeline steps configured, passing through")
            return model_stream
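
        # The stream is normalized into the internal (OpenAI-style) chunk format, run through
        # the output pipeline steps, and then denormalized back into the provider's own format.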
        normalized_stream = self._output_normalizer.normalize_streaming(model_stream)
        output_pipeline_instance = OutputPipelineInstance(
            pipeline_steps=out_pipeline_processor.pipeline_steps,
            input_context=input_context,
        )
        pipeline_output_stream = output_pipeline_instance.process_stream(normalized_stream)
        denormalized_stream = self._output_normalizer.denormalize_streaming(pipeline_output_stream)
        return denormalized_stream

    def _run_output_pipeline(
        self,
        normalized_response: ModelResponse,
    ) -> ModelResponse:
        # we don't have a pipeline for non-streamed output yet
        return normalized_response

    async def _run_input_pipeline(
        self,
        normalized_request: ChatCompletionRequest,
        api_key: Optional[str],
        api_base: Optional[str],
        client_type: ClientType,
        is_fim_request: bool,
    ) -> PipelineResult:
        # Decide which pipeline processor to use
        if is_fim_request:
            pipeline_processor = self._pipeline_factory.create_fim_pipeline(client_type)
            logger.info("FIM pipeline selected for execution.")
            normalized_request = self._fim_normalizer.normalize(normalized_request)
        else:
            pipeline_processor = self._pipeline_factory.create_input_pipeline(client_type)
            logger.info("Chat completion pipeline selected for execution.")
        if pipeline_processor is None:
            return PipelineResult(request=normalized_request)

        result = await pipeline_processor.process_request(
            request=normalized_request,
            provider=self.provider_route_name,
            model=normalized_request.get("model"),
            api_key=api_key,
            api_base=api_base,
        )

        # TODO(jakub): handle this by returning a message to the client
        if result.error_message:
            raise Exception(result.error_message)

        return result

    def _is_fim_request_url(self, request_url_path: str) -> bool:
        """
        Checks the request URL to determine if a request is FIM or chat completion.
        Used by: llama.cpp
        """
        # Evaluate first a larger substring.
        if request_url_path.endswith("/chat/completions"):
            return False

        # /completions is for OpenAI standard. /api/generate is for ollama.
        if request_url_path.endswith("/completions") or request_url_path.endswith("/api/generate"):
            return True

        return False

    def _is_fim_request_body(self, data: Dict) -> bool:
        """
        Determine from the raw incoming data if it's a FIM request.
        Used by: OpenAI and Anthropic
        """
        messages = data.get("messages", [])
        if not messages:
            return False

        first_message_content = messages[0].get("content")
        if first_message_content is None:
            return False
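
        # Heuristic: FIM (fill-in-the-middle) prompts from coding assistants wrap the code
        # context and the expected answer in tag pairs such as <QUERY>...</QUERY> and
        # <COMPLETION>...</COMPLETION>, so the message is treated as FIM only when all four
        # markers appear. Illustrative (not verbatim) shape of such a prompt:
        #   "... <QUERY>def add(a, b):\n    return {{FILL_HERE}}</QUERY> ...
        #    reply with <COMPLETION>a + b</COMPLETION>"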
        fim_stop_sequences = ["</COMPLETION>", "<COMPLETION>", "</QUERY>", "<QUERY>"]
        if isinstance(first_message_content, str):
            msg_prompt = first_message_content
        elif isinstance(first_message_content, list):
            msg_prompt = first_message_content[0].get("text", "")
        else:
            logger.warning(f"Could not determine if message was FIM from data: {data}")
            return False
        return all([stop_sequence in msg_prompt for stop_sequence in fim_stop_sequences])

    def _is_fim_request(self, request_url_path: str, data: Dict) -> bool:
        """
        Determine if the request is FIM by the URL or the data of the request.
        """
        # first check if we are in specific tools to discard FIM
        prompt = data.get("prompt", "")
        tools = ["cline", "kodu", "open interpreter"]
        for tool in tools:
            if tool in prompt.lower():
                # those tools can never be FIM
                return False
        # Avoid more expensive inspection of body by just checking the URL.
        if self._is_fim_request_url(request_url_path):
            return True

        return self._is_fim_request_body(data)

    async def _cleanup_after_streaming(
        self, stream: AsyncIterator[ModelResponse], context: PipelineContext
    ) -> AsyncIterator[ModelResponse]:
        """Wraps the stream to ensure cleanup after consumption"""
        try:
            async for item in stream:
                yield item
        finally:
            if context:
                # Ensure sensitive data is cleaned up after the stream is consumed
                if context.sensitive:
                    context.sensitive.secure_cleanup()

    def _dump_request_response(self, prefix: str, data: Any) -> None:
        """Dump request or response data to a file if CODEGATE_DUMP_DIR is set"""
        if not TEMPDIR:
            return

        ts = datetime.datetime.now()
        fname = (
            Path(TEMPDIR.name)
            / f"{prefix}-{self.provider_route_name}-{ts.strftime('%Y%m%dT%H%M%S%f')}.json"
        )
        if isinstance(data, (dict, list)):
            import json

            with open(fname, "w") as f:
                json.dump(data, f, indent=2)
        else:
            with open(fname, "w") as f:
                f.write(str(data))

    async def complete(
        self,
        data: Dict,
        api_key: Optional[str],
        is_fim_request: bool,
        client_type: ClientType,
    ) -> Union[ModelResponse, AsyncIterator[ModelResponse]]:
        """
        Main completion flow with pipeline integration

        The flow has three main steps:
        - Translate the request to the OpenAI API format used internally
        - Process the request with the pipeline processor. This can modify the request
          or yield a response. The response can either be returned or streamed back to
          the client
        - Execute the completion and translate the response back to the
          provider-specific format
        """
        # Dump the incoming request
        self._dump_request_response("request", data)
        normalized_request = self._input_normalizer.normalize(data)
        # Dump the normalized request
        self._dump_request_response("normalized-request", normalized_request)
        streaming = normalized_request.get("stream", False)

        # Get detected client if available
        input_pipeline_result = await self._run_input_pipeline(
            normalized_request,
            api_key,
            data.get("base_url"),
            client_type,
            is_fim_request,
        )
        if input_pipeline_result.response and input_pipeline_result.context:
            return await self._pipeline_response_formatter.handle_pipeline_response(
                input_pipeline_result.response, streaming, context=input_pipeline_result.context
            )

        if input_pipeline_result.request:
            provider_request = self._input_normalizer.denormalize(input_pipeline_result.request)
        if is_fim_request:
            provider_request = self._fim_normalizer.denormalize(provider_request)  # type: ignore
        self._dump_request_response("provider-request", provider_request)

        # Execute the completion and translate the response
        # This gives us either a single response or a stream of responses
        # based on the streaming flag
        model_response = await self._completion_handler.execute_completion(
            provider_request,
            api_key=api_key,
            stream=streaming,
            is_fim_request=is_fim_request,
        )
        if not streaming:
            normalized_response = self._output_normalizer.normalize(model_response)
            pipeline_output = self._run_output_pipeline(normalized_response)
            await self._db_recorder.record_context(input_pipeline_result.context)
            return self._output_normalizer.denormalize(pipeline_output)

        pipeline_output_stream = await self._run_output_stream_pipeline(
            input_pipeline_result.context, model_response, is_fim_request=is_fim_request  # type: ignore
        )
        return self._cleanup_after_streaming(pipeline_output_stream, input_pipeline_result.context)  # type: ignore

    def get_routes(self) -> APIRouter:
        return self.router
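
# The router returned by get_routes() is typically mounted on the FastAPI application, e.g.
# (illustrative; assumes an `app: FastAPI` instance and an already-constructed provider):
#   app.include_router(provider.get_routes())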