diff --git a/src/codegate/config.py b/src/codegate/config.py
index c3926994d..dbe17fff4 100644
--- a/src/codegate/config.py
+++ b/src/codegate/config.py
@@ -20,7 +20,7 @@
"anthropic": "https://api.anthropic.com/v1",
"vllm": "http://localhost:8000", # Base URL without /v1 path
"ollama": "http://localhost:11434", # Default Ollama server URL
- "lm_studio": "http://localhost:1234"
+ "lm_studio": "http://localhost:1234",
}
diff --git a/src/codegate/pipeline/base.py b/src/codegate/pipeline/base.py
index 52270724b..f0e131960 100644
--- a/src/codegate/pipeline/base.py
+++ b/src/codegate/pipeline/base.py
@@ -321,7 +321,8 @@ async def process(
class InputPipelineInstance:
def __init__(
- self, pipeline_steps: List[PipelineStep], secret_manager: SecretsManager, is_fim: bool):
+ self, pipeline_steps: List[PipelineStep], secret_manager: SecretsManager, is_fim: bool
+ ):
self.pipeline_steps = pipeline_steps
self.secret_manager = secret_manager
self.is_fim = is_fim
@@ -384,7 +385,8 @@ async def process_request(
class SequentialPipelineProcessor:
def __init__(
- self, pipeline_steps: List[PipelineStep], secret_manager: SecretsManager, is_fim: bool):
+ self, pipeline_steps: List[PipelineStep], secret_manager: SecretsManager, is_fim: bool
+ ):
self.pipeline_steps = pipeline_steps
self.secret_manager = secret_manager
self.is_fim = is_fim
diff --git a/src/codegate/pipeline/codegate_context_retriever/codegate.py b/src/codegate/pipeline/codegate_context_retriever/codegate.py
index f16115b9b..f4a78e686 100644
--- a/src/codegate/pipeline/codegate_context_retriever/codegate.py
+++ b/src/codegate/pipeline/codegate_context_retriever/codegate.py
@@ -95,10 +95,12 @@ async def process(
# in the rest of the user query/messsages
user_messages = re.sub(r"```.*?```", "", user_message, flags=re.DOTALL)
user_messages = re.sub(r"⋮...*?⋮...\n\n", "", user_messages, flags=re.DOTALL)
- user_messages = re.sub(r".*?", "", user_messages, flags=re.DOTALL)
+ user_messages = re.sub(
+ r".*?", "", user_messages, flags=re.DOTALL
+ )
# split messages into double newlines, to avoid passing so many content in the search
- split_messages = re.split(r'</?task>|(\n\n)', user_messages)
+ split_messages = re.split(r"</?task>|(\n\n)", user_messages)
collected_bad_packages = []
for item_message in split_messages:
# Vector search to find bad packages
@@ -143,7 +145,7 @@ async def process(
# Combine the updated task block with the rest of the message
context_msg = updated_task_content + rest_of_message
else:
- context_msg = f'Context: {context_str} \n\n Query: {message_str}' # type: ignore
+ context_msg = f"Context: {context_str} \n\n Query: {message_str}" # type: ignore
message["content"] = context_msg
logger.debug("Final context message", context_message=context_msg)
diff --git a/src/codegate/pipeline/secrets/secrets.py b/src/codegate/pipeline/secrets/secrets.py
index 6676b00a7..f2f0fca48 100644
--- a/src/codegate/pipeline/secrets/secrets.py
+++ b/src/codegate/pipeline/secrets/secrets.py
@@ -452,7 +452,8 @@ async def process_chunk(
return [chunk]
is_cline_client = any(
- "Cline" in str(message.trigger_string or "") for message in input_context.alerts_raised or []
+ "Cline" in str(message.trigger_string or "")
+ for message in input_context.alerts_raised or []
)
# Check if this is the first chunk (delta role will be present, others will not)
diff --git a/src/codegate/providers/base.py b/src/codegate/providers/base.py
index c99ce7e1a..46318d396 100644
--- a/src/codegate/providers/base.py
+++ b/src/codegate/providers/base.py
@@ -199,7 +199,8 @@ async def _cleanup_after_streaming(
context.sensitive.secure_cleanup()
async def complete(
- self, data: Dict, api_key: Optional[str], is_fim_request: bool) -> Union[ModelResponse, AsyncIterator[ModelResponse]]:
+ self, data: Dict, api_key: Optional[str], is_fim_request: bool
+ ) -> Union[ModelResponse, AsyncIterator[ModelResponse]]:
"""
Main completion flow with pipeline integration
diff --git a/src/codegate/providers/openai/provider.py b/src/codegate/providers/openai/provider.py
index ec4cf4816..9cd685fad 100644
--- a/src/codegate/providers/openai/provider.py
+++ b/src/codegate/providers/openai/provider.py
@@ -56,7 +56,7 @@ async def create_completion(
# if model starts with lm_studio, propagate it
if data.get("model", "").startswith("lm_studio"):
- data["base_url"] = self.lm_studio_url+"/v1/"
+ data["base_url"] = self.lm_studio_url + "/v1/"
is_fim_request = self._is_fim_request(request, data)
try:
stream = await self.complete(data, api_key, is_fim_request=is_fim_request)
diff --git a/src/codegate/storage/storage_engine.py b/src/codegate/storage/storage_engine.py
index 18aa33a00..9543fe706 100644
--- a/src/codegate/storage/storage_engine.py
+++ b/src/codegate/storage/storage_engine.py
@@ -53,9 +53,7 @@ def __init__(self, data_path="./sqlite_data"):
self.inference_engine = LlamaCppInferenceEngine()
conf = Config.get_config()
if conf and conf.model_base_path and conf.embedding_model:
- self.model_path = (
- f"{conf.model_base_path}/{conf.embedding_model}"
- )
+ self.model_path = f"{conf.model_base_path}/{conf.embedding_model}"
else:
self.model_path = ""