diff --git a/README.md b/README.md
index c286de6..f61c09e 100644
--- a/README.md
+++ b/README.md
@@ -62,6 +62,8 @@ Let's assume we want to download utility bills:
 
 **Recommended to use gpt-4o as the model for graph generation as it supports function calling. Integuru will automatically switch to o1-preview for code generation if available in the user's OpenAI account.** ⚠️ **Note: o1-preview does not support function calls.**
 
+**Ollama support is now available! You can run against a local Ollama model by passing `--model ollama` on the command line.**
+
 ## Usage
 
 After setting up the project, you can use Integuru to analyze and reverse-engineer API requests for external platforms. Simply provide the appropriate .har file and a prompt describing the action that you want to trigger.
diff --git a/integuru/__main__.py b/integuru/__main__.py
index 2c001a1..137a7bf 100644
--- a/integuru/__main__.py
+++ b/integuru/__main__.py
@@ -11,7 +11,7 @@
 @click.command()
 @click.option(
-    "--model", default="gpt-4o", help="The LLM model to use (default is gpt-4o)"
+    "--model", default="gpt-4o", help="The LLM model to use (default: gpt-4o; pass 'ollama' to use a local Ollama model)"
 )
 @click.option("--prompt", required=True, help="The prompt for the model")
 @click.option(
diff --git a/integuru/util/LLM.py b/integuru/util/LLM.py
index 9e20293..4497184 100644
--- a/integuru/util/LLM.py
+++ b/integuru/util/LLM.py
@@ -1,17 +1,25 @@
 from langchain_openai import ChatOpenAI
+from langchain_community.llms import Ollama
 
 class LLMSingleton:
     _instance = None
+    _requested_model = None
     _default_model = "gpt-4o"
     _alternate_model = "o1-preview"
+    _ollama_model = "ollama"
 
     @classmethod
     def get_instance(cls, model: str = None):
         if model is None:
             model = cls._default_model
 
-        if cls._instance is None:
-            cls._instance = ChatOpenAI(model=model, temperature=1)
+        if cls._instance is None or cls._requested_model != model:
+            cls._requested_model = model
+            if model == cls._ollama_model:
+                # "ollama" selects the local backend, not a model tag; use the wrapper's default model
+                cls._instance = Ollama()
+            else:
+                cls._instance = ChatOpenAI(model=model, temperature=1)
         return cls._instance
 
     @classmethod
@@ -34,5 +42,12 @@ def switch_to_alternate_model(cls):
         return cls._instance
 
-llm = LLMSingleton()
+    @classmethod
+    def get_ollama_instance(cls):
+        """Returns an Ollama instance for the local backend"""
+        cls._requested_model = cls._ollama_model
+        cls._instance = Ollama()
+        return cls._instance
+
+llm = LLMSingleton()
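
As a quick sanity check on the patch above, here is a minimal sketch of how the patched `LLMSingleton` would be exercised. It assumes a local Ollama server is running with the wrapper's default model pulled, and that `OPENAI_API_KEY` is set for the gpt-4o path; the prompt string is purely illustrative.

```python
# Minimal sketch of the patched LLMSingleton (assumes a running local Ollama
# server with its default model pulled, and OPENAI_API_KEY set for gpt-4o).
from integuru.util.LLM import LLMSingleton

# Default path: lazily constructs a ChatOpenAI instance for gpt-4o.
openai_llm = LLMSingleton.get_instance()

# Passing "ollama" swaps the singleton over to the local Ollama backend.
local_llm = LLMSingleton.get_instance(model="ollama")

# Both wrappers implement LangChain's Runnable interface, so downstream
# code can call .invoke() without knowing which backend it received.
print(local_llm.invoke("Reply with the single word: ready"))
```

On the CLI side this maps onto an invocation along the lines of `python -m integuru --model ollama --prompt "..."`, subject to whatever other options `__main__.py` requires (the option list is truncated in the hunk above).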