
ontocast.tool.llm

Language Model (LLM) integration tool for OntoCast.

This module provides integration with language models through LangChain, supporting the OpenAI and Ollama providers. It provides text generation and structured data extraction capabilities.
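A minimal end-to-end sketch, assuming the module is importable as shown below and that an OpenAI key is available in the environment (LangChain's ChatOpenAI reads OPENAI_API_KEY by default):

```python
import asyncio

from ontocast.tool.llm import LLMTool


async def main():
    # Create and set up the tool; defaults are provider="openai", model="gpt-4o-mini".
    tool = await LLMTool.acreate()
    # Generate a plain-text completion.
    text = await tool.complete("In one sentence, what is an ontology?")
    print(text)


asyncio.run(main())
```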

LLMTool

Bases: Tool

Tool for interacting with language models.

This class provides a unified interface for working with different language model providers (OpenAI, Ollama) through LangChain. It supports both synchronous and asynchronous operations.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| provider | str | The LLM provider to use (default: "openai"). |
| model | str | The specific model to use (default: "gpt-4o-mini"). |
| api_key | Optional[str] | Optional API key for the provider. |
| base_url | Optional[str] | Optional base URL for the provider. |
| temperature | float | Temperature parameter for generation (default: 0.1). |
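For illustration, a sketch of overriding these attributes for a local Ollama server (the model name and URL below are placeholders, not values prescribed by OntoCast):

```python
tool = LLMTool.create(
    provider="ollama",
    model="llama3.1",                   # placeholder model name
    base_url="http://localhost:11434",  # default Ollama endpoint
    temperature=0.0,
)
```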

Source code in ontocast/tool/llm.py
class LLMTool(Tool):
    """Tool for interacting with language models.

    This class provides a unified interface for working with different language model
    providers (OpenAI, Ollama) through LangChain. It supports both synchronous and
    asynchronous operations.

    Attributes:
        provider: The LLM provider to use (default: "openai").
        model: The specific model to use (default: "gpt-4o-mini").
        api_key: Optional API key for the provider.
        base_url: Optional base URL for the provider.
        temperature: Temperature parameter for generation (default: 0.1).
    """

    provider: str = Field(default="openai")
    model: str = Field(default="gpt-4o-mini")
    api_key: Optional[str] = None
    base_url: Optional[str] = None
    temperature: float = 0.1

    def __init__(
        self,
        **kwargs,
    ):
        """Initialize the LLM tool.

        Args:
            **kwargs: Additional keyword arguments passed to the parent class.
        """
        super().__init__(**kwargs)
        self._llm = None

    @classmethod
    def create(cls, **kwargs):
        """Create a new LLM tool instance synchronously.

        Args:
            **kwargs: Keyword arguments for initialization.

        Returns:
            LLMTool: A new instance of the LLM tool.
        """
        return asyncio.run(cls.acreate(**kwargs))

    @classmethod
    async def acreate(cls, **kwargs):
        """Create a new LLM tool instance asynchronously.

        Args:
            **kwargs: Keyword arguments for initialization.

        Returns:
            LLMTool: A new instance of the LLM tool.
        """
        self = cls.__new__(cls)
        self.__init__(**kwargs)
        await self.setup()
        return self

    async def setup(self):
        """Set up the language model based on the configured provider.

        Raises:
            ValueError: If the provider is not supported.
        """
        if self.provider == "openai":
            self._llm = ChatOpenAI(
                model=self.model,
                temperature=self.temperature,
            )
        elif self.provider == "ollama":
            self._llm = ChatOllama(
                model=self.model, base_url=self.base_url, temperature=self.temperature
            )
        else:
            raise ValueError(f"Unsupported provider: {self.provider}")

    def __call__(self, *args: Any, **kwds: Any) -> Any:
        """Call the language model directly.

        Args:
            *args: Positional arguments passed to the LLM.
            **kwds: Keyword arguments passed to the LLM.

        Returns:
            Any: The LLM's response.
        """
        return self.llm.invoke(*args, **kwds)

    @property
    def llm(self) -> BaseChatModel:
        """Get the underlying language model instance.

        Returns:
            BaseChatModel: The configured language model.

        Raises:
            RuntimeError: If the LLM has not been properly initialized.
        """
        if self._llm is None:
            raise RuntimeError(
                "LLM resource not properly initialized. Call setup() first."
            )
        return self._llm

    async def complete(self, prompt: str, **kwargs) -> Any:
        """Generate a completion for the given prompt.

        Args:
            prompt: The input prompt for generation.
            **kwargs: Additional keyword arguments for generation.

        Returns:
            Any: The generated completion.
        """
        response = await self.llm.ainvoke(prompt)
        return response.content

    async def extract(self, prompt: str, output_schema: Type[T], **kwargs) -> T:
        """Extract structured data from the prompt according to a schema.

        Args:
            prompt: The input prompt for extraction.
            output_schema: The Pydantic model class defining the output structure.
            **kwargs: Additional keyword arguments for extraction.

        Returns:
            T: The extracted data conforming to the output schema.
        """
        parser = PydanticOutputParser(pydantic_object=output_schema)
        format_instructions = parser.get_format_instructions()

        full_prompt = f"{prompt}\n\n{format_instructions}"
        response = await self.llm.ainvoke(full_prompt)

        return parser.parse(response.content)

llm property

Get the underlying language model instance.

Returns:

| Type | Description |
| --- | --- |
| BaseChatModel | The configured language model. |

Raises:

| Type | Description |
| --- | --- |
| RuntimeError | If the LLM has not been properly initialized. |
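The property guards against use before setup() has run; a sketch of both paths, assuming a tool built via create() (which runs setup()):

```python
tool = LLMTool.create()
model = tool.llm             # returns the configured BaseChatModel
print(type(model).__name__)  # e.g. ChatOpenAI

bare = LLMTool()             # __init__ alone leaves _llm unset
try:
    bare.llm
except RuntimeError as err:
    print(err)  # "LLM resource not properly initialized. Call setup() first."
```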

__call__(*args, **kwds)

Call the language model directly.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| *args | Any | Positional arguments passed to the LLM. | () |
| **kwds | Any | Keyword arguments passed to the LLM. | {} |

Returns:

| Type | Description |
| --- | --- |
| Any | The LLM's response. |

Source code in ontocast/tool/llm.py
def __call__(self, *args: Any, **kwds: Any) -> Any:
    """Call the language model directly.

    Args:
        *args: Positional arguments passed to the LLM.
        **kwds: Keyword arguments passed to the LLM.

    Returns:
        Any: The LLM's response.
    """
    return self.llm.invoke(*args, **kwds)
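Since __call__ delegates to llm.invoke, the tool instance can be called like the model itself. A sketch, assuming the tool instance from the earlier examples:

```python
response = tool("Name one RDF serialization format.")
print(response.content)  # invoke() returns a message object; .content holds the text
```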

__init__(**kwargs)

Initialize the LLM tool.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| **kwargs | | Additional keyword arguments passed to the parent class. | {} |

Source code in ontocast/tool/llm.py
def __init__(
    self,
    **kwargs,
):
    """Initialize the LLM tool.

    Args:
        **kwargs: Additional keyword arguments passed to the parent class.
    """
    super().__init__(**kwargs)
    self._llm = None

acreate(**kwargs) async classmethod

Create a new LLM tool instance asynchronously.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| **kwargs | | Keyword arguments for initialization. | {} |

Returns:

| Type | Description |
| --- | --- |
| LLMTool | A new instance of the LLM tool. |

Source code in ontocast/tool/llm.py
@classmethod
async def acreate(cls, **kwargs):
    """Create a new LLM tool instance asynchronously.

    Args:
        **kwargs: Keyword arguments for initialization.

    Returns:
        LLMTool: A new instance of the LLM tool.
    """
    self = cls.__new__(cls)
    self.__init__(**kwargs)
    await self.setup()
    return self
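A sketch of asynchronous creation inside an existing event loop, where the synchronous create() (which calls asyncio.run) would fail; the Ollama settings are placeholders:

```python
async def build_tool() -> LLMTool:
    # acreate() runs setup(), so the returned tool is ready to use.
    return await LLMTool.acreate(
        provider="ollama",
        model="llama3.1",  # placeholder model name
        base_url="http://localhost:11434",
    )
```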

complete(prompt, **kwargs) async

Generate a completion for the given prompt.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| prompt | str | The input prompt for generation. | required |
| **kwargs | | Additional keyword arguments for generation. | {} |

Returns:

| Type | Description |
| --- | --- |
| Any | The generated completion. |

Source code in ontocast/tool/llm.py
async def complete(self, prompt: str, **kwargs) -> Any:
    """Generate a completion for the given prompt.

    Args:
        prompt: The input prompt for generation.
        **kwargs: Additional keyword arguments for generation.

    Returns:
        Any: The generated completion.
    """
    response = await self.llm.ainvoke(prompt)
    return response.content
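Note that complete() returns only response.content, not the full message object. A sketch wrapping it in a hypothetical helper:

```python
from ontocast.tool.llm import LLMTool


async def summarize(tool: LLMTool, document: str) -> str:
    # The returned value is the message content (typically a str).
    return await tool.complete(f"Summarize in one sentence:\n\n{document}")
```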

create(**kwargs) classmethod

Create a new LLM tool instance synchronously.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| **kwargs | | Keyword arguments for initialization. | {} |

Returns:

| Type | Description |
| --- | --- |
| LLMTool | A new instance of the LLM tool. |

Source code in ontocast/tool/llm.py
@classmethod
def create(cls, **kwargs):
    """Create a new LLM tool instance synchronously.

    Args:
        **kwargs: Keyword arguments for initialization.

    Returns:
        LLMTool: A new instance of the LLM tool.
    """
    return asyncio.run(cls.acreate(**kwargs))
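Because create() wraps acreate() in asyncio.run, it suits synchronous entry points and will raise a RuntimeError if called from within a running event loop (use acreate() there instead). A sketch:

```python
if __name__ == "__main__":
    tool = LLMTool.create(provider="openai", model="gpt-4o-mini")
    print(tool.llm)  # setup() has already run, so the property resolves
```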

extract(prompt, output_schema, **kwargs) async

Extract structured data from the prompt according to a schema.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| prompt | str | The input prompt for extraction. | required |
| output_schema | Type[T] | The Pydantic model class defining the output structure. | required |
| **kwargs | | Additional keyword arguments for extraction. | {} |

Returns:

| Type | Description |
| --- | --- |
| T | The extracted data conforming to the output schema. |

Source code in ontocast/tool/llm.py
async def extract(self, prompt: str, output_schema: Type[T], **kwargs) -> T:
    """Extract structured data from the prompt according to a schema.

    Args:
        prompt: The input prompt for extraction.
        output_schema: The Pydantic model class defining the output structure.
        **kwargs: Additional keyword arguments for extraction.

    Returns:
        T: The extracted data conforming to the output schema.
    """
    parser = PydanticOutputParser(pydantic_object=output_schema)
    format_instructions = parser.get_format_instructions()

    full_prompt = f"{prompt}\n\n{format_instructions}"
    response = await self.llm.ainvoke(full_prompt)

    return parser.parse(response.content)
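A sketch of schema-driven extraction with a hypothetical Pydantic model (Person is illustrative, not part of OntoCast):

```python
import asyncio

from pydantic import BaseModel

from ontocast.tool.llm import LLMTool


class Person(BaseModel):
    """Illustrative schema; any Pydantic model works as output_schema."""

    name: str
    role: str


async def main():
    tool = await LLMTool.acreate()
    person = await tool.extract(
        "Ada Lovelace wrote the first published computer program.",
        output_schema=Person,
    )
    print(person.name, "-", person.role)


asyncio.run(main())
```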

setup() async

Set up the language model based on the configured provider.

Raises:

| Type | Description |
| --- | --- |
| ValueError | If the provider is not supported. |

Source code in ontocast/tool/llm.py
async def setup(self):
    """Set up the language model based on the configured provider.

    Raises:
        ValueError: If the provider is not supported.
    """
    if self.provider == "openai":
        self._llm = ChatOpenAI(
            model=self.model,
            temperature=self.temperature,
        )
    elif self.provider == "ollama":
        self._llm = ChatOllama(
            model=self.model, base_url=self.base_url, temperature=self.temperature
        )
    else:
        raise ValueError(f"Unsupported provider: {self.provider}")
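Note that, as written, setup() does not forward api_key or base_url to ChatOpenAI, so that provider falls back to LangChain's defaults (notably the OPENAI_API_KEY environment variable). A sketch of configuring each provider, with placeholder key and model values:

```python
import os

from ontocast.tool.llm import LLMTool

# Placeholder key; ChatOpenAI reads this variable when no api_key is passed.
os.environ.setdefault("OPENAI_API_KEY", "sk-...")
openai_tool = LLMTool.create(provider="openai", model="gpt-4o-mini")

ollama_tool = LLMTool.create(
    provider="ollama",
    model="llama3.1",                   # placeholder model name
    base_url="http://localhost:11434",  # default Ollama endpoint
)
```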