dspy.LM

dspy.LM(model: str, model_type: Literal['chat', 'text'] = 'chat', temperature: float = 0.0, max_tokens: int = 1000, cache: bool = True, cache_in_memory: bool = True, callbacks: Optional[List[BaseCallback]] = None, num_retries: int = 3, provider=None, finetuning_model: Optional[str] = None, launch_kwargs: Optional[dict[str, Any]] = None, train_kwargs: Optional[dict[str, Any]] = None, **kwargs)

Bases: BaseLM

A language model supporting chat or text completion requests for use with DSPy modules.

Create a new language model instance for use with DSPy modules and programs.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| model | str | The model to use. This should be a string of the form "llm_provider/llm_name" supported by LiteLLM. For example, "openai/gpt-4o". | required |
| model_type | Literal['chat', 'text'] | The type of the model, either "chat" or "text". | 'chat' |
| temperature | float | The sampling temperature to use when generating responses. | 0.0 |
| max_tokens | int | The maximum number of tokens to generate per response. | 1000 |
| cache | bool | Whether to cache model responses for reuse, improving performance and reducing costs. | True |
| cache_in_memory | bool (deprecated) | Whether to additionally cache responses in an in-memory LRU cache. | True |
| callbacks | Optional[List[BaseCallback]] | A list of callback functions to run before and after each request. | None |
| num_retries | int | The number of times to retry a request that fails transiently (network errors, rate limiting, etc.). Retries use exponential backoff. | 3 |
| provider | | The provider to use. If not specified, the provider is inferred from the model. | None |
| finetuning_model | Optional[str] | The model to fine-tune. For some providers, the models available for fine-tuning differ from those available for inference. | None |
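
A minimal construction sketch (the model name, temperature, and max_tokens are illustrative, and an OPENAI_API_KEY environment variable is assumed for the "openai/" route):

import dspy

# Construct an LM backed by LiteLLM's "openai/gpt-4o" route.
lm = dspy.LM("openai/gpt-4o", temperature=0.0, max_tokens=1000)

# Make it the default LM for DSPy modules and programs.
dspy.configure(lm=lm)
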
Source code in dspy/clients/lm.py
def __init__(
    self,
    model: str,
    model_type: Literal["chat", "text"] = "chat",
    temperature: float = 0.0,
    max_tokens: int = 1000,
    cache: bool = True,
    cache_in_memory: bool = True,
    callbacks: Optional[List[BaseCallback]] = None,
    num_retries: int = 3,
    provider=None,
    finetuning_model: Optional[str] = None,
    launch_kwargs: Optional[dict[str, Any]] = None,
    train_kwargs: Optional[dict[str, Any]] = None,
    **kwargs,
):
    """
    Create a new language model instance for use with DSPy modules and programs.

    Args:
        model: The model to use. This should be a string of the form ``"llm_provider/llm_name"``
               supported by LiteLLM. For example, ``"openai/gpt-4o"``.
        model_type: The type of the model, either ``"chat"`` or ``"text"``.
        temperature: The sampling temperature to use when generating responses.
        max_tokens: The maximum number of tokens to generate per response.
        cache: Whether to cache the model responses for reuse to improve performance
               and reduce costs.
        cache_in_memory (deprecated): To enable additional caching with LRU in memory.
        callbacks: A list of callback functions to run before and after each request.
        num_retries: The number of times to retry a request if it fails transiently due to
                     network error, rate limiting, etc. Requests are retried with exponential
                     backoff.
        provider: The provider to use. If not specified, the provider will be inferred from the model.
        finetuning_model: The model to fine-tune. For some providers, the models available for fine-tuning
            are different from the models available for inference.
    """
    # Remember to update LM.copy() if you modify the constructor!
    self.model = model
    self.model_type = model_type
    self.cache = cache
    self.cache_in_memory = cache_in_memory
    self.provider = provider or self.infer_provider()
    self.callbacks = callbacks or []
    self.history = []
    self.num_retries = num_retries
    self.finetuning_model = finetuning_model
    self.launch_kwargs = launch_kwargs or {}
    self.train_kwargs = train_kwargs or {}

    # Handle model-specific configuration for different model families
    model_family = model.split("/")[-1].lower() if "/" in model else model.lower()

    # Match pattern: o[1,3,4] at the start, optionally followed by -mini and anything else
    model_pattern = re.match(r"^o([134])(?:-mini)?", model_family)

    if model_pattern:
        # Handle OpenAI reasoning models (o1, o3, o4)
        assert (
            max_tokens >= 20_000 and temperature == 1.0
        ), "OpenAI's reasoning models require passing temperature=1.0 and max_tokens >= 20_000 to `dspy.LM(...)`"
        self.kwargs = dict(temperature=temperature, max_completion_tokens=max_tokens, **kwargs)
    else:
        self.kwargs = dict(temperature=temperature, max_tokens=max_tokens, **kwargs)

Functions

__call__(prompt=None, messages=None, **kwargs)

Source code in dspy/clients/base_lm.py
@with_callbacks
def __call__(self, prompt=None, messages=None, **kwargs):
    response = self.forward(prompt=prompt, messages=messages, **kwargs)
    outputs = self._process_lm_response(response, prompt, messages, **kwargs)

    return outputs
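
A quick sketch of calling the LM directly (assuming the lm instance constructed earlier): the call accepts either a prompt string or a messages list and returns the processed outputs, by default a list of completion strings.

# Prompt-style call.
outputs = lm("Say hello in one word.")
print(outputs)  # e.g. a list with one completion string

# Chat-style call with an explicit messages list.
outputs = lm(messages=[{"role": "user", "content": "Say hello in one word."}])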

acall(prompt=None, messages=None, **kwargs) async

Source code in dspy/clients/base_lm.py
@with_callbacks
async def acall(self, prompt=None, messages=None, **kwargs):
    response = await self.aforward(prompt=prompt, messages=messages, **kwargs)
    outputs = self._process_lm_response(response, prompt, messages, **kwargs)
    return outputs
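
The asynchronous counterpart can be awaited from an event loop; a sketch assuming the same lm instance:

import asyncio

async def main():
    # Await the async variant of __call__; outputs match the synchronous form.
    outputs = await lm.acall("Say hello in one word.")
    print(outputs)

asyncio.run(main())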

aforward(prompt=None, messages=None, **kwargs) async

Source code in dspy/clients/lm.py
async def aforward(self, prompt=None, messages=None, **kwargs):
    # Build the request.
    cache = kwargs.pop("cache", self.cache)
    enable_memory_cache = kwargs.pop("cache_in_memory", self.cache_in_memory)

    messages = messages or [{"role": "user", "content": prompt}]
    kwargs = {**self.kwargs, **kwargs}

    completion = alitellm_completion if self.model_type == "chat" else alitellm_text_completion
    completion, litellm_cache_args = self._get_cached_completion_fn(completion, cache, enable_memory_cache)

    results = await completion(
        request=dict(model=self.model, messages=messages, **kwargs),
        num_retries=self.num_retries,
        cache=litellm_cache_args,
    )

    if not getattr(results, "cache_hit", False) and dspy.settings.usage_tracker and hasattr(results, "usage"):
        settings.usage_tracker.add_usage(self.model, dict(results.usage))
    return results
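
As the source above shows, per-call "cache" and "cache_in_memory" overrides are popped from the kwargs before the remaining options are merged over the constructor defaults and sent to LiteLLM. A hedged sketch, inside an async context and assuming a chat-type model:

async def fetch_uncached():
    # Bypass both caches for this single request.
    response = await lm.aforward(prompt="Say hello.", cache=False, cache_in_memory=False)
    # aforward returns the raw LiteLLM response, not the processed outputs.
    return response.choices[0].message.content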

copy(**kwargs)

Returns a copy of the language model with possibly updated parameters.

Source code in dspy/clients/base_lm.py
def copy(self, **kwargs):
    """Returns a copy of the language model with possibly updated parameters."""

    import copy

    new_instance = copy.deepcopy(self)
    new_instance.history = []

    for key, value in kwargs.items():
        if hasattr(self, key):
            setattr(new_instance, key, value)
        if (key in self.kwargs) or (not hasattr(self, key)):
            new_instance.kwargs[key] = value

    return new_instance
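
For example (a sketch assuming the lm instance from above): existing attributes are updated on the copy, while unknown keys fall through into the request kwargs, as the source above shows.

# Derive a higher-temperature variant without mutating the original.
creative_lm = lm.copy(temperature=0.9)

# "top_p" is not set at construction time, so it is added to the request kwargs.
nucleus_lm = lm.copy(top_p=0.95)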

dump_state()

Source code in dspy/clients/lm.py
def dump_state(self):
    state_keys = [
        "model",
        "model_type",
        "cache",
        "cache_in_memory",
        "num_retries",
        "finetuning_model",
        "launch_kwargs",
        "train_kwargs",
    ]
    return {key: getattr(self, key) for key in state_keys} | self.kwargs
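
The returned dictionary merges the listed state fields with the request kwargs; a sketch of what it might contain for the example LM above:

state = lm.dump_state()
# Roughly: {"model": "openai/gpt-4o", "model_type": "chat", "cache": True,
#           "cache_in_memory": True, "num_retries": 3, "finetuning_model": None,
#           "launch_kwargs": {}, "train_kwargs": {},
#           "temperature": 0.0, "max_tokens": 1000}
print(state["model"])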

finetune(train_data: List[Dict[str, Any]], train_data_format: Optional[TrainDataFormat], train_kwargs: Optional[Dict[str, Any]] = None) -> TrainingJob

Source code in dspy/clients/lm.py
def finetune(
    self,
    train_data: List[Dict[str, Any]],
    train_data_format: Optional[TrainDataFormat],
    train_kwargs: Optional[Dict[str, Any]] = None,
) -> TrainingJob:
    from dspy import settings as settings

    err = "Fine-tuning is an experimental feature."
    err += " Set `dspy.settings.experimental` to `True` to use it."
    assert settings.experimental, err

    err = f"Provider {self.provider} does not support fine-tuning."
    assert self.provider.finetunable, err

    def thread_function_wrapper():
        return self._run_finetune_job(job)

    thread = threading.Thread(target=thread_function_wrapper)
    train_kwargs = train_kwargs or self.train_kwargs
    model_to_finetune = self.finetuning_model or self.model
    job = self.provider.TrainingJob(
        thread=thread,
        model=model_to_finetune,
        train_data=train_data,
        train_data_format=train_data_format,
        train_kwargs=train_kwargs,
    )
    thread.start()

    return job
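
A hedged sketch only: fine-tuning requires dspy.settings.experimental to be enabled and a provider that reports finetunable=True; the training rows and data format below are illustrative, and the accepted schema depends on the provider.

import dspy

dspy.settings.experimental = True  # fine-tuning is gated behind this flag

# Hypothetical chat-style training rows; real schemas are provider-specific.
train_data = [
    {"messages": [
        {"role": "user", "content": "What is 2 + 2?"},
        {"role": "assistant", "content": "4"},
    ]},
]

# Runs in a background thread and returns a TrainingJob handle.
job = lm.finetune(train_data=train_data, train_data_format=None)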

forward(prompt=None, messages=None, **kwargs)

Source code in dspy/clients/lm.py
def forward(self, prompt=None, messages=None, **kwargs):
    # Build the request.
    cache = kwargs.pop("cache", self.cache)
    enable_memory_cache = kwargs.pop("cache_in_memory", self.cache_in_memory)

    messages = messages or [{"role": "user", "content": prompt}]
    kwargs = {**self.kwargs, **kwargs}

    completion = litellm_completion if self.model_type == "chat" else litellm_text_completion
    completion, litellm_cache_args = self._get_cached_completion_fn(completion, cache, enable_memory_cache)

    results = completion(
        request=dict(model=self.model, messages=messages, **kwargs),
        num_retries=self.num_retries,
        cache=litellm_cache_args,
    )

    if not getattr(results, "cache_hit", False) and dspy.settings.usage_tracker and hasattr(results, "usage"):
        settings.usage_tracker.add_usage(self.model, dict(results.usage))
    return results
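
Per-call keyword arguments are merged over the defaults captured at construction time, so individual requests can override settings such as temperature; a sketch assuming the lm instance from above:

# Override the sampling temperature for this request only.
response = lm.forward(prompt="Write a one-line haiku.", temperature=0.7)

# Like aforward, this returns the raw LiteLLM response rather than plain strings.
print(response.choices[0].message.content)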

infer_provider() -> Provider

Source code in dspy/clients/lm.py
def infer_provider(self) -> Provider:
    if OpenAIProvider.is_provider_model(self.model):
        return OpenAIProvider()
    return Provider()

inspect_history(n: int = 1)

Source code in dspy/clients/base_lm.py
def inspect_history(self, n: int = 1):
    _inspect_history(self.history, n)
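
For example, after a call has populated the per-LM history:

lm("Say hello in one word.")

# Pretty-print the most recent prompt/response exchange recorded by this LM.
lm.inspect_history(n=1)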

kill(launch_kwargs: Optional[Dict[str, Any]] = None)

Source code in dspy/clients/lm.py
def kill(self, launch_kwargs: Optional[Dict[str, Any]] = None):
    self.provider.kill(self, launch_kwargs)

launch(launch_kwargs: Optional[Dict[str, Any]] = None)

Source code in dspy/clients/lm.py
def launch(self, launch_kwargs: Optional[Dict[str, Any]] = None):
    self.provider.launch(self, launch_kwargs)
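
Both launch() and kill() delegate to the configured provider; for hosted API providers they are typically no-ops, while providers that manage a local deployment use them to start and stop the model server. A hedged sketch:

# Ask the provider to bring up the model (no-op for most hosted providers).
lm.launch()

outputs = lm("Say hello in one word.")

# Ask the provider to tear the deployment down again.
lm.kill()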

update_global_history(entry)

Source code in dspy/clients/base_lm.py
def update_global_history(self, entry):
    if settings.disable_history:
        return

    if len(GLOBAL_HISTORY) >= MAX_HISTORY_SIZE:
        GLOBAL_HISTORY.pop(0)

    GLOBAL_HISTORY.append(entry)