dspy.LM

dspy.LM(model: str, model_type: Literal['chat', 'text'] = 'chat', temperature: float = 0.0, max_tokens: int = 1000, cache: bool = True, cache_in_memory: bool = True, callbacks: Optional[List[BaseCallback]] = None, num_retries: int = 8, provider=None, finetuning_model: Optional[str] = None, launch_kwargs: Optional[dict[str, Any]] = None, train_kwargs: Optional[dict[str, Any]] = None, **kwargs)

Bases: BaseLM

A language model supporting chat or text completion requests for use with DSPy modules.

Create a new language model instance for use with DSPy modules and programs.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| model | str | The model to use. This should be a string of the form "llm_provider/llm_name" supported by LiteLLM, for example "openai/gpt-4o". | required |
| model_type | Literal['chat', 'text'] | The type of the model, either "chat" or "text". | 'chat' |
| temperature | float | The sampling temperature to use when generating responses. | 0.0 |
| max_tokens | int | The maximum number of tokens to generate per response. | 1000 |
| cache | bool | Whether to cache model responses for reuse, improving performance and reducing costs. | True |
| cache_in_memory | bool | Whether to enable an additional in-memory LRU cache. | True |
| callbacks | Optional[List[BaseCallback]] | A list of callback functions to run before and after each request. | None |
| num_retries | int | The number of times to retry a request if it fails transiently due to network errors, rate limiting, etc. Requests are retried with exponential backoff. | 8 |
| provider | | The provider to use. If not specified, the provider is inferred from the model. | None |
| finetuning_model | Optional[str] | The model to finetune. For some providers, the models available for finetuning differ from the models available for inference. | None |
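A minimal usage sketch (the model name is only an example; it assumes the usual DSPy pattern of setting a default LM via `dspy.configure`):

```python
import dspy

# Construct an LM backed by a LiteLLM-style "provider/model" string.
lm = dspy.LM("openai/gpt-4o-mini", temperature=0.0, max_tokens=1000)

# Make it the default LM for DSPy modules and programs.
dspy.configure(lm=lm)

# The LM can also be called directly; it returns a list of completions.
print(lm("Say hello in one word."))
```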
Source code in dspy/clients/lm.py
def __init__(
    self,
    model: str,
    model_type: Literal["chat", "text"] = "chat",
    temperature: float = 0.0,
    max_tokens: int = 1000,
    cache: bool = True,
    cache_in_memory: bool = True,
    callbacks: Optional[List[BaseCallback]] = None,
    num_retries: int = 8,
    provider=None,
    finetuning_model: Optional[str] = None,
    launch_kwargs: Optional[dict[str, Any]] = None,
    train_kwargs: Optional[dict[str, Any]] = None,
    **kwargs,
):
    """
    Create a new language model instance for use with DSPy modules and programs.

    Args:
        model: The model to use. This should be a string of the form ``"llm_provider/llm_name"``
               supported by LiteLLM. For example, ``"openai/gpt-4o"``.
        model_type: The type of the model, either ``"chat"`` or ``"text"``.
        temperature: The sampling temperature to use when generating responses.
        max_tokens: The maximum number of tokens to generate per response.
        cache: Whether to cache the model responses for reuse to improve performance
               and reduce costs.
        cache_in_memory: Whether to enable an additional in-memory LRU cache.
        callbacks: A list of callback functions to run before and after each request.
        num_retries: The number of times to retry a request if it fails transiently due to
                     network error, rate limiting, etc. Requests are retried with exponential
                     backoff.
        provider: The provider to use. If not specified, the provider will be inferred from the model.
        finetuning_model: The model to finetune. For some providers, the models available for finetuning
            differ from the models available for inference.
    """
    # Remember to update LM.copy() if you modify the constructor!
    self.model = model
    self.model_type = model_type
    self.cache = cache
    self.cache_in_memory = cache_in_memory
    self.provider = provider or self.infer_provider()
    self.callbacks = callbacks or []
    self.history = []
    self.num_retries = num_retries
    self.finetuning_model = finetuning_model
    self.launch_kwargs = launch_kwargs or {}
    self.train_kwargs = train_kwargs or {}

    # Handle model-specific configuration for different model families
    model_family = model.split("/")[-1].lower() if "/" in model else model.lower()

    # Match pattern: o[1,3] at the start, optionally followed by -mini and anything else
    model_pattern = re.match(r"^o([13])(?:-mini)?", model_family)

    if model_pattern:
        # Handle OpenAI reasoning models (o1, o3)
        assert (
            max_tokens >= 5000 and temperature == 1.0
        ), "OpenAI's reasoning models require passing temperature=1.0 and max_tokens >= 5000 to `dspy.LM(...)`"
        self.kwargs = dict(temperature=temperature, max_completion_tokens=max_tokens, **kwargs)
    else:
        self.kwargs = dict(temperature=temperature, max_tokens=max_tokens, **kwargs)
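As the assertion above shows, OpenAI reasoning models (the o1/o3 families) must be constructed with temperature=1.0 and max_tokens >= 5000; a brief sketch:

```python
# Reasoning models (o1/o3): the constructor requires temperature=1.0 and
# max_tokens >= 5000, and forwards the budget as `max_completion_tokens`.
reasoning_lm = dspy.LM("openai/o1-mini", temperature=1.0, max_tokens=5000)
```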

Functions

dump_state()

Source code in dspy/clients/lm.py
def dump_state(self):
    state_keys = [
        "model",
        "model_type",
        "cache",
        "cache_in_memory",
        "num_retries",
        "finetuning_model",
        "launch_kwargs",
        "train_kwargs",
    ]
    return {key: getattr(self, key) for key in state_keys} | self.kwargs
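For example, the returned dictionary merges the listed attributes with the generation kwargs (the values below reflect the constructor defaults; a sketch, not exact output formatting):

```python
lm = dspy.LM("openai/gpt-4o-mini")
state = lm.dump_state()
# {'model': 'openai/gpt-4o-mini', 'model_type': 'chat', 'cache': True,
#  'cache_in_memory': True, 'num_retries': 8, 'finetuning_model': None,
#  'launch_kwargs': {}, 'train_kwargs': {},
#  'temperature': 0.0, 'max_tokens': 1000}
```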

finetune(train_data: List[Dict[str, Any]], train_data_format: Optional[TrainDataFormat], train_kwargs: Optional[Dict[str, Any]] = None) -> TrainingJob

Source code in dspy/clients/lm.py
def finetune(
    self,
    train_data: List[Dict[str, Any]],
    train_data_format: Optional[TrainDataFormat],
    train_kwargs: Optional[Dict[str, Any]] = None,
) -> TrainingJob:
    from dspy import settings as settings

    err = "Fine-tuning is an experimental feature."
    err += " Set `dspy.settings.experimental` to `True` to use it."
    assert settings.experimental, err

    err = f"Provider {self.provider} does not support fine-tuning."
    assert self.provider.finetunable, err

    def thread_function_wrapper():
        return self._run_finetune_job(job)

    thread = threading.Thread(target=thread_function_wrapper)
    train_kwargs = train_kwargs or self.train_kwargs
    model_to_finetune = self.finetuning_model or self.model
    job = self.provider.TrainingJob(
        thread=thread,
        model=model_to_finetune,
        train_data=train_data,
        train_data_format=train_data_format,
        train_kwargs=train_kwargs,
    )
    thread.start()

    return job
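A hedged sketch of starting a fine-tuning job. The model name, the training-row schema, and leaving train_data_format as None are illustrative assumptions; the exact values are provider-specific:

```python
import dspy

# The assertion above checks dspy.settings.experimental.
dspy.settings.configure(experimental=True)

lm = dspy.LM("openai/gpt-4o-mini-2024-07-18")  # hypothetical finetunable model

# Hypothetical chat-style training rows; the required schema depends on the
# provider and the train_data_format you pass.
train_data = [
    {"messages": [
        {"role": "user", "content": "What is 2 + 2?"},
        {"role": "assistant", "content": "4"},
    ]},
]

job = lm.finetune(train_data=train_data, train_data_format=None)
# The returned TrainingJob runs on a background thread started by finetune().
```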

infer_provider() -> Provider

Source code in dspy/clients/lm.py
def infer_provider(self) -> Provider:
    if OpenAIProvider.is_provider_model(self.model):
        return OpenAIProvider()
    return Provider()
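For instance, assuming `OpenAIProvider.is_provider_model` recognizes the "openai/" prefix, the resolution behaves roughly like this:

```python
dspy.LM("openai/gpt-4o").provider       # -> OpenAIProvider()
dspy.LM("some_other/llm-name").provider  # -> generic Provider()
```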

kill(launch_kwargs: Optional[Dict[str, Any]] = None)

Source code in dspy/clients/lm.py
def kill(self, launch_kwargs: Optional[Dict[str, Any]] = None):
    self.provider.kill(self, launch_kwargs)

launch(launch_kwargs: Optional[Dict[str, Any]] = None)

Source code in dspy/clients/lm.py
def launch(self, launch_kwargs: Optional[Dict[str, Any]] = None):
    self.provider.launch(self, launch_kwargs)
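Both methods simply delegate to the configured provider: providers that manage a local server can use them to start and stop it, while hosted API providers typically treat them as no-ops. A minimal sketch:

```python
lm = dspy.LM("openai/gpt-4o-mini")
lm.launch()  # delegates to self.provider.launch(self, launch_kwargs)
# ... run your DSPy program ...
lm.kill()    # delegates to self.provider.kill(self, launch_kwargs)
```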