dspy.Evaluate

dspy.Evaluate(*, devset: List[dspy.Example], metric: Optional[Callable] = None, num_threads: Optional[int] = None, display_progress: bool = False, display_table: Union[bool, int] = False, max_errors: int = 5, return_all_scores: bool = False, return_outputs: bool = False, provide_traceback: Optional[bool] = None, failure_score: float = 0.0, **kwargs)

DSPy Evaluate class.

This class is used to evaluate the performance of a DSPy program. Users need to provide an evaluation dataset and a metric function in order to use this class. It supports parallel evaluation over the provided dataset.
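
For example, here is a minimal sketch of building and running an evaluator. The toy `devset`, the `exact_match` metric, and the `qa_program` module below are illustrative placeholders, not part of the DSPy API:

```python
import dspy

# An LM must be configured before running the program, e.g.:
# dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))

# A toy evaluation set; each dspy.Example marks which fields are inputs.
devset = [
    dspy.Example(question="What is the capital of France?", answer="Paris").with_inputs("question"),
    dspy.Example(question="What is 2 + 2?", answer="4").with_inputs("question"),
]

# A simple metric comparing the predicted answer to the gold answer.
def exact_match(example, prediction, trace=None):
    return example.answer.strip().lower() == prediction.answer.strip().lower()

# Any DSPy module can be evaluated; a single Predict module is used here.
qa_program = dspy.Predict("question -> answer")

evaluator = dspy.Evaluate(devset=devset, metric=exact_match)
score = evaluator(qa_program)  # returns a float percentage, e.g. 50.0
```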

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `devset` | `List[dspy.Example]` | The evaluation dataset. | *required* |
| `metric` | `Callable` | The metric function to use for evaluation. | `None` |
| `num_threads` | `Optional[int]` | The number of threads to use for parallel evaluation. | `None` |
| `display_progress` | `bool` | Whether to display progress during evaluation. | `False` |
| `display_table` | `Union[bool, int]` | Whether to display the evaluation results in a table. If a number is passed, the results are truncated to that many rows before being displayed. | `False` |
| `max_errors` | `int` | The maximum number of errors to allow before stopping evaluation. | `5` |
| `return_all_scores` | `bool` | Whether to return scores for every data record in `devset`. | `False` |
| `return_outputs` | `bool` | Whether to return the DSPy program's outputs for every record in `devset`. | `False` |
| `provide_traceback` | `Optional[bool]` | Whether to provide traceback information during evaluation. | `None` |
| `failure_score` | `float` | The default score to use if evaluation fails due to an exception. | `0.0` |
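
A sketch combining the optional constructor arguments, reusing the placeholder `devset` and `exact_match` from the example above:

```python
evaluator = dspy.Evaluate(
    devset=devset,
    metric=exact_match,
    num_threads=8,          # evaluate up to 8 examples in parallel
    display_progress=True,  # show a progress bar
    display_table=5,        # show only the first 5 rows of the results table
    max_errors=10,          # stop evaluating after 10 raised exceptions
    failure_score=0.0,      # score recorded for examples whose evaluation raises
)
```
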
Source code in dspy/evaluate/evaluate.py
```python
def __init__(
    self,
    *,
    devset: List["dspy.Example"],
    metric: Optional[Callable] = None,
    num_threads: Optional[int] = None,
    display_progress: bool = False,
    display_table: Union[bool, int] = False,
    max_errors: int = 5,
    return_all_scores: bool = False,
    return_outputs: bool = False,
    provide_traceback: Optional[bool] = None,
    failure_score: float = 0.0,
    **kwargs,
):
    """
    Args:
        devset (List[dspy.Example]): the evaluation dataset.
        metric (Callable): The metric function to use for evaluation.
        num_threads (Optional[int]): The number of threads to use for parallel evaluation.
        display_progress (bool): Whether to display progress during evaluation.
        display_table (Union[bool, int]): Whether to display the evaluation results in a table. 
            If a number is passed, the evaluation results will be truncated to that number before displayed. 
        max_errors (int): The maximum number of errors to allow before stopping evaluation.
        return_all_scores (bool): Whether to return scores for every data record in `devset`.
        return_outputs (bool): Whether to return the dspy program's outputs for every data in `devset`.
        provide_traceback (Optional[bool]): Whether to provide traceback information during evaluation.
        failure_score (float): The default score to use if evaluation fails due to an exception.
    """
    self.devset = devset
    self.metric = metric
    self.num_threads = num_threads
    self.display_progress = display_progress
    self.display_table = display_table
    self.max_errors = max_errors
    self.return_all_scores = return_all_scores
    self.return_outputs = return_outputs
    self.provide_traceback = provide_traceback
    self.failure_score = failure_score
```

Functions

__call__(program: dspy.Module, metric: Optional[Callable] = None, devset: Optional[List[dspy.Example]] = None, num_threads: Optional[int] = None, display_progress: Optional[bool] = None, display_table: Optional[Union[bool, int]] = None, return_all_scores: Optional[bool] = None, return_outputs: Optional[bool] = None, callback_metadata: Optional[dict[str, Any]] = None)

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `program` | `dspy.Module` | The DSPy program to evaluate. | *required* |
| `metric` | `Callable` | The metric function to use for evaluation. If not provided, `self.metric` is used. | `None` |
| `devset` | `List[dspy.Example]` | The evaluation dataset. If not provided, `self.devset` is used. | `None` |
| `num_threads` | `Optional[int]` | The number of threads to use for parallel evaluation. If not provided, `self.num_threads` is used. | `None` |
| `display_progress` | `bool` | Whether to display progress during evaluation. If not provided, `self.display_progress` is used. | `None` |
| `display_table` | `Union[bool, int]` | Whether to display the evaluation results in a table. If not provided, `self.display_table` is used. If a number is passed, the results are truncated to that many rows before being displayed. | `None` |
| `return_all_scores` | `bool` | Whether to return scores for every data record in `devset`. If not provided, `self.return_all_scores` is used. | `None` |
| `return_outputs` | `bool` | Whether to return the DSPy program's outputs for every record in `devset`. If not provided, `self.return_outputs` is used. | `None` |
| `callback_metadata` | `dict` | Metadata to be used for evaluate callback handlers. | `None` |
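
Each of these arguments overrides the value stored on the instance for a single call; a sketch using the placeholder `evaluator` and `qa_program` from above:

```python
# Per-call overrides take precedence over the values passed to the constructor.
score = evaluator(
    qa_program,
    num_threads=1,          # run this evaluation serially
    display_progress=False,
    display_table=True,     # show the full results table for this call
)
```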

Returns:

The evaluation results are returned in different formats based on the flags:

- Base return: a float percentage score (e.g., 67.30) representing overall performance.
- With `return_all_scores=True`: returns `(overall_score, individual_scores)`, where `individual_scores` is a list of float scores, one per example in `devset`.
- With `return_outputs=True`: returns `(overall_score, result_triples)`, where `result_triples` is a list of `(example, prediction, score)` tuples, one per example in `devset`.
- With both flags set to `True`: returns `(overall_score, result_triples, individual_scores)`.
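
A sketch of unpacking each return shape, again using the placeholder names from above:

```python
# Base return: a single float percentage.
overall = evaluator(qa_program)

# With return_all_scores=True: the overall score plus one score per example.
overall, scores = evaluator(qa_program, return_all_scores=True)

# With return_outputs=True: the overall score plus (example, prediction, score) triples.
overall, triples = evaluator(qa_program, return_outputs=True)

# With both flags: overall score, triples, and the per-example scores.
overall, triples, scores = evaluator(qa_program, return_all_scores=True, return_outputs=True)
```
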
Source code in dspy/evaluate/evaluate.py
```python
@with_callbacks
def __call__(
    self,
    program: "dspy.Module",
    metric: Optional[Callable] = None,
    devset: Optional[List["dspy.Example"]] = None,
    num_threads: Optional[int] = None,
    display_progress: Optional[bool] = None,
    display_table: Optional[Union[bool, int]] = None,
    return_all_scores: Optional[bool] = None,
    return_outputs: Optional[bool] = None,
    callback_metadata: Optional[dict[str, Any]] = None,
):
    """
    Args:
        program (dspy.Module): The DSPy program to evaluate.
        metric (Callable): The metric function to use for evaluation. if not provided, use `self.metric`.
        devset (List[dspy.Example]): the evaluation dataset. if not provided, use `self.devset`.
        num_threads (Optional[int]): The number of threads to use for parallel evaluation. if not provided, use
            `self.num_threads`.
        display_progress (bool): Whether to display progress during evaluation. if not provided, use
            `self.display_progress`.
        display_table (Union[bool, int]): Whether to display the evaluation results in a table. if not provided, use
            `self.display_table`. If a number is passed, the evaluation results will be truncated to that number before displayed.
        return_all_scores (bool): Whether to return scores for every data record in `devset`. if not provided,
            use `self.return_all_scores`.
        return_outputs (bool): Whether to return the dspy program's outputs for every data in `devset`. if not
            provided, use `self.return_outputs`.
        callback_metadata (dict): Metadata to be used for evaluate callback handlers.

    Returns:
        The evaluation results are returned in different formats based on the flags:

        - Base return: A float percentage score (e.g., 67.30) representing overall performance

        - With `return_all_scores=True`:
            Returns (overall_score, individual_scores) where individual_scores is a list of 
            float scores for each example in devset

        - With `return_outputs=True`:
            Returns (overall_score, result_triples) where result_triples is a list of 
            (example, prediction, score) tuples for each example in devset

        - With both flags=True:
            Returns (overall_score, result_triples, individual_scores)

    """
    metric = metric if metric is not None else self.metric
    devset = devset if devset is not None else self.devset
    num_threads = num_threads if num_threads is not None else self.num_threads
    display_progress = display_progress if display_progress is not None else self.display_progress
    display_table = display_table if display_table is not None else self.display_table
    return_all_scores = return_all_scores if return_all_scores is not None else self.return_all_scores
    return_outputs = return_outputs if return_outputs is not None else self.return_outputs

    if callback_metadata:
        logger.debug(f"Evaluate is called with callback metadata: {callback_metadata}")

    tqdm.tqdm._instances.clear()

    executor = ParallelExecutor(
        num_threads=num_threads,
        disable_progress_bar=not display_progress,
        max_errors=self.max_errors,
        provide_traceback=self.provide_traceback,
        compare_results=True,
    )

    def process_item(example):
        prediction = program(**example.inputs())
        score = metric(example, prediction)

        # Increment assert and suggest failures to program's attributes
        if hasattr(program, "_assert_failures"):
            program._assert_failures += dspy.settings.get("assert_failures")
        if hasattr(program, "_suggest_failures"):
            program._suggest_failures += dspy.settings.get("suggest_failures")

        return prediction, score

    results = executor.execute(process_item, devset)
    assert len(devset) == len(results)

    results = [((dspy.Prediction(), self.failure_score) if r is None else r) for r in results]
    results = [(example, prediction, score) for example, (prediction, score) in zip(devset, results)]
    ncorrect, ntotal = sum(score for *_, score in results), len(devset)

    logger.info(f"Average Metric: {ncorrect} / {ntotal} ({round(100 * ncorrect / ntotal, 1)}%)")

    if display_table:
        # Rename the 'correct' column to the name of the metric object
        metric_name = metric.__name__ if isinstance(metric, types.FunctionType) else metric.__class__.__name__
        # Construct a pandas DataFrame from the results
        result_df = self._construct_result_table(results, metric_name)

        self._display_result_table(result_df, display_table, metric_name)

    if return_all_scores and return_outputs:
        return round(100 * ncorrect / ntotal, 2), results, [score for *_, score in results]
    if return_all_scores:
        return round(100 * ncorrect / ntotal, 2), [score for *_, score in results]
    if return_outputs:
        return round(100 * ncorrect / ntotal, 2), results

    return round(100 * ncorrect / ntotal, 2)
```