Skip to content

PythonTask

evotoolkit.task.python_task.python_task.PythonTask

Bases: BaseTask

Abstract base class for Python-based evolutionary optimization tasks.

This class unifies PythonEvaluator and PythonTaskConfig functionality, providing a common base for Python code evaluation tasks.

Source code in src/evotoolkit/task/python_task/python_task.py
class PythonTask(BaseTask):
    """
    Abstract base class for Python-based evolutionary optimization tasks.

    Unifies the responsibilities of PythonEvaluator and PythonTaskConfig,
    giving Python code-evaluation tasks a single common base.
    """

    def __init__(self, data, timeout_seconds: float = 30.0):
        """
        Set up the Python task around the given input data.

        Args:
            data (Any): Task-specific input data.
            timeout_seconds (float): Execution timeout for code evaluation.
        """
        # Record the evaluation timeout, then let BaseTask finish initialization.
        self.timeout_seconds = timeout_seconds
        super().__init__(data)

    def get_task_type(self) -> str:
        """Return the task-type identifier string, 'Python'."""
        return "Python"

    def evaluate_code(self, candidate_code: str) -> EvaluationResult:
        """
        Evaluate Python code behind a uniform error-handling wrapper.

        Delegates to _evaluate_code_impl; any exception raised there is
        converted into an invalid EvaluationResult carrying the error
        message and full traceback.

        Args:
            candidate_code: Python code to evaluate

        Returns:
            EvaluationResult: Result of the evaluation
        """
        try:
            outcome = self._evaluate_code_impl(candidate_code)
        except Exception as exc:
            failure_details = {
                "error": f"Evaluation error: {str(exc)}",
                "traceback": traceback.format_exc(),
            }
            return EvaluationResult(
                valid=False,
                score=float("-inf"),
                additional_info=failure_details,
            )
        return outcome

    @abstractmethod
    def _evaluate_code_impl(self, candidate_code: str) -> EvaluationResult:
        """
        Hook for subclass-specific code evaluation logic.

        Called by evaluate_code inside its try/except wrapper; subclasses
        must provide the actual evaluation here.

        Args:
            candidate_code: Python code to evaluate

        Returns:
            EvaluationResult: Result of the evaluation
        """

__init__

__init__(data, timeout_seconds: float = 30.0)

Initialize the Python task with input data.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `data` | `Any` | Task-specific input data. | *required* |
| `timeout_seconds` | `float` | Execution timeout for code evaluation. | `30.0` |
Source code in src/evotoolkit/task/python_task/python_task.py
def __init__(self, data, timeout_seconds: float = 30.0):
    """
    Set up the Python task around the given input data.

    Args:
        data (Any): Task-specific input data.
        timeout_seconds (float): Execution timeout for code evaluation.
    """
    # Record the evaluation timeout, then let BaseTask finish initialization.
    self.timeout_seconds = timeout_seconds
    super().__init__(data)

get_task_type

get_task_type() -> str

Get task type as 'Python'.

Source code in src/evotoolkit/task/python_task/python_task.py
def get_task_type(self) -> str:
    """Return the task-type identifier string, 'Python'."""
    return "Python"

evaluate_code

evaluate_code(candidate_code: str) -> EvaluationResult

Evaluate Python code.

Default implementation provides basic error handling framework. Subclasses should override this method with specific evaluation logic.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `candidate_code` | `str` | Python code to evaluate | *required* |

Returns:

| Name | Type | Description |
|------|------|-------------|
| `EvaluationResult` | `EvaluationResult` | Result of the evaluation |

Source code in src/evotoolkit/task/python_task/python_task.py
def evaluate_code(self, candidate_code: str) -> EvaluationResult:
    """
    Evaluate Python code behind a uniform error-handling wrapper.

    Delegates to _evaluate_code_impl; any exception raised there is
    converted into an invalid EvaluationResult carrying the error
    message and full traceback.

    Args:
        candidate_code: Python code to evaluate

    Returns:
        EvaluationResult: Result of the evaluation
    """
    try:
        outcome = self._evaluate_code_impl(candidate_code)
    except Exception as exc:
        failure_details = {
            "error": f"Evaluation error: {str(exc)}",
            "traceback": traceback.format_exc(),
        }
        return EvaluationResult(
            valid=False,
            score=float("-inf"),
            additional_info=failure_details,
        )
    return outcome