8 changes: 3 additions & 5 deletions openevolve/cli.py
@@ -78,12 +78,11 @@ async def main_async() -> int:
print(f"Error: Evaluation file '{args.evaluation_file}' not found")
return 1

# Load base config from file or defaults
config = load_config(args.config)

# Create config object with command-line overrides
config = None
if args.api_base or args.primary_model or args.secondary_model:
# Load base config from file or defaults
config = load_config(args.config)

# Apply command-line overrides
if args.api_base:
config.llm.api_base = args.api_base
@@ -110,7 +109,6 @@ async def main_async() -> int:
initial_program_path=args.initial_program,
evaluation_file=args.evaluation_file,
config=config,
config_path=args.config if config is None else None,
output_dir=args.output,
)

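The cli.py change means main_async now always resolves a Config before constructing the controller: the base config is loaded from the file (or defaults) unconditionally, command-line flags only override the fields they set, and the config_path= fallback disappears from the controller call. A minimal sketch of the resulting flow, assuming the OpenEvolve controller class name and the usual argparse wiring, neither of which appears in this hunk:

    config = load_config(args.config)  # base config from file, or defaults if none given

    # CLI flags only override the fields they explicitly set
    if args.api_base:
        config.llm.api_base = args.api_base
    if args.primary_model:
        config.llm.primary_model = args.primary_model  # field name assumed from LLMConfig

    controller = OpenEvolve(                    # class name assumed, not shown in this hunk
        initial_program_path=args.initial_program,
        evaluation_file=args.evaluation_file,
        config=config,                          # always a fully resolved Config
        output_dir=args.output,                 # no config_path= anymore
    )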
104 changes: 17 additions & 87 deletions openevolve/config.py
@@ -3,7 +3,7 @@
"""

import os
from dataclasses import dataclass, field
from dataclasses import asdict, dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union

@@ -41,7 +41,7 @@ class LLMModelConfig:

# Reproducibility
random_seed: Optional[int] = None

# Reasoning parameters
reasoning_effort: Optional[str] = None

@@ -75,7 +75,7 @@ class LLMConfig(LLMModelConfig):
primary_model_weight: float = None
secondary_model: str = None
secondary_model_weight: float = None

# Reasoning parameters (inherited from LLMModelConfig but can be overridden)
reasoning_effort: Optional[str] = None

@@ -146,7 +146,7 @@ def rebuild_models(self) -> None:
# Clear existing models lists
self.models = []
self.evaluator_models = []

# Re-run model generation logic from __post_init__
if self.primary_model:
# Create primary model
@@ -205,6 +205,7 @@ class PromptConfig:
template_variations: Dict[str, List[str]] = field(default_factory=dict)

# Meta-prompting
# Note: meta-prompting features not implemented
use_meta_prompting: bool = False
meta_prompt_weight: float = 0.1

@@ -254,6 +255,7 @@ class DatabaseConfig:
elite_selection_ratio: float = 0.1
exploration_ratio: float = 0.2
exploitation_ratio: float = 0.7
# Note: diversity_metric fixed to "edit_distance"
diversity_metric: str = "edit_distance" # Options: "edit_distance", "feature_based"

# Feature map dimensions for MAP-Elites
Expand Down Expand Up @@ -291,6 +293,7 @@ class DatabaseConfig:
embedding_model: Optional[str] = None
similarity_threshold: float = 0.99


@dataclass
class EvaluatorConfig:
"""Configuration for program evaluation"""
@@ -300,6 +303,7 @@ class EvaluatorConfig:
max_retries: int = 3

# Resource limits for evaluation
# Note: resource limits not implemented
memory_limit_mb: Optional[int] = None
cpu_limit: Optional[float] = None

@@ -309,6 +313,7 @@

# Parallel evaluation
parallel_evaluations: int = 1
# Note: distributed evaluation not implemented
distributed: bool = False

# LLM-based feedback
@@ -323,7 +328,7 @@ class EvaluatorConfig:
@dataclass
class EvolutionTraceConfig:
"""Configuration for evolution trace logging"""

enabled: bool = False
format: str = "jsonl" # Options: "jsonl", "json", "hdf5"
include_code: bool = False
@@ -362,6 +367,9 @@ class Config:
convergence_threshold: float = 0.001
early_stopping_metric: str = "combined_score"

# Parallel controller settings
max_tasks_per_child: Optional[int] = None

@classmethod
def from_yaml(cls, path: Union[str, Path]) -> "Config":
"""Load configuration from a YAML file"""
@@ -377,7 +385,9 @@ def from_dict(cls, config_dict: Dict[str, Any]) -> "Config":

# Update top-level fields
for key, value in config_dict.items():
if key not in ["llm", "prompt", "database", "evaluator", "evolution_trace"] and hasattr(config, key):
if key not in ["llm", "prompt", "database", "evaluator", "evolution_trace"] and hasattr(
config, key
):
setattr(config, key, value)

# Update nested configs
@@ -406,87 +416,7 @@ def from_dict(cls, config_dict: Dict[str, Any]) -> "Config":
return config

def to_dict(self) -> Dict[str, Any]:
"""Convert configuration to a dictionary"""
return {
# General settings
"max_iterations": self.max_iterations,
"checkpoint_interval": self.checkpoint_interval,
"log_level": self.log_level,
"log_dir": self.log_dir,
"random_seed": self.random_seed,
# Component configurations
"llm": {
"models": self.llm.models,
"evaluator_models": self.llm.evaluator_models,
"api_base": self.llm.api_base,
"temperature": self.llm.temperature,
"top_p": self.llm.top_p,
"max_tokens": self.llm.max_tokens,
"timeout": self.llm.timeout,
"retries": self.llm.retries,
"retry_delay": self.llm.retry_delay,
},
"prompt": {
"template_dir": self.prompt.template_dir,
"system_message": self.prompt.system_message,
"evaluator_system_message": self.prompt.evaluator_system_message,
"num_top_programs": self.prompt.num_top_programs,
"num_diverse_programs": self.prompt.num_diverse_programs,
"use_template_stochasticity": self.prompt.use_template_stochasticity,
"template_variations": self.prompt.template_variations,
# Note: meta-prompting features not implemented
# "use_meta_prompting": self.prompt.use_meta_prompting,
# "meta_prompt_weight": self.prompt.meta_prompt_weight,
},
"database": {
"db_path": self.database.db_path,
"in_memory": self.database.in_memory,
"population_size": self.database.population_size,
"archive_size": self.database.archive_size,
"num_islands": self.database.num_islands,
"elite_selection_ratio": self.database.elite_selection_ratio,
"exploration_ratio": self.database.exploration_ratio,
"exploitation_ratio": self.database.exploitation_ratio,
# Note: diversity_metric fixed to "edit_distance"
# "diversity_metric": self.database.diversity_metric,
"feature_dimensions": self.database.feature_dimensions,
"feature_bins": self.database.feature_bins,
"migration_interval": self.database.migration_interval,
"migration_rate": self.database.migration_rate,
"random_seed": self.database.random_seed,
"log_prompts": self.database.log_prompts,
},
"evaluator": {
"timeout": self.evaluator.timeout,
"max_retries": self.evaluator.max_retries,
# Note: resource limits not implemented
# "memory_limit_mb": self.evaluator.memory_limit_mb,
# "cpu_limit": self.evaluator.cpu_limit,
"cascade_evaluation": self.evaluator.cascade_evaluation,
"cascade_thresholds": self.evaluator.cascade_thresholds,
"parallel_evaluations": self.evaluator.parallel_evaluations,
# Note: distributed evaluation not implemented
# "distributed": self.evaluator.distributed,
"use_llm_feedback": self.evaluator.use_llm_feedback,
"llm_feedback_weight": self.evaluator.llm_feedback_weight,
},
"evolution_trace": {
"enabled": self.evolution_trace.enabled,
"format": self.evolution_trace.format,
"include_code": self.evolution_trace.include_code,
"include_prompts": self.evolution_trace.include_prompts,
"output_path": self.evolution_trace.output_path,
"buffer_size": self.evolution_trace.buffer_size,
"compress": self.evolution_trace.compress,
},
# Evolution settings
"diff_based_evolution": self.diff_based_evolution,
"max_code_length": self.max_code_length,
# Early stopping settings
"early_stopping_patience": self.early_stopping_patience,
"convergence_threshold": self.convergence_threshold,
"early_stopping_metric": self.early_stopping_metric,
}
return asdict(self)

def to_yaml(self, path: Union[str, Path]) -> None:
"""Save configuration to a YAML file"""
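With asdict imported at the top of the module, the hand-maintained to_dict mapping collapses to return asdict(self): dataclasses.asdict recursively converts nested dataclasses, lists, and dicts, so newly added fields such as max_tasks_per_child or the reasoning parameters are serialized without touching to_dict again. One side effect is that fields the old mapping deliberately skipped (meta-prompting, resource limits, distributed evaluation) are now included too, which is why the "not implemented" notes move onto the field declarations themselves. A toy illustration of the recursion, with hypothetical Inner/Outer classes standing in for the real config dataclasses:

    from dataclasses import asdict, dataclass, field

    @dataclass
    class Inner:
        timeout: int = 60

    @dataclass
    class Outer:
        log_level: str = "INFO"
        inner: Inner = field(default_factory=Inner)

    # asdict walks into nested dataclasses, so every field ends up in the dict
    print(asdict(Outer()))  # {'log_level': 'INFO', 'inner': {'timeout': 60}}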
44 changes: 18 additions & 26 deletions openevolve/controller.py
@@ -16,15 +16,10 @@
from openevolve.evaluator import Evaluator
from openevolve.evolution_trace import EvolutionTracer
from openevolve.llm.ensemble import LLMEnsemble
from openevolve.prompt.sampler import PromptSampler
from openevolve.process_parallel import ProcessParallelController
from openevolve.utils.code_utils import (
extract_code_language,
)
from openevolve.utils.format_utils import (
format_metrics_safe,
format_improvement_safe,
)
from openevolve.prompt.sampler import PromptSampler
from openevolve.utils.code_utils import extract_code_language
from openevolve.utils.format_utils import format_improvement_safe, format_metrics_safe

logger = logging.getLogger(__name__)

@@ -75,17 +70,11 @@ def __init__(
self,
initial_program_path: str,
evaluation_file: str,
config_path: Optional[str] = None,
config: Optional[Config] = None,
config: Config,
output_dir: Optional[str] = None,
):
# Load configuration
if config is not None:
# Use provided Config object directly
self.config = config
else:
# Load from file or use defaults
self.config = load_config(config_path)
# Load configuration (loaded in main_async)
self.config = config

# Set up output directory
self.output_dir = output_dir or os.path.join(
@@ -98,9 +87,10 @@ def __init__(

# Set random seed for reproducibility if specified
if self.config.random_seed is not None:
import hashlib
import random

import numpy as np
import hashlib

# Set global random seeds
random.seed(self.config.random_seed)
@@ -139,7 +129,7 @@ def __init__(
self.file_extension = f".{self.file_extension}"

# Set the file_suffix in config (can be overridden in YAML)
if not hasattr(self.config, 'file_suffix') or self.config.file_suffix == ".py":
if not hasattr(self.config, "file_suffix") or self.config.file_suffix == ".py":
self.config.file_suffix = self.file_extension

# Initialize components
@@ -175,18 +165,17 @@ def __init__(
if not trace_output_path:
# Default to output_dir/evolution_trace.{format}
trace_output_path = os.path.join(
self.output_dir,
f"evolution_trace.{self.config.evolution_trace.format}"
self.output_dir, f"evolution_trace.{self.config.evolution_trace.format}"
)

self.evolution_tracer = EvolutionTracer(
output_path=trace_output_path,
format=self.config.evolution_trace.format,
include_code=self.config.evolution_trace.include_code,
include_prompts=self.config.evolution_trace.include_prompts,
enabled=True,
buffer_size=self.config.evolution_trace.buffer_size,
compress=self.config.evolution_trace.compress
compress=self.config.evolution_trace.compress,
)
logger.info(f"Evolution tracing enabled: {trace_output_path}")
else:
@@ -305,8 +294,11 @@ async def run(
# Initialize improved parallel processing
try:
self.parallel_controller = ProcessParallelController(
self.config, self.evaluation_file, self.database, self.evolution_tracer,
file_suffix=self.config.file_suffix
self.config,
self.evaluation_file,
self.database,
self.evolution_tracer,
file_suffix=self.config.file_suffix,
)

# Set up signal handlers for graceful shutdown
@@ -349,7 +341,7 @@ def force_exit_handler(signum, frame):
if self.parallel_controller:
self.parallel_controller.stop()
self.parallel_controller = None

# Close evolution tracer
if self.evolution_tracer:
self.evolution_tracer.close()
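For controller.py the substantive change is the constructor signature: config is now a required Config instance, and the config_path / load_config fallback moves out to the CLI. The rest of the diff is import consolidation and black-style reformatting of the EvolutionTracer and ProcessParallelController setup. Library callers therefore have to resolve the config themselves before instantiating the controller; a rough sketch, with the OpenEvolve class name, the load_config import path, and the file paths assumed rather than taken from this hunk:

    from openevolve.config import load_config   # import path assumed

    config = load_config("config.yaml")         # or load_config(None) for pure defaults
    config.llm.api_base = "http://localhost:8000/v1"  # illustrative override

    controller = OpenEvolve(                     # class name assumed
        initial_program_path="examples/initial_program.py",  # hypothetical paths
        evaluation_file="examples/evaluator.py",
        config=config,                           # required; no config_path fallback
        output_dir="openevolve_output",
    )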