Skip to content

optimization

orchard.optimization

Optuna Hyperparameter Optimization Module.

Provides components for automated hyperparameter search:

  • SearchSpaceRegistry: Predefined search distributions
  • OptunaObjective: Objective function for optimization
  • OptunaOrchestrator: Study lifecycle management
  • run_optimization: Convenience function for complete workflow

StudyEarlyStoppingCallback(threshold, direction='maximize', patience=2, enabled=True)

Callback to stop Optuna study when target metric is achieved.

Prevents wasteful computation when near-perfect performance is reached (e.g., AUC > 0.9999 for classification tasks).

Usage

callback = StudyEarlyStoppingCallback(threshold=0.9999, direction="maximize", patience=3)
study.optimize(objective, callbacks=[callback])

Attributes:

Name Type Description
threshold

Metric value that triggers early stopping

direction

"maximize" or "minimize"

patience

Number of trials meeting threshold before stopping

_count

Internal counter for consecutive threshold hits

Initialize early stopping callback.

Parameters:

Name Type Description Default
threshold float

Target metric value (e.g., 0.9999 for AUC)

required
direction str

"maximize" or "minimize" (should match study direction)

'maximize'
patience int

Number of consecutive trials meeting threshold before stop

2
enabled bool

Whether callback is active (allows runtime disable)

True
Source code in orchard/optimization/early_stopping.py
def __init__(
    self, threshold: float, direction: str = "maximize", patience: int = 2, enabled: bool = True
) -> None:
    """
    Initialize early stopping callback.

    Args:
        threshold: Target metric value (e.g., 0.9999 for AUC)
        direction: "maximize" or "minimize" (should match study direction)
        patience: Number of consecutive trials meeting threshold before stop
        enabled: Whether callback is active (allows runtime disable)

    Raises:
        ValueError: If ``direction`` is neither 'maximize' nor 'minimize'.
    """
    # Validate before touching state so an invalid direction never leaves
    # a partially-initialized callback behind.
    if direction not in ("maximize", "minimize"):
        raise ValueError(f"direction must be 'maximize' or 'minimize', got '{direction}'")

    self.threshold = threshold
    self.direction = direction
    self.patience = patience
    self.enabled = enabled
    self._count = 0  # consecutive completed trials that met the threshold

__call__(study, trial)

Callback invoked after each trial completion.

Parameters:

Name Type Description Default
study Study

Optuna study instance

required
trial FrozenTrial

Completed trial

required
Side Effects

Calls study.stop() when early stopping criteria are met.

Source code in orchard/optimization/early_stopping.py
def __call__(self, study: Study, trial: FrozenTrial) -> None:
    """
    Callback invoked after each trial completion.

    Decision flow: ignore the trial unless the callback is enabled and
    the trial COMPLETEd with a non-None value; any miss resets the
    consecutive-hit counter. Once ``patience`` consecutive completed
    trials meet the threshold, a summary is logged and the study is
    stopped.

    Args:
        study: Optuna study instance
        trial: Completed trial

    Side Effects:
        Calls ``study.stop()`` when early stopping criteria are met.
    """
    # Runtime kill-switch: a disabled callback is a no-op.
    if not self.enabled:
        return

    # Pruned/failed/running trials break the consecutive streak.
    if trial.state != TrialState.COMPLETE:
        self._count = 0
        return

    value = trial.value
    if value is None:
        self._count = 0
        return
    # Direction-aware comparison against the configured target.
    threshold_met = (
        value >= self.threshold if self.direction == "maximize" else value <= self.threshold
    )

    if not threshold_met:
        self._count = 0
        return

    # Threshold met — advance the streak and log progress toward patience.
    self._count += 1
    cmp = "≥" if self.direction == "maximize" else "≤"  # pragma: no mutate
    logger.info(
        "%s%s Trial %d reached threshold (%.6f %s %.6f) [%d/%d]",
        LogStyle.INDENT,
        LogStyle.SUCCESS,
        trial.number,
        value,
        cmp,
        self.threshold,
        self._count,
        self.patience,
    )

    # Not enough consecutive hits yet — keep optimizing.
    if self._count < self.patience:
        return

    # Calculate trials saved; "n_trials" is a study user attribute
    # (set elsewhere) — falls back to "N/A" when it was never recorded.
    total_trials = study.user_attrs.get("n_trials")
    trials_saved: int | str
    if isinstance(total_trials, int):
        trials_saved = total_trials - (trial.number + 1)
    else:
        trials_saved = "N/A"

    # Use LogStyle for consistent formatting
    Reporter.log_phase_header(  # pragma: no mutate
        logger,
        "EARLY STOPPING: Target performance achieved!",
        LogStyle.DOUBLE,  # pragma: no mutate
    )
    logger.info(
        "%s%s Metric           : %.6f",
        LogStyle.INDENT,
        LogStyle.SUCCESS,
        value,
    )
    logger.info(
        "%s%s Threshold        : %.6f",
        LogStyle.INDENT,
        LogStyle.ARROW,
        self.threshold,
    )
    logger.info(
        "%s%s Trials completed : %d",
        LogStyle.INDENT,
        LogStyle.ARROW,
        trial.number + 1,
    )
    logger.info(
        "%s%s Trials saved     : %s",
        LogStyle.INDENT,
        LogStyle.SUCCESS,
        trials_saved,
    )
    logger.info(LogStyle.DOUBLE)
    logger.info("")

    # Signal Optuna to finish after in-flight trials complete.
    study.stop()

MetricExtractor(metric_name, direction='maximize')

Extracts and tracks metrics from validation results.

Handles metric extraction with validation and maintains the best metric value achieved during training. Direction-aware: uses max() for maximize objectives, min() for minimize.

Attributes:

Name Type Description
metric_name

Name of metric to track (e.g., 'auc', 'accuracy')

direction

Optimization direction ('maximize' or 'minimize')

best_metric

Best metric value achieved so far

Example

extractor = MetricExtractor("auc", direction="maximize")
val_metrics = {"loss": 0.5, "accuracy": 0.85, "auc": 0.92}
current = extractor.extract(val_metrics)  # 0.92
best = extractor.update_best(current)  # 0.92

Initialize metric extractor.

Parameters:

Name Type Description Default
metric_name str

Name of metric to track

required
direction str

'maximize' or 'minimize'

'maximize'
Source code in orchard/optimization/objective/metric_extractor.py
def __init__(self, metric_name: str, direction: str = "maximize") -> None:
    """
    Initialize metric extractor.

    Args:
        metric_name: Name of metric to track
        direction: 'maximize' or 'minimize'
    """
    self.metric_name = metric_name
    self.direction = direction
    is_max = direction == "maximize"
    self._is_maximize = is_max
    # Seed the running best with the worst possible value for the direction.
    self.best_metric = float("-inf") if is_max else float("inf")

extract(val_metrics)

Extract target metric from validation results.

Parameters:

Name Type Description Default
val_metrics Mapping[str, float]

Dictionary of validation metrics

required

Returns:

Type Description
float

Value of target metric

Raises:

Type Description
KeyError

If metric_name not found in val_metrics

Source code in orchard/optimization/objective/metric_extractor.py
def extract(self, val_metrics: Mapping[str, float]) -> float:
    """
    Look up the tracked metric in a validation-results mapping.

    Args:
        val_metrics: Dictionary of validation metrics

    Returns:
        Value of target metric

    Raises:
        KeyError: If metric_name not found in val_metrics
    """
    name = self.metric_name
    if name in val_metrics:
        return val_metrics[name]
    # Surface the available keys so misconfigured metric names are obvious.
    available = list(val_metrics.keys())
    raise KeyError(f"Metric '{self.metric_name}' not found. Available: {available}")

reset()

Reset best metric tracking for a new trial.

Source code in orchard/optimization/objective/metric_extractor.py
def reset(self) -> None:
    """Restart best-metric tracking at the worst value for the direction."""
    worst = float("-inf") if self._is_maximize else float("inf")
    self.best_metric = worst

update_best(current_metric)

Update and return best metric achieved within current trial.

Direction-aware: uses max() for maximize, min() for minimize. NaN values are ignored to prevent poisoning the best-metric state (max(-inf, NaN) returns NaN in Python, which would permanently corrupt comparisons).

Parameters:

Name Type Description Default
current_metric float

Current metric value

required

Returns:

Type Description
float

Best metric value achieved so far

Source code in orchard/optimization/objective/metric_extractor.py
def update_best(self, current_metric: float) -> float:
    """
    Fold the current metric into the per-trial best and return the best.

    Direction-aware: maximize keeps the larger value, minimize the
    smaller. NaN inputs are dropped entirely, because folding NaN in
    would poison every later comparison (``max(-inf, nan)`` is NaN in
    Python).

    Args:
        current_metric: Current metric value

    Returns:
        Best metric value achieved so far
    """
    if not math.isnan(current_metric):
        if self._is_maximize:
            self.best_metric = max(self.best_metric, current_metric)
        else:
            self.best_metric = min(self.best_metric, current_metric)
    return self.best_metric

OptunaObjective(cfg, search_space, device, dataset_loader=None, dataloader_factory=None, model_factory=None, tracker=None)

Optuna objective function with dependency injection.

Orchestrates hyperparameter optimization trials by:

  • Building trial-specific configurations
  • Creating data loaders, models, and optimizers
  • Executing training with pruning
  • Tracking and returning best metrics

All external dependencies are injectable for testability:

  • dataset_loader: Dataset loading function
  • dataloader_factory: DataLoader creation function
  • model_factory: Model instantiation function

Attributes:

Name Type Description
cfg

Base configuration (single source of truth)

search_space

Hyperparameter search space

device

Training device (CPU/CUDA/MPS)

config_builder

Builds trial-specific configs

metric_extractor

Handles metric extraction

dataset_data

Cached dataset (loaded once, reused across trials)

Example

objective = OptunaObjective(
    cfg=config,
    search_space=search_space,
    device=torch.device("cuda"),
)
study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=50)

Initialize Optuna objective.

Parameters:

Name Type Description Default
cfg Config

Base configuration (reads optuna.* settings)

required
search_space Mapping[str, Any]

Hyperparameter search space

required
device device

Training device

required
dataset_loader DatasetLoaderProtocol | None

Dataset loading function (default: load_dataset)

None
dataloader_factory DataloaderFactoryProtocol | None

DataLoader factory (default: get_dataloaders)

None
model_factory ModelFactoryProtocol | None

Model factory (default: get_model)

None
tracker TrackerProtocol | None

Optional experiment tracker for nested trial logging

None
Source code in orchard/optimization/objective/objective.py
def __init__(
    self,
    cfg: Config,
    search_space: Mapping[str, Any],
    device: torch.device,
    dataset_loader: DatasetLoaderProtocol | None = None,
    dataloader_factory: DataloaderFactoryProtocol | None = None,
    model_factory: ModelFactoryProtocol | None = None,
    tracker: TrackerProtocol | None = None,
) -> None:
    """
    Initialize Optuna objective.

    Args:
        cfg: Base configuration (reads optuna.* settings)
        search_space: Hyperparameter search space
        device: Training device
        dataset_loader: Dataset loading function (default: load_dataset)
        dataloader_factory: DataLoader factory (default: get_dataloaders)
        model_factory: Model factory (default: get_model)
        tracker: Optional experiment tracker for nested trial logging
    """
    self.cfg = cfg
    self.search_space = search_space
    self.device = device
    self.tracker = tracker

    # Injectable collaborators fall back to the production implementations.
    self._dataset_loader = load_dataset if dataset_loader is None else dataset_loader
    self._dataloader_factory = (
        get_dataloaders if dataloader_factory is None else dataloader_factory
    )
    self._model_factory = get_model if model_factory is None else model_factory

    # monitor_metric is the single source of truth for the optimisation
    # target — shared by trainer checkpointing and Optuna ranking.
    self.config_builder = TrialConfigBuilder(cfg)
    self.metric_extractor = MetricExtractor(
        cfg.training.monitor_metric, direction=cfg.optuna.direction
    )

    # The dataset is loaded a single time here and reused by every trial.
    self.dataset_data = self._dataset_loader(self.config_builder.base_metadata)

__call__(trial)

Execute single Optuna trial.

Samples hyperparameters, builds trial configuration, trains model, and returns best validation metric. Failed trials return the worst possible metric instead of crashing the study.

Parameters:

Name Type Description Default
trial Trial

Optuna trial object

required

Returns:

Type Description
float

Best validation metric achieved during training,

float

or worst-case metric if the trial fails.

Raises:

Type Description
TrialPruned

If trial is pruned during training

Source code in orchard/optimization/objective/objective.py
def __call__(self, trial: optuna.Trial) -> float:
    """
    Execute single Optuna trial.

    Samples hyperparameters, builds trial configuration, trains model,
    and returns best validation metric. Failed trials return the worst
    possible metric instead of crashing the study.

    Args:
        trial: Optuna trial object

    Returns:
        Best validation metric achieved during training,
        or worst-case metric if the trial fails.

    Raises:
        optuna.TrialPruned: If trial is pruned during training
    """
    # Reset per-trial metric tracking
    self.metric_extractor.reset()

    # Sample parameters
    params = self._sample_params(trial)

    # Build trial config
    trial_cfg = self.config_builder.build(params)

    # Inject recipe-level flags for logging (not Optuna params)
    log_params = {**params, "pretrained": self.cfg.architecture.pretrained}

    # Log trial start
    log_trial_start(trial.number, log_params)

    # Start nested MLflow run for this trial
    if self.tracker is not None:
        self.tracker.start_optuna_trial(trial.number, log_params)

    # Tracks whether the finally-block should report a real metric or the
    # worst-case sentinel when closing the nested tracker run.
    trial_succeeded = False
    try:
        # Setup training components
        train_loader, val_loader, _ = self._dataloader_factory(
            self.dataset_data,
            trial_cfg.dataset,
            trial_cfg.training,
            trial_cfg.augmentation,
            trial_cfg.num_workers,
            is_optuna=True,
        )
        model = self._model_factory(self.device, trial_cfg.dataset, trial_cfg.architecture)
        optimizer = get_optimizer(model, trial_cfg.training)
        scheduler = get_scheduler(optimizer, trial_cfg.training)

        # Class weights only when weighted loss is configured for the trial.
        class_weights = None
        if trial_cfg.training.weighted_loss:
            train_labels = train_loader.dataset.labels.flatten()  # type: ignore[attr-defined]
            num_classes = self.config_builder.base_metadata.num_classes
            class_weights = compute_class_weights(train_labels, num_classes, self.device)

        criterion = get_criterion(trial_cfg.training, class_weights=class_weights)

        # Execute training
        executor = TrialTrainingExecutor(
            model=model,
            train_loader=train_loader,
            val_loader=val_loader,
            optimizer=optimizer,
            scheduler=scheduler,
            criterion=criterion,
            training=trial_cfg.training,
            optuna=trial_cfg.optuna,
            log_interval=trial_cfg.telemetry.log_interval,
            device=self.device,
            metric_extractor=self.metric_extractor,
        )

        best_metric = executor.execute(trial)
        trial_succeeded = True

        return best_metric

    except optuna.TrialPruned:
        trial_succeeded = True  # pruned trials have valid metrics
        raise

    except Exception as e:  # must not crash study
        logger.error(  # pragma: no mutate
            "%s%s Trial %d failed: %s: %s",
            LogStyle.INDENT,  # pragma: no mutate
            LogStyle.FAILURE,  # pragma: no mutate
            trial.number,  # pragma: no mutate
            type(e).__name__,  # pragma: no mutate
            e,  # pragma: no mutate
        )
        return self._worst_metric()

    finally:
        # End nested MLflow run for this trial
        if self.tracker is not None:
            if trial_succeeded:
                self.tracker.end_optuna_trial(self.metric_extractor.best_metric)
            else:
                # Trial failed before any validation — close run without metric
                self.tracker.end_optuna_trial(self._worst_metric())

        # Cleanup GPU memory between trials
        self._cleanup()

TrialConfigBuilder(base_cfg)

Builds trial-specific Config instances for Optuna trials.

Handles parameter mapping from Optuna's flat namespace to Config's hierarchical structure, preserves dataset metadata excluded from serialization, and validates via Pydantic.

Attributes:

Name Type Description
base_cfg

Base configuration template

optuna_epochs

Number of epochs for Optuna trials (from cfg.optuna.epochs)

base_metadata

Cached dataset metadata

Example

builder = TrialConfigBuilder(base_cfg)
trial_params = {"learning_rate": 0.001, "dropout": 0.3}
trial_cfg = builder.build(trial_params)

Initialize config builder.

Parameters:

Name Type Description Default
base_cfg Config

Base configuration template

required
Source code in orchard/optimization/objective/config_builder.py
def __init__(self, base_cfg: Config) -> None:
    """
    Initialize config builder.

    Args:
        base_cfg: Base configuration template
    """
    self.base_cfg = base_cfg
    # Metadata is excluded from serialization, so cache it once for
    # re-injection into every trial config.
    self.base_metadata = base_cfg.dataset._ensure_metadata
    # Trials run the (typically shorter) Optuna epoch budget.
    self.optuna_epochs = base_cfg.optuna.epochs

build(trial_params)

Build trial-specific Config with parameter overrides.

Parameters:

Name Type Description Default
trial_params dict[str, Any]

Sampled hyperparameters from Optuna

required

Returns:

Type Description
Config

Validated Config instance with trial parameters

Source code in orchard/optimization/objective/config_builder.py
def build(self, trial_params: dict[str, Any]) -> Config:
    """
    Build trial-specific Config with parameter overrides.

    Args:
        trial_params: Sampled hyperparameters from Optuna

    Returns:
        Validated Config instance with trial parameters
    """
    cfg_dict = self.base_cfg.model_dump()
    dataset_section = cfg_dict["dataset"]
    training_section = cfg_dict["training"]

    # Preserve resolution when the dump dropped it.
    if dataset_section.get("resolution") is None:
        dataset_section["resolution"] = self.base_cfg.dataset.resolution

    # Metadata is excluded from serialization — put it back.
    dataset_section["metadata"] = self.base_metadata

    # Trials always run the Optuna epoch budget.
    training_section["epochs"] = self.optuna_epochs

    # Keep mixup_epochs within the trial length
    # (prevents _check_mixup_epochs ValueError).
    if training_section["mixup_epochs"] > self.optuna_epochs:
        training_section["mixup_epochs"] = self.optuna_epochs

    # Apply trial-specific overrides, then validate via Pydantic.
    self._apply_param_overrides(cfg_dict, trial_params)

    return Config(**cfg_dict)

TrialTrainingExecutor(model, train_loader, val_loader, optimizer, scheduler, criterion, training, optuna, log_interval, device, metric_extractor)

Executes training loop with Optuna pruning integration.

Orchestrates a complete training cycle for a single Optuna trial, including:

  • Training and validation epochs
  • Metric extraction and tracking
  • Pruning decisions with warmup period
  • Learning rate scheduling
  • Progress logging

Pruning and warmup parameters are read from the optuna sub-config; training hyperparameters from training.

Attributes:

Name Type Description
model

PyTorch model to train.

train_loader

Training data loader.

val_loader

Validation data loader.

optimizer

Optimizer instance.

scheduler

Learning rate scheduler.

criterion

Loss function.

device

Training device (CPU/CUDA/MPS).

metric_extractor

Handles metric extraction and best-value tracking.

enable_pruning

Whether to enable trial pruning.

warmup_epochs

Epochs before pruning activates.

monitor_metric

Name of the metric driving scheduling.

scaler GradScaler | None

AMP gradient scaler (None when use_amp is False).

mixup_fn callable | None

Mixup augmentation function (None when alpha is 0).

epochs

Total training epochs.

log_interval

Epoch interval for progress logging.

_loop TrainingLoop

Shared epoch kernel for training steps (train only, no validation).

Example

executor = TrialTrainingExecutor(
    model=model,
    train_loader=train_loader,
    val_loader=val_loader,
    optimizer=optimizer,
    scheduler=scheduler,
    criterion=criterion,
    training=trial_cfg.training,
    optuna=trial_cfg.optuna,
    log_interval=trial_cfg.telemetry.log_interval,
    device=device,
    metric_extractor=MetricExtractor("auc"),
)
best_metric = executor.execute(trial)

Initialize training executor.

Parameters:

Name Type Description Default
model Module

PyTorch model to train.

required
train_loader DataLoader[Any]

Training data loader.

required
val_loader DataLoader[Any]

Validation data loader.

required
optimizer Optimizer

Optimizer instance.

required
scheduler LRScheduler

Learning rate scheduler.

required
criterion Module

Loss function.

required
training TrainingConfig

Training hyperparameters sub-config.

required
optuna OptunaConfig

Optuna pruning/warmup sub-config.

required
log_interval int

Epoch interval for progress logging.

required
device device

Training device.

required
metric_extractor MetricExtractor

Metric extraction and tracking handler.

required
Source code in orchard/optimization/objective/training_executor.py
def __init__(
    self,
    model: torch.nn.Module,
    train_loader: torch.utils.data.DataLoader[Any],
    val_loader: torch.utils.data.DataLoader[Any],
    optimizer: torch.optim.Optimizer,
    scheduler: torch.optim.lr_scheduler.LRScheduler,
    criterion: torch.nn.Module,
    training: TrainingConfig,
    optuna: OptunaConfig,
    log_interval: int,
    device: torch.device,
    metric_extractor: MetricExtractor,
) -> None:
    """
    Initialize training executor.

    Args:
        model: PyTorch model to train.
        train_loader: Training data loader.
        val_loader: Validation data loader.
        optimizer: Optimizer instance.
        scheduler: Learning rate scheduler.
        criterion: Loss function.
        training: Training hyperparameters sub-config.
        optuna: Optuna pruning/warmup sub-config.
        log_interval: Epoch interval for progress logging.
        device: Training device.
        metric_extractor: Metric extraction and tracking handler.
    """
    # Core training collaborators
    self.model = model
    self.train_loader = train_loader
    self.val_loader = val_loader
    self.optimizer = optimizer
    self.scheduler = scheduler
    self.criterion = criterion
    self.device = device
    self.metric_extractor = metric_extractor

    # Pruning behaviour comes from the optuna sub-config
    self.enable_pruning = optuna.enable_pruning
    self.warmup_epochs = optuna.pruning_warmup_epochs

    # Per-run training state derived from the training sub-config
    self.scaler = create_amp_scaler(training, device=str(device))
    self.mixup_fn = create_mixup_fn(training)
    self.epochs = training.epochs
    self.monitor_metric = training.monitor_metric
    self.log_interval = log_interval
    self._consecutive_val_failures: int = 0

    # Shared epoch kernel runs only the train step; validation stays in
    # this executor because it is error-resilient here.
    loop_options = LoopOptions(
        grad_clip=training.grad_clip,
        total_epochs=self.epochs,
        mixup_epochs=training.mixup_epochs,
        use_tqdm=False,
        monitor_metric=self.monitor_metric,
    )
    self._loop = TrainingLoop(
        model=model,
        train_loader=train_loader,
        val_loader=val_loader,
        optimizer=optimizer,
        scheduler=scheduler,
        criterion=criterion,
        device=device,
        scaler=self.scaler,
        mixup_fn=self.mixup_fn,
        options=loop_options,
    )

execute(trial)

Execute full training loop with pruning.

Runs training for cfg.training.epochs, reporting metrics to Optuna after each epoch. Applies pruning logic after warmup period.

Parameters:

Name Type Description Default
trial Trial

Optuna trial for reporting and pruning

required

Returns:

Type Description
float

Best validation metric achieved during training

Raises:

Type Description
TrialPruned

If trial should terminate early

Source code in orchard/optimization/objective/training_executor.py
def execute(self, trial: optuna.Trial) -> float:
    """
    Execute full training loop with pruning.

    Runs training for cfg.training.epochs, reporting metrics to Optuna
    after each epoch. Applies pruning logic after warmup period.

    Args:
        trial: Optuna trial for reporting and pruning

    Returns:
        Best validation metric achieved during training

    Raises:
        optuna.TrialPruned: If trial should terminate early
    """
    # NOTE(review): assumes self.epochs >= 1 — with 0 epochs the post-loop
    # use of best_metric/epoch_loss would raise NameError; confirm config
    # validation enforces a positive epoch count.
    for epoch in range(1, self.epochs + 1):
        # Train (delegated to shared loop)
        epoch_loss = self._loop.run_train_step(epoch)

        # Validate
        val_metrics = self._validate_epoch()

        # Extract and track metric
        current_metric = self.metric_extractor.extract(val_metrics)
        best_metric = self.metric_extractor.update_best(current_metric)

        # Report to Optuna (skip NaN to avoid poisoning the pruner)
        if not math.isnan(current_metric):
            trial.report(current_metric, epoch)

        # Check pruning (warmup handling lives in _should_prune)
        if self._should_prune(trial, epoch):
            logger.info(
                "%s%s Trial %d pruned at epoch %d (%s=%.4f)",
                LogStyle.INDENT,
                LogStyle.ARROW,
                trial.number,
                epoch,
                self.metric_extractor.metric_name,
                current_metric,
            )
            raise optuna.TrialPruned()

        # Scheduler step (uses monitor_metric, consistent with ModelTrainer)
        step_scheduler(self.scheduler, val_metrics[self.monitor_metric])

        # Logging — every log_interval epochs, and always on the final epoch
        if epoch % self.log_interval == 0 or epoch == self.epochs:
            logger.info(
                "%sT%d E%d/%d | Loss:%.4f | %s:%.4f (Best:%.4f)",
                LogStyle.DOUBLE_INDENT,
                trial.number,
                epoch,
                self.epochs,
                epoch_loss,
                self.metric_extractor.metric_name,
                current_metric,
                best_metric,
            )

    self._log_trial_complete(trial, best_metric, epoch_loss)
    return best_metric

OptunaOrchestrator(cfg, device, paths, tracker=None)

High-level manager for Optuna hyperparameter optimization studies.

Coordinates the complete optimization lifecycle: study creation, trial execution, and post-processing artifact generation. Integrates with Orchard ML's Config and RunPaths infrastructure, delegating specialized tasks (sampler/pruner building, visualization, export) to focused submodules.

This orchestrator serves as the entry point for hyperparameter tuning, wrapping Optuna's API with Orchard ML-specific configuration and output management.

Attributes:

Name Type Description
cfg Config

Template configuration that will be overridden per trial

device device

Hardware target for training (CPU/CUDA/MPS)

paths RunPaths

Output directory structure for artifacts and results

Example

orchestrator = OptunaOrchestrator(cfg=config, device=device, paths=paths)
study = orchestrator.optimize()
print(f"Best AUC: {study.best_value:.3f}")

Artifacts saved to paths.figures/ and paths.exports/

Initialize orchestrator.

Parameters:

Name Type Description Default
cfg Config

Base Config to override per trial

required
device device

PyTorch device for training

required
paths RunPaths

Root directory for outputs

required
tracker TrackerProtocol | None

Optional experiment tracker for nested trial logging

None
Source code in orchard/optimization/orchestrator/orchestrator.py
def __init__(
    self,
    cfg: Config,
    device: torch.device,
    paths: RunPaths,
    tracker: TrackerProtocol | None = None,
) -> None:
    """
    Initialize orchestrator.

    Args:
        cfg: Base Config to override per trial
        device: PyTorch device for training
        paths: Root directory for outputs
        tracker: Optional experiment tracker for nested trial logging
    """
    self.cfg = cfg
    self.device = device
    self.paths = paths
    self.tracker = tracker

create_study()

Create or load Optuna study with configured sampler and pruner.

Returns:

Type Description
Study

Configured Optuna study instance

Source code in orchard/optimization/orchestrator/orchestrator.py
def create_study(self) -> optuna.Study:
    """
    Create or load Optuna study with configured sampler and pruner.

    Returns:
        Configured Optuna study instance
    """
    optuna_cfg = self.cfg.optuna

    # Sampler/pruner construction is delegated to focused builders;
    # storage resolution depends on this run's output paths.
    sampler = build_sampler(optuna_cfg)
    pruner = build_pruner(optuna_cfg)
    storage_url = optuna_cfg.get_storage_url(self.paths)

    return optuna.create_study(
        study_name=optuna_cfg.study_name,
        direction=optuna_cfg.direction,
        sampler=sampler,
        pruner=pruner,
        storage=storage_url,
        load_if_exists=optuna_cfg.load_if_exists,
    )

optimize()

Execute hyperparameter optimization.

Returns:

Type Description
Study

Completed study with trial results

Source code in orchard/optimization/orchestrator/orchestrator.py
def optimize(self) -> optuna.Study:
    """
    Execute hyperparameter optimization.

    Creates the study and search space, runs ``study.optimize`` with the
    configured budget/callbacks, and always runs post-processing — even
    when the user interrupts with Ctrl+C (partial results are saved).

    Returns:
        Completed study with trial results
    """
    # Suppress Optuna's internal INFO logs (e.g. "A new study created in RDB")
    # before create_study(); our own header in phases.py is sufficient
    optuna.logging.set_verbosity(optuna.logging.WARNING)

    study = self.create_study()
    search_space = get_search_space(
        self.cfg.optuna.search_space_preset,
        resolution=self.cfg.dataset.resolution,
        include_models=self.cfg.optuna.enable_model_search,
        model_pool=self.cfg.optuna.model_pool,
        overrides=self.cfg.optuna.search_space_overrides,
    )

    objective = OptunaObjective(
        cfg=self.cfg,
        search_space=search_space,
        device=self.device,
        tracker=self.tracker,
    )

    # Configure callbacks and log our structured header
    log_optimization_header(self.cfg)

    callbacks = build_callbacks(self.cfg.optuna, self.cfg.training.monitor_metric)

    # Record the trial budget so callbacks can compute "trials saved".
    study.set_user_attr("n_trials", self.cfg.optuna.n_trials)

    interrupted = False
    try:
        study.optimize(
            objective,
            n_trials=self.cfg.optuna.n_trials,
            timeout=self.cfg.optuna.timeout,
            n_jobs=self.cfg.optuna.n_jobs,
            show_progress_bar=self.cfg.optuna.show_progress_bar,
            callbacks=callbacks,
        )
    except KeyboardInterrupt:
        # Swallow Ctrl+C so partial results still get exported below.
        interrupted = True
        logger.warning("Optimization interrupted by user. Saving partial results...")

    self._post_optimization_processing(study)

    if interrupted:
        logger.warning(
            "Continuing to training in 5 seconds... (Ctrl+C again to abort pipeline)"
        )
        time.sleep(5)  # grace period for the user to fully abort

    return study

TrialData(number, value, params, datetime_start=None, datetime_complete=None, state=None, duration_seconds=None) dataclass

Immutable snapshot of Optuna trial metadata for serialization.

Attributes:

Name Type Description
number int

Trial number within the study.

value float | None

Objective value (None for incomplete trials).

params dict[str, Any]

Hyperparameter values sampled for this trial.

datetime_start str | None

ISO-formatted start timestamp.

datetime_complete str | None

ISO-formatted completion timestamp.

state str | None

Trial state name (COMPLETE, PRUNED, FAIL, etc.).

duration_seconds float | None

Wall-clock duration in seconds.

from_trial(trial) classmethod

Build from an Optuna FrozenTrial, computing duration if timestamps are available.

Parameters:

Name Type Description Default
trial FrozenTrial

Frozen trial from study.

required

Returns:

Type Description
TrialData

Immutable trial snapshot with computed duration.

Source code in orchard/optimization/orchestrator/exporters.py
@classmethod
def from_trial(cls, trial: optuna.trial.FrozenTrial) -> TrialData:
    """
    Create a TrialData snapshot from an Optuna FrozenTrial.

    Duration is derived from the start/complete timestamps when both are
    present; otherwise it is left as None.

    Args:
        trial: Frozen trial from study.

    Returns:
        Immutable trial snapshot with computed duration.
    """
    started = trial.datetime_start
    finished = trial.datetime_complete
    # Wall-clock duration only makes sense when both endpoints exist.
    elapsed = (finished - started).total_seconds() if started and finished else None
    return cls(
        number=trial.number,
        value=trial.value,
        params=trial.params,
        state=trial.state.name,
        datetime_start=started.isoformat() if started else None,
        datetime_complete=finished.isoformat() if finished else None,
        duration_seconds=elapsed,
    )

to_dict()

Serialize to plain dictionary for JSON export.

Returns:

Type Description
dict[str, Any]

Dictionary representation with all fields.

Source code in orchard/optimization/orchestrator/exporters.py
def to_dict(self) -> dict[str, Any]:
    """
    Convert this snapshot into a plain dictionary for JSON export.

    Returns:
        Dictionary representation with all fields.
    """
    payload = asdict(self)
    return payload

SearchSpaceRegistry(overrides=None)

Centralized registry of hyperparameter search distributions.

Reads bounds from a SearchSpaceOverrides instance, enabling full YAML customization of search ranges without code changes.

Each method returns a dict of {param_name: suggest_function} where suggest_function takes a Trial object and returns a sampled value.

Parameters:

Name Type Description Default
overrides SearchSpaceOverrides | None

Configurable search range bounds. Uses defaults if None.

None
Source code in orchard/optimization/search_spaces.py
def __init__(self, overrides: SearchSpaceOverrides | None = None) -> None:
    """Store the supplied search-range overrides, falling back to defaults when absent."""
    if overrides is None:
        overrides = _default_overrides()
    self.ov = overrides

get_optimization_space()

Core optimization hyperparameters (learning rate, weight decay, etc.).

Returns:

Type Description
Mapping[str, _SamplerFn]

Immutable mapping of parameter names to sampling functions

Source code in orchard/optimization/search_spaces.py
def get_optimization_space(self) -> Mapping[str, _SamplerFn]:
    """
    Core optimization hyperparameters (learning rate, weight decay, etc.).

    Returns:
        Immutable mapping of parameter names to sampling functions
    """
    ov = self.ov

    def log_scaled(name, bounds):
        # Float sampler honoring the configured log-scale flag.
        return lambda trial: trial.suggest_float(name, bounds.low, bounds.high, log=bounds.log)

    samplers = {}
    samplers["optimizer_type"] = lambda trial: trial.suggest_categorical(
        "optimizer_type", ov.optimizer_type
    )
    samplers["learning_rate"] = log_scaled("learning_rate", ov.learning_rate)
    samplers["weight_decay"] = log_scaled("weight_decay", ov.weight_decay)
    # Momentum is sampled on a linear scale (no log flag in its bounds).
    samplers["momentum"] = lambda trial: trial.suggest_float(
        "momentum", ov.momentum.low, ov.momentum.high
    )
    samplers["min_lr"] = log_scaled("min_lr", ov.min_lr)
    return MappingProxyType(samplers)

get_loss_space()

Loss function parameters (criterion type, focal gamma, label smoothing).

focal_gamma is only sampled when criterion_type == "focal", otherwise defaults to 2.0. label_smoothing is only sampled when criterion_type == "cross_entropy", otherwise defaults to 0.0.

Returns:

Type Description
Mapping[str, _SamplerFn]

Immutable mapping of loss-related parameter samplers

Source code in orchard/optimization/search_spaces.py
def get_loss_space(self) -> Mapping[str, _SamplerFn]:
    """
    Loss function parameters (criterion type, focal gamma, label smoothing).

    ``focal_gamma`` is only sampled when ``criterion_type == "focal"``,
    otherwise defaults to 2.0.  ``label_smoothing`` is only sampled
    when ``criterion_type == "cross_entropy"``, otherwise defaults to 0.0.

    Returns:
        Immutable mapping of loss-related parameter samplers
    """
    # label_smoothing belongs here (not in regularization) because it is
    # mutually exclusive with focal_gamma: only the active loss's parameter
    # is sampled, and the inactive one falls back to a safe default.
    ov = self.ov

    def sample_criterion(trial):
        return trial.suggest_categorical("criterion_type", ov.criterion_type)

    def sample_focal_gamma(trial):
        # Only meaningful for the focal loss; 2.0 is the safe default.
        if trial.params.get("criterion_type") != "focal":
            return 2.0
        return trial.suggest_float("focal_gamma", ov.focal_gamma.low, ov.focal_gamma.high)

    def sample_label_smoothing(trial):
        # Only meaningful for cross-entropy; 0.0 disables smoothing.
        if trial.params.get("criterion_type") != "cross_entropy":
            return 0.0
        return trial.suggest_float(
            "label_smoothing", ov.label_smoothing.low, ov.label_smoothing.high
        )

    return MappingProxyType(
        {
            "criterion_type": sample_criterion,
            "focal_gamma": sample_focal_gamma,
            "label_smoothing": sample_label_smoothing,
        }
    )

get_regularization_space()

Regularization strategies (mixup, dropout).

Returns:

Type Description
Mapping[str, _SamplerFn]

Immutable mapping of regularization parameter samplers

Source code in orchard/optimization/search_spaces.py
def get_regularization_space(self) -> Mapping[str, _SamplerFn]:
    """
    Regularization strategies (mixup, dropout).

    Returns:
        Immutable mapping of regularization parameter samplers
    """
    ov = self.ov

    def sample_mixup(trial):
        return trial.suggest_float("mixup_alpha", ov.mixup_alpha.low, ov.mixup_alpha.high)

    def sample_dropout(trial):
        return trial.suggest_float("dropout", ov.dropout.low, ov.dropout.high)

    return MappingProxyType({"mixup_alpha": sample_mixup, "dropout": sample_dropout})

get_batch_size_space(resolution=28)

Batch size as categorical (resolution-aware).

Parameters:

Name Type Description Default
resolution int

Input image resolution (e.g. 28, 32, 64, 128, 224)

28

Returns:

Type Description
Mapping[str, _SamplerFn]

Immutable mapping with batch_size sampler

Source code in orchard/optimization/search_spaces.py
def get_batch_size_space(self, resolution: int = 28) -> Mapping[str, _SamplerFn]:
    """
    Batch size as categorical (resolution-aware).

    Args:
        resolution: Input image resolution (e.g. 28, 32, 64, 128, 224)

    Returns:
        Immutable mapping with batch_size sampler
    """
    # Pick the choice pool that matches the input resolution regime.
    pool = (
        self.ov.batch_size_high_res
        if resolution >= HIGHRES_THRESHOLD
        else self.ov.batch_size_low_res
    )
    batch_choices = list(pool)

    def sample_batch_size(trial):
        return trial.suggest_categorical("batch_size", batch_choices)

    return MappingProxyType({"batch_size": sample_batch_size})

get_scheduler_space()

Learning rate scheduler parameters.

Returns:

Type Description
Mapping[str, _SamplerFn]

Immutable mapping of scheduler-related samplers

Source code in orchard/optimization/search_spaces.py
def get_scheduler_space(self) -> Mapping[str, _SamplerFn]:
    """
    Learning rate scheduler parameters.

    Returns:
        Immutable mapping of scheduler-related samplers
    """
    ov = self.ov

    def pick_scheduler(trial):
        return trial.suggest_categorical("scheduler_type", ov.scheduler_type)

    def pick_patience(trial):
        return trial.suggest_int(
            "scheduler_patience", ov.scheduler_patience.low, ov.scheduler_patience.high
        )

    return MappingProxyType(
        {"scheduler_type": pick_scheduler, "scheduler_patience": pick_patience}
    )

get_augmentation_space()

Data augmentation intensity parameters.

Returns:

Type Description
Mapping[str, _SamplerFn]

Immutable mapping of augmentation samplers

Source code in orchard/optimization/search_spaces.py
def get_augmentation_space(self) -> Mapping[str, _SamplerFn]:
    """
    Data augmentation intensity parameters.

    Returns:
        Immutable mapping of augmentation samplers
    """
    ov = self.ov

    def sample_rotation(trial):
        # Rotation is an integer degree count, hence suggest_int.
        return trial.suggest_int("rotation_angle", ov.rotation_angle.low, ov.rotation_angle.high)

    def sample_jitter(trial):
        return trial.suggest_float("jitter_val", ov.jitter_val.low, ov.jitter_val.high)

    def sample_min_scale(trial):
        return trial.suggest_float("min_scale", ov.min_scale.low, ov.min_scale.high)

    return MappingProxyType(
        {
            "rotation_angle": sample_rotation,
            "jitter_val": sample_jitter,
            "min_scale": sample_min_scale,
        }
    )

get_full_space(resolution=28)

Combined search space with all available parameters.

Parameters:

Name Type Description Default
resolution int

Input image resolution for batch size calculation

28

Returns:

Type Description
Mapping[str, _SamplerFn]

Immutable unified mapping of all parameter samplers

Source code in orchard/optimization/search_spaces.py
def get_full_space(self, resolution: int = 28) -> Mapping[str, _SamplerFn]:
    """
    Combined search space with all available parameters.

    Args:
        resolution: Input image resolution for batch size calculation

    Returns:
        Immutable unified mapping of all parameter samplers
    """
    # Dict unpacking merges the groups in order; later groups win on key
    # collisions, mirroring sequential dict.update() calls.
    combined = {
        **self.get_optimization_space(),
        **self.get_loss_space(),
        **self.get_regularization_space(),
        **self.get_batch_size_space(resolution),
        **self.get_scheduler_space(),
        **self.get_augmentation_space(),
    }
    return MappingProxyType(combined)

get_quick_space(resolution=28)

Reduced search space for fast exploration (most impactful params).

Focuses on:

  • Learning rate (most critical)
  • Weight decay
  • Batch size (resolution-aware)
  • Dropout

Parameters:

Name Type Description Default
resolution int

Input image resolution for batch size calculation

28

Returns:

Type Description
Mapping[str, _SamplerFn]

Immutable mapping of high-impact parameter samplers

Source code in orchard/optimization/search_spaces.py
def get_quick_space(self, resolution: int = 28) -> Mapping[str, _SamplerFn]:
    """
    Reduced search space for fast exploration (most impactful params).

    Focuses on:

    - Learning rate (most critical)
    - Weight decay
    - Batch size (resolution-aware)
    - Dropout

    Args:
        resolution: Input image resolution for batch size calculation

    Returns:
        Immutable mapping of high-impact parameter samplers
    """
    quick = dict(self.get_optimization_space())
    # Pull only the high-impact samplers from the other groups.
    quick["batch_size"] = self.get_batch_size_space(resolution)["batch_size"]
    quick["dropout"] = self.get_regularization_space()["dropout"]
    return MappingProxyType(quick)

get_model_space_224() staticmethod

Search space for 224x224 architectures with weight variants.

Source code in orchard/optimization/search_spaces.py
@staticmethod
def get_model_space_224() -> Mapping[str, _SamplerFn]:
    """Search space for 224x224 architectures with weight variants."""
    architectures = ["resnet_18", "efficientnet_b0", "vit_tiny", "convnext_tiny"]

    def sample_model(trial):
        return trial.suggest_categorical("model_name", architectures)

    return MappingProxyType(
        {
            "model_name": sample_model,
            "weight_variant": _vit_weight_variant_sampler,
        }
    )

get_model_space_28() staticmethod

Search space for 28x28 architectures.

Source code in orchard/optimization/search_spaces.py
@staticmethod
def get_model_space_28() -> Mapping[str, _SamplerFn]:
    """Search space for 28x28 architectures."""
    low_res_models = ["resnet_18", "mini_cnn"]

    def sample_model(trial):
        return trial.suggest_categorical("model_name", low_res_models)

    return MappingProxyType({"model_name": sample_model})

get_early_stopping_callback(metric_name, direction, threshold=None, patience=2, enabled=True)

Factory function to create appropriate early stopping callback.

Provides sensible defaults for common metrics.

Parameters:

Name Type Description Default
metric_name str

Name of metric being optimized (e.g., "auc", "accuracy")

required
direction str

"maximize" or "minimize"

required
threshold float | None

Custom threshold (if None, uses metric-specific default)

None
patience int

Trials meeting threshold before stopping

2
enabled bool

Whether callback is active

True

Returns:

Type Description
StudyEarlyStoppingCallback | None

Configured callback or None if disabled

Source code in orchard/optimization/early_stopping.py
def get_early_stopping_callback(
    metric_name: str,
    direction: str,
    threshold: float | None = None,
    patience: int = 2,
    enabled: bool = True,
) -> StudyEarlyStoppingCallback | None:
    """
    Factory function to create appropriate early stopping callback.

    Provides sensible defaults for common metrics.

    Args:
        metric_name: Name of metric being optimized (e.g., "auc", "accuracy")
        direction: "maximize" or "minimize"
        threshold: Custom threshold (if None, uses metric-specific default)
        patience: Trials meeting threshold before stopping
        enabled: Whether callback is active

    Returns:
        Configured callback or None if disabled
    """
    if not enabled:
        return None

    if threshold is None:
        # Fall back to the metric-specific default registered for this
        # optimization direction (lookup is case-insensitive on the metric).
        direction_thresholds = _DEFAULT_THRESHOLDS.get(direction)
        if direction_thresholds is not None:
            threshold = direction_thresholds.get(metric_name.lower())

        if threshold is None:
            # Fixed message capitalization ("set" -> "Set") for consistency.
            logger.warning(
                "No default threshold for metric '%s'. "
                "Early stopping disabled. Set threshold manually to enable.",
                metric_name,
            )
            return None

    return StudyEarlyStoppingCallback(
        threshold=threshold, direction=direction, patience=patience, enabled=enabled
    )

export_best_config(study, cfg, paths)

Export best trial configuration as YAML file.

Creates a new Config instance with best hyperparameters applied, validates it, and saves to reports/best_config.yaml.

Parameters:

Name Type Description Default
study Study

Completed Optuna study with at least one successful trial

required
cfg Config

Template configuration (used for non-optimized parameters)

required
paths RunPaths

RunPaths instance for output location

required

Returns:

Type Description
Path | None

Path to exported config file, or None if no completed trials

Note

Skips export with warning if no completed trials exist.

Example

export_best_config(study, cfg, paths)

Creates: {paths.reports}/best_config.yaml

Source code in orchard/optimization/orchestrator/exporters.py
def export_best_config(study: optuna.Study, cfg: Config, paths: RunPaths) -> Path | None:
    """
    Export best trial configuration as YAML file.

    Creates a new Config instance with best hyperparameters applied,
    validates it, and saves to reports/best_config.yaml.

    Args:
        study: Completed Optuna study with at least one successful trial
        cfg: Template configuration (used for non-optimized parameters)
        paths: RunPaths instance for output location

    Returns:
        Path to exported config file, or None if no completed trials

    Note:
        Skips export with warning if no completed trials exist.

    Example:
        >>> export_best_config(study, cfg, paths)
        # Creates: {paths.reports}/best_config.yaml
    """
    # Nothing to export without at least one successful trial.
    if not has_completed_trials(study):
        logger.warning("No completed trials. Cannot export best config.")
        return None

    # Merge the best hyperparameters over the template config, then
    # round-trip through Config so validation runs before export.
    best_config = Config(**build_best_config_dict(study.best_params, cfg))

    destination = paths.reports / "best_config.yaml"
    save_config_as_yaml(best_config, destination)

    return destination

export_study_summary(study, paths)

Export complete study metadata to JSON.

Serializes all trials with parameters, values, states, timestamps, and durations. Handles studies with zero completed trials gracefully.

Parameters:

Name Type Description Default
study Study

Optuna study (may contain failed/pruned trials)

required
paths RunPaths

RunPaths instance for output location

required

Output structure::

{
    "study_name": str,
    "direction": str,
    "n_trials": int,
    "n_completed": int,
    "best_trial": {...} or null,
    "trials": [...]
}
Example

export_study_summary(study, paths)

Creates: {paths.reports}/study_summary.json

Source code in orchard/optimization/orchestrator/exporters.py
def export_study_summary(study: optuna.Study, paths: RunPaths) -> None:
    """
    Export complete study metadata to JSON.

    Serializes all trials with parameters, values, states, timestamps,
    and durations. Handles studies with zero completed trials gracefully.

    Args:
        study: Optuna study (may contain failed/pruned trials)
        paths: RunPaths instance for output location

    Output structure::

        {
            "study_name": str,
            "direction": str,
            "n_trials": int,
            "n_completed": int,
            "best_trial": {...} or null,
            "trials": [...]
        }

    Example:
        >>> export_study_summary(study, paths)
        # Creates: {paths.reports}/study_summary.json
    """
    completed = get_completed_trials(study)

    # May be None when the study has no completed trials.
    best = build_best_trial_data(study, completed)

    payload = {
        "study_name": study.study_name,
        "direction": study.direction.name,
        "n_trials": len(study.trials),
        "n_completed": len(completed),
        "best_trial": best.to_dict() if best else None,
        "trials": [TrialData.from_trial(t).to_dict() for t in study.trials],
    }

    destination = paths.reports / "study_summary.json"
    with open(destination, "w") as fh:
        json.dump(payload, fh, indent=2)

    logger.info(
        "%s%s %-22s: %s",
        LogStyle.INDENT,
        LogStyle.ARROW,
        "Study Summary",
        Path(destination).name,
    )

export_top_trials(study, paths, metric_name, top_k=10)

Export top K trials to Excel spreadsheet with professional formatting.

Creates human-readable comparison table of best-performing trials with hyperparameters, metric values, and durations. Applies professional Excel styling matching TrainingReport format.

Parameters:

Name Type Description Default
study Study

Completed Optuna study with at least one successful trial

required
paths RunPaths

RunPaths instance for output location

required
metric_name str

Name of optimization metric (for column header)

required
top_k int

Number of top trials to export (default: 10)

10

DataFrame Columns:

  • Rank: 1-based ranking
  • Trial: Trial number
  • {METRIC_NAME}: Objective value
  • {param_name}: Each hyperparameter
  • Duration (s): Trial duration if available
Example

export_top_trials(study, paths, "auc", top_k=10)

Creates: {paths.reports}/top_10_trials.xlsx

Source code in orchard/optimization/orchestrator/exporters.py
def export_top_trials(
    study: optuna.Study, paths: RunPaths, metric_name: str, top_k: int = 10
) -> None:
    """
    Export top K trials to Excel spreadsheet with professional formatting.

    Creates human-readable comparison table of best-performing trials
    with hyperparameters, metric values, and durations. Applies professional
    Excel styling matching TrainingReport format.

    Args:
        study: Completed Optuna study with at least one successful trial
        paths: RunPaths instance for output location
        metric_name: Name of optimization metric (for column header)
        top_k: Number of top trials to export (default: 10)

    DataFrame Columns:

    - Rank: 1-based ranking
    - Trial: Trial number
    - {METRIC_NAME}: Objective value
    - {param_name}: Each hyperparameter
    - Duration (s): Trial duration if available

    Example:
        >>> export_top_trials(study, paths, "auc", top_k=10)
        # Creates: {paths.reports}/top_10_trials.xlsx
    """
    completed = get_completed_trials(study)
    if not completed:
        logger.warning("No completed trials. Cannot export top trials.")
        return

    descending = study.direction == optuna.study.StudyDirection.MAXIMIZE

    def _has_usable_value(trial) -> bool:
        # Trials with a missing or NaN objective cannot be ranked.
        value = trial.value
        return value is not None and not (isinstance(value, float) and math.isnan(value))

    ranked = sorted(
        (t for t in completed if _has_usable_value(t)),
        key=lambda t: t.value,
        reverse=descending,
    )[:top_k]

    df = build_top_trials_dataframe(ranked, metric_name)

    output_path = paths.reports / "top_10_trials.xlsx"

    workbook = Workbook()
    sheet = workbook.active
    sheet.title = "Top Trials"

    _write_styled_rows(sheet, df)
    _auto_adjust_column_widths(sheet)

    workbook.save(output_path)
    logger.info(
        "%s%s %-22s: %s (%d trials)",
        LogStyle.INDENT,
        LogStyle.ARROW,
        "Top Trials",
        Path(output_path).name,
        len(ranked),
    )

run_optimization(cfg, device, paths, tracker=None)

Convenience function to run complete optimization pipeline.

Parameters:

Name Type Description Default
cfg Config

Global configuration with optuna section

required
device device

PyTorch device for training

required
paths RunPaths

RunPaths instance for output management

required
tracker TrackerProtocol | None

Optional experiment tracker for nested trial logging

None

Returns:

Type Description
Study

Completed Optuna study with trial results

Example

study = run_optimization(cfg=config, device=torch.device("cuda"), paths=paths) print(f"Best AUC: {study.best_value:.3f}")

Source code in orchard/optimization/orchestrator/orchestrator.py
def run_optimization(
    cfg: Config,
    device: torch.device,
    paths: RunPaths,
    tracker: TrackerProtocol | None = None,
) -> optuna.Study:
    """
    Convenience wrapper that builds an orchestrator and runs the full
    optimization pipeline in one call.

    Args:
        cfg (Config): Global configuration with optuna section
        device (torch.device): PyTorch device for training
        paths (RunPaths): RunPaths instance for output management
        tracker (TrackerProtocol | None): Optional experiment tracker for nested trial logging

    Returns:
        Completed Optuna study with trial results

    Example:
        >>> study = run_optimization(cfg=config, device=torch.device("cuda"), paths=paths)
        >>> print(f"Best AUC: {study.best_value:.3f}")
    """
    return OptunaOrchestrator(cfg=cfg, device=device, paths=paths, tracker=tracker).optimize()

get_search_space(preset='quick', resolution=28, include_models=False, model_pool=None, overrides=None)

Factory function to retrieve a search space preset.

Parameters:

Name Type Description Default
preset str

Name of the preset ("quick", "full", etc.)

'quick'
resolution int

Input image resolution (affects batch_size choices)

28
include_models bool

If True, includes model architecture selection

False
model_pool list[str] | None

Restrict model search to these architectures. When None, uses all built-in models for the target resolution.

None
overrides SearchSpaceOverrides | None

Configurable search range bounds (uses defaults if None)

None

Returns:

Type Description
Mapping[str, Any]

Immutable mapping of parameter samplers keyed by parameter name

Raises:

Type Description
OrchardConfigError

If preset name not recognized

Source code in orchard/optimization/search_spaces.py
def get_search_space(
    preset: str = "quick",
    resolution: int = 28,
    include_models: bool = False,
    model_pool: list[str] | None = None,
    overrides: SearchSpaceOverrides | None = None,
) -> Mapping[str, Any]:
    """
    Factory function to retrieve a search space preset.

    Args:
        preset: Name of the preset ("quick", "full", etc.)
        resolution: Input image resolution (affects batch_size choices)
        include_models: If True, includes model architecture selection
        model_pool: Restrict model search to these architectures.
            When None, uses all built-in models for the target resolution.
        overrides: Configurable search range bounds (uses defaults if None)

    Returns:
        Immutable mapping of parameter samplers keyed by parameter name

    Raises:
        OrchardConfigError: If preset name not recognized
    """
    registry = SearchSpaceRegistry(overrides)

    # Dispatch table keeps preset selection in one place.
    builders = {
        "quick": registry.get_quick_space,
        "full": registry.get_full_space,
    }
    builder = builders.get(preset)
    if builder is None:
        raise OrchardConfigError(f"Unknown preset '{preset}'. Available: quick, full")
    space = dict(builder(resolution))

    if include_models:
        # An explicit pool takes precedence; otherwise pick the built-in
        # model space matching the resolution regime.
        if model_pool is not None:
            model_space = _build_model_space_from_pool(model_pool)
        elif resolution >= HIGHRES_THRESHOLD:
            model_space = registry.get_model_space_224()
        else:
            model_space = registry.get_model_space_28()
        space.update(model_space)

    return MappingProxyType(space)