AutoMLExperiment Class

Definition

The class for AutoML experiment

C#:  public class AutoMLExperiment
F#:  type AutoMLExperiment = class
VB:  Public Class AutoMLExperiment
Inheritance
AutoMLExperiment

Examples

using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Microsoft.ML;
using Microsoft.ML.AutoML;
using Microsoft.ML.Data;

namespace Microsoft.ML.AutoML.Samples
{
    public static class AutoMLExperiment
    {
        public static async Task RunAsync()
        {
            var seed = 0;

            // Create a new context for ML.NET operations. It can be used for
            // exception tracking and logging, as a catalog of available operations
            // and as the source of randomness. Setting the seed to a fixed number
            // in this example to make outputs deterministic.
            var context = new MLContext(seed);

            // Create a list of training data points and convert it to IDataView.
            var data = GenerateRandomBinaryClassificationDataPoints(100, seed);
            var dataView = context.Data.LoadFromEnumerable(data);

            var trainTestSplit = context.Data.TrainTestSplit(dataView);

            // Define the sweepable pipeline using predefined binary trainers and search space.
            var pipeline = context.Auto().BinaryClassification(labelColumnName: "Label", featureColumnName: "Features");

            // Create an AutoML experiment
            var experiment = context.Auto().CreateExperiment();

            // Redirect AutoML log to console
            context.Log += (object o, LoggingEventArgs e) =>
            {
                if (e.Source == nameof(AutoMLExperiment) && e.Kind > Runtime.ChannelMessageKind.Trace)
                {
                    Console.WriteLine(e.RawMessage);
                }
            };

            // Configure the experiment to optimize the "Accuracy" metric on the
            // given dataset. The experiment runs hyper-parameter optimization
            // over the given pipeline.
            experiment.SetPipeline(pipeline)
                      .SetDataset(trainTestSplit.TrainSet, fold: 5) // use 5-fold cross validation to evaluate each trial
                      .SetBinaryClassificationMetric(BinaryClassificationMetric.Accuracy, "Label")
                      .SetMaxModelToExplore(100); // explore 100 trials

            // Start the AutoML experiment.
            var result = await experiment.RunAsync();

            // Expected output samples during training:
            //      Update Running Trial - Id: 0
            //      Update Completed Trial - Id: 0 - Metric: 0.5536912515402218 - Pipeline: FastTreeBinary - Duration: 595 - Peak CPU: 0.00 % -Peak Memory in MB: 35.81
            //      Update Best Trial - Id: 0 - Metric: 0.5536912515402218 - Pipeline: FastTreeBinary

            // Evaluate the test dataset on the best model.
            var bestModel = result.Model;
            var eval = bestModel.Transform(trainTestSplit.TestSet);
            var metrics = context.BinaryClassification.Evaluate(eval);

            PrintMetrics(metrics);

            // Expected output:
            //  Accuracy: 0.67
            //  AUC: 0.75
            //  F1 Score: 0.33
            //  Negative Precision: 0.88
            //  Negative Recall: 0.70
            //  Positive Precision: 0.25
            //  Positive Recall: 0.50

            //  TEST POSITIVE RATIO: 0.1667 (2.0 / (2.0 + 10.0))
            //  Confusion table
            //            ||======================
            //  PREDICTED || positive | negative | Recall
            //  TRUTH     ||======================
            //   positive ||        1 |        1 | 0.5000
            //   negative ||        3 |        7 | 0.7000
            //            ||======================
            //  Precision ||   0.2500 |   0.8750 |
        }

        private static IEnumerable<BinaryClassificationDataPoint> GenerateRandomBinaryClassificationDataPoints(int count,
            int seed = 0)
        {
            var random = new Random(seed);
            float randomFloat() => (float)random.NextDouble();
            for (int i = 0; i < count; i++)
            {
                var label = randomFloat() > 0.5f;
                yield return new BinaryClassificationDataPoint
                {
                    Label = label,
                    // Create random features that are correlated with the label.
                    // For data points with false label, the feature values are
                    // slightly increased by adding a constant.
                    Features = Enumerable.Repeat(label, 50)
                        .Select(x => x ? randomFloat() : randomFloat() + 0.1f)
                        .ToArray()
                };
            }
        }

        // Example with label and 50 feature values. A data set is a collection of
        // such examples.
        private class BinaryClassificationDataPoint
        {
            public bool Label { get; set; }

            [VectorType(50)]
            public float[] Features { get; set; }
        }

        // Class used to capture predictions.
        private class Prediction
        {
            // Original label.
            public bool Label { get; set; }
            // Predicted label from the trainer.
            public bool PredictedLabel { get; set; }
        }

        // Pretty-print BinaryClassificationMetrics objects.
        private static void PrintMetrics(BinaryClassificationMetrics metrics)
        {
            Console.WriteLine($"Accuracy: {metrics.Accuracy:F2}");
            Console.WriteLine($"AUC: {metrics.AreaUnderRocCurve:F2}");
            Console.WriteLine($"F1 Score: {metrics.F1Score:F2}");
            Console.WriteLine($"Negative Precision: " +
                $"{metrics.NegativePrecision:F2}");

            Console.WriteLine($"Negative Recall: {metrics.NegativeRecall:F2}");
            Console.WriteLine($"Positive Precision: " +
                $"{metrics.PositivePrecision:F2}");

            Console.WriteLine($"Positive Recall: {metrics.PositiveRecall:F2}\n");
            Console.WriteLine(metrics.ConfusionMatrix.GetFormattedConfusionTable());
        }
    }
}

Constructors

AutoMLExperiment(MLContext, AutoMLExperiment+AutoMLExperimentSettings)

Methods

AddSearchSpace(String, SearchSpace)
Run()

Run the experiment and return the best trial result synchronously.

RunAsync(CancellationToken)

Run the experiment and return the best trial result asynchronously. If ct is cancelled, the experiment returns the current best trial result, provided any trial has completed, and throws a TimeoutException with the message "Training time finished without completing a trial run" when no trial has completed. Note that this method does not return immediately after ct is cancelled; instead, it calls Microsoft.ML.MLContext.CancelExecution to cancel all training processes and waits for all running trials to be cancelled or completed.
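
A minimal sketch of using this cancellation behavior to cap training time; the 10-second budget and variable names are illustrative, and reading the optimized metric from result.Metric is an assumption of this sketch:

// Cancel the experiment after roughly 10 seconds.
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(10));

try
{
    // Returns the best trial completed so far once the token is cancelled.
    var result = await experiment.RunAsync(cts.Token);
    Console.WriteLine($"Best metric: {result.Metric}");
}
catch (TimeoutException)
{
    // Thrown when the token is cancelled before any trial has completed.
    Console.WriteLine("No trial completed within the time budget.");
}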

SetMaximumMemoryUsageInMegaByte(Double)
SetMaxModelToExplore(Int32)
SetMonitor<TMonitor>()
SetMonitor<TMonitor>(Func<IServiceProvider,TMonitor>)
SetMonitor<TMonitor>(TMonitor)
SetTrainingTimeInSeconds(UInt32)
SetTrialRunner<TTrialRunner>()
SetTrialRunner<TTrialRunner>(Func<IServiceProvider,TTrialRunner>)
SetTrialRunner<TTrialRunner>(TTrialRunner)
SetTuner<TTuner>()
SetTuner<TTuner>(Func<IServiceProvider,TTuner>)
SetTuner<TTuner>(TTuner)
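
These setters compose fluently; a sketch of a typical budget configuration, where the concrete values are illustrative:

experiment.SetTrainingTimeInSeconds(600)          // overall training budget of 10 minutes
          .SetMaxModelToExplore(50)               // explore at most 50 trials
          .SetMaximumMemoryUsageInMegaByte(2048); // memory ceiling enforced during training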

Extension Methods

SetBinaryClassificationMetric(AutoMLExperiment, BinaryClassificationMetric, String, String)

Set Microsoft.ML.AutoML.BinaryMetricManager as the evaluation manager for AutoMLExperiment. This makes AutoMLExperiment use metric as the evaluation metric.

SetCheckpoint(AutoMLExperiment, String)

Set the checkpoint folder for AutoMLExperiment. The checkpoint folder is used to save temporary output, run history and other artifacts, which are used to restore the training process from the last checkpoint and continue training.
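
A sketch of continuing training across runs; the folder path is hypothetical:

// Re-running the same experiment with the same checkpoint folder picks up
// the run history saved by earlier (possibly interrupted) runs.
experiment.SetCheckpoint(@"C:\temp\automl-checkpoint");
var result = await experiment.RunAsync();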

SetCostFrugalTuner(AutoMLExperiment)

Set Microsoft.ML.AutoML.CostFrugalTuner as the tuner for hyper-parameter optimization.

SetDataset(AutoMLExperiment, DataOperationsCatalog+TrainTestData)

Set the train and validation datasets for AutoMLExperiment. This makes AutoMLExperiment use the TrainSet from trainValidationSplit to train a model, and the TestSet from trainValidationSplit to evaluate it.

SetDataset(AutoMLExperiment, IDataView, IDataView, Boolean)

Set the train and validation datasets for AutoMLExperiment. This makes AutoMLExperiment use train to train a model, and validation to evaluate it.

SetDataset(AutoMLExperiment, IDataView, Int32, String)

Set the cross-validation dataset for AutoMLExperiment. This makes AutoMLExperiment use n-fold cross-validation on dataset to train and evaluate a model.
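
The three overloads map onto the usual evaluation strategies. A sketch, reusing context, dataView and trainTestSplit from the example above; the explicit false passed to the trailing Boolean is a guess at its default, since this reference does not describe it:

// 1. Explicit train and validation datasets.
experiment.SetDataset(trainTestSplit.TrainSet, trainTestSplit.TestSet, false);

// 2. A TrainTestData split: TrainSet trains the model, TestSet evaluates it.
experiment.SetDataset(context.Data.TrainTestSplit(dataView, testFraction: 0.1));

// 3. n-fold cross-validation on a single dataset (here n = 5).
experiment.SetDataset(dataView, fold: 5);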

SetEciCostFrugalTuner(AutoMLExperiment)

Set Microsoft.ML.AutoML.EciCostFrugalTuner as the tuner for hyper-parameter optimization. This tuner only works with search spaces from SweepablePipeline.

SetGridSearchTuner(AutoMLExperiment, Int32)

Set Microsoft.ML.AutoML.GridSearchTuner as the tuner for hyper-parameter optimization.
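
The tuner setters are alternatives, so configure one per experiment. A sketch; the grid-search argument is illustrative, and per the descriptions here the ECI variant requires a SweepablePipeline search space:

experiment.SetCostFrugalTuner();
// experiment.SetEciCostFrugalTuner(); // only for SweepablePipeline search spaces
// experiment.SetGridSearchTuner(10);  // Int32 argument taken here to control grid granularity (assumed)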

SetMulticlassClassificationMetric(AutoMLExperiment, MulticlassClassificationMetric, String, String)

Set Microsoft.ML.AutoML.MultiClassMetricManager as the evaluation manager for AutoMLExperiment. This makes AutoMLExperiment use metric as the evaluation metric.

SetPerformanceMonitor(AutoMLExperiment, Int32)

Set DefaultPerformanceMonitor as IPerformanceMonitor for AutoMLExperiment.

SetPerformanceMonitor<TPerformanceMonitor>(AutoMLExperiment, Func<IServiceProvider,TPerformanceMonitor>)

Set a custom performance monitor as IPerformanceMonitor for AutoMLExperiment.

SetPerformanceMonitor<TPerformanceMonitor>(AutoMLExperiment)

Set a custom performance monitor as IPerformanceMonitor for AutoMLExperiment.
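
A sketch of enabling the built-in monitor; interpreting the Int32 overload's argument as a sampling interval in milliseconds is an assumption here:

// Use DefaultPerformanceMonitor, checking resource usage roughly once per second.
experiment.SetPerformanceMonitor(1000);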

SetPipeline(AutoMLExperiment, SweepablePipeline)

Set the pipeline for training. This also makes AutoMLExperiment use Microsoft.ML.AutoML.SweepablePipelineRunner, Microsoft.ML.AutoML.MLContextMonitor and Microsoft.ML.AutoML.EciCostFrugalTuner for AutoML training.

SetRandomSearchTuner(AutoMLExperiment, Nullable<Int32>)

Set Microsoft.ML.AutoML.RandomSearchTuner as the tuner for hyper-parameter optimization. If seed is provided, it is used to initialize Microsoft.ML.AutoML.RandomSearchTuner; otherwise, Seed is used.
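
A one-line sketch of a reproducible random sweep; the seed value is illustrative:

// A fixed seed makes hyper-parameter sampling deterministic across runs.
experiment.SetRandomSearchTuner(0);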

SetRegressionMetric(AutoMLExperiment, RegressionMetric, String, String)

Set Microsoft.ML.AutoML.RegressionMetricManager as the evaluation manager for AutoMLExperiment. This makes AutoMLExperiment use metric as the evaluation metric.
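
The binary variant appears in the example above; the multiclass and regression variants follow the same shape. A sketch, assuming the ML.NET default column names "Label" and "Score":

// Multiclass: optimize macro accuracy, reading labels from "Label".
experiment.SetMulticlassClassificationMetric(MulticlassClassificationMetric.MacroAccuracy, "Label");

// Regression: optimize R-squared, comparing "Label" against the "Score" column.
experiment.SetRegressionMetric(RegressionMetric.RSquared, "Label", "Score");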

SetSmacTuner(AutoMLExperiment, Int32, Int32, Int32, Int32, Single, Int32, Int32, Double, Int32)

Set Microsoft.ML.AutoML.SmacTuner as the tuner for hyper-parameter optimization. The performance of SMAC is to a large extent determined by numberOfTrees, nMinForSplit and splitRatio, which are used to fit SMAC's inner regressor.
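
Since the parameters above appear to have defaults, a minimal sketch can simply switch the tuner on:

// SMAC proposes the next configuration from a regressor fitted over past trials;
// numberOfTrees, nMinForSplit and splitRatio shape that inner regressor.
experiment.SetSmacTuner();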
