mlflow.client
The mlflow.client
module provides a Python CRUD interface to MLflow Experiments, Runs,
Model Versions, and Registered Models. This is a lower level API that directly translates to MLflow
REST API calls.
For a higher level API for managing an “active run”, use the mlflow
module.
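As a rough contrast (a minimal sketch assuming the default local tracking setup), the fluent API operates on an implicit "active run" while the client API names the run explicitly:

import mlflow
from mlflow import MlflowClient

# Fluent API: logs against the implicit "active run"
with mlflow.start_run():
    mlflow.log_metric("m", 1.0)

# Client API: every call takes an explicit run ID and nothing is "active"
client = MlflowClient()
run = client.create_run(experiment_id="0")
client.log_metric(run.info.run_id, "m", 1.0)
client.set_terminated(run.info.run_id)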
-
class
mlflow.client.
MlflowClient
(tracking_uri: Optional[str] = None, registry_uri: Optional[str] = None)[source] Bases:
object
Client of an MLflow Tracking Server that creates and manages experiments and runs, and of an MLflow Registry Server that creates and manages registered models and model versions. It’s a thin wrapper around TrackingServiceClient and RegistryClient so there is a unified API but we can keep the implementation of the tracking and registry clients independent from each other.
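A minimal instantiation sketch; both URIs are optional, and the values below are placeholders rather than defaults:

from mlflow import MlflowClient

# When the URIs are omitted, the currently configured tracking/registry URIs are used.
client = MlflowClient(
    tracking_uri="http://localhost:5000",  # placeholder tracking server
    registry_uri="sqlite:///mlruns.db",    # placeholder registry backend
)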
-
create_experiment
(name: str, artifact_location: Optional[str] = None, tags: Optional[Dict[str, Any]] = None) → str[source] Create an experiment.
- Parameters
name – The experiment name. Must be unique.
artifact_location – The location to store run artifacts. If not provided, the server picks an appropriate default.
tags – A dictionary of key-value pairs that are converted into
mlflow.entities.ExperimentTag
objects, set as experiment tags upon experiment creation.
- Returns
String ID of the created experiment.
from pathlib import Path
from mlflow import MlflowClient

# Create an experiment with a name that is unique and case sensitive.
client = MlflowClient()
experiment_id = client.create_experiment(
    "Social NLP Experiments",
    artifact_location=Path.cwd().joinpath("mlruns").as_uri(),
    tags={"version": "v1", "priority": "P1"},
)
client.set_experiment_tag(experiment_id, "nlp.framework", "Spark NLP")

# Fetch experiment metadata information
experiment = client.get_experiment(experiment_id)
print("Name: {}".format(experiment.name))
print("Experiment_id: {}".format(experiment.experiment_id))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Tags: {}".format(experiment.tags))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
-
create_model_version
(name: str, source: str, run_id: Optional[str] = None, tags: Optional[Dict[str, Any]] = None, run_link: Optional[str] = None, description: Optional[str] = None, await_creation_for: int = 300) → ModelVersion[source] Create a new model version from given source (artifact URI).
- Parameters
name – Name for the containing registered model.
source – Source path where the MLflow model is stored.
run_id – Run ID from the MLflow tracking server that generated the model.
tags – A dictionary of key-value pairs that are converted into mlflow.entities.model_registry.ModelVersionTag objects.
run_link – Link to the run from an MLflow tracking server that generated this model.
description – Description of the version.
await_creation_for – Number of seconds to wait for the model version to finish being created and reach the READY status. By default, the function waits for five minutes. Specify 0 or None to skip waiting.
- Returns
Single
mlflow.entities.model_registry.ModelVersion
object created by backend.
import mlflow.sklearn from mlflow.store.artifact.runs_artifact_repo import RunsArtifactRepository from mlflow import MlflowClient from sklearn.ensemble import RandomForestRegressor mlflow.set_tracking_uri("sqlite:///mlruns.db") params = {"n_estimators": 3, "random_state": 42} name = "RandomForestRegression" rfr = RandomForestRegressor(**params).fit([[0, 1]], [1]) # Log MLflow entities with mlflow.start_run() as run: mlflow.log_params(params) mlflow.sklearn.log_model(rfr, artifact_path="sklearn-model") # Register model name in the model registry client = MlflowClient() client.create_registered_model(name) # Create a new version of the rfr model under the registered model name desc = "A new version of the model" runs_uri = "runs:/{}/sklearn-model".format(run.info.run_id) model_src = RunsArtifactRepository.get_underlying_uri(runs_uri) mv = client.create_model_version(name, model_src, run.info.run_id, description=desc) print("Name: {}".format(mv.name)) print("Version: {}".format(mv.version)) print("Description: {}".format(mv.description)) print("Status: {}".format(mv.status)) print("Stage: {}".format(mv.current_stage))
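For reference, a trimmed sketch of the same call that skips the readiness wait via await_creation_for=0; the run_id below is a placeholder, and the registered model is assumed to already exist:

from mlflow import MlflowClient

client = MlflowClient()
run_id = "<run id with a logged sklearn-model artifact>"  # placeholder, not a real ID

# Assumes the registered model "RandomForestRegression" already exists.
mv = client.create_model_version(
    name="RandomForestRegression",
    source="runs:/{}/sklearn-model".format(run_id),
    run_id=run_id,
    await_creation_for=0,  # return immediately instead of waiting for READY status
)
print("Version {} is in status {}".format(mv.version, mv.status))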
-
create_registered_model
(name: str, tags: Optional[Dict[str, Any]] = None, description: Optional[str] = None) → RegisteredModel[source] Create a new registered model in backend store.
- Parameters
name – Name of the new model. This is expected to be unique in the backend store.
tags – A dictionary of key-value pairs that are converted into mlflow.entities.model_registry.RegisteredModelTag objects.
description – Description of the model.
- Returns
A single object of
mlflow.entities.model_registry.RegisteredModel
created by backend.
import mlflow
from mlflow import MlflowClient

def print_registered_model_info(rm):
    print("name: {}".format(rm.name))
    print("tags: {}".format(rm.tags))
    print("description: {}".format(rm.description))

name = "SocialMediaTextAnalyzer"
tags = {"nlp.framework": "Spark NLP"}
desc = "This sentiment analysis model classifies the tone-happy, sad, angry."

mlflow.set_tracking_uri("sqlite:///mlruns.db")
client = MlflowClient()
client.create_registered_model(name, tags, desc)
print_registered_model_info(client.get_registered_model(name))
-
create_run
(experiment_id: str, start_time: Optional[int] = None, tags: Optional[Dict[str, Any]] = None, run_name: Optional[str] = None) → Run[source] Create a
mlflow.entities.Run
object that can be associated with metrics, parameters, artifacts, etc. Unlike mlflow.projects.run(), creates objects but does not run code. Unlike mlflow.start_run(), does not change the “active run” used by mlflow.log_param().
- Parameters
experiment_id – The string ID of the experiment to create a run in.
start_time – If not provided, use the current timestamp.
tags – A dictionary of key-value pairs that are converted into mlflow.entities.RunTag objects.
run_name – The name of this run.
- Returns
mlflow.entities.Run
that was created.
from mlflow import MlflowClient

# Create a run with a tag under the default experiment (whose id is '0').
tags = {"engineering": "ML Platform"}
name = "platform-run-24"
client = MlflowClient()
experiment_id = "0"
run = client.create_run(experiment_id, tags=tags, run_name=name)

# Show newly created run metadata info
print("Run tags: {}".format(run.data.tags))
print("Experiment id: {}".format(run.info.experiment_id))
print("Run id: {}".format(run.info.run_id))
print("Run name: {}".format(run.info.run_name))
print("lifecycle_stage: {}".format(run.info.lifecycle_stage))
print("status: {}".format(run.info.status))
-
delete_experiment
(experiment_id: str) → None[source] Delete an experiment from the backend store. This deletion is a soft-delete, not a permanent deletion. Experiment names cannot be reused, unless the deleted experiment is permanently deleted by a database admin.
- Parameters
experiment_id – The experiment ID returned from
create_experiment
.
from mlflow import MlflowClient

# Create an experiment with a name that is unique and case sensitive
client = MlflowClient()
experiment_id = client.create_experiment("New Experiment")
client.delete_experiment(experiment_id)

# Examine the deleted experiment details.
experiment = client.get_experiment(experiment_id)
print("Name: {}".format(experiment.name))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
-
delete_model_version
(name: str, version: str) → None[source] Delete a model version in the backend.
- Parameters
name – Name of the containing registered model.
version – Version number of the model version.
import mlflow.sklearn from mlflow import MlflowClient from sklearn.ensemble import RandomForestRegressor def print_models_info(mv): for m in mv: print("name: {}".format(m.name)) print("latest version: {}".format(m.version)) print("run_id: {}".format(m.run_id)) print("current_stage: {}".format(m.current_stage)) mlflow.set_tracking_uri("sqlite:///mlruns.db") # Create two runs and log MLflow entities with mlflow.start_run() as run1: params = {"n_estimators": 3, "random_state": 42} rfr = RandomForestRegressor(**params).fit([[0, 1]], [1]) mlflow.log_params(params) mlflow.sklearn.log_model(rfr, artifact_path="sklearn-model") with mlflow.start_run() as run2: params = {"n_estimators": 6, "random_state": 42} rfr = RandomForestRegressor(**params).fit([[0, 1]], [1]) mlflow.log_params(params) mlflow.sklearn.log_model(rfr, artifact_path="sklearn-model") # Register model name in the model registry name = "RandomForestRegression" client = MlflowClient() client.create_registered_model(name) # Create a two versions of the rfr model under the registered model name for run_id in [run1.info.run_id, run2.info.run_id]: model_uri = "runs:/{}/sklearn-model".format(run_id) mv = client.create_model_version(name, model_uri, run_id) print("model version {} created".format(mv.version)) print("--") # Fetch latest version; this will be version 2 models = client.get_latest_versions(name, stages=["None"]) print_models_info(models) print("--") # Delete the latest model version 2 print("Deleting model version {}".format(mv.version)) client.delete_model_version(name, mv.version) models = client.get_latest_versions(name, stages=["None"]) print_models_info(models)
model version 1 created model version 2 created -- name: RandomForestRegression latest version: 2 run_id: 9881172ef10f4cb08df3ed452c0c362b current_stage: None -- Deleting model version 2 name: RandomForestRegression latest version: 1 run_id: 9165d4f8aa0a4d069550824bdc55caaf current_stage: None
-
delete_model_version_tag
(name: str, version: Optional[str] = None, key: Optional[str] = None, stage: Optional[str] = None) → None[source] Delete a tag associated with the model version. When stage is set, the tag will be deleted for the latest model version of that stage. Setting both version and stage will result in an error.
- Parameters
name – Registered model name.
version – Registered model version.
key – Tag key. key is required.
stage – Registered model stage.
- Returns
None
import mlflow.sklearn from mlflow import MlflowClient from sklearn.ensemble import RandomForestRegressor def print_model_version_info(mv): print("Name: {}".format(mv.name)) print("Version: {}".format(mv.version)) print("Tags: {}".format(mv.tags)) mlflow.set_tracking_uri("sqlite:///mlruns.db") params = {"n_estimators": 3, "random_state": 42} name = "RandomForestRegression" rfr = RandomForestRegressor(**params).fit([[0, 1]], [1]) # Log MLflow entities with mlflow.start_run() as run: mlflow.log_params(params) mlflow.sklearn.log_model(rfr, artifact_path="sklearn-model") # Register model name in the model registry client = MlflowClient() client.create_registered_model(name) # Create a new version of the rfr model under the registered model name # and delete a tag model_uri = "runs:/{}/sklearn-model".format(run.info.run_id) tags = {'t': "1", "t1" : "2"} mv = client.create_model_version(name, model_uri, run.info.run_id, tags=tags) print_model_version_info(mv) print("--") #using version to delete tag client.delete_model_version_tag(name, mv.version, "t") #using stage to delete tag client.delete_model_version_tag(name, key="t1", stage=mv.current_stage) mv = client.get_model_version(name, mv.version) print_model_version_info(mv)
-
delete_registered_model
(name: str)[source] Delete a registered model. The backend raises an exception if a registered model with the given name does not exist.
- Parameters
name – Name of the registered model to delete.
import mlflow from mlflow import MlflowClient def print_registered_models_info(r_models): print("--") for rm in r_models: print("name: {}".format(rm.name)) print("tags: {}".format(rm.tags)) print("description: {}".format(rm.description)) mlflow.set_tracking_uri("sqlite:///mlruns.db") client = MlflowClient() # Register a couple of models with respective names, tags, and descriptions for name, tags, desc in [("name1", {"t1": "t1"}, 'description1'), ("name2", {"t2": "t2"}, 'description2')]: client.create_registered_model(name, tags, desc) # Fetch all registered models print_registered_models_info(client.list_registered_models()) # Delete one registered model and fetch again client.delete_registered_model("name1") print_registered_models_info(client.list_registered_models())
-
delete_registered_model_tag
(name: str, key: str) → None[source] Delete a tag associated with the registered model.
- Parameters
name – Registered model name.
key – Registered model tag key.
- Returns
None
import mlflow from mlflow import MlflowClient def print_registered_models_info(r_models): print("--") for rm in r_models: print("name: {}".format(rm.name)) print("tags: {}".format(rm.tags)) mlflow.set_tracking_uri("sqlite:///mlruns.db") client = MlflowClient() # Register a couple of models with respective names and tags for name, tags in [("name1", {"t1": "t1"}),("name2", {"t2": "t2"})]: client.create_registered_model(name, tags) # Fetch all registered models print_registered_models_info(client.list_registered_models()) # Delete a tag from model `name2` client.delete_registered_model_tag("name2", 't2') print_registered_models_info(client.list_registered_models())
-
delete_run
(run_id: str) → None[source] Deletes a run with the given ID.
- Parameters
run_id – The unique run id to delete.
from mlflow import MlflowClient

# Create a run under the default experiment (whose id is '0').
client = MlflowClient()
experiment_id = "0"
run = client.create_run(experiment_id)
run_id = run.info.run_id
print("run_id: {}; lifecycle_stage: {}".format(run_id, run.info.lifecycle_stage))
print("--")
client.delete_run(run_id)
del_run = client.get_run(run_id)
print("run_id: {}; lifecycle_stage: {}".format(run_id, del_run.info.lifecycle_stage))
-
delete_tag
(run_id: str, key: str) → None[source] Delete a tag from a run. This is irreversible.
- Parameters
run_id – String ID of the run
key – Name of the tag
from mlflow import MlflowClient

def print_run_info(run):
    print("run_id: {}".format(run.info.run_id))
    print("Tags: {}".format(run.data.tags))

# Create a run under the default experiment (whose id is '0').
client = MlflowClient()
tags = {"t1": 1, "t2": 2}
experiment_id = "0"
run = client.create_run(experiment_id, tags=tags)
print_run_info(run)
print("--")

# Delete tag and fetch updated info
client.delete_tag(run.info.run_id, "t1")
run = client.get_run(run.info.run_id)
print_run_info(run)
-
download_artifacts
(run_id: str, path: str, dst_path: Optional[str] = None) → str[source] Download an artifact file or directory from a run to a local directory if applicable, and return a local path for it.
- Parameters
run_id – The run to download artifacts from.
path – Relative source path to the desired artifact.
dst_path – Absolute path of the local filesystem destination directory to which to download the specified artifacts. This directory must already exist. If unspecified, the artifacts will either be downloaded to a new uniquely-named directory on the local filesystem or will be returned directly in the case of the LocalArtifactRepository.
- Returns
Local path of desired artifact.
import os
import mlflow
from mlflow import MlflowClient

features = "rooms, zipcode, median_price, school_rating, transport"
with open("features.txt", 'w') as f:
    f.write(features)

# Log artifacts
with mlflow.start_run() as run:
    mlflow.log_artifact("features.txt", artifact_path="features")

# Download artifacts
client = MlflowClient()
local_dir = "/tmp/artifact_downloads"
if not os.path.exists(local_dir):
    os.mkdir(local_dir)
local_path = client.download_artifacts(run.info.run_id, "features", local_dir)
print("Artifacts downloaded in: {}".format(local_path))
print("Artifacts: {}".format(os.listdir(local_path)))
-
get_experiment
(experiment_id: str) → Experiment[source] Retrieve an experiment by experiment_id from the backend store
- Parameters
experiment_id – The experiment ID returned from create_experiment.
- Returns
An instance of mlflow.entities.Experiment.
from mlflow import MlflowClient

client = MlflowClient()
exp_id = client.create_experiment("Experiment")
experiment = client.get_experiment(exp_id)

# Show experiment info
print("Name: {}".format(experiment.name))
print("Experiment ID: {}".format(experiment.experiment_id))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
-
get_experiment_by_name
(name: str) → Optional[Experiment][source] Retrieve an experiment by experiment name from the backend store
- Parameters
name – The experiment name, which is case sensitive.
- Returns
An instance of
mlflow.entities.Experiment
if an experiment with the specified name exists, otherwise None.
from mlflow import MlflowClient

# Case-sensitive name
client = MlflowClient()
experiment = client.get_experiment_by_name("Default")

# Show experiment info
print("Name: {}".format(experiment.name))
print("Experiment ID: {}".format(experiment.experiment_id))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
-
get_latest_versions
(name: str, stages: Optional[List[str]] = None) → List[ModelVersion][source] Latest version models for each requested stage. If no stages are provided, returns the latest version for each stage.
- Parameters
name – Name of the registered model from which to get the latest versions.
stages – List of desired stages. If the input list is None, return latest versions for ALL_STAGES.
- Returns
List of
mlflow.entities.model_registry.ModelVersion
objects.
import mlflow.sklearn from mlflow import MlflowClient from sklearn.ensemble import RandomForestRegressor def print_models_info(mv): for m in mv: print("name: {}".format(m.name)) print("latest version: {}".format(m.version)) print("run_id: {}".format(m.run_id)) print("current_stage: {}".format(m.current_stage)) mlflow.set_tracking_uri("sqlite:///mlruns.db") # Create two runs Log MLflow entities with mlflow.start_run() as run1: params = {"n_estimators": 3, "random_state": 42} rfr = RandomForestRegressor(**params).fit([[0, 1]], [1]) mlflow.log_params(params) mlflow.sklearn.log_model(rfr, artifact_path="sklearn-model") with mlflow.start_run() as run2: params = {"n_estimators": 6, "random_state": 42} rfr = RandomForestRegressor(**params).fit([[0, 1]], [1]) mlflow.log_params(params) mlflow.sklearn.log_model(rfr, artifact_path="sklearn-model") # Register model name in the model registry name = "RandomForestRegression" client = MlflowClient() client.create_registered_model(name) # Create a two versions of the rfr model under the registered model name for run_id in [run1.info.run_id, run2.info.run_id]: model_uri = "runs:/{}/sklearn-model".format(run_id) mv = client.create_model_version(name, model_uri, run_id) print("model version {} created".format(mv.version)) # Fetch latest version; this will be version 2 print("--") print_models_info(client.get_latest_versions(name, stages=["None"]))
-
get_metric_history
(run_id: str, key: str) → List[Metric][source] Return a list of metric objects corresponding to all values logged for a given metric.
- Parameters
run_id – Unique identifier for run
key – Metric name within the run
- Returns
A list of
mlflow.entities.Metric
entities if logged, else an empty list.
from mlflow import MlflowClient def print_metric_info(history): for m in history: print("name: {}".format(m.key)) print("value: {}".format(m.value)) print("step: {}".format(m.step)) print("timestamp: {}".format(m.timestamp)) print("--") # Create a run under the default experiment (whose id is "0"). Since this is low-level # CRUD operation, the method will create a run. To end the run, you'll have # to explicitly end it. client = MlflowClient() experiment_id = "0" run = client.create_run(experiment_id) print("run_id: {}".format(run.info.run_id)) print("--") # Log couple of metrics, update their initial value, and fetch each # logged metrics' history. for k, v in [("m1", 1.5), ("m2", 2.5)]: client.log_metric(run.info.run_id, k, v, step=0) client.log_metric(run.info.run_id, k, v + 1, step=1) print_metric_info(client.get_metric_history(run.info.run_id, k)) client.set_terminated(run.info.run_id)
-
get_model_version
(name: str, version: str) → ModelVersion[source] - Parameters
name – Name of the containing registered model.
version – Version number of the model version.
- Returns
A single
mlflow.entities.model_registry.ModelVersion
object.
import mlflow.sklearn
from mlflow import MlflowClient
from sklearn.ensemble import RandomForestRegressor

# Helper to display model version metadata
def print_model_version_info(mv):
    print("Name: {}".format(mv.name))
    print("Version: {}".format(mv.version))

# Create two runs and log MLflow entities
with mlflow.start_run() as run1:
    params = {"n_estimators": 3, "random_state": 42}
    rfr = RandomForestRegressor(**params).fit([[0, 1]], [1])
    mlflow.log_params(params)
    mlflow.sklearn.log_model(rfr, artifact_path="sklearn-model")

with mlflow.start_run() as run2:
    params = {"n_estimators": 6, "random_state": 42}
    rfr = RandomForestRegressor(**params).fit([[0, 1]], [1])
    mlflow.log_params(params)
    mlflow.sklearn.log_model(rfr, artifact_path="sklearn-model")

# Register model name in the model registry
name = "RandomForestRegression"
client = MlflowClient()
client.create_registered_model(name)

# Create two versions of the rfr model under the registered model name
for run_id in [run1.info.run_id, run2.info.run_id]:
    model_uri = "runs:/{}/sklearn-model".format(run_id)
    mv = client.create_model_version(name, model_uri, run_id)
    print("model version {} created".format(mv.version))
print("--")

# Fetch the last version; this will be version 2
mv = client.get_model_version(name, mv.version)
print_model_version_info(mv)
-
get_model_version_download_uri
(name: str, version: str) → str[source] Get the download location in Model Registry for this model version.
- Parameters
name – Name of the containing registered model.
version – Version number of the model version.
- Returns
A single URI location that allows reads for downloading.
import mlflow.sklearn from mlflow import MlflowClient from sklearn.ensemble import RandomForestRegressor mlflow.set_tracking_uri("sqlite:///mlruns.db") params = {"n_estimators": 3, "random_state": 42} name = "RandomForestRegression" rfr = RandomForestRegressor(**params).fit([[0, 1]], [1]) # Log MLflow entities with mlflow.start_run() as run: mlflow.log_params(params) mlflow.sklearn.log_model(rfr, artifact_path="models/sklearn-model") # Register model name in the model registry client = MlflowClient() client.create_registered_model(name) # Create a new version of the rfr model under the registered model name model_uri = "runs:/{}/models/sklearn-model".format(run.info.run_id) mv = client.create_model_version(name, model_uri, run.info.run_id) artifact_uri = client.get_model_version_download_uri(name, mv.version) print("Download URI: {}".format(artifact_uri))
-
get_model_version_stages
(name: str, version: str) → List[str][source] Get the list of valid stages for a model version.
- Returns
A list of valid stages.
import mlflow.sklearn from mlflow import MlflowClient from sklearn.ensemble import RandomForestRegressor mlflow.set_tracking_uri("sqlite:///mlruns.db") params = {"n_estimators": 3, "random_state": 42} name = "RandomForestRegression" rfr = RandomForestRegressor(**params).fit([[0, 1]], [1]) # Log MLflow entities with mlflow.start_run() as run: mlflow.log_params(params) mlflow.sklearn.log_model(rfr, artifact_path="models/sklearn-model") # Register model name in the model registry client = MlflowClient() client.create_registered_model(name) # Create a new version of the rfr model under the registered model name # fetch valid stages model_uri = "runs:/{}/models/sklearn-model".format(run.info.run_id) mv = client.create_model_version(name, model_uri, run.info.run_id) stages = client.get_model_version_stages(name, mv.version) print("Model list of valid stages: {}".format(stages))
-
get_registered_model
(name: str) → RegisteredModel[source] - Parameters
name – Name of the registered model to get.
- Returns
A single
mlflow.entities.model_registry.RegisteredModel
object.
import mlflow from mlflow import MlflowClient def print_model_info(rm): print("--") print("name: {}".format(rm.name)) print("tags: {}".format(rm.tags)) print("description: {}".format(rm.description)) name = "SocialMediaTextAnalyzer" tags = {"nlp.framework": "Spark NLP"} desc = "This sentiment analysis model classifies the tone-happy, sad, angry." mlflow.set_tracking_uri("sqlite:///mlruns.db") client = MlflowClient() # Create and fetch the registered model client.create_registered_model(name, tags, desc) model = client.get_registered_model(name) print_model_info(model)
-
get_run
(run_id: str) → Run[source] Fetch the run from the backend store. The resulting Run contains a collection of run metadata (RunInfo), as well as a collection of run parameters, tags, and metrics (RunData). In the case where multiple metrics with the same key are logged for the run, the RunData contains the most recently logged value at the largest step for each metric.
- Parameters
run_id – Unique identifier for the run.
- Returns
A single
mlflow.entities.Run
object, if the run exists. Otherwise, raises an exception.
import mlflow
from mlflow import MlflowClient

with mlflow.start_run() as run:
    mlflow.log_param("p", 0)

# The run has finished since we have exited the with block
# Fetch the run
client = MlflowClient()
run = client.get_run(run.info.run_id)
print("run_id: {}".format(run.info.run_id))
print("params: {}".format(run.data.params))
print("status: {}".format(run.info.status))
-
list_artifacts
(run_id: str, path=None) → List[FileInfo][source] List the artifacts for a run.
- Parameters
run_id – The run to list artifacts from.
path – The run’s relative artifact path to list from. By default it is None, which lists from the root artifact path.
- Returns
List of
mlflow.entities.FileInfo
from mlflow import MlflowClient def print_artifact_info(artifact): print("artifact: {}".format(artifact.path)) print("is_dir: {}".format(artifact.is_dir)) print("size: {}".format(artifact.file_size)) features = "rooms zipcode, median_price, school_rating, transport" labels = "price" # Create a run under the default experiment (whose id is '0'). client = MlflowClient() experiment_id = "0" run = client.create_run(experiment_id) # Create some artifacts and log under the above run for file, content in [("features", features), ("labels", labels)]: with open("{}.txt".format(file), 'w') as f: f.write(content) client.log_artifact(run.info.run_id, "{}.txt".format(file)) # Fetch the logged artifacts artifacts = client.list_artifacts(run.info.run_id) for artifact in artifacts: print_artifact_info(artifact) client.set_terminated(run.info.run_id)
-
list_experiments
(view_type: int = 1, max_results: Optional[int] = None, page_token: Optional[str] = None) → PagedList[Experiment][source] Warning
mlflow.tracking.client.MlflowClient.list_experiments
is deprecated. This method will be removed in a future release. Use search_experiments() instead.
- Parameters
view_type – Qualify requested type of experiments.
max_results – If passed, specifies the maximum number of experiments desired. If not passed, all experiments will be returned for the File and SQL backends. For the REST backend, the server will pick a maximum number of results to return.
page_token – Token specifying the next page of results. It should be obtained from a
list_experiments
call.
- Returns
A
PagedList
ofExperiment
objects. The pagination token for the next page can be obtained via thetoken
attribute of the object.
from mlflow import MlflowClient from mlflow.entities import ViewType def print_experiment_info(experiments): for e in experiments: print("- experiment_id: {}, name: {}, lifecycle_stage: {}" .format(e.experiment_id, e.name, e.lifecycle_stage)) client = MlflowClient() for name in ["Experiment 1", "Experiment 2"]: exp_id = client.create_experiment(name) # Delete the last experiment client.delete_experiment(exp_id) # Fetch experiments by view type print("Active experiments:") print_experiment_info(client.list_experiments(view_type=ViewType.ACTIVE_ONLY)) print("Deleted experiments:") print_experiment_info(client.list_experiments(view_type=ViewType.DELETED_ONLY)) print("All experiments:") print_experiment_info(client.list_experiments(view_type=ViewType.ALL))
Active experiments: - experiment_id: 0, name: Default, lifecycle_stage: active - experiment_id: 1, name: Experiment 1, lifecycle_stage: active Deleted experiments: - experiment_id: 2, name: Experiment 2, lifecycle_stage: deleted All experiments: - experiment_id: 0, name: Default, lifecycle_stage: active - experiment_id: 1, name: Experiment 1, lifecycle_stage: active - experiment_id: 2, name: Experiment 2, lifecycle_stage: deleted
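Because list_experiments is deprecated, roughly the same listing can be written against search_experiments(), which accepts the same ViewType values (a sketch):

from mlflow import MlflowClient
from mlflow.entities import ViewType

client = MlflowClient()
# search_experiments is the non-deprecated replacement for list_experiments
active = client.search_experiments(view_type=ViewType.ACTIVE_ONLY)
deleted = client.search_experiments(view_type=ViewType.DELETED_ONLY)
everything = client.search_experiments(view_type=ViewType.ALL)
for e in everything:
    print("- experiment_id: {}, name: {}, lifecycle_stage: {}".format(
        e.experiment_id, e.name, e.lifecycle_stage))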
-
list_registered_models
(max_results: int = 100, page_token: Optional[str] = None) → PagedList[RegisteredModel][source] Warning
mlflow.tracking.client.MlflowClient.list_registered_models
is deprecated. This method will be removed in a future release. Use search_registered_models() instead.
List all registered models.
- Parameters
max_results – Maximum number of registered models desired.
page_token – Token specifying the next page of results. It should be obtained from a
list_registered_models
call.
- Returns
A PagedList of
mlflow.entities.model_registry.RegisteredModel
objects that can satisfy the search expressions. The pagination token for the next page can be obtained via thetoken
attribute of the object.
import mlflow from mlflow import MlflowClient def print_model_info(models): for m in models: print("--") print("name: {}".format(m.name)) print("tags: {}".format(m.tags)) print("description: {}".format(m.description)) mlflow.set_tracking_uri("sqlite:///mlruns.db") client = MlflowClient() # Register a couple of models with respective names, tags, and descriptions for name, tags, desc in [("name1", {"t1": "t1"}, 'description1'), ("name2", {"t2": "t2"}, 'description2')]: client.create_registered_model(name, tags, desc) # Fetch all registered models print_model_info(client.list_registered_models())
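Because list_registered_models is deprecated, a comparable listing with search_registered_models() looks roughly like this (no filter returns all models):

from mlflow import MlflowClient

client = MlflowClient()
# With no filter_string, all registered models are returned, one page at a time
models = client.search_registered_models(max_results=100)
for rm in models:
    print("name: {}, tags: {}, description: {}".format(rm.name, rm.tags, rm.description))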
-
list_run_infos
(experiment_id: str, run_view_type: int = 1, max_results: int = 1000, order_by: Optional[List[str]] = None, page_token: Optional[str] = None) → PagedList[RunInfo][source] Warning
mlflow.tracking.client.MlflowClient.list_run_infos
is deprecated. This method will be removed in a future release. Use search_runs() instead.
Return run information for runs that belong to the experiment_id.
- Parameters
experiment_id – The experiment ID to search.
run_view_type – ACTIVE_ONLY, DELETED_ONLY, or ALL runs
max_results – Maximum number of results desired.
order_by – List of order_by clauses. Currently supported values are
metric.key
,parameter.key
,tag.key
,attribute.key
. For example,order_by=["tag.release ASC", "metric.click_rate DESC"]
.
- Returns
A
PagedList
ofRunInfo
objects that satisfy the search expressions. If the underlying tracking store supports pagination, the token for the next page may be obtained via thetoken
attribute of the returned object.
import mlflow
from mlflow import MlflowClient
from mlflow.entities import ViewType

def print_run_infos(run_infos):
    for r in run_infos:
        print("- run_id: {}, lifecycle_stage: {}".format(r.run_id, r.lifecycle_stage))

# Create two runs
with mlflow.start_run() as run1:
    mlflow.log_metric("click_rate", 1.55)

with mlflow.start_run() as run2:
    mlflow.log_metric("click_rate", 2.50)

# Delete the last run
client = MlflowClient()
client.delete_run(run2.info.run_id)

# Get all runs under the default experiment (whose id is 0)
print("Active runs:")
print_run_infos(client.list_run_infos("0", run_view_type=ViewType.ACTIVE_ONLY))

print("Deleted runs:")
print_run_infos(client.list_run_infos("0", run_view_type=ViewType.DELETED_ONLY))

print("All runs:")
print_run_infos(client.list_run_infos("0", run_view_type=ViewType.ALL,
                                      order_by=["metric.click_rate DESC"]))
Active runs: - run_id: 47b11b33f9364ee2b148c41375a30a68, lifecycle_stage: active Deleted runs: - run_id: bc4803439bdd4a059103811267b6b2f4, lifecycle_stage: deleted All runs: - run_id: bc4803439bdd4a059103811267b6b2f4, lifecycle_stage: deleted - run_id: 47b11b33f9364ee2b148c41375a30a68, lifecycle_stage: active
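Because list_run_infos is deprecated, the equivalent query with search_runs() returns full Run objects whose run.info carries the same RunInfo fields (a sketch):

from mlflow import MlflowClient
from mlflow.entities import ViewType

client = MlflowClient()
# search_runs is the non-deprecated replacement for list_run_infos
runs = client.search_runs(
    experiment_ids=["0"],
    run_view_type=ViewType.ALL,
    order_by=["metrics.click_rate DESC"],
)
for run in runs:
    print("- run_id: {}, lifecycle_stage: {}".format(run.info.run_id, run.info.lifecycle_stage))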
-
log_artifact
(run_id, local_path, artifact_path=None) → None[source] Write a local file or directory to the remote artifact_uri.
- Parameters
run_id – The run to which the artifact is logged.
local_path – Path to the file or directory to write.
artifact_path – If provided, the directory in artifact_uri to write to.
from mlflow import MlflowClient

features = "rooms, zipcode, median_price, school_rating, transport"
with open("features.txt", 'w') as f:
    f.write(features)

# Create a run under the default experiment (whose id is '0').
client = MlflowClient()
experiment_id = "0"
run = client.create_run(experiment_id)

# log and fetch the artifact
client.log_artifact(run.info.run_id, "features.txt")
artifacts = client.list_artifacts(run.info.run_id)
for artifact in artifacts:
    print("artifact: {}".format(artifact.path))
    print("is_dir: {}".format(artifact.is_dir))
client.set_terminated(run.info.run_id)
-
log_artifacts
(run_id: str, local_dir: str, artifact_path: Optional[str] = None) → None[source] Write a directory of files to the remote artifact_uri.
- Parameters
run_id – The run to which the artifacts are logged.
local_dir – Path to the directory of files to write.
artifact_path – If provided, the directory in artifact_uri to write to.
import os
import json
from mlflow import MlflowClient

# Create some artifacts data to preserve
features = "rooms, zipcode, median_price, school_rating, transport"
data = {"state": "TX", "Available": 25, "Type": "Detached"}

# Create couple of artifact files under the local directory "data"
os.makedirs("data", exist_ok=True)
with open("data/data.json", 'w', encoding='utf-8') as f:
    json.dump(data, f, indent=2)
with open("data/features.txt", 'w') as f:
    f.write(features)

# Create a run under the default experiment (whose id is '0'), and log
# all files in "data" to root artifact_uri/states
client = MlflowClient()
experiment_id = "0"
run = client.create_run(experiment_id)
client.log_artifacts(run.info.run_id, "data", artifact_path="states")
artifacts = client.list_artifacts(run.info.run_id)
for artifact in artifacts:
    print("artifact: {}".format(artifact.path))
    print("is_dir: {}".format(artifact.is_dir))
client.set_terminated(run.info.run_id)
-
log_batch
(run_id: str, metrics: Sequence[Metric] = (), params: Sequence[Param] = (), tags: Sequence[RunTag] = ()) → None[source] Log multiple metrics, params, and/or tags.
- Parameters
run_id – String ID of the run
metrics – If provided, List of Metric(key, value, timestamp) instances.
params – If provided, List of Param(key, value) instances.
tags – If provided, List of RunTag(key, value) instances.
Raises an MlflowException if any errors occur.
- Returns
None
import time from mlflow import MlflowClient from mlflow.entities import Metric, Param, RunTag def print_run_info(r): print("run_id: {}".format(r.info.run_id)) print("params: {}".format(r.data.params)) print("metrics: {}".format(r.data.metrics)) print("tags: {}".format(r.data.tags)) print("status: {}".format(r.info.status)) # Create MLflow entities and a run under the default experiment (whose id is '0'). timestamp = int(time.time() * 1000) metrics = [Metric('m', 1.5, timestamp, 1)] params = [Param("p", 'p')] tags = [RunTag("t", "t")] experiment_id = "0" client = MlflowClient() run = client.create_run(experiment_id) # Log entities, terminate the run, and fetch run status client.log_batch(run.info.run_id, metrics=metrics, params=params, tags=tags) client.set_terminated(run.info.run_id) run = client.get_run(run.info.run_id) print_run_info(run)
-
log_dict
(run_id: str, dictionary: Any, artifact_file: str) → None[source] Log a JSON/YAML-serializable object (e.g. dict) as an artifact. The serialization format (JSON or YAML) is automatically inferred from the extension of artifact_file. If the file extension doesn’t exist or match any of [“.json”, “.yml”, “.yaml”], JSON format is used.
- Parameters
run_id – String ID of the run.
dictionary – Dictionary to log.
artifact_file – The run-relative artifact file path in posixpath format to which the dictionary is saved (e.g. “dir/data.json”).
from mlflow import MlflowClient

client = MlflowClient()
run = client.create_run(experiment_id="0")
run_id = run.info.run_id

dictionary = {"k": "v"}

# Log a dictionary as a JSON file under the run's root artifact directory
client.log_dict(run_id, dictionary, "data.json")

# Log a dictionary as a YAML file in a subdirectory of the run's root artifact directory
client.log_dict(run_id, dictionary, "dir/data.yml")

# If the file extension doesn't exist or match any of [".json", ".yaml", ".yml"],
# JSON format is used.
client.log_dict(run_id, dictionary, "data")
client.log_dict(run_id, dictionary, "data.txt")
-
log_figure
(run_id: str, figure: Union[matplotlib.figure.Figure, plotly.graph_objects.Figure], artifact_file: str) → None[source] Log a figure as an artifact. The following figure objects are supported:
matplotlib.figure.Figure
plotly.graph_objects.Figure
- Parameters
run_id – String ID of the run.
figure – Figure to log.
artifact_file – The run-relative artifact file path in posixpath format to which the figure is saved (e.g. “dir/file.png”).
import matplotlib.pyplot as plt
from mlflow import MlflowClient

fig, ax = plt.subplots()
ax.plot([0, 1], [2, 3])

client = MlflowClient()
run = client.create_run(experiment_id="0")
client.log_figure(run.info.run_id, fig, "figure.png")
-
log_image
(run_id: str, image: Union[numpy.ndarray, PIL.Image.Image], artifact_file: str) → None[source] Log an image as an artifact. The following image objects are supported:
numpy.ndarray
PIL.Image.Image
- Numpy array support
data type (the range in parentheses is the valid value range):
bool
integer (0 ~ 255)
unsigned integer (0 ~ 255)
float (0.0 ~ 1.0)
Warning
Out-of-range integer values will be clipped to [0, 255].
Out-of-range float values will be clipped to [0, 1].
shape (H: height, W: width):
H x W (Grayscale)
H x W x 1 (Grayscale)
H x W x 3 (an RGB channel order is assumed)
H x W x 4 (an RGBA channel order is assumed)
- Parameters
run_id – String ID of the run.
image – Image to log.
artifact_file – The run-relative artifact file path in posixpath format to which the image is saved (e.g. “dir/image.png”).
import numpy as np
from mlflow import MlflowClient

image = np.random.randint(0, 256, size=(100, 100, 3), dtype=np.uint8)

client = MlflowClient()
run = client.create_run(experiment_id="0")
client.log_image(run.info.run_id, image, "image.png")
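A float array in the [0.0, 1.0] range is equally valid input; out-of-range values would be clipped as noted above. A small grayscale sketch:

import numpy as np
from mlflow import MlflowClient

client = MlflowClient()
run = client.create_run(experiment_id="0")

# Grayscale H x W float image; values are expected to lie in [0.0, 1.0]
gray = np.random.rand(100, 100)
client.log_image(run.info.run_id, gray, "gray_image.png")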
-
log_metric
(run_id: str, key: str, value: float, timestamp: Optional[int] = None, step: Optional[int] = None) → None[source] Log a metric against the run ID.
- Parameters
run_id – The run id to which the metric should be logged.
key – Metric name (string). This string may only contain alphanumerics, underscores (_), dashes (-), periods (.), spaces ( ), and slashes (/). All backend stores will support keys up to length 250, but some may support larger keys.
value – Metric value (float). Note that some special values such as +/- Infinity may be replaced by other values depending on the store. For example, the SQLAlchemy store replaces +/- Inf with max / min float values. All backend stores will support values up to length 5000, but some may support larger values.
timestamp – Time when this metric was calculated. Defaults to the current system time.
step – Integer training step (iteration) at which the metric was calculated. Defaults to 0.
from mlflow import MlflowClient def print_run_info(r): print("run_id: {}".format(r.info.run_id)) print("metrics: {}".format(r.data.metrics)) print("status: {}".format(r.info.status)) # Create a run under the default experiment (whose id is '0'). # Since these are low-level CRUD operations, this method will create a run. # To end the run, you'll have to explicitly end it. client = MlflowClient() experiment_id = "0" run = client.create_run(experiment_id) print_run_info(run) print("--") # Log the metric. Unlike mlflow.log_metric this method # does not start a run if one does not exist. It will log # the metric for the run id in the backend store. client.log_metric(run.info.run_id, "m", 1.5) client.set_terminated(run.info.run_id) run = client.get_run(run.info.run_id) print_run_info(run)
-
log_param
(run_id: str, key: str, value: Any) → Any[source] Log a parameter (e.g. model hyperparameter) against the run ID.
- Parameters
run_id – The run id to which the param should be logged.
key – Parameter name (string). This string may only contain alphanumerics, underscores (_), dashes (-), periods (.), spaces ( ), and slashes (/). All backend stores support keys up to length 250, but some may support larger keys.
value – Parameter value (string, but will be string-ified if not). All backend stores support values up to length 500, but some may support larger values.
- Returns
the parameter value that is logged.
from mlflow import MlflowClient def print_run_info(r): print("run_id: {}".format(r.info.run_id)) print("params: {}".format(r.data.params)) print("status: {}".format(r.info.status)) # Create a run under the default experiment (whose id is '0'). # Since these are low-level CRUD operations, this method will create a run. # To end the run, you'll have to explicitly end it. client = MlflowClient() experiment_id = "0" run = client.create_run(experiment_id) print_run_info(run) print("--") # Log the parameter. Unlike mlflow.log_param this method # does not start a run if one does not exist. It will log # the parameter in the backend store p_value = client.log_param(run.info.run_id, "p", 1) assert p_value == 1 client.set_terminated(run.info.run_id) run = client.get_run(run.info.run_id) print_run_info(run)
-
log_text
(run_id: str, text: str, artifact_file: str) → None[source] Log text as an artifact.
- Parameters
run_id – String ID of the run.
text – String containing text to log.
artifact_file – The run-relative artifact file path in posixpath format to which the text is saved (e.g. “dir/file.txt”).
from mlflow import MlflowClient

client = MlflowClient()
run = client.create_run(experiment_id="0")

# Log text to a file under the run's root artifact directory
client.log_text(run.info.run_id, "text1", "file1.txt")

# Log text in a subdirectory of the run's root artifact directory
client.log_text(run.info.run_id, "text2", "dir/file2.txt")

# Log HTML text
client.log_text(run.info.run_id, "<h1>header</h1>", "index.html")
-
rename_experiment
(experiment_id: str, new_name: str) → None[source] Update an experiment’s name. The new name must be unique.
- Parameters
experiment_id – The experiment ID returned from create_experiment.
new_name – The new name for the experiment.
from mlflow import MlflowClient

def print_experiment_info(experiment):
    print("Name: {}".format(experiment.name))
    print("Experiment_id: {}".format(experiment.experiment_id))
    print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))

# Create an experiment with a name that is unique and case sensitive
client = MlflowClient()
experiment_id = client.create_experiment("Social NLP Experiments")

# Fetch experiment metadata information
experiment = client.get_experiment(experiment_id)
print_experiment_info(experiment)
print("--")

# Rename and fetch experiment metadata information
client.rename_experiment(experiment_id, "Social Media NLP Experiments")
experiment = client.get_experiment(experiment_id)
print_experiment_info(experiment)
-
rename_registered_model
(name: str, new_name: str) → RegisteredModel[source] Update registered model name.
- Parameters
name – Name of the registered model to update.
new_name – New proposed name for the registered model.
- Returns
A single updated
mlflow.entities.model_registry.RegisteredModel
object.
import mlflow from mlflow import MlflowClient def print_registered_model_info(rm): print("name: {}".format(rm.name)) print("tags: {}".format(rm.tags)) print("description: {}".format(rm.description)) name = "SocialTextAnalyzer" tags = {"nlp.framework": "Spark NLP"} desc = "This sentiment analysis model classifies the tone-happy, sad, angry." # create a new registered model name mlflow.set_tracking_uri("sqlite:///mlruns.db") client = MlflowClient() client.create_registered_model(name, tags, desc) print_registered_model_info(client.get_registered_model(name)) print("--") # rename the model new_name = "SocialMediaTextAnalyzer" client.rename_registered_model(name, new_name) print_registered_model_info(client.get_registered_model(new_name))
name: SocialTextAnalyzer tags: {'nlp.framework': 'Spark NLP'} description: This sentiment analysis model classifies the tone-happy, sad, angry. -- name: SocialMediaTextAnalyzer tags: {'nlp.framework': 'Spark NLP'} description: This sentiment analysis model classifies the tone-happy, sad, angry.
-
restore_experiment
(experiment_id: str) → None[source] Restore a deleted experiment unless permanently deleted.
- Parameters
experiment_id – The experiment ID returned from
create_experiment
.
from mlflow import MlflowClient

def print_experiment_info(experiment):
    print("Name: {}".format(experiment.name))
    print("Experiment Id: {}".format(experiment.experiment_id))
    print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))

# Create and delete an experiment
client = MlflowClient()
experiment_id = client.create_experiment("New Experiment")
client.delete_experiment(experiment_id)

# Examine the deleted experiment details.
experiment = client.get_experiment(experiment_id)
print_experiment_info(experiment)
print("--")

# Restore the experiment and fetch its info
client.restore_experiment(experiment_id)
experiment = client.get_experiment(experiment_id)
print_experiment_info(experiment)
-
restore_run
(run_id: str) → None[source] Restores a deleted run with the given ID.
- Parameters
run_id – The unique run id to restore.
from mlflow import MlflowClient

# Create a run under the default experiment (whose id is '0').
client = MlflowClient()
experiment_id = "0"
run = client.create_run(experiment_id)
run_id = run.info.run_id
print("run_id: {}; lifecycle_stage: {}".format(run_id, run.info.lifecycle_stage))

client.delete_run(run_id)
del_run = client.get_run(run_id)
print("run_id: {}; lifecycle_stage: {}".format(run_id, del_run.info.lifecycle_stage))

client.restore_run(run_id)
rest_run = client.get_run(run_id)
print("run_id: {}; lifecycle_stage: {}".format(run_id, rest_run.info.lifecycle_stage))
-
search_experiments
(view_type: int = 1, max_results: Optional[int] = 1000, filter_string: Optional[str] = None, order_by: Optional[List[str]] = None, page_token=None) → PagedList[Experiment][source] Search for experiments that match the specified search query.
- Parameters
view_type – One of enum values ACTIVE_ONLY, DELETED_ONLY, or ALL defined in mlflow.entities.ViewType.
max_results – Maximum number of experiments desired. Certain server backends may apply their own limit.
filter_string – Filter query string (e.g., "name = 'my_experiment'"), defaults to searching for all experiments. The following identifiers, comparators, and logical operators are supported.
- Identifiers
name: Experiment name
creation_time: Experiment creation time
last_update_time: Experiment last update time
tags.<tag_key>: Experiment tag. If tag_key contains spaces, it must be wrapped with backticks (e.g., "tags.`extra key`").
- Comparators for string attributes and tags
=: Equal to
!=: Not equal to
LIKE: Case-sensitive pattern match
ILIKE: Case-insensitive pattern match
- Comparators for numeric attributes
=: Equal to
!=: Not equal to
<: Less than
<=: Less than or equal to
>: Greater than
>=: Greater than or equal to
- Logical operators
AND: Combines two sub-queries and returns True if both of them are True.
order_by – List of columns to order by. The order_by column can contain an optional DESC or ASC value (e.g., "name DESC"). The default ordering is ASC, so "name" is equivalent to "name ASC". If unspecified, defaults to ["last_update_time DESC"], which lists experiments updated most recently first. The following fields are supported:
experiment_id: Experiment ID
name: Experiment name
creation_time: Experiment creation time
last_update_time: Experiment last update time
page_token – Token specifying the next page of results. It should be obtained from a search_experiments call.
- Returns
A
PagedList
ofExperiment
objects. The pagination token for the next page can be obtained via thetoken
attribute of the object.
import mlflow def assert_experiment_names_equal(experiments, expected_names): actual_names = [e.name for e in experiments if e.name != "Default"] assert actual_names == expected_names, (actual_names, expected_names) mlflow.set_tracking_uri("sqlite:///:memory:") client = mlflow.MlflowClient() # Create experiments for name, tags in [ ("a", None), ("b", None), ("ab", {"k": "v"}), ("bb", {"k": "V"}), ]: client.create_experiment(name, tags=tags) # Search for experiments with name "a" experiments = client.search_experiments(filter_string="name = 'a'") assert_experiment_names_equal(experiments, ["a"]) # Search for experiments with name starting with "a" experiments = client.search_experiments(filter_string="name LIKE 'a%'") assert_experiment_names_equal(experiments, ["ab", "a"]) # Search for experiments with tag key "k" and value ending with "v" or "V" experiments = client.search_experiments(filter_string="tags.k ILIKE '%v'") assert_experiment_names_equal(experiments, ["bb", "ab"]) # Search for experiments with name ending with "b" and tag {"k": "v"} experiments = client.search_experiments(filter_string="name LIKE '%b' AND tags.k = 'v'") assert_experiment_names_equal(experiments, ["ab"]) # Sort experiments by name in ascending order experiments = client.search_experiments(order_by=["name"]) assert_experiment_names_equal(experiments, ["a", "ab", "b", "bb"]) # Sort experiments by ID in descending order experiments = client.search_experiments(order_by=["experiment_id DESC"]) assert_experiment_names_equal(experiments, ["bb", "ab", "b", "a"])
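When more experiments exist than max_results, the pagination token described above can be fed back in; a minimal sketch of that loop:

from mlflow import MlflowClient

client = MlflowClient()
page_token = None
while True:
    # Each page is a PagedList; its .token attribute points at the next page
    page = client.search_experiments(max_results=100, page_token=page_token)
    for e in page:
        print(e.name)
    page_token = page.token
    if not page_token:
        break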
-
search_model_versions
(filter_string: str) → PagedList[ModelVersion][source] Search for model versions in backend that satisfy the filter criteria.
- Parameters
filter_string – Filter query string (e.g., "name = 'a_model_name' and tag.key = 'value1'"), defaults to searching for all model versions. The following identifiers, comparators, and logical operators are supported.
- Identifiers
name: model name.
source_path: model version source path.
run_id: The id of the mlflow run that generates the model version.
tags.<tag_key>: model version tag. If tag_key contains spaces, it must be wrapped with backticks (e.g., "tags.`extra key`").
- Comparators
=: Equal to.
!=: Not equal to.
LIKE: Case-sensitive pattern match.
ILIKE: Case-insensitive pattern match.
IN: In a value list. Only the run_id identifier supports the IN comparator.
- Logical operators
AND: Combines two sub-queries and returns True if both of them are True.
- Returns
PagedList of
mlflow.entities.model_registry.ModelVersion
objects.
import mlflow from mlflow import MlflowClient client = MlflowClient() # Get all versions of the model filtered by name model_name = "CordobaWeatherForecastModel" filter_string = "name='{}'".format(model_name) results = client.search_model_versions(filter_string) print("-" * 80) for res in results: print("name={}; run_id={}; version={}".format(res.name, res.run_id, res.version)) # Get the version of the model filtered by run_id run_id = "e14afa2f47a040728060c1699968fd43" filter_string = "run_id='{}'".format(run_id) results = client.search_model_versions(filter_string) print("-" * 80) for res in results: print("name={}; run_id={}; version={}".format(res.name, res.run_id, res.version))
------------------------------------------------------------------------------------ name=CordobaWeatherForecastModel; run_id=eaef868ee3d14d10b4299c4c81ba8814; version=1 name=CordobaWeatherForecastModel; run_id=e14afa2f47a040728060c1699968fd43; version=2 ------------------------------------------------------------------------------------ name=CordobaWeatherForecastModel; run_id=e14afa2f47a040728060c1699968fd43; version=2
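The run_id identifier also supports the IN comparator, so versions from several runs can be matched at once (a sketch; the run IDs are the illustrative values from the output above):

from mlflow import MlflowClient

client = MlflowClient()
run_ids = ["e14afa2f47a040728060c1699968fd43", "eaef868ee3d14d10b4299c4c81ba8814"]
# Build a filter such as: run_id IN ('id1','id2')
filter_string = "run_id IN ('{}')".format("','".join(run_ids))
for mv in client.search_model_versions(filter_string):
    print("name={}; run_id={}; version={}".format(mv.name, mv.run_id, mv.version))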
-
search_registered_models
(filter_string: Optional[str] = None, max_results: int = 100, order_by: Optional[List[str]] = None, page_token: Optional[str] = None) → PagedList[RegisteredModel][source] Search for registered models in backend that satisfy the filter criteria.
- Parameters
filter_string – Filter query string (e.g., "name = 'a_model_name' and tag.key = 'value1'"), defaults to searching for all registered models. The following identifiers, comparators, and logical operators are supported.
- Identifiers
name: registered model name.
tags.<tag_key>: registered model tag. If tag_key contains spaces, it must be wrapped with backticks (e.g., "tags.`extra key`").
- Comparators
=: Equal to.
!=: Not equal to.
LIKE: Case-sensitive pattern match.
ILIKE: Case-insensitive pattern match.
- Logical operators
AND: Combines two sub-queries and returns True if both of them are True.
max_results – Maximum number of registered models desired.
order_by – List of column names with ASC|DESC annotation, to be used for ordering matching search results.
page_token – Token specifying the next page of results. It should be obtained from a search_registered_models call.
- Returns
A PagedList of
mlflow.entities.model_registry.RegisteredModel
objects that satisfy the search expressions. The pagination token for the next page can be obtained via thetoken
attribute of the object.
import mlflow from mlflow import MlflowClient client = MlflowClient() # Get search results filtered by the registered model name model_name="CordobaWeatherForecastModel" filter_string = "name='{}'".format(model_name) results = client.search_registered_models(filter_string=filter_string) print("-" * 80) for res in results: for mv in res.latest_versions: print("name={}; run_id={}; version={}".format(mv.name, mv.run_id, mv.version)) # Get search results filtered by the registered model name that matches # prefix pattern filter_string = "name LIKE 'Boston%'" results = client.search_registered_models(filter_string=filter_string) for res in results: for mv in res.latest_versions: print("name={}; run_id={}; version={}".format(mv.name, mv.run_id, mv.version)) # Get all registered models and order them by ascending order of the names results = client.search_registered_models(order_by=["name ASC"]) print("-" * 80) for res in results: for mv in res.latest_versions: print("name={}; run_id={}; version={}".format(mv.name, mv.run_id, mv.version))
------------------------------------------------------------------------------------ name=CordobaWeatherForecastModel; run_id=eaef868ee3d14d10b4299c4c81ba8814; version=1 name=CordobaWeatherForecastModel; run_id=e14afa2f47a040728060c1699968fd43; version=2 ------------------------------------------------------------------------------------ name=BostonWeatherForecastModel; run_id=ddc51b9407a54b2bb795c8d680e63ff6; version=1 name=BostonWeatherForecastModel; run_id=48ac94350fba40639a993e1b3d4c185d; version=2 ----------------------------------------------------------------------------------- name=AzureWeatherForecastModel; run_id=5fcec6c4f1c947fc9295fef3fa21e52d; version=1 name=AzureWeatherForecastModel; run_id=8198cb997692417abcdeb62e99052260; version=3 name=BostonWeatherForecastModel; run_id=ddc51b9407a54b2bb795c8d680e63ff6; version=1 name=BostonWeatherForecastModel; run_id=48ac94350fba40639a993e1b3d4c185d; version=2 name=CordobaWeatherForecastModel; run_id=eaef868ee3d14d10b4299c4c81ba8814; version=1 name=CordobaWeatherForecastModel; run_id=e14afa2f47a040728060c1699968fd43; version=2
-
search_runs
(experiment_ids: List[str], filter_string: str = '', run_view_type: int = 1, max_results: int = 1000, order_by: Optional[List[str]] = None, page_token: Optional[str] = None) → PagedList[Run][source] Search for Runs that fit the specified criteria.
- Parameters
experiment_ids – List of experiment IDs, or a single int or string id.
filter_string – Filter query string, defaults to searching all runs.
run_view_type – One of enum values ACTIVE_ONLY, DELETED_ONLY, or ALL runs defined in mlflow.entities.ViewType.
max_results – Maximum number of runs desired.
order_by – List of columns to order by (e.g., "metrics.rmse"). The order_by column can contain an optional DESC or ASC value. The default is ASC. The default ordering is to sort by start_time DESC, then run_id.
page_token – Token specifying the next page of results. It should be obtained from a search_runs call.
- Returns
A
PagedList
ofRun
objects that satisfy the search expressions. If the underlying tracking store supports pagination, the token for the next page may be obtained via thetoken
attribute of the returned object.
import mlflow from mlflow import MlflowClient from mlflow.entities import ViewType def print_run_info(runs): for r in runs: print("run_id: {}".format(r.info.run_id)) print("lifecycle_stage: {}".format(r.info.lifecycle_stage)) print("metrics: {}".format(r.data.metrics)) # Exclude mlflow system tags tags = {k: v for k, v in r.data.tags.items() if not k.startswith("mlflow.")} print("tags: {}".format(tags)) # Create an experiment and log two runs with metrics and tags under the experiment experiment_id = mlflow.create_experiment("Social NLP Experiments") with mlflow.start_run(experiment_id=experiment_id) as run: mlflow.log_metric("m", 1.55) mlflow.set_tag("s.release", "1.1.0-RC") with mlflow.start_run(experiment_id=experiment_id): mlflow.log_metric("m", 2.50) mlflow.set_tag("s.release", "1.2.0-GA") # Search all runs under experiment id and order them by # descending value of the metric 'm' client = MlflowClient() runs = client.search_runs(experiment_id, order_by=["metrics.m DESC"]) print_run_info(runs) print("--") # Delete the first run client.delete_run(run_id=run.info.run_id) # Search only deleted runs under the experiment id and use a case insensitive pattern # in the filter_string for the tag. filter_string = "tags.s.release ILIKE '%rc%'" runs = client.search_runs(experiment_id, run_view_type=ViewType.DELETED_ONLY, filter_string=filter_string) print_run_info(runs)
run_id: 0efb2a68833d4ee7860a964fad31cb3f
lifecycle_stage: active
metrics: {'m': 2.5}
tags: {'s.release': '1.2.0-GA'}
run_id: 7ab027fd72ee4527a5ec5eafebb923b8
lifecycle_stage: active
metrics: {'m': 1.55}
tags: {'s.release': '1.1.0-RC'}
--
run_id: 7ab027fd72ee4527a5ec5eafebb923b8
lifecycle_stage: deleted
metrics: {'m': 1.55}
tags: {'s.release': '1.1.0-RC'}
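When the tracking store supports pagination, the token attribute of the returned PagedList can be passed back as page_token to fetch the next page. A minimal sketch of that loop, assuming an existing experiment ID (the placeholder "0" and the page size of 100 are illustrative, not part of the API):
from mlflow import MlflowClient
from mlflow.entities import ViewType

client = MlflowClient()
experiment_id = "0"  # placeholder: substitute a real experiment ID
page_token = None
all_runs = []
while True:
    page = client.search_runs(
        [experiment_id],
        run_view_type=ViewType.ACTIVE_ONLY,
        max_results=100,  # arbitrary page size chosen for illustration
        page_token=page_token,
    )
    all_runs.extend(page)
    # page.token is None or empty once there are no further pages
    page_token = page.token
    if not page_token:
        break

print("Total runs fetched: {}".format(len(all_runs)))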
-
set_experiment_tag
(experiment_id: str, key: str, value: Any) → None[source] Set a tag on the experiment with the specified ID. Value is converted to a string.
- Parameters
experiment_id – String ID of the experiment.
key – Name of the tag.
value – Tag value (converted to a string).
from mlflow import MlflowClient

# Create an experiment and set its tag
client = MlflowClient()
experiment_id = client.create_experiment("Social Media NLP Experiments")
client.set_experiment_tag(experiment_id, "nlp.framework", "Spark NLP")

# Fetch experiment metadata information
experiment = client.get_experiment(experiment_id)
print("Name: {}".format(experiment.name))
print("Tags: {}".format(experiment.tags))
-
set_model_version_tag
(name: str, version: Optional[str] = None, key: Optional[str] = None, value: Optional[Any] = None, stage: Optional[str] = None) → None[source] Set a tag for the model version. When stage is set, the tag will be set on the latest model version in that stage. Setting both version and stage will result in an error.
- Parameters
name – Registered model name.
version – Registered model version.
key – Tag key to log. key is required.
value – Tag value to log. value is required.
stage – Registered model stage.
- Returns
None
import mlflow.sklearn
from mlflow import MlflowClient
from sklearn.ensemble import RandomForestRegressor


def print_model_version_info(mv):
    print("Name: {}".format(mv.name))
    print("Version: {}".format(mv.version))
    print("Tags: {}".format(mv.tags))


mlflow.set_tracking_uri("sqlite:///mlruns.db")
params = {"n_estimators": 3, "random_state": 42}
name = "RandomForestRegression"
rfr = RandomForestRegressor(**params).fit([[0, 1]], [1])

# Log MLflow entities
with mlflow.start_run() as run:
    mlflow.log_params(params)
    mlflow.sklearn.log_model(rfr, artifact_path="sklearn-model")

# Register model name in the model registry
client = MlflowClient()
client.create_registered_model(name)

# Create a new version of the rfr model under the registered model name
# and set a tag
model_uri = "runs:/{}/sklearn-model".format(run.info.run_id)
mv = client.create_model_version(name, model_uri, run.info.run_id)
print_model_version_info(mv)
print("--")

# Tag using model version
client.set_model_version_tag(name, mv.version, "t", "1")
# Tag using model stage
client.set_model_version_tag(name, key="t1", value="1", stage=mv.current_stage)
mv = client.get_model_version(name, mv.version)
print_model_version_info(mv)
-
set_registered_model_tag
(name, key, value) → None[source] Set a tag for the registered model.
- Parameters
name – Registered model name.
key – Tag key to log.
value – Tag value to log.
- Returns
None
import mlflow
from mlflow import MlflowClient


def print_model_info(rm):
    print("--")
    print("name: {}".format(rm.name))
    print("tags: {}".format(rm.tags))


name = "SocialMediaTextAnalyzer"
tags = {"nlp.framework1": "Spark NLP"}
mlflow.set_tracking_uri("sqlite:///mlruns.db")
client = MlflowClient()

# Create registered model, set an additional tag, and fetch
# updated model info
client.create_registered_model(name, tags)
model = client.get_registered_model(name)
print_model_info(model)
client.set_registered_model_tag(name, "nlp.framework2", "VADER")
model = client.get_registered_model(name)
print_model_info(model)
-
set_tag
(run_id: str, key: str, value: Any) → None[source] Set a tag on the run with the specified ID. Value is converted to a string.
- Parameters
run_id – String ID of the run.
key – Tag name (string). This string may only contain alphanumerics, underscores (_), dashes (-), periods (.), spaces ( ), and slashes (/). All backend stores will support keys up to length 250, but some may support larger keys.
value – Tag value (string, but will be string-ified if not). All backend stores will support values up to length 5000, but some may support larger values.
from mlflow import MlflowClient


def print_run_info(run):
    print("run_id: {}".format(run.info.run_id))
    print("Tags: {}".format(run.data.tags))


# Create a run under the default experiment (whose id is '0').
client = MlflowClient()
experiment_id = "0"
run = client.create_run(experiment_id)
print_run_info(run)
print("--")

# Set a tag and fetch updated run info
client.set_tag(run.info.run_id, "nlp.framework", "Spark NLP")
run = client.get_run(run.info.run_id)
print_run_info(run)
-
set_terminated
(run_id: str, status: Optional[str] = None, end_time: Optional[int] = None) → None[source] Set a run’s status to terminated.
- Parameters
status – A string value of mlflow.entities.RunStatus. Defaults to "FINISHED".
end_time – If not provided, defaults to the current time.
from mlflow import MlflowClient


def print_run_info(r):
    print("run_id: {}".format(r.info.run_id))
    print("status: {}".format(r.info.status))


# Create a run under the default experiment (whose id is '0').
# Since this is a low-level CRUD operation, this method will create a run.
# To end the run, you'll have to explicitly terminate it.
client = MlflowClient()
experiment_id = "0"
run = client.create_run(experiment_id)
print_run_info(run)
print("--")

# Terminate the run and fetch updated status. By default,
# the status is set to "FINISHED". Other values you can
# set are "KILLED", "FAILED", "RUNNING", or "SCHEDULED".
client.set_terminated(run.info.run_id, status="KILLED")
run = client.get_run(run.info.run_id)
print_run_info(run)
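The end_time parameter is not exercised above. A minimal sketch of passing it explicitly, under the assumption (consistent with MLflow run metadata) that end times are Unix timestamps in milliseconds:
import time

from mlflow import MlflowClient

client = MlflowClient()
run = client.create_run("0")  # default experiment

# Record an explicit end time (milliseconds since the Unix epoch)
# rather than letting the client default to "now".
ended_at_ms = int(time.time() * 1000)
client.set_terminated(run.info.run_id, status="FAILED", end_time=ended_at_ms)

print(client.get_run(run.info.run_id).info.status)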
-
transition_model_version_stage
(name: str, version: str, stage: str, archive_existing_versions: bool = False) → ModelVersion[source] Update model version stage.
- Parameters
name – Registered model name.
version – Registered model version.
stage – New desired stage for this model version.
archive_existing_versions – If this flag is set to True, all existing model versions in the stage will be automatically moved to the "archived" stage. Only valid when stage is "staging" or "production"; otherwise an error will be raised.
- Returns
A single mlflow.entities.model_registry.ModelVersion object.
import mlflow.sklearn
from mlflow import MlflowClient
from sklearn.ensemble import RandomForestRegressor


def print_model_version_info(mv):
    print("Name: {}".format(mv.name))
    print("Version: {}".format(mv.version))
    print("Description: {}".format(mv.description))
    print("Stage: {}".format(mv.current_stage))


mlflow.set_tracking_uri("sqlite:///mlruns.db")
params = {"n_estimators": 3, "random_state": 42}
name = "RandomForestRegression"
desc = "A new version of the model using ensemble trees"
rfr = RandomForestRegressor(**params).fit([[0, 1]], [1])

# Log MLflow entities
with mlflow.start_run() as run:
    mlflow.log_params(params)
    mlflow.sklearn.log_model(rfr, artifact_path="sklearn-model")

# Register model name in the model registry
client = MlflowClient()
client.create_registered_model(name)

# Create a new version of the rfr model under the registered model name
model_uri = "runs:/{}/sklearn-model".format(run.info.run_id)
mv = client.create_model_version(name, model_uri, run.info.run_id, description=desc)
print_model_version_info(mv)
print("--")

# Transition model version from None -> staging
mv = client.transition_model_version_stage(name, mv.version, "staging")
print_model_version_info(mv)
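The example above leaves archive_existing_versions at its default of False. A minimal sketch of promoting a later version to Production while archiving whatever is currently in that stage; it assumes the registered model from the example above already has a version "2", which is hypothetical here:
from mlflow import MlflowClient

client = MlflowClient()
name = "RandomForestRegression"  # registered model from the example above

# Promote version 2 to Production and archive any versions already in that
# stage (archive_existing_versions is only valid for "staging" or "production").
mv = client.transition_model_version_stage(
    name, version="2", stage="Production", archive_existing_versions=True
)
print("Version {} is now in stage {}".format(mv.version, mv.current_stage))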
-
update_model_version
(name: str, version: str, description: Optional[str] = None) → ModelVersion[source] Update metadata associated with a model version in backend.
- Parameters
name – Name of the containing registered model.
version – Version number of the model version.
description – New description.
- Returns
A single mlflow.entities.model_registry.ModelVersion object.
import mlflow.sklearn
from mlflow import MlflowClient
from sklearn.ensemble import RandomForestRegressor


def print_model_version_info(mv):
    print("Name: {}".format(mv.name))
    print("Version: {}".format(mv.version))
    print("Description: {}".format(mv.description))


mlflow.set_tracking_uri("sqlite:///mlruns.db")
params = {"n_estimators": 3, "random_state": 42}
name = "RandomForestRegression"
rfr = RandomForestRegressor(**params).fit([[0, 1]], [1])

# Log MLflow entities
with mlflow.start_run() as run:
    mlflow.log_params(params)
    mlflow.sklearn.log_model(rfr, artifact_path="sklearn-model")

# Register model name in the model registry
client = MlflowClient()
client.create_registered_model(name)

# Create a new version of the rfr model under the registered model name
model_uri = "runs:/{}/sklearn-model".format(run.info.run_id)
mv = client.create_model_version(name, model_uri, run.info.run_id)
print_model_version_info(mv)
print("--")

# Update model version's description
desc = "A new version of the model using ensemble trees"
mv = client.update_model_version(name, mv.version, desc)
print_model_version_info(mv)
-
update_registered_model
(name: str, description: Optional[str] = None) → RegisteredModel[source] Updates metadata for a RegisteredModel entity. The input field description should be non-None. The backend raises an exception if a registered model with the given name does not exist.
- Parameters
name – Name of the registered model to update.
description – (Optional) New description.
- Returns
A single updated mlflow.entities.model_registry.RegisteredModel object.
import mlflow
from mlflow import MlflowClient


def print_registered_model_info(rm):
    print("name: {}".format(rm.name))
    print("tags: {}".format(rm.tags))
    print("description: {}".format(rm.description))


name = "SocialMediaTextAnalyzer"
tags = {"nlp.framework": "Spark NLP"}
desc = "This sentiment analysis model classifies the tone-happy, sad, angry."
mlflow.set_tracking_uri("sqlite:///mlruns.db")
client = MlflowClient()
client.create_registered_model(name, tags, desc)
print_registered_model_info(client.get_registered_model(name))
print("--")

# Update the model's description
desc = "This sentiment analysis model classifies tweets' tone: happy, sad, angry."
client.update_registered_model(name, desc)
print_registered_model_info(client.get_registered_model(name))
name: SocialMediaTextAnalyzer
tags: {'nlp.framework': 'Spark NLP'}
description: This sentiment analysis model classifies the tone-happy, sad, angry.
--
name: SocialMediaTextAnalyzer
tags: {'nlp.framework': 'Spark NLP'}
description: This sentiment analysis model classifies tweets' tone: happy, sad, angry.
-
update_run
(run_id: str, status: Optional[str] = None, name: Optional[str] = None) → None[source] Update a run with the specified ID to a new status or name.
- Parameters
run_id – The ID of the Run to update.
status – The new status of the run to set, if specified. At least one of status or name should be specified.
name – The new name of the run to set, if specified. At least one of name or status should be specified.
from mlflow import MlflowClient


def print_run_info(run):
    print("run_id: {}".format(run.info.run_id))
    print("run_name: {}".format(run.info.run_name))
    print("status: {}".format(run.info.status))


# Create a run under the default experiment (whose id is '0').
client = MlflowClient()
experiment_id = "0"
run = client.create_run(experiment_id)
print_run_info(run)
print("--")

# Update run and fetch info
client.update_run(run.info.run_id, "FINISHED", "new_name")
run = client.get_run(run.info.run_id)
print_run_info(run)
-