diff --git a/.backportrc.json b/.backportrc.json index afee202d7..e636dae1b 100644 --- a/.backportrc.json +++ b/.backportrc.json @@ -1,5 +1,5 @@ { "upstream": "elastic/stack-docs", - "branches": [{ "name": "8.x", "checked": true }, "8.17", "8.16", "8.15", "8.14", "8.13", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "7.16", "7.15", "7.14", "7.13", "7.12", "7.11", "7.10", "7.9", "7.8", "7.7", "7.6", "7.5", "7.4", "7.3", "7.2", "7.1", "7.0", "6.8", "6.7", "6.6", "6.5", "6.4", "6.3", "6.2", "6.1", "6.0", "5.6"], + "branches": [{ "name": "8.x", "checked": true }, "8.19", "8.18", "8.17", "8.16", "8.15", "8.14", "8.13", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "7.16", "7.15", "7.14", "7.13", "7.12", "7.11", "7.10", "7.9", "7.8", "7.7", "7.6", "7.5", "7.4", "7.3", "7.2", "7.1", "7.0", "6.8", "6.7", "6.6", "6.5", "6.4", "6.3", "6.2", "6.1", "6.0", "5.6"], "labels": ["backport"] } diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5266f9b62..e51b87fd7 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1,3 @@ * @elastic/platform-docs /docs/en/stack/ml/ @elastic/mlr-docs +/docs/en/glossary/terms/ @demisperazza \ No newline at end of file diff --git a/.github/workflows/comment-on-asciidoc-changes.yml b/.github/workflows/comment-on-asciidoc-changes.yml new file mode 100644 index 000000000..8e5f836b1 --- /dev/null +++ b/.github/workflows/comment-on-asciidoc-changes.yml @@ -0,0 +1,21 @@ +--- +name: Comment on PR for .asciidoc changes + +on: + # We need to use pull_request_target to be able to comment on PRs from forks + pull_request_target: + types: + - synchronize + - opened + - reopened + branches: + - main + - master + - "9.0" + +jobs: + comment-on-asciidoc-change: + permissions: + contents: read + pull-requests: write + uses: elastic/docs-builder/.github/workflows/comment-on-asciidoc-changes.yml@main diff --git a/docs/en/install-upgrade/air-gapped-install.asciidoc b/docs/en/install-upgrade/air-gapped-install.asciidoc index 6cf0276af..fcb8fe9a9 100644 --- a/docs/en/install-upgrade/air-gapped-install.asciidoc +++ b/docs/en/install-upgrade/air-gapped-install.asciidoc @@ -72,7 +72,8 @@ Specifically: * To be able to use {kib} sample data, install or update hundreds of prebuilt alert rules, and explore available data integrations, you need to set up and configure the <>. * To provide detection rule updates for {endpoint-sec} agents, you need to set up and configure the <>. * To access {ents} capabilities (in addition to the general search capabilities of {es}), you need to set up and configure <>. -* To access the APM integration, you need to set up and configure <>. +* To access the APM integration, you need to set up and configure <>. +* To install and use the Elastic documentation for {kib} AI assistants, you need to set up and configure the <>. [discrete] [[air-gapped-beats]] @@ -163,6 +164,12 @@ Some {ml} features, like natural language processing (NLP), require you to deplo * {ml-docs}/ml-nlp-elser.html#air-gapped-install[Deploy ELSER in an air-gapped environment]. * {eland-docs}/machine-learning.html#ml-nlp-pytorch-air-gapped[Install trained models in an air-gapped environment with Eland]. +[discrete] +[[air-gapped-kibana-product-documentation]] +==== 1.14 {kib} Product documentation for AI Assistants + +Detailed install and configuration instructions are available in the {kibana-ref}/ai-assistant-settings-kb.html[{kib} AI Assistants settings documentation]. 
+ [discrete] [[air-gapped-kubernetes-and-openshift]] === 2. Kubernetes & OpenShift Install diff --git a/docs/en/install-upgrade/installing-stack-demo-secure.asciidoc b/docs/en/install-upgrade/installing-stack-demo-secure.asciidoc index 21edfcc94..7dd3ad4b9 100644 --- a/docs/en/install-upgrade/installing-stack-demo-secure.asciidoc +++ b/docs/en/install-upgrade/installing-stack-demo-secure.asciidoc @@ -98,7 +98,7 @@ sudo /usr/share/elasticsearch/jdk/bin/keytool -importcert -trustcacerts -nopromp + ["source","shell"] ---- -keytool -keystore elastic-stack-ca.p12 -list +sudo /usr/share/elasticsearch/jdk/bin/keytool -keystore /etc/elasticsearch/certs/elastic-stack-ca.p12 -list ---- + NOTE: The keytool utility is provided as part of the {es} installation and is located at: `/usr/share/elasticsearch/jdk/bin/keytool` on RPM installations. diff --git a/docs/en/reference-architectures/hot-frozen.asciidoc b/docs/en/reference-architectures/hot-frozen.asciidoc new file mode 100644 index 000000000..68e585ca4 --- /dev/null +++ b/docs/en/reference-architectures/hot-frozen.asciidoc @@ -0,0 +1,161 @@ +[[hot-frozen-architecture]] +== Hot/Frozen - High Availability + +The Hot/Frozen High Availability architecture is cost optimized for large time-series datasets. +In this architecture, the hot tier is primarily used for indexing, searching, and continuity for automated processes. +https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots.html[Searchable snapshots] are taken from hot into a repository, such as a cloud object store or an on-premises shared filesystem, and then cached to any desired volume on the local disks of the frozen tier. +Data in the repository is indexed for fast retrieval and accessed on-demand from the frozen nodes. +Index and snapshot lifecycle management are used to automate this process. + +This architecture is ideal for time-series use cases, such as Observability or Security, that do not require updating. +All the necessary components of the {stack} are included. +This is not intended for sizing workloads, but rather as a basis to ensure that your cluster is ready to handle any desired workload with resiliency. +A very high level representation of data flow is included, and for more detail around ingest architecture see our {ingest-guide}/use-case-arch.html[ingest architectures] documentation. + +[discrete] +[[hot-frozen-use-case]] +=== Hot/Frozen use case + +This Hot/Frozen – High Availability architecture is intended for organizations that: + +* Have a requirement for cost effective long term data storage (many months or years). +* Provide insights and alerts using logs, metrics, traces, or various event types to ensure optimal performance and quick issue resolution for applications. +* Apply https://www.elastic.co/guide/en/kibana/current/xpack-ml-anomalies.html[machine learning anomaly detection] to help detect patterns in time series data to find root cause and resolve problems faster. +* Use an AI assistant (https://www.elastic.co/guide/en/observability/current/obs-ai-assistant.html[Observability], https://www.elastic.co/guide/en/security/current/security-assistant.html[Security], or https://www.elastic.co/guide/en/kibana/current/playground.html[Playground]) for investigation, incident response, reporting, query generation, or query conversion from other languages using natural language. +* Deploy an architecture model that allows for maximum flexibility between storage cost and performance. 
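+
+As an illustration of the lifecycle automation described above, the following sketch shows an index lifecycle management (ILM) policy that rolls data over on the hot tier and then moves it to the frozen tier as a searchable snapshot. This is a minimal sketch, not part of the reference architecture itself: the policy name, the `my-snapshot-repo` repository name, and all thresholds are illustrative, and the snapshot repository is assumed to be registered already. Adjust the values to your own ingest volume and retention requirements.
+
+[source,console]
+----
+// Illustrative policy: rollover on hot, move to frozen as a searchable snapshot, delete after a year
+PUT _ilm/policy/hot-frozen-logs
+{
+  "policy": {
+    "phases": {
+      "hot": {
+        "actions": {
+          "rollover": {
+            "max_primary_shard_size": "50gb",
+            "max_age": "30d"
+          }
+        }
+      },
+      "frozen": {
+        "min_age": "30d",
+        "actions": {
+          "searchable_snapshot": {
+            "snapshot_repository": "my-snapshot-repo"
+          }
+        }
+      },
+      "delete": {
+        "min_age": "365d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+----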
+
+[IMPORTANT]
+====
+**Automated operations that frequently read large data volumes require both high availability (replicas) and predictable low latency (hot, warm, or cold tier).**
+
+* Common examples of these tasks include look-back windows on security detection/alert rules, transforms, machine learning jobs, or watches; and long-running scroll queries or external extract processes.
+* These operations should be completed before moving the data into a frozen tier.
+====
+
+[discrete]
+[[hot-frozen-architecture-diagram]]
+=== Architecture
+
+image::images/hot-frozen.png["A Hot/Frozen Highly available architecture"]
+
+TIP: We use an Availability Zone (AZ) concept in the architecture above.
+When running in your own data center (DC), you can equate AZs to failure zones within a data center, racks, or even separate physical machines, depending on your constraints.
+
+The diagram illustrates an {es} cluster deployed across three availability zones (AZ). For production, we recommend a minimum of two availability zones, and three availability zones for mission-critical applications. See https://www.elastic.co/guide/en/cloud/current/ec-planning.html[Plan for production] for more details. A cluster running in {ecloud} that has data nodes in only two AZs will create a third master-eligible node in a third AZ. High availability cannot be achieved without three zones for any distributed computing technology.
+
+The number of data nodes shown for each tier (hot and frozen) is illustrative and would be scaled up depending on ingest volume and retention period. Hot nodes contain both primary and replica shards. By default, primary and replica shards are always guaranteed to be in different availability zones in {ess}, but when self-deploying, {ref}/shard-allocation-awareness.html[shard allocation awareness] must be configured. Frozen nodes act as a large high-speed cache and retrieve data from the snapshot store as needed.
+
+Machine learning nodes are optional but highly recommended for large-scale time series use cases, since the volume of data quickly becomes too large to analyze manually. Applying techniques such as machine learning based anomaly detection or Search AI with large language models helps to dramatically speed up problem identification and resolution.
+
+[discrete]
+[[hot-frozen-hardware]]
+=== Recommended hardware specifications
+
+With {ecloud} you can deploy clusters in AWS, Azure, and Google Cloud. Available hardware types and configurations vary across all three cloud providers, but each provides instance types that meet our recommendations for the node types used in this architecture. For more details on these instance types, see our documentation on {ecloud} hardware for https://www.elastic.co/guide/en/cloud/current/ec-default-aws-configurations.html[AWS], https://www.elastic.co/guide/en/cloud/current/ec-default-azure-configurations.html[Azure], and https://www.elastic.co/guide/en/cloud/current/ec-default-gcp-configurations.html[GCP]. The **Physical** column below is guidance, based on the cloud node types, for self-deploying {es} in your own data center.
+
+In the links provided above, Elastic has performance-tested hardware for each of the cloud providers to find the optimal hardware for each node type. We use ratios to represent the best mix of CPU, RAM, and disk for each type. In some cases the CPU-to-RAM ratio is key; in others, the disk-to-memory ratio and the type of disk are critical.
Significantly deviating from these ratios may seem like a way to save on hardware costs, but may result in an {es} cluster that does not scale and perform well. + +This table shows our specific recommendations for nodes in a Hot/Frozen architecture. + +|=== +| **Type** | **AWS** | **Azure** | **GCP** | **Physical** +|image:images/hot.png["Hot data node"] | +c6gd | +f32sv2| + + +N2| +16-32 vCPU + +64 GB RAM + +2-6 TB NVMe SSD + +|image:images/frozen.png["Frozen data node"] +| +i3en +| +e8dsv4 +| +N2| +8 vCPU + +64 GB RAM + +6-20+ TB NVMe SSD + +Depending on days cached +|image:images/machine-learning.png["Machine learning node"] +| +m6gd +| +f16sv2 +| +N2| +16 vCPU + +64 GB RAM + +256 GB SSD +|image:images/master.png["Master node"] +| +c5d +| +f16sv2 +| +N2| +8 vCPU + +16 GB RAM + +256 GB SSD +|image:images/kibana.png["Kibana node"] +| +c6gd +| +f16sv2 +| +N2| +8-16 vCPU + +8 GB RAM + +256 GB SSD +|=== + +[discrete] +[[hot-frozen-considerations]] +=== Important considerations + + +**Updating data:** + +* Typically, time series logging use cases are append-only and there is rarely a need to update documents. The frozen tier is read-only. + +**Multi-AZ frozen tier:** + +* Three availability zones is ideal, but at least two availability zones are recommended to ensure that there will be data nodes available in the event of an AZ failure. + +**Shard management:** + +* The most important foundational step to maintaining performance as you scale is proper shard management. This includes even shard distribution amongst nodes, shard size, and shard count. For a complete understanding of what shards are and how they should be used, refer to https://www.elastic.co/guide/en/elasticsearch/reference/current/size-your-shards.html[Size your shards]. + +**Snapshots:** + +* If auditable or business critical events are being logged, a backup is necessary. The choice to back up data will depend on each individual business's needs and requirements. Refer to our https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshots-register-repository.html[snapshot repository] documentation to learn more. +* To automate snapshots and attach to Index lifecycle management policies, refer to https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshots-take-snapshot.html#automate-snapshots-slm[SLM (Snapshot lifecycle management)]. + +**Kibana:** + +* If self-deploying outside of {ess}, ensure that {kib} is configured for https://www.elastic.co/guide/en/kibana/current/production.html#high-availability[high availability]. + +[discrete] +[[hot-frozen-estimate]] +=== How many nodes of each do you need? +It depends on: + +* The type of data being ingested (such as logs, metrics, traces) +* The retention period of searchable data (such as 30 days, 90 days, 1 year) +* The amount of data you need to ingest each day +* The number of dashboards, queries, query types and how frequent they are run. + +You can https://www.elastic.co/contact[contact us] for an estimate and recommended configuration based on your specific scenario. 
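+
+If you decide to back up auditable or business-critical data, as discussed under Important considerations above, a snapshot lifecycle management (SLM) policy similar to the sketch below can automate the snapshots that the frozen tier and your backups rely on. The policy name, schedule, `my-snapshot-repo` repository name, and retention values are illustrative assumptions, and the repository must already be registered.
+
+[source,console]
+----
+// Illustrative SLM policy: nightly snapshots with retention, against an existing repository
+PUT _slm/policy/nightly-snapshots
+{
+  "schedule": "0 30 1 * * ?",
+  "name": "<nightly-snap-{now/d}>",
+  "repository": "my-snapshot-repo",
+  "config": {
+    "indices": "*",
+    "include_global_state": true
+  },
+  "retention": {
+    "expire_after": "30d",
+    "min_count": 5,
+    "max_count": 50
+  }
+}
+----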
+ +[discrete] +[[hot-frozen-resources]] +=== Resources and references + +* https://www.elastic.co/guide/en/elasticsearch/reference/current/scalability.html[{es} - Get ready for production] + +* https://www.elastic.co/guide/en/cloud/current/ec-prepare-production.html[{ess} - Preparing a deployment for production] + +* https://www.elastic.co/guide/en/elasticsearch/reference/current/size-your-shards.html[Size your shards] diff --git a/docs/en/reference-architectures/images/frozen.png b/docs/en/reference-architectures/images/frozen.png new file mode 100644 index 000000000..5e7d53bff Binary files /dev/null and b/docs/en/reference-architectures/images/frozen.png differ diff --git a/docs/en/reference-architectures/images/hot-frozen.png b/docs/en/reference-architectures/images/hot-frozen.png new file mode 100644 index 000000000..1e12b4b25 Binary files /dev/null and b/docs/en/reference-architectures/images/hot-frozen.png differ diff --git a/docs/en/reference-architectures/images/hot.png b/docs/en/reference-architectures/images/hot.png new file mode 100644 index 000000000..ebfab3925 Binary files /dev/null and b/docs/en/reference-architectures/images/hot.png differ diff --git a/docs/en/reference-architectures/images/kibana.png b/docs/en/reference-architectures/images/kibana.png new file mode 100644 index 000000000..78a39ae74 Binary files /dev/null and b/docs/en/reference-architectures/images/kibana.png differ diff --git a/docs/en/reference-architectures/images/machine-learning.png b/docs/en/reference-architectures/images/machine-learning.png new file mode 100644 index 000000000..3508f0478 Binary files /dev/null and b/docs/en/reference-architectures/images/machine-learning.png differ diff --git a/docs/en/reference-architectures/images/master.png b/docs/en/reference-architectures/images/master.png new file mode 100644 index 000000000..9de92464c Binary files /dev/null and b/docs/en/reference-architectures/images/master.png differ diff --git a/docs/en/reference-architectures/index.asciidoc b/docs/en/reference-architectures/index.asciidoc new file mode 100644 index 000000000..397114105 --- /dev/null +++ b/docs/en/reference-architectures/index.asciidoc @@ -0,0 +1,9 @@ +include::{docs-root}/shared/versions/stack/{source_branch}.asciidoc[] +include::{docs-root}/shared/attributes.asciidoc[] + +[[reference-architectures]] += Reference architectures + +include::reference-architectures-overview.asciidoc[] + +include::hot-frozen.asciidoc[] diff --git a/docs/en/reference-architectures/reference-architectures-overview.asciidoc b/docs/en/reference-architectures/reference-architectures-overview.asciidoc new file mode 100644 index 000000000..4c0977786 --- /dev/null +++ b/docs/en/reference-architectures/reference-architectures-overview.asciidoc @@ -0,0 +1,32 @@ +[[reference-architectures-overview]] += Reference architectures + +Elasticsearch reference architectures are blueprints for deploying Elasticsearch clusters tailored to different use cases. Whether you're handling logs or metrics these reference architectures focus on scalability, reliability, and cost efficiency. Use these guidelines to deploy Elasticsearch for your use case. + +These architectures are designed by architects and engineers to provide standardized, proven solutions that help you to follow best practices when deploying {es}. + +TIP: These architectures are specific to running your deployment on-premises or on cloud. If you are using Elastic serverless your {es} clusters are autoscaled and fully managed by Elastic. 
For all the deployment options, refer to https://www.elastic.co/guide/en/elasticsearch/reference/current/elasticsearch-intro-deploy.html[Run Elasticsearch].
+
+These reference architectures are recommendations and should be adapted to fit your specific environment and needs. Each solution can vary based on the unique requirements and conditions of your deployment. In these architectures, we discuss how to deploy cluster components. For information about designing ingest architectures to feed content into your cluster, refer to https://www.elastic.co/guide/en/ingest/current/use-case-arch.html[Ingest architectures].
+
+[discrete]
+[[reference-architectures-time-series-2]]
+=== Architectures
+
+[cols="50, 50"]
+|===
+| *Architecture* | *When to use*
+| <<hot-frozen-architecture>>
+
+A high availability architecture that is cost optimized for large time-series datasets.
+
+a|
+* Have a requirement for cost effective long term data storage (many months or years).
+* Provide insights and alerts using logs, metrics, traces, or various event types to ensure optimal performance and quick issue resolution for applications.
+* Apply machine learning and Search AI to assist in dealing with the large amount of data.
+* Deploy an architecture model that allows for maximum flexibility between storage cost and performance.
+| Additional architectures are on the way.
+
+Stay tuned for updates. |
+
+|===
diff --git a/docs/en/stack/ml/anomaly-detection/anomaly-detection-scale.asciidoc b/docs/en/stack/ml/anomaly-detection/anomaly-detection-scale.asciidoc
index 5326b8567..d338f9101 100644
--- a/docs/en/stack/ml/anomaly-detection/anomaly-detection-scale.asciidoc
+++ b/docs/en/stack/ml/anomaly-detection/anomaly-detection-scale.asciidoc
@@ -65,7 +65,7 @@ size of a {ml} node.
 Creating {ml-jobs} with model memory limits larger than the maximum node size can support is not allowed,
 as autoscaling cannot add a node big enough to run the job. On a self-managed deployment, you can set
 `xpack.ml.max_model_memory_limit` according to the available resources of the
-{ml} node. This prevents you from you creating jobs with model memory limits too
+{ml} node. This prevents you from creating jobs with model memory limits too
 high to open in your cluster.
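+
+For example, the following sketch caps the model memory limit for new {anomaly-jobs} at 10 GB. The value is an illustrative assumption and should reflect what your largest {ml} node can actually accommodate; because the setting is dynamic, you can update it through the cluster settings API:
+
+[source,console]
+----
+// Illustrative value: set according to the memory available on your ML nodes
+PUT _cluster/settings
+{
+  "persistent": {
+    "xpack.ml.max_model_memory_limit": "10gb"
+  }
+}
+----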
[discrete] diff --git a/docs/en/stack/ml/anomaly-detection/index.asciidoc b/docs/en/stack/ml/anomaly-detection/index.asciidoc index 6a24d3ba0..1db6e4475 100644 --- a/docs/en/stack/ml/anomaly-detection/index.asciidoc +++ b/docs/en/stack/ml/anomaly-detection/index.asciidoc @@ -56,40 +56,43 @@ include::ml-ad-resources.asciidoc[leveloffset=+1] include::ml-limitations.asciidoc[leveloffset=+2] -include::{es-repo-dir}/ml/anomaly-detection/functions/ml-functions.asciidoc[leveloffset=+2] +include::ml-ad-troubleshooting.asciidoc[leveloffset=+2] -include::ootb-ml-jobs.asciidoc[leveloffset=+2] +include::{es-repo-dir}/ml/anomaly-detection/functions/ml-functions.asciidoc[leveloffset=+1] -include::ml-ad-troubleshooting.asciidoc[leveloffset=+2] +include::{es-repo-dir}/ml/anomaly-detection/functions/ml-count-functions.asciidoc[leveloffset=+2] + +include::{es-repo-dir}/ml/anomaly-detection/functions/ml-geo-functions.asciidoc[leveloffset=+2] + +include::{es-repo-dir}/ml/anomaly-detection/functions/ml-info-functions.asciidoc[leveloffset=+2] + +include::{es-repo-dir}/ml/anomaly-detection/functions/ml-metric-functions.asciidoc[leveloffset=+2] -include::ootb-ml-jobs-apache.asciidoc[] +include::{es-repo-dir}/ml/anomaly-detection/functions/ml-rare-functions.asciidoc[leveloffset=+2] -include::ootb-ml-jobs-apm.asciidoc[] +include::{es-repo-dir}/ml/anomaly-detection/functions/ml-sum-functions.asciidoc[leveloffset=+2] -include::ootb-ml-jobs-auditbeat.asciidoc[] +include::{es-repo-dir}/ml/anomaly-detection/functions/ml-time-functions.asciidoc[leveloffset=+2] -include::ootb-ml-jobs-logs-ui.asciidoc[] -include::ootb-ml-jobs-metricbeat.asciidoc[] +include::ootb-ml-jobs.asciidoc[leveloffset=+1] -include::ootb-ml-jobs-metrics-ui.asciidoc[] +include::ootb-ml-jobs-apache.asciidoc[leveloffset=+2] -include::ootb-ml-jobs-nginx.asciidoc[] +include::ootb-ml-jobs-apm.asciidoc[leveloffset=+2] -include::ootb-ml-jobs-siem.asciidoc[] +include::ootb-ml-jobs-auditbeat.asciidoc[leveloffset=+2] -include::ootb-ml-jobs-uptime.asciidoc[] +include::ootb-ml-jobs-logs-ui.asciidoc[leveloffset=+2] -include::{es-repo-dir}/ml/anomaly-detection/functions/ml-count-functions.asciidoc[] +include::ootb-ml-jobs-metricbeat.asciidoc[leveloffset=+2] -include::{es-repo-dir}/ml/anomaly-detection/functions/ml-geo-functions.asciidoc[] +include::ootb-ml-jobs-metrics-ui.asciidoc[leveloffset=+2] -include::{es-repo-dir}/ml/anomaly-detection/functions/ml-info-functions.asciidoc[] +include::ootb-ml-jobs-nginx.asciidoc[leveloffset=+2] -include::{es-repo-dir}/ml/anomaly-detection/functions/ml-metric-functions.asciidoc[] +include::ootb-ml-jobs-siem.asciidoc[leveloffset=+2] -include::{es-repo-dir}/ml/anomaly-detection/functions/ml-rare-functions.asciidoc[] +include::ootb-ml-jobs-uptime.asciidoc[leveloffset=+2] -include::{es-repo-dir}/ml/anomaly-detection/functions/ml-sum-functions.asciidoc[] -include::{es-repo-dir}/ml/anomaly-detection/functions/ml-time-functions.asciidoc[] diff --git a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-apache.asciidoc b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-apache.asciidoc index a63d5694c..91dd0a51e 100644 --- a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-apache.asciidoc +++ b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-apache.asciidoc @@ -1,4 +1,4 @@ -["appendix",role="exclude",id="ootb-ml-jobs-apache"] +[[ootb-ml-jobs-apache]] = Apache {anomaly-detect} configurations // tag::apache-jobs[] diff --git a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-apm.asciidoc 
b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-apm.asciidoc index cd7cdd9d1..c685b9db0 100644 --- a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-apm.asciidoc +++ b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-apm.asciidoc @@ -1,4 +1,4 @@ -["appendix",role="exclude",id="ootb-ml-jobs-apm"] +[[ootb-ml-jobs-apm]] = APM {anomaly-detect} configurations This {anomaly-job} appears in the {apm-app} and the {ml-app} app when you have diff --git a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-auditbeat.asciidoc b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-auditbeat.asciidoc index 537c687bf..027d520d7 100644 --- a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-auditbeat.asciidoc +++ b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-auditbeat.asciidoc @@ -1,4 +1,4 @@ -["appendix",role="exclude",id="ootb-ml-jobs-auditbeat"] +[[ootb-ml-jobs-auditbeat]] = {auditbeat} {anomaly-detect} configurations // tag::auditbeat-jobs[] diff --git a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-logs-ui.asciidoc b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-logs-ui.asciidoc index c15e3f815..2faa037a1 100644 --- a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-logs-ui.asciidoc +++ b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-logs-ui.asciidoc @@ -1,4 +1,4 @@ -["appendix",role="exclude",id="ootb-ml-jobs-logs-ui"] +[[ootb-ml-jobs-logs-ui]] = Logs {anomaly-detect} configurations These {anomaly-jobs} appear by default in the diff --git a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-metricbeat.asciidoc b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-metricbeat.asciidoc index e2a8ebee7..1b163e687 100644 --- a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-metricbeat.asciidoc +++ b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-metricbeat.asciidoc @@ -1,4 +1,4 @@ -["appendix",role="exclude",id="ootb-ml-jobs-metricbeat"] +[[ootb-ml-jobs-metricbeat]] = {metricbeat} {anomaly-detect} configurations These {anomaly-job} wizards appear in {kib} if you use the diff --git a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-metrics-ui.asciidoc b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-metrics-ui.asciidoc index 3136f1603..6974b7b6d 100644 --- a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-metrics-ui.asciidoc +++ b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-metrics-ui.asciidoc @@ -1,4 +1,4 @@ -["appendix",role="exclude",id="ootb-ml-jobs-metrics-ui"] +[[ootb-ml-jobs-metrics-ui]] = Metrics {anomaly-detect} configurations These {anomaly-jobs} can be created in the {observability-guide}/analyze-metrics.html[{infrastructure-app}] in {kib}. 
diff --git a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-nginx.asciidoc b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-nginx.asciidoc index 938f33913..c6583ce61 100644 --- a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-nginx.asciidoc +++ b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-nginx.asciidoc @@ -1,4 +1,4 @@ -["appendix",role="exclude",id="ootb-ml-jobs-nginx"] +[[ootb-ml-jobs-nginx]] = Nginx {anomaly-detect} configurations // tag::nginx-jobs[] diff --git a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-siem.asciidoc b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-siem.asciidoc index d770692f9..797f50312 100644 --- a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-siem.asciidoc +++ b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-siem.asciidoc @@ -1,4 +1,4 @@ -["appendix",role="exclude",id="ootb-ml-jobs-siem"] +[[ootb-ml-jobs-siem]] = Security {anomaly-detect} configurations // tag::siem-jobs[] @@ -116,6 +116,39 @@ for data that matches the query. |=== // end::security-cloudtrail-jobs[] +[discrete] +[[security-host-jobs]] +== Security: Host + +Anomaly detection jobs for host-based threat hunting and detection. + +In the {ml-app} app, these configurations are available only when data exists +that matches the query specified in the +https://github.com/elastic/kibana/blob/{branch}/x-pack/platform/plugins/shared/ml/server/models/data_recognizer/modules/security_host/manifest.json[manifest file]. +In the {security-app}, it looks in the {data-source} specified in the +{kibana-ref}/advanced-options.html#securitysolution-defaultindex[`securitySolution:defaultIndex` advanced setting] +for data that matches the query. + +To access the host traffic anomalies dashboard in Kibana, go to: `Security -> Dashboards -> Host Traffic Anomalies`. + +// tag::security-host-jobs[] + +|=== +|Name |Description |Job |Datafeed + +|high_count_events_for_a_host_name +|Looks for a sudden spike in host based traffic. This can be due to a range of security issues, such as a compromised system, DDoS attacks, malware infections, privilege escalation, or data exfiltration. +|https://github.com/elastic/kibana/blob/{branch}/x-pack/platform/plugins/shared/ml/server/models/data_recognizer/modules/security_host/ml/high_count_events_for_a_host_name.json[image:images/link.svg[A link icon]] +|https://github.com/elastic/kibana/blob/{branch}/x-pack/platform/plugins/shared/ml/server/models/data_recognizer/modules/security_host/ml/datafeed_high_count_events_for_a_host_name.json[image:images/link.svg[A link icon]] + +|low_count_events_for_a_host_name +|Looks for a sudden drop in host based traffic. This can be due to a range of security issues, such as a compromised system, a failed service, or a network misconfiguration. +|https://github.com/elastic/kibana/blob/{branch}/x-pack/platform/plugins/shared/ml/server/models/data_recognizer/modules/security_host/ml/low_count_events_for_a_host_name.json[image:images/link.svg[A link icon]] +|https://github.com/elastic/kibana/blob/{branch}/x-pack/platform/plugins/shared/ml/server/models/data_recognizer/modules/security_host/ml/datafeed_low_count_events_for_a_host_name.json[image:images/link.svg[A link icon]] + +|=== +// end::security-host-jobs[] + [discrete] [[security-linux-jobs]] == Security: Linux @@ -548,4 +581,4 @@ The job configurations and datafeeds can be found https://github.com/elastic/integrations/blob/main/packages/lmd/kibana/ml_module/lmd-ml.json[here]. 
// end::security-windows-jobs[] -// end::siem-jobs[] \ No newline at end of file +// end::siem-jobs[] diff --git a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-uptime.asciidoc b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-uptime.asciidoc index dec44e182..d1215783f 100644 --- a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-uptime.asciidoc +++ b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs-uptime.asciidoc @@ -1,4 +1,4 @@ -["appendix",role="exclude",id="ootb-ml-jobs-uptime"] +[[ootb-ml-jobs-uptime]] = Uptime {anomaly-detect} configurations If you have appropriate {heartbeat} data in {es}, you can enable this diff --git a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs.asciidoc b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs.asciidoc index 7aa98eb05..eb8c8136f 100644 --- a/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs.asciidoc +++ b/docs/en/stack/ml/anomaly-detection/ootb-ml-jobs.asciidoc @@ -1,4 +1,3 @@ -[role="xpack"] [[ootb-ml-jobs]] = Supplied {anomaly-detect} configurations ++++ diff --git a/docs/en/stack/ml/nlp/index.asciidoc b/docs/en/stack/ml/nlp/index.asciidoc index ef78ae52e..206f72919 100644 --- a/docs/en/stack/ml/nlp/index.asciidoc +++ b/docs/en/stack/ml/nlp/index.asciidoc @@ -9,6 +9,7 @@ include::ml-nlp-inference.asciidoc[leveloffset=+1] include::ml-nlp-apis.asciidoc[leveloffset=+1] include::ml-nlp-built-in-models.asciidoc[leveloffset=+1] include::ml-nlp-elser.asciidoc[leveloffset=+2] +include::ml-nlp-elastic-rerank.asciidoc[leveloffset=+2] include::ml-nlp-e5.asciidoc[leveloffset=+2] include::ml-nlp-lang-ident.asciidoc[leveloffset=+2] include::ml-nlp-model-ref.asciidoc[leveloffset=+1] diff --git a/docs/en/stack/ml/nlp/ml-nlp-e5.asciidoc b/docs/en/stack/ml/nlp/ml-nlp-e5.asciidoc index 4ea26b878..28c5303f5 100644 --- a/docs/en/stack/ml/nlp/ml-nlp-e5.asciidoc +++ b/docs/en/stack/ml/nlp/ml-nlp-e5.asciidoc @@ -278,7 +278,7 @@ your system. -- [source,bash] ---- -git clone https://huggingface.co/elastic/multilingual-e5-small +git clone https://huggingface.co/intfloat/multilingual-e5-small ---- The command results in a local copy of the model in the `multilingual-e5-small` directory. diff --git a/docs/en/stack/ml/nlp/ml-nlp-elastic-rerank.asciidoc b/docs/en/stack/ml/nlp/ml-nlp-elastic-rerank.asciidoc new file mode 100644 index 000000000..982831bb7 --- /dev/null +++ b/docs/en/stack/ml/nlp/ml-nlp-elastic-rerank.asciidoc @@ -0,0 +1,365 @@ +[[ml-nlp-rerank]] += Elastic Rerank + +Elastic Rerank is a state-of-the-art cross-encoder reranking model trained by Elastic that helps you improve search relevance with a few simple API calls. +Elastic Rerank is Elastic's first semantic reranking model and is available out-of-the-box in supporting Elastic deployments using the {es} Inference API. + +Use Elastic Rerank to improve existing search applications including: + +* Traditional BM25 scoring +* Hybrid semantic search +* Retrieval Augmented Generation (RAG) + +The model can significantly improve search result quality by reordering results based on deeper semantic understanding of queries and documents. + +When reranking BM25 results, it provides an average 40% improvement in ranking quality on a diverse benchmark of retrieval tasks— matching the performance of models 11x its size. + +[discrete] +[[ml-nlp-rerank-availability]] +== Availability and requirements + +experimental[] + +[discrete] +[[ml-nlp-rerank-availability-serverless]] +=== Elastic Cloud Serverless + +Elastic Rerank is available in {es} Serverless projects as of November 25, 2024. 
+ +[discrete] +[[ml-nlp-rerank-availability-elastic-stack]] +=== Elastic Cloud Hosted and self-managed deployments + +Elastic Rerank is available in Elastic Stack version 8.17+: + +* To use Elastic Rerank, you must have the appropriate subscription level or the trial period activated. +* A 4GB ML node ++ +[IMPORTANT] +==== +Deploying the Elastic Rerank model in combination with ELSER (or other hosted models) requires at minimum an 8GB ML node. The current maximum size for trial ML nodes is 4GB (defaults to 1GB). +==== + +[discrete] +[[ml-nlp-rerank-deploy]] +== Download and deploy + +To download and deploy Elastic Rerank, use the {ref}/infer-service-elasticsearch.html[create inference API] to create an {es} service `rerank` endpoint. + +[TIP] +==== +Refer to this https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/12-semantic-reranking-elastic-rerank.ipynb[Python notebook] for an end-to-end example using Elastic Rerank. +==== + +[discrete] +[[ml-nlp-rerank-deploy-steps]] +=== Create an inference endpoint + +. In {kib}, navigate to the *Dev Console*. + +. Create an {infer} endpoint with the Elastic Rerank service by running: ++ +[source,console] +---------------------------------- +PUT _inference/rerank/my-rerank-model +{ + "service": "elasticsearch", + "service_settings": { + "adaptive_allocations": { + "enabled": true, + "min_number_of_allocations": 1, + "max_number_of_allocations": 10 + }, + "num_threads": 1, + "model_id": ".rerank-v1" + } +} +---------------------------------- ++ +NOTE: The API request automatically downloads and deploys the model. This example uses <> through adaptive allocation. + +[NOTE] +==== +You might see a 502 bad gateway error in the response when using the {kib} Console. +This error usually just reflects a timeout, while the model downloads in the background. +You can check the download progress in the {ml-app} UI. +If using the Python client, you can set the `timeout` parameter to a higher value. +==== + +After creating the Elastic Rerank {infer} endpoint, it's ready to use with a {ref}/retriever.html#text-similarity-reranker-retriever-example-elastic-rerank[`text_similarity_reranker`] retriever. + +[discrete] +[[ml-nlp-rerank-deploy-verify]] +== Deploy in an air-gapped environment + +If you want to deploy the Elastic Rerank model in a restricted or closed network, you have two options: + +* Create your own HTTP/HTTPS endpoint with the model artifacts on it +* Put the model artifacts into a directory inside the config directory on all master-eligible nodes. + +[discrete] +[[ml-nlp-rerank-model-artifacts]] +=== Model artifact files + +For the cross-platform version, you need the following files in your system: +``` +https://ml-models.elastic.co/rerank-v1.metadata.json +https://ml-models.elastic.co/rerank-v1.pt +https://ml-models.elastic.co/rerank-v1.vocab.json +``` + +// For the optimized version, you need the following files in your system: +// ``` +// https://ml-models.elastic.co/rerank-v1_linux-x86_64.metadata.json +// https://ml-models.elastic.co/rerank-v1_linux-x86_64.pt +// https://ml-models.elastic.co/rerank-v1_linux-x86_64.vocab.json +// ``` + +[discrete] +=== Using an HTTP server + +INFO: If you use an existing HTTP server, note that the model downloader only +supports passwordless HTTP servers. + +You can use any HTTP service to deploy the model. This example uses the official +Nginx Docker image to set a new HTTP download service up. + +. Download the <>. +. Put the files into a subdirectory of your choice. +. 
Run the following commands:
++
+--
+[source, shell]
+--------------------------------------------------
+export ELASTIC_ML_MODELS="/path/to/models"
+docker run --rm -d -p 8080:80 --name ml-models -v ${ELASTIC_ML_MODELS}:/usr/share/nginx/html nginx
+--------------------------------------------------
+
+Don't forget to change `/path/to/models` to the path of the subdirectory where
+the model artifact files are located.
+
+These commands start a local Nginx server in Docker that serves the
+subdirectory containing the model files. As the Docker image has to be
+downloaded, the first start might take a while.
+Subsequent runs start more quickly.
+--
+. Verify that Nginx runs properly by visiting the following URL in your
+browser:
++
+--
+```
+http://{IP_ADDRESS_OR_HOSTNAME}:8080/rerank-v1.metadata.json
+```
+
+If Nginx runs properly, you see the content of the metadata file of the model.
+--
+. Point your {es} deployment to the model artifacts on the HTTP server
+by adding the following line to the `config/elasticsearch.yml` file:
++
+--
+```
+xpack.ml.model_repository: http://{IP_ADDRESS_OR_HOSTNAME}:8080
+```
+
+If you use your own HTTP or HTTPS server, change the address accordingly. It is
+important to specify the protocol ("http://" or "https://"). Ensure that all
+master-eligible nodes can reach the server you specify.
+--
+. Repeat step 5 on all master-eligible nodes.
+. {ref}/restart-cluster.html#restart-cluster-rolling[Restart] the
+master-eligible nodes one by one.
+. Create an inference endpoint to deploy the model per <>.
+
+The HTTP server is only required for downloading the model. After the download
+has finished, you can stop and delete the service. You can stop the Docker image
+used in this example by running the following command:
+
+[source, shell]
+--------------------------------------------------
+docker stop ml-models
+--------------------------------------------------
+
+[discrete]
+=== Using file-based access
+
+For file-based access, follow these steps:
+
+. Download the <>.
+. Put the files into a `models` subdirectory inside the `config` directory of
+your {es} deployment.
+. Point your {es} deployment to the model directory by adding the
+following line to the `config/elasticsearch.yml` file:
++
+--
+```
+xpack.ml.model_repository: file://${path.home}/config/models/
+```
+--
+. Repeat step 2 and step 3 on all master-eligible nodes.
+. {ref}/restart-cluster.html#restart-cluster-rolling[Restart] the
+master-eligible nodes one by one.
+. Create an inference endpoint to deploy the model per <>.
+
+[discrete]
+[[ml-nlp-rerank-limitations]]
+== Limitations
+
+* English language only
+* Maximum context window of 512 tokens
++
+When using the {ref}/semantic-text.html[`semantic_text` field type], text is divided into chunks. By default, each chunk contains 250 words (approximately 400 tokens). Be cautious when increasing the chunk size: if the combined length of your query and chunk text exceeds 512 tokens, the model won't have access to the full content.
++
+When the combined inputs exceed the 512 token limit, a balanced truncation strategy is used. If both the query and input text are longer than 255 tokens each, then both are truncated; otherwise, the longest is truncated.
+
+[discrete]
+[[ml-nlp-rerank-perf-considerations]]
+== Performance considerations
+
+It's important to note that if you rerank to depth `n`, you will need to run `n` inferences per query.
This will include the document text and will therefore be significantly more expensive than inference for query embeddings. Hardware can be scaled to run these inferences in parallel, but we would recommend shallow reranking for CPU inference: no more than top-30 results. You may find that the preview version is cost prohibitive for high query rates and low query latency requirements. We plan to address performance issues for GA. + +[discrete] +[[ml-nlp-rerank-model-specs]] +== Model specifications + +* Purpose-built for English language content + +* Relatively small: 184M parameters (86M backbone + 98M embedding layer) + +* Matches performance of billion-parameter reranking models + +* Built directly into {es} - no external services or dependencies needed + +[discrete] +[[ml-nlp-rerank-arch-overview]] +== Model architecture + +Elastic Rerank is built on the https://arxiv.org/abs/2111.09543[DeBERTa v3] language model architecture. + +The model employs several key architectural features that make it particularly effective for reranking: + +* *Disentangled attention mechanism* enables the model to: +** Process word content and position separately +** Learn more nuanced relationships between query and document text +** Better understand the semantic importance of word positions and relationships + +* *ELECTRA-style pre-training* uses: +** A GAN-like approach to token prediction +** Simultaneous training of token generation and detection +** Enhanced parameter efficiency compared to traditional masked language modeling + +[discrete] +[[ml-nlp-rerank-arch-training]] +== Training process + +Here is an overview of the Elastic Rerank model training process: + +* *Initial relevance extraction* +** Fine-tunes the pre-trained DeBERTa [CLS] token representation +** Uses a GeLU activation and dropout layer +** Preserves important pre-trained knowledge while adapting to the reranking task + +* *Trained by distillation* +** Uses an ensemble of bi-encoder and cross-encoder models as a teacher +** Bi-encoder provides nuanced negative example assessment +** Cross-encoder helps differentiate between positive and negative examples +** Combines strengths of both model types + +[discrete] +[[ml-nlp-rerank-arch-data]] +=== Training data + +The training data consists of: + +* Open domain Question-Answering datasets +* Natural document pairs (like article headings and summaries) +* 180,000 synthetic query-passage pairs with varying relevance +* Total of approximately 3 million queries + +The data preparation process includes: + +* Basic cleaning and fuzzy deduplication +* Multi-stage prompting for diverse topics (on the synthetic portion of the training data only) +* Varied query types: +** Keyword search +** Exact phrase matching +** Short and long natural language questions + +[discrete] +[[ml-nlp-rerank-arch-sampling]] +=== Negative sampling + +The model uses an advanced sampling strategy to ensure high-quality rankings: + +* Samples from top 128 documents per query using multiple retrieval methods +* Uses five negative samples per query - more than typical approaches +* Applies probability distribution shaped by document scores for sampling + +* Deep sampling benefits: +** Improves model robustness across different retrieval depths +** Enhances score calibration +** Provides better handling of document diversity + +[discrete] +[[ml-nlp-rerank-arch-optimization]] +=== Training optimization + +The training process incorporates several key optimizations: + +Uses cross-entropy loss function to: + +* Model 
relevance as probability distribution +* Learn relationships between all document scores +* Fit scores through maximum likelihood estimation + +Implemented parameter averaging along optimization trajectory: + +* Eliminates need for traditional learning rate scheduling and provides improvement in the final model quality + +[discrete] +[[ml-nlp-rerank-performance]] +== Performance + +Elastic Rerank shows significant improvements in search quality across a wide range of retrieval tasks. + +[discrete] +[[ml-nlp-rerank-performance-overview]] +=== Overview + +* Average 40% improvement in ranking quality when reranking BM25 results +* 184M parameter model matches performance of 2B parameter alternatives +* Evaluated across 21 different datasets using the BEIR benchmark suite + +[discrete] +[[ml-nlp-rerank-performance-benchmarks]] +=== Key benchmark results + +* Natural Questions: 90% improvement +* MS MARCO: 85% improvement +* Climate-FEVER: 80% improvement +* FiQA-2018: 76% improvement + +For detailed benchmark information, including complete dataset results and methodology, refer to the https://www.elastic.co/search-labs/blog/elastic-semantic-reranker-part-2[Introducing Elastic Rerank blog]. + +// [discrete] +// [[ml-nlp-rerank-benchmarks-hw]] +// === Hardware benchmarks +// Note: these are more for GA timeframe + +[discrete] +[[ml-nlp-rerank-resources]] +== Further resources + +*Documentation*: + +* {ref}/semantic-reranking.html#semantic-reranking-in-es[Semantic re-ranking in {es} overview] +* {ref}/infer-service-elasticsearch.html#inference-example-elastic-reranker[Inference API example] + +*Blogs*: + +* https://www.elastic.co/search-labs/blog/elastic-semantic-reranker-part-1[Part 1] +* https://www.elastic.co/search-labs/blog/elastic-semantic-reranker-part-2[Part 2] +* https://www.elastic.co/search-labs/blog/elastic-semantic-reranker-part-3[Part 3] + +*Python notebooks*: + +* https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/12-semantic-reranking-elastic-rerank.ipynb[End-to-end example using Elastic Rerank in Python] diff --git a/docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc b/docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc index 241213c44..ddb000c42 100644 --- a/docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc +++ b/docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc @@ -90,7 +90,7 @@ to download and deploy the model and you don't need to select from different versions. If you want to learn more about the ELSER V2 improvements, refer to -https://www.elastic.co/search-labs/introducing-elser-v2-part-1[this blog post]. +https://www.elastic.co/search-labs/blog/introducing-elser-v2-part-1[this blog post]. [discrete] @@ -390,7 +390,7 @@ following line to the `config/elasticsearch.yml` file: + -- ``` -xpack.ml.model_repository: file://${path.home}/config/models/` +xpack.ml.model_repository: file://${path.home}/config/models/ ``` -- . Repeat step 2 and step 3 on all master-eligible nodes. @@ -560,7 +560,7 @@ IMPORTANT: The length of the documents in your particular dataset will have a significant impact on your throughput numbers. Refer to -https://www.elastic.co/search-labs/introducing-elser-v2-part-1[this blog post] +https://www.elastic.co/search-labs/blog/introducing-elser-v2-part-1[this blog post] to learn more about ELSER V2 improved performance. 
image::images/ml-nlp-elser-bm-summary.png[alt="Summary of ELSER V1 and V2 benchmark reports",align="center"] diff --git a/docs/en/stack/ml/nlp/ml-nlp-model-ref.asciidoc b/docs/en/stack/ml/nlp/ml-nlp-model-ref.asciidoc index b68fc5e3f..dd664147b 100644 --- a/docs/en/stack/ml/nlp/ml-nlp-model-ref.asciidoc +++ b/docs/en/stack/ml/nlp/ml-nlp-model-ref.asciidoc @@ -78,6 +78,16 @@ purposes and to get started with the Elastic {nlp} features. * https://huggingface.co/deepset/electra-base-squad2[Electra base squad2] * https://huggingface.co/deepset/tinyroberta-squad2[TinyRoBERTa squad2] +[discrete] +[[ml-nlp-model-ref-sparse-embedding]] +== Third party sparse embedding models + +Sparse embedding models should be configured with the `text_expansion` task type. + +* https://huggingface.co/naver/splade-v3-distilbert[SPLADE-v3-DistilBERT] +* https://huggingface.co/aken12/splade-japanese-v3[aken12/splade-japanese-v3] +* https://huggingface.co/hotchpotch/japanese-splade-v2[hotchpotch/japanese-splade-v2] + [discrete] [[ml-nlp-model-ref-text-embedding]] @@ -154,6 +164,7 @@ You can use these text similarity models for {ref}/semantic-reranking.html#seman * https://huggingface.co/cross-encoder/ms-marco-TinyBERT-L-2-v2[ms marco TinyBERT L2 v2] * https://huggingface.co/cross-encoder/ms-marco-MiniLM-L-6-v2[ms marco MiniLM L6 v2] +* https://huggingface.co/BAAI/bge-reranker-base[BAAI/bge-reranker-base] [discrete] [[ml-nlp-model-ref-zero-shot]]
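+
+As a sketch of how a deployed reranking model (whether Elastic Rerank or one of the text similarity models listed above) is applied at search time, the request below wraps a standard BM25 query in a `text_similarity_reranker` retriever. The index name `my-index`, the `content` field, and the `my-rerank-model` inference endpoint (created in the Elastic Rerank example earlier) are illustrative assumptions; substitute your own index, field, and `rerank` inference endpoint.
+
+[source,console]
+----
+// Illustrative request: rerank the top 30 BM25 hits with a rerank inference endpoint
+GET my-index/_search
+{
+  "retriever": {
+    "text_similarity_reranker": {
+      "retriever": {
+        "standard": {
+          "query": {
+            "match": { "content": "how do searchable snapshots work" }
+          }
+        }
+      },
+      "field": "content",
+      "inference_id": "my-rerank-model",
+      "inference_text": "how do searchable snapshots work",
+      "rank_window_size": 30
+    }
+  }
+}
+----
+
+The `rank_window_size` of 30 follows the shallow-reranking guidance from the performance considerations above; deeper windows increase inference cost linearly.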