1
0
mirror of https://github.com/minio/docs.git synced 2025-07-30 07:03:26 +03:00

Attempting to reduce docs to single platform

This commit is contained in:
Ravind Kumar
2024-06-13 15:33:53 -04:00
parent 3fec026a95
commit 0a53f3af4f
97 changed files with 6498 additions and 10225 deletions

137
Makefile
View File

@ -34,24 +34,9 @@ stage-%:
exit 1; \
fi
@if [ ! $(shell command -v mc) ]; then \
echo "mc not found on this host, exiting" ; \
exit 1; \
fi
@if [ $(shell mc alias list --json docs-staging | jq '.status') = "error" ]; then \
echo "doc-staging alias not found on for host mc configuration, exiting" ; \
exit 1; \
fi
@(./stage.sh)
@if [ $(shell mc stat --json docs-staging/staging | jq '.status') = "error" ]; then \
echo "docs-staging/staging bucket not found, exiting" ; \
exit 1; \
fi
@echo "Copying contents of $(BUILDDIR)/$(GITDIR)/$*/html/* to docs-staging/staging/$(GITDIR)/$*/"
@mc cp -r $(BUILDDIR)/$(GITDIR)/$*/html/* docs-staging/staging/$(GITDIR)/$*/
@echo "Copy complete, visit $(STAGINGURL)/$(GITDIR)/$*/index.html"
# Commenting out the older method
# python -m http.server --directory $(BUILDDIR)/$(GITDIR)/$*/html/
@ -65,12 +50,14 @@ stage-%:
# - Compile SCSS
# - Build docs via Sphinx
linux:
mindocs:
@echo "--------------------------------------"
@echo "Building for $@ Platform"
@echo " Building for MinIO "
@echo "--------------------------------------"
@cp source/default-conf.py source/conf.py
@make sync-deps
@make sync-operator-version
@make sync-deps
ifeq ($(SYNC_SDK),TRUE)
@make sync-sdks
else
@ -80,91 +67,6 @@ endif
@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/$(GITDIR)/$@" $(SPHINXOPTS) $(O) -t $@
@echo -e "Building $@ Complete\n--------------------------------------\n"
# Build the documentation for the Windows platform.
# Resets source/conf.py to the default config, syncs external dependencies,
# compiles frontend assets, then runs Sphinx with the platform tag ($@).
windows:
	@echo "--------------------------------------"
	@echo "Building for $@ Platform"
	@echo "--------------------------------------"
	@cp source/default-conf.py source/conf.py
	@$(MAKE) sync-deps
	@npm run build
	@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/$(GITDIR)/$@" $(SPHINXOPTS) $(O) -t $@
	@echo -e "Building $@ Complete\n--------------------------------------\n"
# Build the documentation for the MacOS platform.
# Resets source/conf.py to the default config, syncs external dependencies,
# compiles frontend assets, then runs Sphinx with the platform tag ($@).
macos:
	@echo "--------------------------------------"
	@echo "Building for $@ Platform"
	@echo "--------------------------------------"
	@cp source/default-conf.py source/conf.py
	@$(MAKE) sync-deps
	@npm run build
	@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/$(GITDIR)/$@" $(SPHINXOPTS) $(O) -t $@
	@echo -e "Building $@ Complete\n--------------------------------------\n"
# Build the documentation for the upstream Kubernetes platform.
# Additionally syncs the Operator version before building, since the K8s docs
# reference Operator releases. Sphinx is invoked with the platform tag ($@).
k8s:
	@echo "--------------------------------------"
	@echo "Building for $@ Platform"
	@echo "--------------------------------------"
	@cp source/default-conf.py source/conf.py
	@$(MAKE) sync-operator-version
	@$(MAKE) sync-deps
	@npm run build
	@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/$(GITDIR)/$@" $(SPHINXOPTS) $(O) -t $@
	@echo -e "Building $@ Complete\n--------------------------------------\n"
# Build the documentation for the Red Hat OpenShift platform.
# Passes both the platform tag ($@) and the shared "k8s" tag so the build
# includes the Kubernetes-common content.
openshift:
	@echo "--------------------------------------"
	@echo "Building for $@ Platform"
	@echo "--------------------------------------"
	@cp source/default-conf.py source/conf.py
	@$(MAKE) sync-operator-version
	@$(MAKE) sync-deps
	@npm run build
	@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/$(GITDIR)/$@" $(SPHINXOPTS) $(O) -t $@ -t k8s
	@echo -e "Building $@ Complete\n--------------------------------------\n"
# Build the documentation for Amazon Elastic Kubernetes Service (EKS).
# Passes both the platform tag ($@) and the shared "k8s" tag so the build
# includes the Kubernetes-common content.
eks:
	@echo "--------------------------------------"
	@echo "Building for $@ Platform"
	@echo "--------------------------------------"
	@cp source/default-conf.py source/conf.py
	@$(MAKE) sync-operator-version
	@$(MAKE) sync-deps
	@npm run build
	@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/$(GITDIR)/$@" $(SPHINXOPTS) $(O) -t $@ -t k8s
	@echo -e "Building $@ Complete\n--------------------------------------\n"
# Build the documentation for Google Kubernetes Engine (GKE).
# Passes both the platform tag ($@) and the shared "k8s" tag so the build
# includes the Kubernetes-common content.
gke:
	@echo "--------------------------------------"
	@echo "Building for $@ Platform"
	@echo "--------------------------------------"
	@cp source/default-conf.py source/conf.py
	@$(MAKE) sync-operator-version
	@$(MAKE) sync-deps
	@npm run build
	@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/$(GITDIR)/$@" $(SPHINXOPTS) $(O) -t $@ -t k8s
	@echo -e "Building $@ Complete\n--------------------------------------\n"
# Build the documentation for Azure Kubernetes Service (AKS).
# Passes both the platform tag ($@) and the shared "k8s" tag so the build
# includes the Kubernetes-common content.
aks:
	@echo "--------------------------------------"
	@echo "Building for $@ Platform"
	@echo "--------------------------------------"
	@cp source/default-conf.py source/conf.py
	@$(MAKE) sync-operator-version
	@$(MAKE) sync-deps
	@npm run build
	@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/$(GITDIR)/$@" $(SPHINXOPTS) $(O) -t $@ -t k8s
	@echo -e "Building $@ Complete\n--------------------------------------\n"
# Build the documentation for the Container (Docker/Podman) platform.
# Resets source/conf.py to the default config, syncs external dependencies,
# compiles frontend assets, then runs Sphinx with the platform tag ($@).
container:
	@echo "--------------------------------------"
	@echo "Building for $@ Platform"
	@echo "--------------------------------------"
	@cp source/default-conf.py source/conf.py
	@$(MAKE) sync-deps
	@npm run build
	@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/$(GITDIR)/$@" $(SPHINXOPTS) $(O) -t $@
	@echo -e "Building $@ Complete\n--------------------------------------\n"
# Synchronization targets
# Note that the @case statements are required to account for differences between Linux and MacOS binaries
# Specifically, MacOS does not use GNU utils, so syntax is slightly different for things like sed
@ -213,31 +115,8 @@ sync-minio-server-docs:
@(./sync-minio-server-docs.sh)
# Retrieve the latest MinIO server version and per-architecture download URLs,
# then substitute the MINIOLATEST/DEBURL/RPMURL/... placeholders in
# source/conf.py. The logic lives in sync-minio-version.sh, which also handles
# the GNU vs BSD (MacOS) sed syntax differences; the older inline
# $(eval)/sed implementation it replaces has been removed to avoid running the
# substitutions twice.
sync-minio-version:
	@echo "Retrieving MinIO latest version and download URLs"
	@(./sync-minio-version.sh)
sync-sdks:
@(./sync-docs.sh)
@ -249,7 +128,7 @@ sync-operator-crd:
# Synchronize all external dependencies referenced by the docs:
# the MinIO server version/URLs, the KES version, and the server docs content.
# C++ and Rust repos do not have any releases yet.
sync-deps:
	@echo "Synchronizing all external dependencies"
	@$(MAKE) sync-minio-version
	@$(MAKE) sync-kes-version
	@$(MAKE) sync-minio-server-docs

6155
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@ -322,7 +322,7 @@ window.addEventListener("DOMContentLoaded", (event) => {
else if (page_title === "Software Development Kits (SDK)") {
list[i].insertAdjacentElement('beforebegin',developerPersona);
}
else if (page_title === "MinIO Client") {
else if (page_title === "Kubernetes Reference") {
list[i].insertAdjacentElement('beforebegin',referencePersona);
}
}

View File

@ -1,5 +1,6 @@
<div class="platform-nav">
<div class="platform-nav__main">
<<<<<<< HEAD
<nav>
<a rel="noreferrer" href="/docs/minio/kubernetes/upstream/index.html" class="{{ 'active' if doc_platform == 'k8s' }}">
Kubernetes
@ -22,6 +23,10 @@
</a>
</nav>
=======
<div class="container">
</div>
>>>>>>> 8da23e1 (Attempting to reduce docs to single platform)
{%- if pagename != "search" %}
<button type="button" class="icon search-toggle search-toggle--keyboard visible-rm">
{%- include "icons/search.html" %}
@ -29,18 +34,4 @@
</button>
{%- endif %}
</div>
{%- if doc_platform == 'k8s' or doc_platform == 'openshift' or doc_platform == 'eks' or doc_platform == 'gke' or doc_platform == 'aks' -%}
<div class="platform-nav__sub">
<div class="container">
<nav>
<a rel="noreferrer" href="/docs/minio/kubernetes/upstream/index.html" class="{{ 'active' if doc_platform == 'k8s' }}">Upstream</a>
<a rel="noreferrer" href="/docs/minio/kubernetes/openshift/index.html" class="{{ 'active' if doc_platform == 'openshift' }}">Redhat Openshift</a>
<a rel="noreferrer" href="/docs/minio/kubernetes/eks/index.html" class="{{ 'active' if doc_platform == 'eks' }}">Amazon Elastic Kubernetes Service</a>
<a rel="noreferrer" href="/docs/minio/kubernetes/gke/index.html" class="{{ 'active' if doc_platform == 'gke' }}">Google Kubernetes Engine</a>
<a rel="noreferrer" href="/docs/minio/kubernetes/aks/index.html" class="{{ 'active' if doc_platform == 'aks' }}">Azure Kubernetes Service</a>
</nav>
</div>
</div>
{%- endif -%}
</div>

View File

@ -97,11 +97,6 @@ Administrators typically control the creation and configuration of buckets.
Client applications can then use :ref:`S3-compatible SDKs <minio-drivers>` to create, list, retrieve, and :ref:`delete <minio-object-delete>` objects on the MinIO deployment.
Clients therefore drive the overall hierarchy of data within a given bucket or prefix, where Administrators can exercise control using :ref:`policies <minio-policy>` to grant or deny access to an action or resource.
.. cond:: windows
MinIO does not support the ``\`` or ``:`` characters in object names, regardless of support for those characters in Windows filesystems.
Use ``/`` as a delimiter in object names to have MinIO automatically create a folder structure using :term:`prefixes <prefix>`.
MinIO has no hard :ref:`thresholds <minio-server-limits>` on the number of buckets, objects, or prefixes on a given deployment.
The relative performance of the hardware and networking underlying the MinIO deployment may create a practical limit to the number of objects in a given prefix or bucket.
Specifically, hardware using slower drives or network infrastructures tend to exhibit poor performance in buckets or prefixes with a flat hierarchy of objects.
@ -117,6 +112,11 @@ Consider the following points as general guidance for client applications worklo
For a deeper discussion on the benefits of limiting prefix contents, see the article on :s3-docs:`optimizing S3 performance <optimizing-performance.html>`.
.. note::
MinIO does not support the ``\`` or ``:`` characters in object names, regardless of support for those characters in Windows filesystems.
Use ``/`` as a delimiter in object names to have MinIO automatically create a folder structure using :term:`prefixes <prefix>`.
Object Versioning
-----------------

View File

@ -71,6 +71,7 @@ extlinks = {
'eks-docs' : ('https://docs.aws.amazon.com/eks/latest/userguide/%s', None),
'minio-web' : ('https://min.io/%s?ref=docs', None),
'minio-docs' : ('https://min.io/docs/%s?ref=docs-internal', None),
'minio-blog' : ('https://blog.min.io/%s?ref=docs', None),
'gke-docs' : ('https://cloud.google.com/kubernetes-engine/docs/%s', None),
'gcp-docs' : ('https://cloud.google.com/compute/docs/%s', None),
'gcs-docs' : ('https://cloud.google.com/storage/docs/%s', None),
@ -106,78 +107,6 @@ sitemap_url_scheme = "{link}"
excludes = []
if tags.has("linux"):
html_baseurl = 'https://min.io/docs/minio/linux/'
with open('url-excludes.yaml','r') as f:
for i in (yaml.safe_load_all(f)):
if i['tag'] == 'linux':
excludes = i['excludes']
break
elif tags.has("macos"):
html_baseurl = 'https://min.io/docs/minio/macos/'
with open('url-excludes.yaml','r') as f:
for i in (yaml.safe_load_all(f)):
if i['tag'] == 'macos':
excludes = i['excludes']
break
elif tags.has("windows"):
# html_baseurl is used for generating the sitemap.xml for each platform. These are combined in a sitemapindex.xml.
html_baseurl = 'https://min.io/docs/minio/windows/'
with open('url-excludes.yaml','r') as f:
for i in (yaml.safe_load_all(f)):
if i['tag'] == 'windows':
excludes = i['excludes']
break
elif tags.has("container"):
html_baseurl = 'https://min.io/docs/minio/container/'
with open('url-excludes.yaml','r') as f:
for i in (yaml.safe_load_all(f)):
if i['tag'] == 'container':
excludes = i['excludes']
break
elif tags.has("k8s") and not (tags.has("openshift") or tags.has("eks") or tags.has("gke") or tags.has("aks")):
html_baseurl = 'https://min.io/docs/minio/kubernetes/upstream/'
with open('url-excludes.yaml','r') as f:
for i in (yaml.safe_load_all(f)):
if i['tag'] == 'k8s':
excludes = i['excludes']
break
elif tags.has("openshift"):
html_baseurl = 'https://min.io/docs/minio/kubernetes/openshift/'
with open('url-excludes.yaml','r') as f:
for i in (yaml.safe_load_all(f)):
if i['tag'] == 'openshift':
excludes = i['excludes']
break
elif tags.has("eks"):
html_baseurl = 'https://min.io/docs/minio/kubernetes/eks/'
with open('url-excludes.yaml','r') as f:
for i in (yaml.safe_load_all(f)):
if i['tag'] == 'eks':
excludes = i['excludes']
break
elif tags.has("gke"):
html_baseurl = 'https://min.io/docs/minio/kubernetes/gke/'
with open('url-excludes.yaml','r') as f:
for i in (yaml.safe_load_all(f)):
if i['tag'] == 'gke':
excludes = i['excludes']
break
elif tags.has("aks"):
html_baseurl = 'https://min.io/docs/minio/kubernetes/aks/'
with open('url-excludes.yaml','r') as f:
for i in (yaml.safe_load_all(f)):
if i['tag'] == 'aks':
excludes = i['excludes']
break
exclude_patterns.extend(excludes)
@ -246,39 +175,21 @@ html_js_files = [
# Add https://www.min.io/robots.txt to html_extra_path list once available.
html_extra_path = [ 'extra']
html_baseurl = 'https://min.io/docs/minio/'
# -- Project information -----------------------------------------------------
# We assume a single tag, since we control the builder
platform = list(tags.tags.keys())[0]
platform_fmt = ""
if platform == "k8s":
platform_fmt = "Kubernetes"
elif platform == "macos":
platform_fmt = "MacOS"
elif platform == "openshift":
platform_fmt = "OpenShift"
elif platform == "eks":
platform_fmt = "Elastic Kubernetes Service"
elif platform == "gke":
platform_fmt = "Google Kubernetes Engine"
elif platform == "aks":
platform_fmt = "Azure Kubernetes Service"
else:
platform_fmt = platform.capitalize()
project = 'MinIO Documentation for ' + platform_fmt
project = 'Documentation for MinIO Object Storage'
copyright = '2020-Present, MinIO, Inc. '
author = 'MinIO Documentation Team'
html_title = 'MinIO Object Storage for ' + platform_fmt
html_short_title = 'MinIO Object Storage for ' + platform_fmt
html_title = 'MinIO Object Storage (AGPLv3)'
html_short_title = 'MinIO Object Storage'
html_permalinks_icon = ''
html_context = {
'doc_platform': platform.lower(),
'doc_platform': 'k8s',
'docs': [
# The first item has to be the current docs site #
{
@ -306,14 +217,9 @@ sphinx_tabs_disable_css_loading = True
# k8s is temporary until integrating the references here
intersphinx_mapping = {
'linux' : ('https://min.io/docs/minio/linux/', None),
'kubernetes' : ('https://min.io/docs/minio/kubernetes/upstream/',None)
}
rst_prolog = """
.. |platform| replace:: %s
.. |platform| replace:: 'foo'
.. |podman| replace:: `Podman <https://podman.io/>`__
@ -323,10 +229,25 @@ rst_prolog = """
.. |minio-latest| replace:: MINIOLATEST
.. |minio-rpm| replace:: RPMURL
.. |minio-deb| replace:: DEBURL
<<<<<<< HEAD
.. |minio-rpmarm64| replace:: RPMARM64URL
.. |minio-debarm64| replace:: DEBARM64URL
.. |subnet| replace:: `MinIO pricing <https://min.io/pricing?jmp=docs>`__
.. |subnet-short| replace:: `pricing <https://min.io/pricing?jmp=docs>`__
=======
.. |minio-binary| replace:: MINIOURL
.. |minio-rpm-arm64| replace:: RPMARM64URL
.. |minio-deb-arm64| replace:: DEBARM64URL
.. |minio-binary-arm64| replace:: MINIOARM64URL
.. |minio-rpm-ppc64le| replace:: RPMPPC64LEURL
.. |minio-deb-ppc64le| replace:: DEBPPC64LEURL
.. |minio-binary-ppc64le| replace:: MINIOPPC64LEURL
.. |minio-rpms-390x| replace:: RPMS390XURL
.. |minio-debs-390x| replace:: DEBS390XURL
.. |minio-binarys-390x| replace:: MINIOS390XURL
.. |subnet| replace:: `MinIO SUBNET <https://min.io/pricing?jmp=docs>`__
.. |subnet-short| replace:: `SUBNET <https://min.io/pricing?jmp=docs>`__
>>>>>>> 8da23e1 (Attempting to reduce docs to single platform)
.. |SNSD| replace:: :abbr:`SNSD (Single-Node Single-Drive)`
.. |SNMD| replace:: :abbr:`SNMD (Single-Node Multi-Drive)`
.. |MNMD| replace:: :abbr:`MNMD (Multi-Node Multi-Drive)`
@ -348,4 +269,4 @@ rst_prolog = """
.. |rust-sdk-version| replace:: RUSTVERSION
""" % platform_fmt
"""

View File

@ -219,7 +219,7 @@ Grids
Header 1
--------
.. cond:: linux
.. cond:: mindocs
.. include:: /includes/common/common-design.rst

View File

@ -12,13 +12,215 @@ File Transfer Protocol (FTP/SFTP)
.. contents:: Table of Contents
:local:
:depth: 1
:depth: 2
.. cond:: not k8s
.. tab-set::
:class: parent
.. include:: /includes/linux/file-transfer-protocol-not-k8s.rst
.. tab-item:: Kubernetes
:sync: k8s
.. cond:: k8s and not (openshift or eks or gke or aks)
Starting with Operator 5.0.7 and :minio-release:`MinIO Server RELEASE.2023-04-20T17-56-55Z <RELEASE.2023-04-20T17-56-55Z>`, you can use the SSH File Transfer Protocol (SFTP) to interact with the objects on a MinIO Operator Tenant deployment.
SFTP is defined by the Internet Engineering Task Force (IETF) as an extension of SSH 2.0.
It allows file transfer over SSH for use with :ref:`Transport Layer Security (TLS) <minio-tls>` and virtual private network (VPN) applications.
Enabling SFTP does not affect other MinIO features.
.. tab-item:: Baremetal
:sync: baremetal
Starting with :minio-release:`MinIO Server RELEASE.2023-04-20T17-56-55Z <RELEASE.2023-04-20T17-56-55Z>`, you can use the File Transfer Protocol (FTP) to interact with the objects on a MinIO deployment.
You must specifically enable FTP or SFTP when starting the server.
Enabling either server type does not affect other MinIO features.
This page uses the abbreviation FTP throughout, but you can use any of the supported FTP protocols described below.
Supported Protocols
-------------------
.. tab-set::
:class: hidden
.. tab-item:: Kubernetes
:sync: k8s
The MinIO Operator only supports configuring SSH File Transfer Protocol (SFTP).
.. tab-item:: Baremetal
:sync: baremetal
When enabled, MinIO supports FTP access over the following protocols:
- SSH File Transfer Protocol (SFTP)
SFTP is defined by the Internet Engineering Task Force (IETF) as an extension of SSH 2.0.
SFTP allows file transfer over SSH for use with :ref:`Transport Layer Security (TLS) <minio-tls>` and virtual private network (VPN) applications.
Your FTP client must support SFTP.
- File Transfer Protocol over SSL/TLS (FTPS)
FTPS allows for encrypted FTP communication with TLS certificates over the standard FTP communication channel.
FTPS should not be confused with SFTP, as FTPS does not communicate over a Secure Shell (SSH).
Your FTP client must support FTPS.
- File Transfer Protocol (FTP)
Unencrypted file transfer.
MinIO does **not** recommend using unencrypted FTP for file transfer.
Supported Commands
------------------
When enabled, MinIO supports the following SFTP operations:
- ``get``
- ``put``
- ``ls``
- ``mkdir``
- ``rmdir``
- ``delete``
MinIO does not support either ``append`` or ``rename`` operations.
Considerations
--------------
Versioning
~~~~~~~~~~
SFTP clients can only operate on the :ref:`latest version <minio-bucket-versioning>` of an object.
Specifically:
- For read operations, MinIO only returns the latest version of the requested object(s) to the SFTP client.
- For write operations, MinIO applies normal versioning behavior and creates a new object version at the specified namespace.
``rm`` and ``rmdir`` operations create ``DeleteMarker`` objects.
Authentication and Access
~~~~~~~~~~~~~~~~~~~~~~~~~
SFTP access requires the same authentication as any other S3 client.
MinIO supports the following authentication providers:
- :ref:`MinIO IDP <minio-internal-idp>` users and their service accounts
- :ref:`Active Directory/LDAP <minio-external-identity-management-ad-ldap>` users and their service accounts
- :ref:`OpenID/OIDC <minio-external-identity-management-openid>` service accounts
:ref:`STS <minio-security-token-service>` credentials **cannot** access buckets or objects over SFTP.
Authenticated users can access buckets and objects based on the :ref:`policies <minio-policy>` assigned to the user or parent user account.
The SFTP protocol does not require any of the ``admin:*`` :ref:`permissions <minio-policy-mc-admin-actions>`.
You may not perform other MinIO admin actions with SFTP.
Prerequisites
-------------
.. tab-set::
:class: hidden
.. tab-item:: Kubernetes
:sync: k8s
- MinIO Operator v5.0.7 or later.
- Enable an SFTP port (8022) for the server.
- A port to use for the SFTP commands and a range of ports to allow the SFTP server to request to use for the data transfer.
.. tab-item:: Baremetal
:sync: baremetal
- MinIO RELEASE.2023-04-20T17-56-55Z or later.
- Enable an FTP or SFTP port for the server.
- A port to use for the FTP commands and a range of ports to allow the FTP server to request to use for the data transfer.
Procedure
---------
.. tab-set::
:class: hidden
.. tab-item:: Kubernetes
:sync: k8s
.. include:: /includes/k8s/file-transfer-protocol-k8s.rst
.. tab-item:: Baremetal
:sync: baremetal
.. include:: /includes/linux/file-transfer-protocol-not-k8s.rst
.. _minio-certificate-key-file-sftp-k8s:
Connect to MinIO Using SFTP with a Certificate Key File
-------------------------------------------------------
.. versionadded:: RELEASE.2024-05-07T06-41-25Z
MinIO supports mutual TLS (mTLS) certificate-based authentication on SFTP, where both the server and the client verify the authenticity of each other.
This type of authentication requires the following:
1. Public key file for the trusted certificate authority
2. Public key file for the MinIO Server minted and signed by the trusted certificate authority
3. Public key file for the user minted and signed by the trusted certificate authority for the client connecting by SFTP and located in the user's ``.ssh`` folder (or equivalent for the operating system)
The keys must include a `principals list <https://man.openbsd.org/ssh-keygen#CERTIFICATES>`__ of the user(s) that can authenticate with the key:
.. code-block:: console
:class: copyable
ssh-keygen -s ~/.ssh/ca_user_key -I miniouser -n miniouser -V +1h -z 1 miniouser1.pub
- ``-s`` specifies the path to the certificate authority public key to use for generating this key.
The specified public key must have a ``principals`` list that includes this user.
- ``-I`` specifies the key identity for the public key.
- ``-n`` creates the ``user principals`` list for which this key is valid.
You must include the user for which this key is valid, and the user must match the username in MinIO.
- ``-V`` limits the duration for which the generated key is valid.
In this example, the key is valid for one hour.
Adjust the duration for your requirements.
- ``-z`` adds a serial number to the key to distinguish this generated public key from other keys signed by the same certificate authority public key.
MinIO requires specifying the Certificate Authority used to sign the certificates for SFTP access.
Start or restart the MinIO Server and specify the path to the trusted certificate authority's public key using an ``--sftp="trusted-user-ca-key=PATH"`` flag:
.. code-block:: console
:class: copyable
minio server {path-to-server} --sftp="trusted-user-ca-key=/path/to/.ssh/ca_user_key.pub" {...other flags}
When connecting to the MinIO Server with SFTP, the client verifies the MinIO Server's certificate.
The client then passes its own certificate to the MinIO Server.
The MinIO Server verifies the key created above by comparing its value to the known public key from the certificate authority provided at server startup.
Once the MinIO Server verifies the client's certificate, the user can connect to the MinIO server over SFTP:
.. code-block:: bash
:class: copyable
sftp -P <SFTP port> <server IP>
Require service account or LDAP for authentication
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To force authentication to SFTP using LDAP or service account credentials, append a suffix to the username.
Valid suffixes are either ``=ldap`` or ``=svc``.
.. code-block:: console
> sftp -P 8022 my-ldap-user=ldap@[minio@localhost]:/bucket
.. code-block:: console
> sftp -P 8022 my-ldap-user=svc@[minio@localhost]:/bucket
- Replace ``my-ldap-user`` with the username to use.
- Replace ``[minio@localhost]`` with the address of the MinIO server.

View File

@ -46,3 +46,4 @@ MinIO supports the following STS API endpoints:
:glob:
/developers/security-token-service/*
/developers/sts-for-operator

View File

@ -1,53 +0,0 @@
.. _deploy-operator-aks:
=================================================
Deploy MinIO Operator on Azure Kubernetes Service
=================================================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
Overview
--------
`Azure Kubernetes Service <https://azure.microsoft.com/en-us/products/kubernetes-service/#overview>`__ (AKS) is a highly available, secure, and fully managed Kubernetes service from Microsoft Azure.
The MinIO Kubernetes Operator supports deploying MinIO Tenants onto AKS infrastructure using the MinIO Operator Console or `kustomize <https://kustomize.io/>`__ for :minio-git:`YAML-defined deployments <operator/tree/master/examples/kustomization>`.
:minio-web:`Through the AKS Marketplace <product/multicloud-azure-kubernetes-service>`
MinIO maintains an `AKS Marketplace listing <https://azuremarketplace.microsoft.com/en-us/marketplace/apps/minio.minio-object-storage_v1dot1>`__ through which you can register your AKS cluster with |subnet|.
Any MinIO tenant you deploy through Marketplace-connected clusters can take advantage of SUBNET registration, including 24/7 access to MinIO engineers.
This page documents deploying the MinIO Operator through the CLI using Kustomize.
For instructions on deploying the MinIO Operator through the AKS Marketplace, see :minio-web:`Deploy MinIO through AKS <multicloud-azure-kubernetes-service/deploy>`
This documentation assumes familiarity with all referenced Kubernetes and Azure Kubernetes Service concepts, utilities, and procedures.
While this documentation *may* provide guidance for configuring or deploying Kubernetes-related or Azure Kubernetes Service-related resources on a best-effort basis, it is not a replacement for the official :kube-docs:`Kubernetes Documentation <>`.
Prerequisites
-------------
Existing AKS Cluster
~~~~~~~~~~~~~~~~~~~~
This procedure assumes an existing :abbr:`AKS (Azure Kubernetes Service)` cluster onto which you can deploy the MinIO Operator.
The Operator by default deploys pods and services with two replicas each and pod anti-affinity.
The AKS cluster should therefore have at least two nodes available for scheduling Operator pods and services.
While these nodes *may* be the same nodes intended for use by MinIO Tenants, co-locating Operator and Tenant pods may increase the risk of service interruptions due to the loss of any one node.
``kubectl`` Access to the AKS Cluster
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Ensure your host machine has a ``kubectl`` installation compatible with the target AKS cluster.
For guidance on connecting ``kubectl`` to AKS, see :aks-docs:`Install kubectl and configure cluster access <tutorial-kubernetes-deploy-cluster?tabs=azure-cli#connect-to-cluster-using-kubectl>`.
Procedure
---------
The following steps deploy Operator using Kustomize and a ``kustomization.yaml`` file from the MinIO Operator GitHub repository.
.. include:: /includes/common/common-install-operator-kustomize.rst

View File

@ -0,0 +1,155 @@
1. Set the Active Directory / LDAP Configuration Settings
Configure the AD/LDAP provider using one of the following:
* MinIO Client
* Environment variables
* MinIO Console
All methods require starting/restarting the MinIO deployment to apply changes.
The following tabs provide a quick reference for the available configuration methods:
.. tab-set::
.. tab-item:: MinIO Client
MinIO supports specifying the AD/LDAP provider settings using :mc:`mc idp ldap` commands.
For distributed deployments, the :mc:`mc idp ldap` command applies the configuration to all nodes in the deployment.
The following example code sets *all* configuration settings related to configuring an AD/LDAP provider for external identity management.
The minimum *required* settings are:
- :mc-conf:`server_addr <identity_ldap.server_addr>`
- :mc-conf:`lookup_bind_dn <identity_ldap.lookup_bind_dn>`
- :mc-conf:`lookup_bind_password <identity_ldap.lookup_bind_password>`
- :mc-conf:`user_dn_search_base_dn <identity_ldap.user_dn_search_base_dn>`
- :mc-conf:`user_dn_search_filter <identity_ldap.user_dn_search_filter>`
.. code-block:: shell
:class: copyable
mc idp ldap add ALIAS \
server_addr="ldaps.example.net:636" \
lookup_bind_dn="CN=xxxxx,OU=xxxxx,OU=xxxxx,DC=example,DC=net" \
lookup_bind_password="xxxxxxxx" \
user_dn_search_base_dn="DC=example,DC=net" \
user_dn_search_filter="(&(objectCategory=user)(sAMAccountName=%s))" \
group_search_filter= "(&(objectClass=group)(member=%d))" \
group_search_base_dn="ou=MinIO Users,dc=example,dc=net" \
enabled="true" \
tls_skip_verify="off" \
server_insecure=off \
server_starttls="off" \
srv_record_name="" \
comment="Test LDAP server"
For more complete documentation on these settings, see :mc:`mc idp ldap`.
.. admonition:: :mc:`mc idp ldap` recommended
:class: note
:mc:`mc idp ldap` offers additional features and improved validation over :mc-cmd:`mc admin config set` runtime configuration settings.
:mc:`mc idp ldap` supports the same settings as :mc:`mc admin config` and the :mc-conf:`identity_ldap` configuration key.
The :mc-conf:`identity_ldap` configuration key remains available for existing scripts and tools.
.. tab-item:: Environment Variables
MinIO supports specifying the AD/LDAP provider settings using :ref:`environment variables <minio-server-envvar-external-identity-management-ad-ldap>`.
The :mc:`minio server` process applies the specified settings on its next startup.
For distributed deployments, specify these settings across all nodes in the deployment using the *same* values.
Any differences in server configurations between nodes will result in startup or configuration failures.
The following example code sets *all* environment variables related to configuring an AD/LDAP provider for external identity management. The minimum *required* variables are:
- :envvar:`MINIO_IDENTITY_LDAP_SERVER_ADDR`
- :envvar:`MINIO_IDENTITY_LDAP_LOOKUP_BIND_DN`
- :envvar:`MINIO_IDENTITY_LDAP_LOOKUP_BIND_PASSWORD`
- :envvar:`MINIO_IDENTITY_LDAP_USER_DN_SEARCH_BASE_DN`
- :envvar:`MINIO_IDENTITY_LDAP_USER_DN_SEARCH_FILTER`
.. code-block:: shell
:class: copyable
export MINIO_IDENTITY_LDAP_SERVER_ADDR="ldaps.example.net:636"
export MINIO_IDENTITY_LDAP_LOOKUP_BIND_DN="CN=xxxxx,OU=xxxxx,OU=xxxxx,DC=example,DC=net"
export MINIO_IDENTITY_LDAP_USER_DN_SEARCH_BASE_DN="dc=example,dc=net"
export MINIO_IDENTITY_LDAP_USER_DN_SEARCH_FILTER="(&(objectCategory=user)(sAMAccountName=%s))"
export MINIO_IDENTITY_LDAP_LOOKUP_BIND_PASSWORD="xxxxxxxxx"
export MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER="(&(objectClass=group)(member=%d))"
export MINIO_IDENTITY_LDAP_GROUP_SEARCH_BASE_DN="ou=MinIO Users,dc=example,dc=net"
export MINIO_IDENTITY_LDAP_TLS_SKIP_VERIFY="off"
export MINIO_IDENTITY_LDAP_SERVER_INSECURE="off"
export MINIO_IDENTITY_LDAP_SERVER_STARTTLS="off"
export MINIO_IDENTITY_LDAP_SRV_RECORD_NAME=""
export MINIO_IDENTITY_LDAP_COMMENT="LDAP test server"
For complete documentation on these variables, see :ref:`minio-server-envvar-external-identity-management-ad-ldap`
.. tab-item:: MinIO Console
MinIO supports specifying the AD/LDAP provider settings using the :ref:`MinIO Console <minio-console>`.
For distributed deployments, configuring AD/LDAP from the Console applies the configuration to all nodes in the deployment.
.. include:: /includes/common-minio-external-auth.rst
:start-after: start-minio-ad-ldap-console-enable
:end-before: end-minio-ad-ldap-console-enable
#. Restart the MinIO Deployment
You must restart the MinIO deployment to apply the configuration changes.
If you configured AD/LDAP from the MinIO Console, no additional action is required.
The MinIO Console automatically restarts the deployment after saving the new AD/LDAP configuration.
For MinIO Client and environment variable configuration, use the :mc-cmd:`mc admin service restart` command to restart the deployment:
.. code-block:: shell
:class: copyable
mc admin service restart ALIAS
Replace ``ALIAS`` with the :ref:`alias <alias>` of the deployment to restart.
#. Use the MinIO Console to Log In with AD/LDAP Credentials
The MinIO Console supports the full workflow of authenticating to the AD/LDAP provider, generating temporary credentials using the MinIO :ref:`minio-sts-assumerolewithldapidentity` Security Token Service (STS) endpoint, and logging the user into the MinIO deployment.
You can access the Console by opening the root URL for the MinIO cluster. For example, ``https://minio.example.net:9000``.
Once logged in, you can perform any action for which the authenticated user is :ref:`authorized <minio-external-identity-management-ad-ldap-access-control>`.
You can also create :ref:`access keys <minio-idp-service-account>` for supporting applications which must perform operations on MinIO.
Access Keys are long-lived credentials which inherit their privileges from the parent user.
The parent user can further restrict those privileges while creating the service account.
#. Generate S3-Compatible Temporary Credentials using AD/LDAP Credentials
MinIO requires clients to authenticate using :s3-api:`AWS Signature Version 4 protocol <sig-v4-authenticating-requests.html>` with support for the deprecated Signature Version 2 protocol.
Specifically, clients must present a valid access key and secret key to access any S3 or MinIO administrative API, such as ``PUT``, ``GET``, and ``DELETE`` operations.
Applications can generate temporary access credentials as-needed using the :ref:`minio-sts-assumerolewithldapidentity` Security Token Service (STS) API endpoint and AD/LDAP user credentials.
MinIO provides an example Go application :minio-git:`ldap.go <minio/blob/master/docs/sts/ldap.go>` that manages this workflow.
.. code-block:: shell
POST https://minio.example.net?Action=AssumeRoleWithLDAPIdentity
&LDAPUsername=USERNAME
&LDAPPassword=PASSWORD
&Version=2011-06-15
&Policy={}
- Replace the ``LDAPUsername`` with the username of the AD/LDAP user.
- Replace the ``LDAPPassword`` with the password of the AD/LDAP user.
- Replace the ``Policy`` with an inline URL-encoded JSON :ref:`policy <minio-policy>` that further restricts the permissions associated to the temporary credentials.
Omit to use the :ref:`policy whose name matches <minio-external-identity-management-ad-ldap-access-control>` the Distinguished Name (DN) of the AD/LDAP user.
The API response consists of an XML document containing the access key, secret key, session token, and expiration date.
Applications can use the access key and secret key to access and perform operations on MinIO.
See the :ref:`minio-sts-assumerolewithldapidentity` for reference documentation.

View File

@ -0,0 +1,85 @@
.. |KEYCLOAK_URL| replace:: keycloak-url.example.net:8080
.. |MINIO_S3_URL| replace:: minio-url.example.net:9000
.. |MINIO_CONSOLE_URL| replace:: minio-url.example.net:9001
#. Configure or Create a Client for Accessing Keycloak
Authenticate to the Keycloak :guilabel:`Administrative Console` and navigate to :guilabel:`Clients`.
.. include:: /includes/common/common-configure-keycloak-identity-management.rst
:start-after: start-configure-keycloak-client
:end-before: end-configure-keycloak-client
#. Create Client Scope for MinIO Client
Client scopes allow Keycloak to map user attributes as part of the JSON Web Token (JWT) returned in authentication requests.
This allows MinIO to reference those attributes when assigning policies to the user.
This step creates the necessary client scope to support MinIO authorization after successful Keycloak authentication.
.. include:: /includes/common/common-configure-keycloak-identity-management.rst
:start-after: start-configure-keycloak-client-scope
:end-before: end-configure-keycloak-client-scope
#. Apply the Necessary Attribute to Keycloak Users/Groups
You must assign an attribute named ``policy`` to the Keycloak Users or Groups.
Set the value to any :ref:`policy <minio-policy>` on the MinIO deployment.
.. include:: /includes/common/common-configure-keycloak-identity-management.rst
:start-after: start-configure-keycloak-user-group-attributes
:end-before: end-configure-keycloak-user-group-attributes
#. Configure MinIO for Keycloak Authentication
MinIO supports multiple methods for configuring Keycloak authentication:
- Using the MinIO Console
- Using a terminal/shell and the :mc:`mc idp openid` command
- Using environment variables set prior to starting MinIO
.. tab-set::
.. tab-item:: MinIO Console
.. include:: /includes/common/common-configure-keycloak-identity-management.rst
:start-after: start-configure-keycloak-minio-console
:end-before: end-configure-keycloak-minio-console
.. tab-item:: CLI
.. include:: /includes/common/common-configure-keycloak-identity-management.rst
:start-after: start-configure-keycloak-minio-cli
:end-before: end-configure-keycloak-minio-cli
.. tab-item:: Environment Variables
.. include:: /includes/common/common-configure-keycloak-identity-management.rst
:start-after: start-configure-keycloak-minio-envvar
:end-before: end-configure-keycloak-minio-envvar
Restart the MinIO deployment for the changes to apply.
Check the MinIO logs and verify that startup succeeded with no errors related to the OIDC configuration.
If you attempt to log in with the Console, you should now see an SSO button labeled with the configured :guilabel:`Display Name`.
Specify a configured user and attempt to log in.
MinIO should automatically redirect you to the Keycloak login entry.
Upon successful authentication, Keycloak should redirect you back to the MinIO Console using either the originating Console URL *or* the :guilabel:`Redirect URI` if configured.
#. Generate Application Credentials using the Security Token Service (STS)
.. include:: /includes/common/common-configure-keycloak-identity-management.rst
:start-after: start-configure-keycloak-sts
:end-before: end-configure-keycloak-sts
#. Next Steps
Applications should implement the :ref:`STS AssumeRoleWithWebIdentity <minio-sts-assumerolewithwebidentity>` flow using their :ref:`SDK <minio-drivers>` of choice.
When STS credentials expire, applications should have logic in place to regenerate the JWT token, STS token, and MinIO credentials before retrying and continuing operations.
Alternatively, users can generate :ref:`access keys <minio-id-access-keys>` through the MinIO Console for the purpose of creating long-lived API-key like access using their Keycloak credentials.

View File

@ -0,0 +1,156 @@
1. Set the OpenID Configuration Settings
You can configure the :abbr:`OIDC (OpenID Connect)` provider using either
environment variables *or* server runtime configuration settings. Both
methods require starting/restarting the MinIO deployment to apply changes. The
following tabs provide a quick reference of all required and optional
environment variables and configuration settings respectively:
.. tab-set::
.. tab-item:: Environment Variables
MinIO supports specifying the :abbr:`OIDC (OpenID Connect)` provider
settings using :ref:`environment variables
<minio-server-envvar-external-identity-management-openid>`. The
:mc:`minio server` process applies the specified settings on its next
startup. For distributed deployments, specify these settings across all
nodes in the deployment using the *same* values consistently.
The following example code sets *all* environment variables related to
configuring an :abbr:`OIDC (OpenID Connect)` provider for external
identity management. The minimum *required* variable is
:envvar:`MINIO_IDENTITY_OPENID_CONFIG_URL`:
.. code-block:: shell
:class: copyable
export MINIO_IDENTITY_OPENID_CONFIG_URL="https://openid-provider.example.net/.well-known/openid-configuration"
export MINIO_IDENTITY_OPENID_CLIENT_ID="<string>"
export MINIO_IDENTITY_OPENID_CLIENT_SECRET="<string>"
export MINIO_IDENTITY_OPENID_CLAIM_NAME="<string>"
export MINIO_IDENTITY_OPENID_CLAIM_PREFIX="<string>"
export MINIO_IDENTITY_OPENID_SCOPES="<string>"
export MINIO_IDENTITY_OPENID_REDIRECT_URI="<string>"
export MINIO_IDENTITY_OPENID_COMMENT="<string>"
Replace the ``MINIO_IDENTITY_OPENID_CONFIG_URL`` with the URL endpoint of
the :abbr:`OIDC (OpenID Connect)` provider discovery document.
For complete documentation on these variables, see
:ref:`minio-server-envvar-external-identity-management-openid`
.. tab-item:: Configuration Settings
MinIO supports specifying the :abbr:`OIDC (OpenID Connect)` provider
settings using :mc-conf:`configuration settings <identity_openid>`. The
:mc:`minio server` process applies the specified settings on its next
startup. For distributed deployments, the :mc:`mc admin config`
command applies the configuration to all nodes in the deployment.
The following example code sets *all* configuration settings related to
configuring an :abbr:`OIDC (OpenID Connect)` provider for external
identity management. The minimum *required* setting is
:mc-conf:`identity_openid config_url <identity_openid.config_url>`:
.. code-block:: shell
:class: copyable
mc admin config set ALIAS/ identity_openid \
config_url="https://openid-provider.example.net/.well-known/openid-configuration" \
client_id="<string>" \
client_secret="<string>" \
claim_name="<string>" \
claim_prefix="<string>" \
scopes="<string>" \
redirect_uri="<string>" \
comment="<string>"
Replace the ``config_url`` with the URL endpoint of the
:abbr:`OIDC (OpenID Connect)` provider discovery document.
For more complete documentation on these settings, see
:mc-conf:`identity_openid`.
#. Restart the MinIO Deployment
You must restart the MinIO deployment to apply the configuration changes.
Use the :mc-cmd:`mc admin service restart` command to restart the deployment.
.. code-block:: shell
:class: copyable
mc admin service restart ALIAS
Replace ``ALIAS`` with the :ref:`alias <alias>` of the deployment to
restart.
#. Use the MinIO Console to Log In with OIDC Credentials
The MinIO Console supports the full workflow of authenticating to the
:abbr:`OIDC (OpenID Connect)` provider, generating temporary credentials using
the MinIO :ref:`minio-sts-assumerolewithwebidentity` Security Token Service
(STS) endpoint, and logging the user into the MinIO deployment.
Starting in :minio-release:`RELEASE.2021-07-08T01-15-01Z`, the MinIO Console is
embedded in the MinIO server. You can access the Console by opening the root URL
for the MinIO cluster. For example, ``https://minio.example.net:9000``.
From the Console, click :guilabel:`BUTTON` to begin the OpenID authentication
flow.
Once logged in, you can perform any action for which the authenticated
user is :ref:`authorized
<minio-external-identity-management-openid-access-control>`.
You can also create :ref:`access keys <minio-idp-service-account>` for
supporting applications which must perform operations on MinIO. Access Keys
are long-lived credentials which inherit their privileges from the parent user.
The parent user can further restrict those privileges while creating the service
account.
#. Generate S3-Compatible Temporary Credentials using OIDC Credentials
MinIO requires clients authenticate using :s3-api:`AWS Signature Version 4
protocol <sig-v4-authenticating-requests.html>` with support for the deprecated
Signature Version 2 protocol. Specifically, clients must present a valid access
key and secret key to access any S3 or MinIO administrative API, such as
``PUT``, ``GET``, and ``DELETE`` operations.
Applications can generate temporary access credentials as-needed using the
:ref:`minio-sts-assumerolewithwebidentity` Security Token Service (STS)
API endpoint and the JSON Web Token (JWT) returned by the
:abbr:`OIDC (OpenID Connect)` provider.
The application must provide a workflow for logging into the
:abbr:`OIDC (OpenID Connect)` provider and retrieving the
JSON Web Token (JWT) associated to the authentication session. Defer to the
provider documentation for obtaining and parsing the JWT token after successful
authentication. MinIO provides an example Go application
:minio-git:`web-identity.go <minio/blob/master/docs/sts/web-identity.go>` with
an example of managing this workflow.
Once the application retrieves the JWT token, use the
``AssumeRoleWithWebIdentity`` endpoint to generate the temporary credentials:
.. code-block:: shell
:class: copyable
POST https://minio.example.net?Action=AssumeRoleWithWebIdentity
&WebIdentityToken=TOKEN
&Version=2011-06-15
&DurationSeconds=86400
&Policy=Policy
- Replace the ``TOKEN`` with the JWT token returned in the previous step.
- Replace the ``DurationSeconds`` with the duration in seconds until the temporary credentials expire. The example above specifies a period of ``86400`` seconds, or 24 hours.
- Replace the ``Policy`` with an inline URL-encoded JSON :ref:`policy <minio-policy>` that further restricts the permissions associated to the temporary credentials.
Omit to use the policy associated to the OpenID user :ref:`policy claim <minio-external-identity-management-openid-access-control>`.
The API response consists of an XML document containing the
access key, secret key, session token, and expiration date. Applications
can use the access key and secret key to access and perform operations on
MinIO.
See the :ref:`minio-sts-assumerolewithwebidentity` for reference documentation.

View File

@ -3,7 +3,7 @@
The following tabs provide examples of installing MinIO onto 64-bit Linux operating systems using RPM, DEB, or binary.
The RPM and DEB packages automatically install MinIO to the necessary system paths and create a ``minio`` service for ``systemctl``.
MinIO strongly recommends using the RPM or DEB installation routes.
To update deployments managed using ``systemctl``, see :ref:`minio-upgrade-systemctl`.
To update deployments managed using ``systemctl``, see :ref:`minio-upgrade`.
.. tab-set::

View File

@ -1,35 +0,0 @@
The Operator Console service does not automatically bind or expose itself for external access on the Kubernetes cluster.
Instead, you must configure a network control plane component, such as a load balancer or ingress, to grant external access.
For testing purposes or short-term access, expose the Operator Console service through a NodePort using the following patch:
.. code-block:: shell
:class: copyable
kubectl patch service -n minio-operator console -p '
{
"spec": {
"ports": [
{
"name": "http",
"port": 9090,
"protocol": "TCP",
"targetPort": 9090,
"nodePort": 30090
},
{
"name": "https",
"port": 9443,
"protocol": "TCP",
"targetPort": 9443,
"nodePort": 30433
}
],
"type": "NodePort"
}
}'
After applying the patch, you can access the service through port ``30090`` (HTTP) or ``30433`` (HTTPS) on any of the Kubernetes worker nodes.
Append the ``nodePort`` value to the externally-accessible IP address of a worker node in your Kubernetes cluster.
Use the appropriate ``http`` or ``https`` port depending on whether you deployed Operator Console with TLS.

View File

@ -1,60 +0,0 @@
.. dropdown:: Port Forwarding
:open:
The :ref:`Operator Console service <minio-operator-console>` does not automatically bind or expose itself for external access on the Kubernetes cluster.
Instead, configure a network control plane component, such as a load balancer or ingress, to grant external access.
.. cond:: k8s and not openshift
For testing purposes or short-term access, expose the Operator Console service through a NodePort using the following patch:
.. code-block:: shell
:class: copyable
kubectl patch service -n minio-operator console -p '
{
"spec": {
"ports": [
{
"name": "http",
"port": 9090,
"protocol": "TCP",
"targetPort": 9090,
"nodePort": 30090
},
{
"name": "https",
"port": 9443,
"protocol": "TCP",
"targetPort": 9443,
"nodePort": 30433
}
],
"type": "NodePort"
}
}'
The patch command should output ``service/console patched``.
You can now access the service through ports ``30433`` (HTTPS) or ``30090`` (HTTP) on any of your Kubernetes worker nodes.
For example, a Kubernetes cluster with the following Operator nodes might be accessed at ``https://172.18.0.2:30433``:
.. code-block:: shell
kubectl get nodes -o custom-columns=IP:.status.addresses[:]
IP
map[address:172.18.0.5 type:InternalIP],map[address:k3d-MINIO-agent-3 type:Hostname]
map[address:172.18.0.6 type:InternalIP],map[address:k3d-MINIO-agent-2 type:Hostname]
map[address:172.18.0.2 type:InternalIP],map[address:k3d-MINIO-server-0 type:Hostname]
map[address:172.18.0.4 type:InternalIP],map[address:k3d-MINIO-agent-1 type:Hostname]
map[address:172.18.0.3 type:InternalIP],map[address:k3d-MINIO-agent-0 type:Hostname]
Use the following command to retrieve the JWT token necessary for logging into the Operator Console:
.. code-block:: shell
:class: copyable
kubectl get secret/console-sa-secret -n minio-operator -o json | jq -r '.data.token' | base64 -d
If your local host does not have the ``jq`` utility installed, you can run the ``kubectl`` part of this command (before ``| jq``) and locate the ``data.token`` section of the output.

View File

@ -1,147 +0,0 @@
.. _minio-installation:
========================
Install and Deploy MinIO
========================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
.. meta::
:description: MinIO Deployment Topologies and Installation Instructions
:keywords: MinIO, Deploy, Architecture, Topology, Distributed, Replication, Install
.. container:: extlinks-video
- `Installing and Running MinIO on Linux <https://www.youtube.com/watch?v=74usXkZpNt8&list=PLFOIsHSSYIK1BnzVY66pCL-iJ30Ht9t1o>`__
- `Object Storage Essentials <https://www.youtube.com/playlist?list=PLFOIsHSSYIK3WitnqhqfpeZ6fRFKHxIr7>`__
- `How to Connect to MinIO with JavaScript <https://www.youtube.com/watch?v=yUR4Fvx0D3E&list=PLFOIsHSSYIK3Dd3Y_x7itJT1NUKT5SxDh&index=5>`__
MinIO is a software-defined high performance distributed object storage server.
You can run MinIO on consumer or enterprise-grade hardware and a variety
of operating systems and architectures.
All MinIO deployments implement :ref:`Erasure Coding <minio-erasure-coding>` backends.
You can deploy MinIO using one of the following topologies:
.. _minio-installation-comparison:
:ref:`Single-Node Single-Drive <minio-snsd>` (SNSD or "Standalone")
Local development and evaluation with no/limited reliability
:ref:`Single-Node Multi-Drive <minio-snmd>` (SNMD or "Standalone Multi-Drive")
Workloads with lower performance, scale, and capacity requirements
Drive-level reliability with configurable tolerance for loss of up to 1/2 all drives
Evaluation of multi-drive topologies and failover behavior.
:ref:`Multi-Node Multi-Drive <minio-mnmd>` (MNMD or "Distributed")
Enterprise-grade high-performance object storage
Multi Node/Drive level reliability with configurable tolerance for loss of up to 1/2 all nodes/drives
Primary storage for AI/ML, Distributed Query, Analytics, and other Data Lake components
Scalable for Petabyte+ workloads - both storage capacity and performance
.. cond:: macos or windows
.. note::
Use |platform|-based MinIO deployments for early development and evaluation.
MinIO provides no guarantee of support for :abbr:`SNMD (Single-Node Multi-Drive)` or :abbr:`MNMD (Multi-Node Multi-Drive)` topologies on |platform|.
MinIO strongly recommends :minio-docs:`Linux (RHEL, Ubuntu) <minio/linux/index.html>` or :minio-docs:`Kubernetes (Upstream, OpenShift) <minio/kubernetes/upstream/index.html>` for long-term development and production environments.
Site Replication
----------------
Site replication expands the features of bucket replication to keep IAM settings, security tokens, access keys, and bucket configurations consistent across all sites.
:ref:`Site replication <minio-site-replication-overview>` links multiple MinIO deployments together and keeps the buckets, objects, and Identity and Access Management (IAM) settings in sync across all connected sites.
.. include:: /includes/common-replication.rst
:start-after: start-mc-admin-replicate-what-replicates
:end-before: end-mc-admin-replicate-what-replicates
.. cond:: macos or windows
MinIO does not recommend using |platform| hosts for site replication outside of early development, evaluation, or general experimentation.
For production, use :minio-docs:`Linux <minio/linux/operations/install-deploy-manage/multi-site-replication.html>` or :minio-docs:`Kubernetes <minio/kubernetes/upstream/operations/install-deploy-manage/multi-site-replication.html>`.
What Does Not Replicate?
~~~~~~~~~~~~~~~~~~~~~~~~
Not everything replicates across sites.
.. include:: /includes/common-replication.rst
:start-after: start-mc-admin-replicate-what-does-not-replicate
:end-before: end-mc-admin-replicate-what-does-not-replicate
.. _minio-installation-platform-support:
Platform Support
----------------
.. cond:: linux
MinIO provides builds of the MinIO server (:mc:`minio`) and the
MinIO :abbr:`CLI (Command Line Interface)` (:mc:`mc`) for the following
platforms.
- Red Hat Enterprise Linux 8.5+ (including all binary-compatible RHEL alternatives)
- Ubuntu 18.04+
MinIO provides builds for the following architectures:
- AMD64
- ARM64
- PowerPC 64 LE
- S390X
.. cond:: macos
MinIO recommends non-EOL macOS versions (10.14+).
For unlisted platforms or architectures, please reach out to MinIO at
hello@min.io for additional support and guidance. You can build MinIO from
:minio-git:`source <minio/#install-from-source>` and
`cross-compile
<https://golang.org/doc/install/source#bootstrapFromCrosscompiledSource>`__
for your platform and architecture combo. MinIO generally does not recommend
source-based installations in production environments.
.. cond:: linux
.. toctree::
:titlesonly:
:hidden:
/operations/install-deploy-manage/deploy-minio-single-node-single-drive
/operations/install-deploy-manage/deploy-minio-single-node-multi-drive
/operations/install-deploy-manage/deploy-minio-multi-node-multi-drive
/operations/install-deploy-manage/multi-site-replication
.. cond:: windows
.. toctree::
:titlesonly:
:hidden:
/operations/install-deploy-manage/deploy-minio-single-node-single-drive
.. cond:: macos
.. toctree::
:titlesonly:
:hidden:
/operations/install-deploy-manage/deploy-minio-single-node-single-drive
/operations/install-deploy-manage/deploy-minio-single-node-multi-drive
/operations/install-deploy-manage/multi-site-replication

View File

@ -1,96 +0,0 @@
.. _minio-installation:
========================
Install and Deploy MinIO
========================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
.. container:: extlinks-video
- `Installing and Running MinIO: Overview <https://youtu.be/mg9NRR6Js1s?ref=docs>`__
- `Installing and Running MinIO: Installation Lab <https://youtu.be/Z0FtabDUPtU?ref=docs>`__
- `Installing and Running MinIO: Docker Compose Overview <https://youtu.be/FtJA3TmjaJQ?ref=docs>`__
- `Installing and Running MinIO: Docker Compose Lab <https://youtu.be/tRlEctAwkk8?ref=docs>`__
MinIO is a software-defined high performance distributed object storage server.
You can run MinIO on consumer or enterprise-grade hardware and a variety of operating systems and architectures.
MinIO supports three deployment topologies:
Single-Node Single-Drive (SNSD or "Standalone")
A single MinIO server with a single storage volume or folder.
|SNSD| deployments provide no failover protections. Drive-level reliability and failover depend on the underlying storage volume.
|SNSD| deployments are best suited for evaluation and initial development of applications using MinIO for object storage.
|SNSD| deployments implement a zero-parity erasure coding backend and include support for the following erasure-coding dependent features:
- :ref:`Versioning <minio-bucket-versioning>`
- :ref:`Object Locking / Retention <minio-object-retention>`
Single-Node Multi-Drive (SNMD or "Standalone Multi-Drive")
A single MinIO server with four or more storage volumes.
|SNMD| deployments provide drive-level reliability and failover only.
Multi-Node Multi-Drive (MNMD or "Distributed")
Multiple MinIO servers with at least four drives across all servers.
The distributed |MNMD| topology supports production-grade object storage with drive and node-level availability and resiliency.
.. note::
This documentation provides instructions for |SNSD| and |SNMD| for supporting local development and evaluation of MinIO on a single host machine **only**.
For |MNMD| deployments, use the MinIO Kubernetes Operator to :minio-docs:`deploy and manage MinIO tenants in a containerized and orchestrated environment <minio/kubernetes/upstream/operations/installation.html>`.
Site Replication
----------------
:ref:`Site replication <minio-site-replication-overview>` links multiple MinIO deployments together and keeps the buckets, objects, and Identity and Access Management (IAM) settings in sync across all connected sites.
.. include:: /includes/common-replication.rst
:start-after: start-mc-admin-replicate-what-replicates
:end-before: end-mc-admin-replicate-what-replicates
.. important::
MinIO does not recommend using |platform| hosts for site replication outside of early development, evaluation, or general experimentation.
For production, use :minio-docs:`Kubernetes <minio/kubernetes/upstream/operations/install-deploy-manage/multi-site-replication.html>` for an orchestrated container environment.
What Does Not Replicate?
~~~~~~~~~~~~~~~~~~~~~~~~
Not everything replicates across sites.
.. include:: /includes/common-replication.rst
:start-after: start-mc-admin-replicate-what-does-not-replicate
:end-before: end-mc-admin-replicate-what-does-not-replicate
.. _minio-installation-platform-support:
Platform Support
----------------
MinIO provides container images at the following repositories:
- https://hub.docker.com/r/minio/minio
- https://quay.io/repository/minio/minio?tab=info
.. versionchanged:: RELEASE.2022-12-02T19-19-22Z
These images include the :ref:`MinIO Client <minio-client>` command line tool built in for container-level debugging.
However, to regularly interact with a container MinIO install, :ref:`install the MinIO Client <mc-install>` on your computer and define an :mc:`alias <mc alias set>` to the container instead.
Use of MinIO images from any other repository, host, or organization is at your own risk.
The :ref:`Single-Node Single-Drive <minio-snsd>` and :ref:`Single-Node Multi-Drive <minio-snmd>` tutorials provide instructions for the `Docker <https://www.docker.com/>`__ and :podman-docs:`Podman <>` container managers.
.. toctree::
:titlesonly:
:hidden:
/operations/install-deploy-manage/deploy-minio-single-node-single-drive
/operations/install-deploy-manage/deploy-minio-single-node-multi-drive

View File

@ -1,142 +0,0 @@
MinIO uses an update-then-restart methodology for upgrading a deployment to a newer release:
1. Update the container MinIO image with the newer release.
2. Restart the container.
This procedure does not require taking downtime and is non-disruptive to ongoing operations.
Considerations
--------------
Upgrades Are Non-Disruptive
~~~~~~~~~~~~~~~~~~~~~~~~~~~
MinIO's upgrade-then-restart procedure does *not* require taking downtime or scheduling a maintenance period.
MinIO restarts are fast, such that restarting all server processes in parallel typically completes in a few seconds.
MinIO operations are atomic and strictly consistent, such that applications using MinIO or S3 SDKs can rely on the built-in :aws-docs:`transparent retry <general/latest/gr/api-retries.html>` without further client-side logic.
This ensures upgrades are non-disruptive to ongoing operations.
Check Release Notes
~~~~~~~~~~~~~~~~~~~
MinIO publishes :minio-git:`Release Notes <minio/releases>` for your reference as part of identifying the changes applied in each release.
Review the associated release notes between your current MinIO version and the newer release so you have a complete view of any changes.
Pay particular attention to any releases that are *not* backwards compatible.
You cannot trivially downgrade from any such release.
Procedure
---------
You can run the ``podman container inspect`` or ``docker inspect`` command to inspect the container and validate the current container image:
.. code-block:: shell
:class: copyable
# For docker, use docker inspect
podman container inspect --format='{{.Config.Image}}' CONTAINER_NAME
The following output indicates the container was created using the most recent stable image tag:
.. code-block:: shell
quay.io/minio/minio:latest
Use the :ref:`minio-upgrade-latest-tag` steps to upgrade your container.
The following output indicates the container was created using a specific image tag:
.. code-block:: shell
quay.io/minio/minio:RELEASE.2023-07-21T21-12-44Z
Use the :ref:`minio-upgrade-specific-tag` steps to upgrade your container.
.. _minio-upgrade-latest-tag:
Upgrade Containers using Latest Image Tag
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1. Update your image registry
Pull the latest stable MinIO image for the configured image repository:
.. code-block:: shell
:class: copyable
# For docker, use docker pull
podman pull quay.io/minio/minio:latest
#. Restart the container
You must restart the container to load the new image binary for use by MinIO:
.. code-block:: shell
:class: copyable
# For docker, use docker restart
podman container restart CONTAINER_NAME
#. Validate the Upgrade
Use the :mc:`mc admin info` command to check that the MinIO container is online, operational, and reflects the installed MinIO version.
#. Update MinIO Client
You should upgrade your :mc:`mc` binary to match or closely follow the MinIO server release.
You can use the :mc:`mc update` command to update the binary to the latest stable release:
.. code-block:: shell
:class: copyable
mc update
.. _minio-upgrade-specific-tag:
Upgrade Containers using Specific Image Tag
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1. Update your local image registry
Pull the desired image you want to use for updating the container.
The following example uses the latest stable version of MinIO:
.. code-block:: shell
:class: copyable
:substitutions:
# For docker, use docker pull
podman pull quay.io/minio/minio:|minio-tag|
#. Modify the container start script or configuration
Specify the new MinIO tag to the container start script or configuration.
For Docker, this might be the Compose file used to start MinIO.
For Podman, this might be a YAML file used to create the container or pod.
Ensure the ``image: <VALUE>`` matches the newly pulled image tag.
#. Restart or re-create the container
If you started the container using CLI commands, you may need to completely stop, remove, and re-create the container.
Use a script to perform this procedure to minimize potential downtime.
For Docker, this might require running ``docker compose restart``.
#. Validate the Upgrade
Use the :mc:`mc admin info` command to check that the MinIO container is online, operational, and reflects the installed MinIO version.
#. Update MinIO Client
You should upgrade your :mc:`mc` binary to match or closely follow the MinIO server release.
You can use the :mc:`mc update` command to update the binary to the latest stable release:
.. code-block:: shell
:class: copyable
mc update

View File

@ -1,58 +0,0 @@
.. _deploy-operator-eks:
==========================================================
Deploy MinIO Operator on Amazon Elastic Kubernetes Service
==========================================================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
Overview
--------
:eks-docs:`Amazon® Elastic Kubernetes Service® <what-is-eks.html>` (EKS) is a managed Kubernetes service that lets you run Kubernetes on AWS without needing to install, operate, or maintain your own Kubernetes control plane.
The MinIO Kubernetes Operator supports deploying MinIO Tenants onto EKS infrastructure using the MinIO Operator Console or by using `kustomize <https://kustomize.io/>`__ for :minio-git:`YAML-defined deployments <operator/tree/master/examples/kustomization>`.
MinIO supports the following methods for installing the MinIO Operator onto your :abbr:`EKS (Elastic Kubernetes Service)` clusters:
:minio-web:`Through the AWS Marketplace <product/multicloud-elastic-kubernetes-service>`
MinIO maintains an `AWS Marketplace listing <https://aws.amazon.com/marketplace/pp/prodview-smchi7bcs4nn4>`__ through which you can register your EKS cluster with |subnet|.
Any tenant you deploy through Marketplace-connected clusters can take advantage of SUBNET registration, including 24/7 direct access to MinIO engineers.
This page documents deploying the MinIO Operator through the CLI using Kustomize.
For instructions on deploying the MinIO Operator through the AWS Marketplace, see :minio-web:`Deploy MinIO through EKS <product/multicloud-elastic-kubernetes-service/deploy>`
This documentation assumes familiarity with all referenced Kubernetes and Elastic Kubernetes Service concepts, utilities, and procedures.
While this documentation *may* provide guidance for configuring or deploying Kubernetes-related or Elastic Kubernetes Service-related resources on a best-effort basis, it is not a replacement for the official :kube-docs:`Kubernetes Documentation <>`.
Prerequisites
-------------
Existing EKS Cluster
~~~~~~~~~~~~~~~~~~~~
This procedure assumes an existing :abbr:`EKS (Elastic Kubernetes Service)` cluster onto which you can deploy the MinIO Operator.
The Operator by default deploys pods and services with two replicas each and pod anti-affinity.
The EKS cluster should therefore have at least two nodes available for scheduling Operator pods and services.
While these nodes *may* be the same nodes intended for use by MinIO Tenants, co-locating Operator and Tenant pods may increase the risk of service interruptions due to the loss of any one node.
``kubectl`` Access to the EKS Cluster
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Ensure your host machine has a ``kubectl`` installation compatible with the target EKS cluster.
For guidance on connecting ``kubectl`` to EKS, see :aws-docs:`Creating or updating a kubeconfig file for an Amazon EKS cluster <eks/latest/userguide/create-kubeconfig.html>`.
Your ``kubectl`` configuration must include authentication as a user with the correct permissions.
MinIO provides an example IAM policy for Marketplace-based installations in the MinIO Operator :minio-git:`github repository <marketplace/blob/master/eks/iam-policy.json>`.
You can use this policy as a baseline for manual Operator installations.
Procedure
---------
The following steps deploy Operator using Kustomize and a ``kustomization.yaml`` file from the MinIO Operator GitHub repository.
.. include:: /includes/common/common-install-operator-kustomize.rst

View File

@ -1,56 +0,0 @@
.. _deploy-operator-gke:
=================================================
Deploy MinIO Operator on Google Kubernetes Engine
=================================================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
Overview
--------
`Google Kubernetes Engine <https://cloud.google.com/kubernetes-engine?ref=minio-docs>`__ (GKE) offers a highly automated, secure, and fully managed Kubernetes platform.
The MinIO Kubernetes Operator supports deploying MinIO Tenants onto GKE infrastructure using the MinIO Operator Console or `kustomize <https://kustomize.io/>`__ for :minio-git:`YAML-defined deployments <operator/tree/master/examples/kustomization>`.
:minio-web:`Through the GKE Marketplace <product/multicloud-google-kubernetes-service>`
   MinIO maintains a `GKE Marketplace listing <https://console.cloud.google.com/marketplace/product/minio-inc-public/minio-enterprise?pli=1&project=peak-essence-171622>`__ through which you can register your GKE cluster with |subnet|.
Any MinIO tenant you deploy through Marketplace-connected clusters can take advantage of SUBNET registration, including 24/7 direct access to MinIO engineers.
Using the MinIO ``kubectl`` Plugin
MinIO provides a ``kubectl`` plugin for installing and managing the MinIO Operator and Tenants through a terminal or shell (CLI) environment.
You can manually register these tenants with |subnet| at any time.
This page documents deploying the MinIO Operator through the CLI using Kustomize.
For instructions on deploying the MinIO Operator through the GKE Marketplace, see :minio-web:`Deploy MinIO through GKE <product/multicloud-google-kubernetes-service/deploy>`
This documentation assumes familiarity with all referenced Kubernetes and Google Kubernetes Engine concepts, utilities, and procedures.
While this documentation *may* provide guidance for configuring or deploying Kubernetes-related or Google Kubernetes Engine-related resources on a best-effort basis, it is not a replacement for the official :kube-docs:`Kubernetes Documentation <>`.
Prerequisites
-------------
Existing GKE Cluster
~~~~~~~~~~~~~~~~~~~~
This procedure assumes an existing :abbr:`GKE (Google Kubernetes Engine)` cluster onto which you can deploy the MinIO Operator.
The Operator by default deploys pods and services with two replicas each and pod anti-affinity.
The GKE cluster should therefore have at least two nodes available for scheduling Operator pods and services.
While these nodes *may* be the same nodes intended for use by MinIO Tenants, co-locating Operator and Tenant pods may increase the risk of service interruptions due to the loss of any one node.
``kubectl`` Access to the GKE Cluster
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Ensure your host machine has a ``kubectl`` installation compatible with the target GKE cluster.
For guidance on connecting ``kubectl`` to GKE, see :gke-docs:`Install kubectl and configure cluster access <how-to/cluster-access-for-kubectl>`.
Procedure
---------
The following steps deploy Operator using Kustomize and a ``kustomization.yaml`` file from the MinIO Operator GitHub repository.
.. include:: /includes/common/common-install-operator-kustomize.rst

View File

@ -1,92 +1,5 @@
.. versionadded:: Operator v5.0.7
Overview
--------
Starting with Operator 5.0.7 and :minio-release:`MinIO Server RELEASE.2023-04-20T17-56-55Z <RELEASE.2023-04-20T17-56-55Z>`, you can use the SSH File Transfer Protocol (SFTP) to interact with the objects on a MinIO Operator Tenant deployment.
SFTP is defined by the Internet Engineering Task Force (IETF) as an extension of SSH 2.0.
It allows file transfer over SSH for use with :ref:`Transport Layer Security (TLS) <minio-tls>` and virtual private network (VPN) applications.
Enabling SFTP does not affect other MinIO features.
Supported Commands
~~~~~~~~~~~~~~~~~~
When enabled, MinIO supports the following SFTP operations:
- ``get``
- ``put``
- ``ls``
- ``mkdir``
- ``rmdir``
- ``delete``
MinIO does not support either ``append`` or ``rename`` operations.
MinIO Operator only supports the SFTP file transfer protocol.
Other protocols, such as FTP, are not supported for accessing Tenants.
Considerations
--------------
Versioning
~~~~~~~~~~
SFTP clients can only operate on the :ref:`latest version <minio-bucket-versioning>` of an object.
Specifically:
- For read operations, MinIO only returns the latest version of the requested object(s) to the SFTP client.
- For write operations, MinIO applies normal versioning behavior and creates a new object version at the specified namespace.
``rm`` and ``rmdir`` operations create ``DeleteMarker`` objects.
Authentication and Access
~~~~~~~~~~~~~~~~~~~~~~~~~
SFTP access requires the same authentication as any other S3 client.
MinIO supports the following authentication providers:
- :ref:`MinIO IDP <minio-internal-idp>` users and their service accounts
- :ref:`Active Directory/LDAP <minio-external-identity-management-ad-ldap>` users and their service accounts
- :ref:`OpenID/OIDC <minio-external-identity-management-openid>` service accounts
- :ref:`Certificate Key File <minio-certificate-key-file-sftp-k8s>`
:ref:`STS <minio-security-token-service>` credentials **cannot** access buckets or objects over SFTP.
Authenticated users can access buckets and objects based on the :ref:`policies <minio-policy>` assigned to the user or parent user account.
The SFTP protocol does not require any of the ``admin:*`` :ref:`permissions <minio-policy-mc-admin-actions>`.
You may not perform other MinIO admin actions with SFTP.
Prerequisites
-------------
- MinIO Operator v5.0.7 or later.
- Enable an SFTP port (8022) for the server.
- A port to use for the SFTP commands and a range of ports to allow the SFTP server to request to use for the data transfer.
Procedure
---------
#. Enable SFTP for the desired Tenant:
.. tab-set::
.. tab-item:: Operator Console
- In the Operator Console, click on the Tenant for which to enable SFTP.
- In the :guilabel:`Configuration` tab, toggle :guilabel:`SFTP` to :guilabel:`Enabled`.
- Click :guilabel:`Save`.
- Click :guilabel:`Restart` to restart MinIO and apply your changes.
.. tab-item:: Kubectl
Use the following Kubectl command to edit the Tenant YAML configuration:
.. code-block:: yaml
@ -102,6 +15,8 @@ Procedure
spec:
configuration:
name: my-tenant-env-configuration
credsSecret:
name: my-tenant-secret
exposeServices:
console: true
minio: true
@ -115,39 +30,38 @@ Procedure
#. If needed, configure ingress for the SFTP port according to your local policies.
#. Validate the configuration
The following ``kubectl get`` command uses `yq <https://github.com/mikefarah/yq/#install>`__ to display the value of ``enableSFTP``, indicating whether SFTP is enabled:
.. code-block:: console
:class: copyable
kubectl get tenants/my-tenant -n my-tenant-ns -o yaml | yq '.spec.features'
Replace ``my-tenant`` and ``my-tenant-ns`` with the desired Tenant and namespace.
If SFTP is enabled, the output resembles the following:
.. code-block:: console
enableSFTP: true
#. Use your preferred SFTP client to connect to the MinIO deployment.
You must connect as a user whose :ref:`policies <minio-policy>` allow access to the desired buckets and objects.
The specifics of connecting to the MinIO deployment depend on your SFTP client.
Refer to the documentation for your client.
The following example connects to the MinIO Tenant SFTP server forwarded to the local host system, and lists the contents of a bucket named ``runner``.
Examples
--------
.. code-block:: console
The following examples use the `SFTP CLI client <https://linux.die.net/man/1/sftp>`__ on a Linux system.
Connect to MinIO Using SFTP
~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following example connects to an SFTP server, lists the contents of a bucket named ``test-bucket``, and downloads an object.
.. code-block:: console
sftp -P 8022 my-access-key@localhost
my-access-key@localhost's password:
> sftp -P 8022 minio@localhost
minio@localhost's password:
Connected to localhost.
sftp> ls
test-bucket
sftp> ls test-bucket
test-bucket/test-file.txt
sftp> get test-bucket/test-file.txt
Fetching /test-bucket/test-file.txt to test-file.txt
test-file.txt 100% 6 1.3KB/s 00:00
Check if SFTP is Enabled for a Tenant
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
sftp> ls runner/
chunkdocs testdir
The following ``kubectl get`` command uses `yq <https://github.com/mikefarah/yq/#install>`__ to display the value of ``enableSFTP``, indicating whether SFTP is enabled:
@ -164,73 +78,3 @@ If SFTP is enabled, the output resembles the following:
enableSFTP: true
.. _minio-certificate-key-file-sftp-k8s:
Connect to MinIO Using SFTP with a Certificate Key File
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: RELEASE.2024-05-07T06-41-25Z
MinIO supports mutual TLS (mTLS) certificate-based authentication on SFTP, where both the server and the client verify the authenticity of each other.
This type of authentication requires the following:
1. Public key file for the trusted certificate authority
2. Public key file for the MinIO Server minted and signed by the trusted certificate authority
3. Public key file for the user minted and signed by the trusted certificate authority for the client connecting by SFTP and located in the user's ``.ssh`` folder (or equivalent for the operating system)
The keys must include a `principals list <https://man.openbsd.org/ssh-keygen#CERTIFICATES>`__ of the user(s) that can authenticate with the key:
.. code-block:: console
:class: copyable
ssh-keygen -s ~/.ssh/ca_user_key -I miniouser -n miniouser -V +1h -z 1 miniouser1.pub
- ``-s`` specifies the path to the certificate authority public key to use for generating this key.
The specified public key must have a ``principals`` list that includes this user.
- ``-I`` specifies the key identity for the public key.
- ``-n`` creates the ``user principals`` list for which this key is valid.
You must include the user for which this key is valid, and the user must match the username in MinIO.
- ``-V`` limits the duration for which the generated key is valid.
In this example, the key is valid for one hour.
Adjust the duration for your requirements.
- ``-z`` adds a serial number to the key to distinguish this generated public key from other keys signed by the same certificate authority public key.
MinIO requires specifying the Certificate Authority used to sign the certificates for SFTP access.
Start or restart the MinIO Server and specify the path to the trusted certificate authority's public key using an ``--sftp="trusted-user-ca-key=PATH"`` flag:
.. code-block:: console
:class: copyable
minio server {path-to-server} --sftp="trusted-user-ca-key=/path/to/.ssh/ca_user_key.pub" {...other flags}
When connecting to the MinIO Server with SFTP, the client verifies the MinIO Server's certificate.
The client then passes its own certificate to the MinIO Server.
The MinIO Server verifies the key created above by comparing its value to the known public key from the certificate authority provided at server startup.
Once the MinIO Server verifies the client's certificate, the user can connect to the MinIO server over SFTP:
.. code-block:: bash
   :class: copyable
sftp -P <SFTP port> <server IP>
Require service account or LDAP for authentication
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To force authentication to SFTP using LDAP or service account credentials, append a suffix to the username.
Valid suffixes are either ``=ldap`` or ``=svc``.
.. code-block:: console
> sftp -P 8022 my-ldap-user=ldap@[minio@localhost]:/bucket
.. code-block:: console
> sftp -P 8022 my-ldap-user=svc@[minio@localhost]:/bucket
- Replace ``my-ldap-user`` with the username to use.
- Replace ``[minio@localhost]`` with the address of the MinIO server.

View File

@ -1,42 +1,37 @@
Deploy MinIO Tenant with Active Directory / LDAP Identity Management
--------------------------------------------------------------------
#. Access the Operator Console
1) Access the Operator Console
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Temporarily forward traffic between the local host machine and the MinIO Operator Console and retrieve the JWT token for your Operator deployment.
For instructions, see :ref:`Configure access to the Operator Console service <minio-k8s-deploy-operator-access-console>`.
Temporarily forward traffic between the local host machine and the MinIO Operator Console and retrieve the JWT token for your Operator deployment.
For instructions, see :ref:`Configure access to the Operator Console service <minio-k8s-deploy-operator-access-console>`.
Open your browser to the temporary URL and enter the JWT Token into the login page.
You should see the :guilabel:`Tenants` page:
Open your browser to the temporary URL and enter the JWT Token into the login page.
You should see the :guilabel:`Tenants` page:
.. image:: /images/k8s/operator-dashboard.png
.. image:: /images/k8s/operator-dashboard.png
:align: center
:width: 70%
:class: no-scaled-link
:alt: MinIO Operator Console
Click the :guilabel:`+ Create Tenant` to start creating a MinIO Tenant.
To deploy a new MinIO Tenant with AD/LDAP external identity management, select the :guilabel:`+ Create Tenant` button.
If you are modifying an existing Tenant, select that Tenant from the list.
The following steps reference the necessary sections and configuration settings for existing Tenants.
To configure an existing MinIO Tenant with AD/LDAP external identity management, select that Tenant from the displayed list.
The following steps reference the necessary sections and configuration settings for existing Tenants.
2) Complete the :guilabel:`Identity Provider` Section
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#. Complete the :guilabel:`Identity Provider` Section
To enable external identity management with an Active Directory / LDAP provider, select the :guilabel:`Identity Provider` section.
You can then change the radio button to :guilabel:`Active Directory` to display the configuration settings.
To enable external identity management with an Active Directory / LDAP provider, select the :guilabel:`Identity Provider` section.
You can then change the radio button to :guilabel:`Active Directory` to display the configuration settings.
.. image:: /images/k8s/operator-create-tenant-identity-provider-adldap.png
.. image:: /images/k8s/operator-create-tenant-identity-provider-adldap.png
:align: center
:width: 70%
:class: no-scaled-link
:alt: MinIO Operator Console - Create a Tenant - External Identity Provider Section - Active Directory / LDAP
An asterisk ``*`` marks required fields.
The following table provides general guidance for those fields:
An asterisk ``*`` marks required fields.
The following table provides general guidance for those fields:
.. list-table::
.. list-table::
:header-rows: 1
:widths: 40 60
:width: 100%
@ -57,35 +52,33 @@ The following table provides general guidance for those fields:
You can specify multiple :abbr:`DNs (Distinguished Names)` by selecting the plus :octicon:`plus-circle` icon.
You can delete a DN by selecting the trash can :octicon:`trash` icon for that DN.
Once you complete the section, you can finish any other required sections of :ref:`Tenant Deployment <minio-k8s-deploy-minio-tenant>`.
Once you complete the section, you can finish any other required sections of :ref:`Tenant Deployment <minio-k8s-deploy-minio-tenant>`.
3) Assign Policies to AD/LDAP Users
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#. Assign Policies to AD/LDAP Users
MinIO by default assigns no :ref:`policies <minio-policy>` to AD/LDAP users or groups.
You must explicitly assign MinIO policies to a given user or group Distinguished Name (DN) to grant that user or group access to the MinIO deployment.
MinIO by default assigns no :ref:`policies <minio-policy>` to AD/LDAP users or groups.
You must explicitly assign MinIO policies to a given user or group Distinguished Name (DN) to grant that user or group access to the MinIO deployment.
The following example assumes an existing :ref:`alias <alias>` configured for the MinIO Tenant.
The following example assumes an existing :ref:`alias <alias>` configured for the MinIO Tenant.
Use the :mc:`mc idp ldap policy attach` command to assign a user or group DN to an existing MinIO Policy:
Use the :mc:`mc idp ldap policy attach` command to assign a user or group DN to an existing MinIO Policy:
.. code-block:: shell
.. code-block:: shell
:class: copyable
mc idp ldap policy attach minio-tenant POLICY --user='uid=primary,cn=applications,dc=domain,dc=com'
mc idp ldap policy attach minio-tenant POLICY --group='cn=applications,ou=groups,dc=domain,dc=com'
Replace ``POLICY`` with the name of the MinIO policy to assign to the user or group DN.
Replace ``POLICY`` with the name of the MinIO policy to assign to the user or group DN.
See :ref:`minio-external-identity-management-ad-ldap-access-control` for more information on access control with AD/LDAP users and groups.
See :ref:`minio-external-identity-management-ad-ldap-access-control` for more information on access control with AD/LDAP users and groups.
4) Generate S3-Compatible Temporary Credentials using AD/LDAP Credentials
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#. Generate S3-Compatible Temporary Credentials using AD/LDAP Credentials
Applications can use an AD/LDAP user credential to generate temporary S3-compatible credentials as-needed using the :ref:`minio-sts-assumerolewithldapidentity` Security Token Service (STS) API endpoint.
MinIO provides an example Go application :minio-git:`ldap.go <minio/blob/master/docs/sts/ldap.go>` with an example of managing this workflow.
Applications can use an AD/LDAP user credential to generate temporary S3-compatible credentials as-needed using the :ref:`minio-sts-assumerolewithldapidentity` Security Token Service (STS) API endpoint.
MinIO provides an example Go application :minio-git:`ldap.go <minio/blob/master/docs/sts/ldap.go>` with an example of managing this workflow.
.. code-block:: shell
.. code-block:: shell
POST https://minio.example.net?Action=AssumeRoleWithLDAPIdentity
&LDAPUsername=USERNAME
@ -93,17 +86,17 @@ MinIO provides an example Go application :minio-git:`ldap.go <minio/blob/master/
&Version=2011-06-15
&Policy={}
- Replace ``minio.example.net`` with the hostname or URL for the MinIO Tenant service.
- Replace ``minio.example.net`` with the hostname or URL for the MinIO Tenant service.
- Replace the ``LDAPUsername`` with the username of the AD/LDAP user.
- Replace the ``LDAPUsername`` with the username of the AD/LDAP user.
- Replace the ``LDAPPassword`` with the password of the AD/LDAP user.
- Replace the ``LDAPPassword`` with the password of the AD/LDAP user.
- Replace the ``Policy`` with an inline URL-encoded JSON :ref:`policy <minio-policy>` that further restricts the permissions associated to the temporary credentials.
- Replace the ``Policy`` with an inline URL-encoded JSON :ref:`policy <minio-policy>` that further restricts the permissions associated to the temporary credentials.
Omit to use the :ref:`policy whose name matches <minio-external-identity-management-ad-ldap-access-control>` the Distinguished Name (DN) of the AD/LDAP user.
The API response consists of an XML document containing the access key, secret key, session token, and expiration date.
Applications can use the access key and secret key to access and perform operations on MinIO.
The API response consists of an XML document containing the access key, secret key, session token, and expiration date.
Applications can use the access key and secret key to access and perform operations on MinIO.
See the :ref:`minio-sts-assumerolewithldapidentity` for reference documentation.
See the :ref:`minio-sts-assumerolewithldapidentity` for reference documentation.

View File

@ -2,38 +2,34 @@
.. |MINIO_S3_URL| replace:: minio.minio-tenant.svc.cluster-domain.example
.. |MINIO_CONSOLE_URL| replace:: minio-console.minio-tenant.svc.cluster-domain.example
1) Configure or Create a Client for Accessing Keycloak
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#. Configure or Create a Client for Accessing Keycloak
Authenticate to the Keycloak :guilabel:`Administrative Console` and navigate to :guilabel:`Clients`.
Authenticate to the Keycloak :guilabel:`Administrative Console` and navigate to :guilabel:`Clients`.
.. include:: /includes/common/common-configure-keycloak-identity-management.rst
.. include:: /includes/common/common-configure-keycloak-identity-management.rst
:start-after: start-configure-keycloak-client
:end-before: end-configure-keycloak-client
2) Create Client Scope for MinIO Client
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#. Create Client Scope for MinIO Client
Client scopes allow Keycloak to map user attributes as part of the JSON Web Token (JWT) returned in authentication requests.
This allows MinIO to reference those attributes when assigning policies to the user.
This step creates the necessary client scope to support MinIO authorization after successful Keycloak authentication.
Client scopes allow Keycloak to map user attributes as part of the JSON Web Token (JWT) returned in authentication requests.
This allows MinIO to reference those attributes when assigning policies to the user.
This step creates the necessary client scope to support MinIO authorization after successful Keycloak authentication.
.. include:: /includes/common/common-configure-keycloak-identity-management.rst
.. include:: /includes/common/common-configure-keycloak-identity-management.rst
:start-after: start-configure-keycloak-client-scope
:end-before: end-configure-keycloak-client-scope
3) Apply the Necessary Attribute to Keycloak Users/Groups
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#. Apply the Necessary Attribute to Keycloak Users/Groups
You must assign an attribute named ``policy`` to the Keycloak Users or Groups.
Set the value to any :ref:`policy <minio-policy>` on the MinIO deployment.
You must assign an attribute named ``policy`` to the Keycloak Users or Groups.
Set the value to any :ref:`policy <minio-policy>` on the MinIO deployment.
.. include:: /includes/common/common-configure-keycloak-identity-management.rst
.. include:: /includes/common/common-configure-keycloak-identity-management.rst
:start-after: start-configure-keycloak-user-group-attributes
:end-before: end-configure-keycloak-user-group-attributes
4) Configure MinIO for Keycloak Authentication
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#. Configure MinIO for Keycloak Authentication
.. include:: /includes/common/common-configure-keycloak-identity-management.rst
:start-after: start-configure-keycloak-minio-cli
@ -44,16 +40,14 @@ Restart the MinIO deployment for the changes to apply.
Check the MinIO logs and verify that startup succeeded with no errors related to the OIDC configuration.
5) Generate Application Credentials using the Security Token Service (STS)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#. Generate Application Credentials using the Security Token Service (STS)
.. include:: /includes/common/common-configure-keycloak-identity-management.rst
.. include:: /includes/common/common-configure-keycloak-identity-management.rst
:start-after: start-configure-keycloak-sts
:end-before: end-configure-keycloak-sts
Next Steps
~~~~~~~~~~
#. Next Steps
Applications should implement the :ref:`STS AssumeRoleWithWebIdentity <minio-sts-assumerolewithwebidentity>` flow using their :ref:`SDK <minio-drivers>` of choice.
When STS credentials expire, applications should have logic in place to regenerate the JWT token, STS token, and MinIO credentials before retrying and continuing operations.

View File

@ -1,53 +1,41 @@
Deploy MinIO Tenant with Server-Side Encryption
-----------------------------------------------
#. Review the Tenant CRD
1) Access the Operator Console
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Review the :ref:`Tenant CRD <minio-operator-crd>` ``TenantSpec.kes`` object, the ``TenantSpec.configuration`` object, and the :minio-docs:`KES Configuration reference</kes/tutorials/configuration>`.
Temporarily forward traffic between the local host machine and the MinIO Operator Console and retrieve the JWT token for your Operator deployment.
For instructions, see :ref:`Configure access to the Operator Console service <minio-k8s-deploy-operator-access-console>`.
You must prepare all necessary configurations associated to your external Key Management Service of choice before proceeding.
Open your browser to the temporary URL and enter the JWT Token into the login page.
You should see the :guilabel:`Tenants` page:
#. Create or Modify your Tenant YAML to set the values of ``KesConfig`` as necessary:
.. image:: /images/k8s/operator-dashboard.png
:align: center
:width: 70%
:class: no-scaled-link
:alt: MinIO Operator Console
You must modify your Tenant YAML or ``Kustomize`` templates to reflect the necessary KES configuration.
The following example is taken from the :minio-git:`MinIO Operator Kustomize examples </operator/blob/master/examples/kustomization/tenant-kes-encryption/tenant.yaml>`
Click the :guilabel:`+ Create Tenant` to start creating a MinIO Tenant.
.. code-block:: yaml
2) Complete the :guilabel:`Encryption` Section
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
kes:
image: "" # minio/kes:2024-06-17T15-47-05Z
env: [ ]
replicas: 2
kesSecret:
name: kes-configuration
imagePullPolicy: "IfNotPresent"
Reference the :ref:`Deploy a MinIO Tenant <minio-k8s-deploy-minio-tenant>` procedure for complete documentation of other Tenant settings.
The ``kes-configuration`` secret must reference a Kubernetes Opaque Secret which contains a ``stringData`` object with the full KES configuration as ``server-config.yaml``.
The ``keystore`` field must contain the full configuration associated with your preferred Key Management System.
To enable |SSE| with a :kes-docs:`supported KMS target <#supported-kms-targets>` during Tenant deployment, select the :guilabel:`Encryption` section and toggle the switch to :guilabel:`Enabled`.
You can then select the Radio button for the chosen KMS provider to display configuration settings for that provider.
Reference :minio-git:`the Kustomize example <operator/blob/master/examples/kustomization/tenant-kes-encryption/kes-configuration-secret.yaml>` for additional guidance.
.. image:: /images/k8s/operator-create-tenant-encryption.png
:align: center
:width: 70%
:class: no-scaled-link
:alt: MinIO Operator Console - Create a Tenant - Encryption Section
#. Create or Modify your Tenant YAML to set the values of ``TenantSpec.configuration`` as necessary.
An asterisk ``*`` marks required fields.
TODO
Refer to the Configuration References section of the tutorial for your chosen :kes-docs:`supported KMS target <#supported-kms-targets>` for more information on the configuration options for your KMS.
#. Generate a New Encryption Key
Once you have completed the configuration, you can finish any remaining sections of :ref:`Tenant Deployment <minio-k8s-deploy-minio-tenant>`.
3) Generate a New Encryption Key
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. include:: /includes/k8s/common-minio-kes.rst
.. include:: /includes/k8s/common-minio-kes.rst
:start-after: start-kes-generate-key-desc
:end-before: end-kes-generate-key-desc
4) Enable SSE-KMS for a Bucket
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#. Enable SSE-KMS for a Bucket
.. include:: /includes/k8s/common-minio-kes.rst
.. include:: /includes/k8s/common-minio-kes.rst
:start-after: start-kes-enable-sse-kms-desc
:end-before: end-kes-enable-sse-kms-desc

View File

@ -1,42 +1,37 @@
Deploy MinIO Tenant with OpenID Connect Identity Management
-----------------------------------------------------------
1. Access the Operator Console
1) Access the Operator Console
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Temporarily forward traffic between the local host machine and the MinIO Operator Console and retrieve the JWT token for your Operator deployment.
For instructions, see :ref:`Configure access to the Operator Console service <minio-k8s-deploy-operator-access-console>`.
Temporarily forward traffic between the local host machine and the MinIO Operator Console and retrieve the JWT token for your Operator deployment.
For instructions, see :ref:`Configure access to the Operator Console service <minio-k8s-deploy-operator-access-console>`.
Open your browser to the temporary URL and enter the JWT Token into the login page.
You should see the :guilabel:`Tenants` page:
Open your browser to the temporary URL and enter the JWT Token into the login page.
You should see the :guilabel:`Tenants` page:
.. image:: /images/k8s/operator-dashboard.png
.. image:: /images/k8s/operator-dashboard.png
:align: center
:width: 70%
:class: no-scaled-link
:alt: MinIO Operator Console
Click the :guilabel:`+ Create Tenant` to start creating a MinIO Tenant.
To deploy a new MinIO Tenant with OIDC external identity management, select the :guilabel:`+ Create Tenant` button.
If you are modifying an existing Tenant, select that Tenant from the list.
The following steps reference the necessary sections and configuration settings for existing Tenants.
To configure an existing MinIO Tenant with OIDC external identity management, select that Tenant from the displayed list.
The following steps reference the necessary sections and configuration settings for existing Tenants.
2) Complete the :guilabel:`Identity Provider` Section
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#. Complete the :guilabel:`Identity Provider` Section
To enable external identity management with an OIDC provider, select the :guilabel:`Identity Provider` section.
You can then change the radio button to :guilabel:`OIDC` to display the configuration settings.
To enable external identity management with an OIDC provider, select the :guilabel:`Identity Provider` section.
You can then change the radio button to :guilabel:`OIDC` to display the configuration settings.
.. image:: /images/k8s/operator-create-tenant-identity-provider-openid.png
.. image:: /images/k8s/operator-create-tenant-identity-provider-openid.png
:align: center
:width: 70%
:class: no-scaled-link
:alt: MinIO Operator Console - Create a Tenant - External Identity Provider Section - OpenID
An asterisk ``*`` marks required fields.
The following table provides general guidance for those fields:
An asterisk ``*`` marks required fields.
The following table provides general guidance for those fields:
.. list-table::
.. list-table::
:header-rows: 1
:widths: 40 60
:width: 100%
@ -54,20 +49,19 @@ The following table provides general guidance for those fields:
* - Claim Name
- The OIDC Claim MinIO uses for identifying the :ref:`policies <minio-policy>` to attach to the authenticated user.
Once you complete the section, you can finish any other required sections of :ref:`Tenant Deployment <minio-k8s-deploy-minio-tenant>`.
Once you complete the section, you can finish any other required sections of :ref:`Tenant Deployment <minio-k8s-deploy-minio-tenant>`.
3) Assign Policies to OIDC Users
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#. Assign Policies to OIDC Users
MinIO by default assigns no :ref:`policies <minio-policy>` to OIDC users.
MinIO uses the specified user Claim to identify one or more policies to attach to the authenticated user.
If the Claim is empty or specifies policies which do not exist on the deployment, the authenticated user has no permissions on the Tenant.
MinIO by default assigns no :ref:`policies <minio-policy>` to OIDC users.
MinIO uses the specified user Claim to identify one or more policies to attach to the authenticated user.
If the Claim is empty or specifies policies which do not exist on the deployment, the authenticated user has no permissions on the Tenant.
The following example assumes an existing :ref:`alias <alias>` configured for the MinIO Tenant.
The following example assumes an existing :ref:`alias <alias>` configured for the MinIO Tenant.
Consider the following example policy that grants general S3 API access on only the ``data`` bucket:
Consider the following example policy that grants general S3 API access on only the ``data`` bucket:
.. code-block:: json
.. code-block:: json
:class: copyable
{
@ -86,30 +80,29 @@ Consider the following example policy that grants general S3 API access on only
]
}
Use the :mc:`mc admin policy create` command to create a policy for use by an OIDC user:
Use the :mc:`mc admin policy create` command to create a policy for use by an OIDC user:
.. code-block:: shell
.. code-block:: shell
:class: copyable
mc admin policy create minio-tenant datareadonly /path/to/datareadonly.json
MinIO attaches the ``datareadonly`` policy to any authenticated OIDC user with ``datareadonly`` included in the configured claim.
MinIO attaches the ``datareadonly`` policy to any authenticated OIDC user with ``datareadonly`` included in the configured claim.
See :ref:`minio-external-identity-management-openid-access-control` for more information on access control with OIDC users and groups.
See :ref:`minio-external-identity-management-openid-access-control` for more information on access control with OIDC users and groups.
4) Generate S3-Compatible Temporary Credentials using OIDC Credentials
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#. Generate S3-Compatible Temporary Credentials using OIDC Credentials
Applications can generate temporary access credentials as-needed using the :ref:`minio-sts-assumerolewithwebidentity` Security Token Service (STS) API endpoint and the JSON Web Token (JWT) returned by the :abbr:`OIDC (OpenID Connect)` provider.
Applications can generate temporary access credentials as-needed using the :ref:`minio-sts-assumerolewithwebidentity` Security Token Service (STS) API endpoint and the JSON Web Token (JWT) returned by the :abbr:`OIDC (OpenID Connect)` provider.
The application must provide a workflow for logging into the :abbr:`OIDC (OpenID Connect)` provider and retrieving the JSON Web Token (JWT) associated to the authentication session.
Defer to the provider documentation for obtaining and parsing the JWT token after successful authentication.
MinIO provides an example Go application :minio-git:`web-identity.go <minio/blob/master/docs/sts/web-identity.go>` with an example of managing this workflow.
The application must provide a workflow for logging into the :abbr:`OIDC (OpenID Connect)` provider and retrieving the JSON Web Token (JWT) associated to the authentication session.
Defer to the provider documentation for obtaining and parsing the JWT token after successful authentication.
MinIO provides an example Go application :minio-git:`web-identity.go <minio/blob/master/docs/sts/web-identity.go>` with an example of managing this workflow.
Once the application retrieves the JWT token, use the ``AssumeRoleWithWebIdentity`` endpoint to generate the temporary credentials:
Once the application retrieves the JWT token, use the ``AssumeRoleWithWebIdentity`` endpoint to generate the temporary credentials:
.. code-block:: shell
.. code-block:: shell
:class: copyable
POST https://minio.example.net?Action=AssumeRoleWithWebIdentity
@ -118,14 +111,14 @@ Once the application retrieves the JWT token, use the ``AssumeRoleWithWebIdentit
&DurationSeconds=86400
&Policy=Policy
- Replace ``minio.example.net`` with the hostname or URL of the MinIO Tenant service.
- Replace the ``TOKEN`` with the JWT token returned in the previous step.
- Replace the ``DurationSeconds`` with the duration in seconds until the temporary credentials expire. The example above specifies a period of ``86400`` seconds, or 24 hours.
- Replace the ``Policy`` with an inline URL-encoded JSON :ref:`policy <minio-policy>` that further restricts the permissions associated to the temporary credentials.
- Replace ``minio.example.net`` with the hostname or URL of the MinIO Tenant service.
- Replace the ``TOKEN`` with the JWT token returned in the previous step.
- Replace the ``DurationSeconds`` with the duration in seconds until the temporary credentials expire. The example above specifies a period of ``86400`` seconds, or 24 hours.
- Replace the ``Policy`` with an inline URL-encoded JSON :ref:`policy <minio-policy>` that further restricts the permissions associated to the temporary credentials.
Omit to use the policy associated to the OpenID user :ref:`policy claim <minio-external-identity-management-openid-access-control>`.
The API response consists of an XML document containing the access key, secret key, session token, and expiration date.
Applications can use the access key and secret key to access and perform operations on MinIO.
The API response consists of an XML document containing the access key, secret key, session token, and expiration date.
Applications can use the access key and secret key to access and perform operations on MinIO.
See the :ref:`minio-sts-assumerolewithwebidentity` for reference documentation.
See the :ref:`minio-sts-assumerolewithwebidentity` for reference documentation.

View File

@ -1,104 +1,30 @@
.. versionadded:: MinIO RELEASE.2023-04-20T17-56-55Z
Overview
--------
Starting with :minio-release:`MinIO Server RELEASE.2023-04-20T17-56-55Z <RELEASE.2023-04-20T17-56-55Z>`, you can use the File Transfer Protocol (FTP) or SSH File Transfer Protocol (SFTP) to interact with the objects on a MinIO deployment.
You must specifically enable FTP or SFTP when starting the server.
Enabling either server type does not affect other MinIO features.
This page uses the abbreviation FTP throughout, but you can use any of the supported FTP protocols described below.
Supported Protocols
~~~~~~~~~~~~~~~~~~~
When enabled, MinIO supports FTP access over the following protocols:
- SSH File Transfer Protocol (SFTP)
SFTP is defined by the Internet Engineering Task Force (IETF) as an extension of SSH 2.0.
SFTP allows file transfer over SSH for use with :ref:`Transport Layer Security (TLS) <minio-tls>` and virtual private network (VPN) applications.
Your FTP client must support SFTP.
- File Transfer Protocol over SSL/TLS (FTPS)
FTPS allows for encrypted FTP communication with TLS certificates over the standard FTP communication channel.
FTPS should not be confused with SFTP, as FTPS does not communicate over a Secure Shell (SSH).
Your FTP client must support FTPS.
- File Transfer Protocol (FTP)
Unencrypted file transfer.
MinIO does **not** recommend using unencrypted FTP for file transfer.
.. admonition:: MinIO Operator Tenants only support SFTP
:class: note
MinIO Tenants deployed with Operator only support SFTP.
For details, see `File Transfer Protocol for Tenants <https://min.io/docs/minio/kubernetes/upstream/developers/file-transfer-protocol.html?ref=docs>`__.
Supported Commands
~~~~~~~~~~~~~~~~~~
When enabled, MinIO supports the following FTP operations:
- ``get``
- ``put``
- ``ls``
- ``mkdir``
- ``rmdir``
- ``delete``
MinIO does not support either ``append`` or ``rename`` operations.
Considerations
--------------
Versioning
~~~~~~~~~~
SFTP clients can only operate on the :ref:`latest version <minio-bucket-versioning>` of an object.
Specifically:
- For read operations, MinIO only returns the latest version of the requested object(s) to the FTP client.
- For write operations, MinIO applies normal versioning behavior and creates a new object version at the specified namespace.
``delete`` and ``rmdir`` operations create ``DeleteMarker`` objects.
Authentication and Access
~~~~~~~~~~~~~~~~~~~~~~~~~
FTP access requires the same authentication as any other S3 client.
MinIO supports the following authentication providers:
- :ref:`MinIO IDP <minio-internal-idp>` users and their service accounts
- :ref:`Active Directory/LDAP <minio-external-identity-management-ad-ldap>` users and their service accounts
- :ref:`OpenID/OIDC <minio-external-identity-management-openid>` service accounts
:ref:`STS <minio-security-token-service>` credentials **cannot** access buckets or objects over FTP.
Authenticated users can access buckets and objects based on the :ref:`policies <minio-policy>` assigned to the user or parent user account.
The FTP protocol does not require any of the ``admin:*`` :ref:`permissions <minio-policy-mc-admin-actions>`.
The FTP protocols do not support any of the MinIO admin actions.
Prerequisites
-------------
- MinIO RELEASE.2023-04-20T17-56-55Z or later.
- Enable an FTP or SFTP port for the server.
- A port to use for the FTP commands and a range of ports to allow the FTP server to request to use for the data transfer.
Procedure
---------
1. Start MinIO with an FTP and/or SFTP port enabled.
.. tab-set::
.. tab-item:: FTPS
:sync: ftps
The following example starts MinIO with FTPS enabled.
.. code-block:: shell
:class: copyable
minio server http://server{1...4}/disk{1...4} \
--ftp="address=:8021" \
--ftp="passive-port-range=30000-40000" \
--ftp="tls-private-key=path/to/private.key" \
--ftp="tls-public-cert=path/to/public.crt" \
...
.. note::
Omit ``tls-private-key`` and ``tls-public-cert`` to use the MinIO default TLS keys for FTPS.
For more information, see the :ref:`TLS on MinIO documentation <minio-tls>`.
.. tab-item:: SFTP/FTP
:sync: sftp
.. code-block:: shell
:class: copyable
@ -127,118 +53,33 @@ Procedure
To connect over TLS or through SSH, you must use a client that supports the desired protocol.
Examples
--------
3. Connect to MinIO
The following examples use the `FTP CLI client <https://linux.die.net/man/1/ftp>`__ on a Linux system.
.. tab-set::
.. tab-item:: SFTP/FTP
:sync: sftp
Connect to MinIO Using FTP
~~~~~~~~~~~~~~~~~~~~~~~~~~
The following example connects to an SFTP server, and lists the contents of a bucket named ``runner``.
The following example connects to a server using ``minio`` credentials to list contents in a bucket named ``runner``:
.. code-block:: shell
> ftp localhost -P 8021
Connected to localhost.
220 Welcome to MinIO FTP Server
Name (localhost:user): minio
331 User name ok, password required
Password:
230 Password ok, continue
Remote system type is UNIX.
Using binary mode to transfer files.
ftp> ls runner/
229 Entering Extended Passive Mode (|||39155|)
150 Opening ASCII mode data connection for file list
drwxrwxrwx 1 nobody nobody 0 Jan 1 00:00 chunkdocs/
drwxrwxrwx 1 nobody nobody 0 Jan 1 00:00 testdir/
...
Start MinIO with FTP over TLS (FTPS) Enabled
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following example starts MinIO with FTPS enabled.
.. code-block:: shell
:class: copyable
minio server http://server{1...4}/disk{1...4} \
--ftp="address=:8021" \
--ftp="passive-port-range=30000-40000" \
--ftp="tls-private-key=path/to/private.key" \
--ftp="tls-public-cert=path/to/public.crt" \
...
.. note::
Omit ``tls-private-key`` and ``tls-public-cert`` to use the MinIO default TLS keys for FTPS.
For more information, see the :ref:`TLS on MinIO documentation <minio-tls>`.
Download an Object over FTP
~~~~~~~~~~~~~~~~~~~~~~~~~~~
This example lists items in a bucket, then downloads the contents of the bucket.
.. code-block:: console
> ftp localhost -P 8021
Connected to localhost.
220 Welcome to MinIO FTP Server
Name (localhost:user): minio
331 User name ok, password required
Password:
230 Password ok, continue
Remote system type is UNIX.
Using binary mode to transfer files.ftp> ls runner/chunkdocs/metadata
229 Entering Extended Passive Mode (|||44269|)
150 Opening ASCII mode data connection for file list
-rwxrwxrwx 1 nobody nobody 45 Apr 1 06:13 chunkdocs/metadata
226 Closing data connection, sent 75 bytes
ftp> get
(remote-file) runner/chunkdocs/metadata
(local-file) test
local: test remote: runner/chunkdocs/metadata
229 Entering Extended Passive Mode (|||37785|)
150 Data transfer starting 45 bytes
45 3.58 KiB/s
226 Closing data connection, sent 45 bytes
45 bytes received in 00:00 (3.55 KiB/s)
...
Connect to MinIO Using SFTP
~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following example connects to an SFTP server, lists the contents of a bucket named ``runner``, and downloads an object.
.. code-block:: console
.. code-block:: console
> sftp -P 8022 minio@localhost
minio@localhost's password:
Connected to localhost.
sftp> ls runner/
chunkdocs testdir
sftp> get runner/chunkdocs/metadata metadata
Fetching /runner/chunkdocs/metadata to metadata
metadata 100% 226 16.6KB/s 00:00
Connect to MinIO Using SFTP with a Certificate Key File
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: RELEASE.2024-05-07T06-41-25Z
MinIO supports mutual TLS (mTLS) certificate-based authentication on SFTP, where both the server and the client verify the authenticity of each other.
.. tab-item:: FTPS
:sync: ftps
This type of authentication requires the following:
The following example uses the Linux `FTP CLI client <https://linux.die.net/man/1/ftp>`__ to connect to the MinIO server with ``minio`` credentials and list the contents of a bucket named ``runner``:
1. Public key file for the trusted certificate authority
2. Public key file for the MinIO Server minted and signed by the trusted certificate authority
3. Public key file for the user minted and signed by the trusted certificate authority for the client connecting by SFTP and located in the user's ``.ssh`` folder (or equivalent for the operating system)
The keys must include a `principals list <https://man.openbsd.org/ssh-keygen#CERTIFICATES>`__ of the user(s) that can authenticate with the key:
.. code-block:: shell
.. code-block:: console
:class: copyable
@ -328,12 +169,73 @@ Valid suffixes are either ``=ldap`` or ``=svc``.
.. code-block:: console
> sftp -P 8022 my-ldap-user=ldap@[minio@localhost]:/bucket
> ftp localhost -P 8021
Connected to localhost.
220 Welcome to MinIO FTP Server
Name (localhost:user): minio
331 User name ok, password required
Password:
230 Password ok, continue
Remote system type is UNIX.
Using binary mode to transfer files.
ftp> ls runner/
229 Entering Extended Passive Mode (|||39155|)
150 Opening ASCII mode data connection for file list
drwxrwxrwx 1 nobody nobody 0 Jan 1 00:00 chunkdocs/
drwxrwxrwx 1 nobody nobody 0 Jan 1 00:00 testdir/
...
.. code-block:: console
4. Download an Object
> sftp -P 8022 my-ldap-user=svc@[minio@localhost]:/bucket
.. tab-set::
.. tab-item:: SFTP/FTP
:sync: sftp
- Replace ``my-ldap-user`` with the username to use.
- Replace ``[minio@localhost]`` with the address of the MinIO server.
This example lists items in a bucket, then downloads the contents of the bucket.
.. code-block:: console
> sftp -P 8022 minio@localhost
minio@localhost's password:
Connected to localhost.
sftp> ls runner/
chunkdocs testdir
sftp> get runner/chunkdocs/metadata metadata
Fetching /runner/chunkdocs/metadata to metadata
metadata 100% 226 16.6KB/s 00:00
sftp>
.. tab-item:: FTPS
:sync: ftps
This example lists items in a bucket, then downloads the contents of the bucket.
.. code-block:: console
> ftp localhost -P 8021
Connected to localhost.
220 Welcome to MinIO FTP Server
Name (localhost:user): minio
331 User name ok, password required
Password:
230 Password ok, continue
Remote system type is UNIX.
Using binary mode to transfer files.ftp> ls runner/chunkdocs/metadata
229 Entering Extended Passive Mode (|||44269|)
150 Opening ASCII mode data connection for file list
-rwxrwxrwx 1 nobody nobody 45 Apr 1 06:13 chunkdocs/metadata
226 Closing data connection, sent 75 bytes
ftp> get
(remote-file) runner/chunkdocs/metadata
(local-file) test
local: test remote: runner/chunkdocs/metadata
229 Entering Extended Passive Mode (|||37785|)
150 Data transfer starting 45 bytes
45 3.58 KiB/s
226 Closing data connection, sent 45 bytes
45 bytes received in 00:00 (3.55 KiB/s)
...

View File

@ -1,51 +1,28 @@
Procedure
---------
#. Generate a KES API Key for use by MinIO
This procedure provides instructions for configuring and enabling Server-Side Encryption using your selected `supported KMS solution <https://min.io/docs/kes/#supported-kms-targets>`__ in production environments.
Specifically, this procedure assumes the following:
Use the :kes-docs:`kes identity new <cli/kes-identity/new>` command to generate a new API key for use by the MinIO Server:
- An existing production-grade KMS target
- One or more KES servers connected to the KMS target
- One or more hosts for a new or existing MinIO deployment
Prerequisite
~~~~~~~~~~~~
Depending on your chosen :kes-docs:`supported KMS target <#supported-kms-targets>` configuration, you may need to pass the ``kes-server.cert`` as a trusted Certificate Authority (CA).
Defer to the client documentation for instructions on trusting a third-party CA.
1) Generate a KES API Key for use by MinIO
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Starting with KES version :minio-git:`2023-02-15T14-54-37Z <kes/releases/tag/2023-02-15T14-54-37Z>`, you can generate an API key to use for authenticating to the KES server.
Use the :kes-docs:`kes identity new <cli/kes-identity/new>` command to generate a new API key for use by the MinIO Server:
.. code-block:: shell
.. code-block:: shell
:class: copyable
kes identity new
The output includes both the API Key for use with MinIO and the Identity hash for use with the :kes-docs:`KES Policy configuration <tutorials/configuration/#policy-configuration>`.
The output includes both the API Key for use with MinIO and the Identity hash for use with the :kes-docs:`KES Policy configuration <tutorials/configuration/#policy-configuration>`.
2) Create the MinIO Configurations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#. Configure the MinIO Environment File
Configure the MinIO Environment File
Create or modify the MinIO Server environment file for all hosts in the target deployment to include the following environment variables:
Create or modify the MinIO Server environment file for all hosts in the target deployment to include the following environment variables:
.. include:: /includes/common/common-minio-kes.rst
.. include:: /includes/common/common-minio-kes.rst
:start-after: start-kes-configuration-minio-desc
:end-before: end-kes-configuration-minio-desc
MinIO defaults to expecting this file at ``/etc/default/minio``.
If you modified your deployment to use a different location for the environment file, modify the file at that location.
MinIO defaults to expecting this file at ``/etc/default/minio``.
If you modified your deployment to use a different location for the environment file, modify the file at that location.
3) Start MinIO
~~~~~~~~~~~~~~
#. Start MinIO
.. admonition:: KES Operations Requires Unsealed Vault
.. admonition:: KES Operations Requires Unsealed Vault
:class: important
Depending on your selected KMS solution, you may need to unseal the key instance to allow normal cryptographic operations, including key creation or retrieval.
@ -56,24 +33,21 @@ If you modified your deployment to use a different location for the environment
You must start KES *before* starting MinIO.
The MinIO deployment requires access to KES as part of its startup.
This step uses ``systemd`` for starting and managing the MinIO server processes:
You can use the :mc:`mc admin service restart` command to restart MinIO:
Start the MinIO Server
.. code-block:: shell
:class: copyable
.. include:: /includes/linux/common-minio-kes.rst
:start-after: start-kes-minio-start-service-desc
:end-before: end-kes-minio-start-service-desc
mc admin service restart ALIAS
4) Generate a New Encryption Key
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#. Generate a New Encryption Key
.. include:: /includes/common/common-minio-kes.rst
.. include:: /includes/common/common-minio-kes.rst
:start-after: start-kes-generate-key-desc
:end-before: end-kes-generate-key-desc
5) Enable SSE-KMS for a Bucket
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#. Enable SSE-KMS for a Bucket
.. include:: /includes/common/common-minio-kes.rst
.. include:: /includes/common/common-minio-kes.rst
:start-after: start-kes-enable-sse-kms-desc
:end-before: end-kes-enable-sse-kms-desc

View File

@ -1,77 +0,0 @@
.. start-install-minio-binary-desc
.. tab-set::
.. tab-item:: Homebrew
Open a Terminal and run the following command to install the latest stable MinIO package using `Homebrew <https://brew.sh>`_.
.. code-block:: shell
:class: copyable
brew install minio/stable/minio
.. important::
If you previously installed the MinIO server using ``brew install minio``, then we recommend that you reinstall from ``minio/stable/minio`` instead.
.. code-block:: shell
:class: copyable
brew uninstall minio
brew install minio/stable/minio
.. tab-item:: Binary - arm64
Open a Terminal, then use the following commands to download the latest stable MinIO binary, set it to executable, and install it to the system ``$PATH``:
.. code-block:: shell
:class: copyable
curl -O https://dl.min.io/server/minio/release/darwin-arm64/minio
chmod +x ./minio
sudo mv ./minio /usr/local/bin/
.. tab-item:: Binary - amd64
Open a Terminal, then use the following commands to download the latest stable MinIO binary, set it to executable, and install it to the system ``$PATH``:
.. code-block:: shell
:class: copyable
curl -O https://dl.min.io/server/minio/release/darwin-amd64/minio
chmod +x ./minio
sudo mv ./minio /usr/local/bin/
.. end-install-minio-binary-desc
.. start-run-minio-binary-desc
From the Terminal, use the :mc:`minio server` to start a local MinIO instance in the ``~/data`` folder.
If desired, you can replace ``~/data`` with another location to which the user has read, write, and delete access for the MinIO instance.
.. code-block:: shell
:class: copyable
export MINIO_CONFIG_ENV_FILE=/etc/default/minio
minio server --console-address :9001
.. code-block:: shell
Status: 1 Online, 0 Offline.
API: http://192.168.2.100:9000 http://127.0.0.1:9000
RootUser: myminioadmin
RootPass: minio-secret-key-change-me
Console: http://192.168.2.100:9001 http://127.0.0.1:9001
RootUser: myminioadmin
RootPass: minio-secret-key-change-me
Command-line: https://min.io/docs/minio/linux/reference/minio-mc.html
$ mc alias set myminio http://10.0.2.100:9000 myminioadmin minio-secret-key-change-me
Documentation: https://min.io/docs/minio/linux/index.html
The ``API`` block lists the network interfaces and port on which clients can access the MinIO S3 API.
The ``Console`` block lists the network interfaces and port on which clients can access the MinIO Web Console.
.. end-run-minio-binary-desc

View File

@ -1,34 +0,0 @@
1) Download the MinIO Server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. include:: /includes/macos/common-installation.rst
:start-after: start-install-minio-binary-desc
:end-before: end-install-minio-binary-desc
2) Create the Environment Variable File
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. include:: /includes/common/common-deploy.rst
:start-after: start-common-deploy-create-environment-file-multi-drive
:end-before: end-common-deploy-create-environment-file-multi-drive
.. include:: /includes/common/common-deploy.rst
:start-after: start-common-deploy-create-unique-root-credentials
:end-before: end-common-deploy-create-unique-root-credentials
3) Start the MinIO Deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Issue the following command on the local host to start the MinIO |SNSD| deployment as a foreground process.
You must keep the shell or terminal session open to keep the process running.
.. include:: /includes/macos/common-installation.rst
:start-after: start-run-minio-binary-desc
:end-before: end-run-minio-binary-desc
4) Connect to the MinIO Deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. include:: /includes/common/common-deploy.rst
:start-after: start-common-deploy-connect-to-minio-deployment
:end-before: end-common-deploy-connect-to-minio-deployment

View File

@ -1,34 +0,0 @@
1) Download the MinIO Server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. include:: /includes/macos/common-installation.rst
:start-after: start-install-minio-binary-desc
:end-before: end-install-minio-binary-desc
2) Create the Environment Variable File
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. include:: /includes/common/common-deploy.rst
:start-after: start-common-deploy-create-environment-file-single-drive
:end-before: end-common-deploy-create-environment-file-single-drive
.. include:: /includes/common/common-deploy.rst
:start-after: start-common-deploy-create-unique-root-credentials
:end-before: end-common-deploy-create-unique-root-credentials
3) Start the MinIO Deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Issue the following command on the local host to start the MinIO |SNSD| deployment as a foreground process.
You must keep the shell or terminal session open to keep the process running.
.. include:: /includes/macos/common-installation.rst
:start-after: start-run-minio-binary-desc
:end-before: end-run-minio-binary-desc
4) Connect to the MinIO Deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. include:: /includes/common/common-deploy.rst
:start-after: start-common-deploy-connect-to-minio-deployment
:end-before: end-common-deploy-connect-to-minio-deployment

View File

@ -1,78 +0,0 @@
MinIO uses an update-then-restart methodology for upgrading a deployment to a newer release:
1. Update the MinIO binary with the newer release.
2. Restart the deployment using :mc-cmd:`mc admin service restart`.
This procedure does not require taking downtime and is non-disruptive to ongoing operations.
This page documents methods for upgrading using the update-then-restart method for both ``systemctl`` and user-managed MinIO deployments.
Deployments using Ansible, Terraform, or other management tools can use the procedures here as guidance for implementation within the existing automation framework.
Considerations
--------------
Upgrades Are Non-Disruptive
~~~~~~~~~~~~~~~~~~~~~~~~~~~
MinIO's upgrade-then-restart procedure does *not* require taking downtime or scheduling a maintenance period.
MinIO restarts are fast, such that restarting all server processes in parallel typically completes in a few seconds.
MinIO operations are atomic and strictly consistent, such that applications using MinIO or S3 SDKs can rely on the built-in :aws-docs:`transparent retry <general/latest/gr/api-retries.html>` without further client-side logic.
This ensures upgrades are non-disruptive to ongoing operations.
"Rolling" or serial "one-at-a-time" upgrade methods do not provide any advantage over the recommended "parallel" procedure, and can introduce unnecessary complexity to the upgrade procedure.
For virtualized environments which *require* rolling updates, you should modify the recommended procedure as follows:
1. Update the MinIO Binary in the virtual machine or container one at a time.
2. Restart the MinIO deployment using :mc-cmd:`mc admin service restart`.
3. Update the virtual machine/container configuration to use the matching newer MinIO image.
4. Perform the rolling restart of each machine/container with the updated image.
Check Release Notes
~~~~~~~~~~~~~~~~~~~
MinIO publishes :minio-git:`Release Notes <minio/releases>` for your reference as part of identifying the changes applied in each release.
Review the associated release notes between your current MinIO version and the newer release so you have a complete view of any changes.
Pay particular attention to any releases that are *not* backwards compatible.
You cannot trivially downgrade from any such release.
Update Using Homebrew
---------------------
For Homebrew installations, you can use homebrew to update the cask:
.. code-block:: shell
:class: copyable
brew upgrade minio/stable/minio
Restart the MinIO process to complete the update.
Update using Binary Replacement
-------------------------------
.. tab-set::
.. tab-item:: Binary - arm64
Open a Terminal, then use the following commands to download the latest stable MinIO binary, set it to executable, and install it to the system ``$PATH``:
.. code-block:: shell
:class: copyable
curl -O https://dl.min.io/server/minio/release/darwin-arm64/minio
chmod +x ./minio
sudo mv ./minio /usr/local/bin/
.. tab-item:: Binary - amd64
Open a Terminal, then use the following commands to download the latest stable MinIO binary, set it to executable, and install it to the system ``$PATH``:
.. code-block:: shell
:class: copyable
curl -O https://dl.min.io/server/minio/release/darwin-amd64/minio
chmod +x ./minio
sudo mv ./minio /usr/local/bin/
Restart the MinIO process to complete the update.

View File

@ -1,63 +0,0 @@
Deploy MinIO and KES with Server-Side Encryption
------------------------------------------------
Prior to starting these steps, create the following folders:
.. code-block:: powershell
:class: copyable
:substitutions:
New-Item -Path "|kescertpath|" -ItemType "directory"
New-Item -Path "|kesconfigpath|" -ItemType "directory"
New-Item -Path "|miniodatapath|" -ItemType "directory"
Prerequisite
~~~~~~~~~~~~
Depending on your chosen :kes-docs:`supported KMS target <#supported-kms-targets>` configuration, you may need to pass the ``kes-server.cert`` as a trusted Certificate Authority (CA).
Defer to the client documentation for instructions on trusting a third-party CA.
1) Create the MinIO Configurations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Create the MinIO Environment File
Create the environment file using your preferred text editor.
The following example uses the Windows Notepad program:
.. code-block:: powershell
:substitutions:
notepad |minioconfigpath|\minio
.. include:: /includes/windows/common-minio-kes.rst
:start-after: start-kes-configuration-minio-desc
:end-before: end-kes-configuration-minio-desc
2) Start the MinIO Server
~~~~~~~~~~~~~~~~~~~~~~~~~
.. note::
You **must** start KES *before* starting MinIO.
The MinIO deployment requires access to KES as part of its startup.
Start the MinIO Server
.. include:: /includes/windows/common-minio-kes.rst
:start-after: start-kes-minio-start-server-desc
:end-before: end-kes-minio-start-server-desc
3) Generate a New Encryption Key
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. include:: /includes/windows/common-minio-kes.rst
:start-after: start-kes-generate-key-desc
:end-before: end-kes-generate-key-desc
4) Enable SSE-KMS for a Bucket
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. include:: /includes/common/common-minio-kes.rst
:start-after: start-kes-enable-sse-kms-desc
:end-before: end-kes-enable-sse-kms-desc

View File

@ -1,53 +0,0 @@
1) Download the MinIO Server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Download the MinIO executable from the following URL:
.. code-block:: shell
:class: copyable
https://dl.min.io/server/minio/release/windows-amd64/minio.exe
The next step includes instructions for running the executable.
You cannot run the executable from the Explorer or by double clicking the file.
Instead, you call the executable to launch the server.
2) Prepare the Data Path for MinIO
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Ensure the data path is empty and contains no existing files, including any hidden or Windows system files.
If specifying a drive not dedicated for use by MinIO, consider creating a dedicated folder for storing MinIO data such as ``D:/minio``.
3) Start the MinIO Server
~~~~~~~~~~~~~~~~~~~~~~~~~~
Open the Command Prompt or PowerShell and issue the following command to start the MinIO |SNSD| deployment in that session:
.. code-block:: shell
:class: copyable
minio.exe server D:/minio --console-address ":9001"
The output should resemble the following:
.. code-block:: shell
Status: 1 Online, 0 Offline.
API: http://192.168.2.100:9000 http://127.0.0.1:9000
Console: http://192.168.2.100:9001 http://127.0.0.1:9001
Command-line: https://min.io/docs/minio/linux/reference/minio-mc.html
$ mc alias set myminio http://10.0.2.100:9000 minioadmin minioadmin
Documentation: https://min.io/docs/minio/linux/index.html
The ``API`` block lists the network interfaces and port on which clients can access the MinIO S3 API.
The ``Console`` block lists the network interfaces and port on which clients can access the MinIO Web Console.
4) Connect to the MinIO Server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. include:: /includes/common/common-deploy.rst
:start-after: start-common-deploy-connect-to-minio-deployment
:end-before: end-common-deploy-connect-to-minio-deployment

View File

@ -1,6 +1,6 @@
================================================================================
MinIO Object Storage for |platform|
================================================================================
=====================================
MinIO High Performance Object Storage
=====================================
.. default-domain:: minio
@ -52,31 +52,37 @@ MinIO Object Storage for |platform|
:local:
:depth: 2
MinIO is an object storage solution that provides an Amazon Web Services S3-compatible API and supports all core S3 features.
MinIO is built to deploy anywhere - public or private cloud, baremetal infrastructure, orchestrated environments, and edge infrastructure.
MinIO is a Kubernetes-native S3-compatible object storage solution designed to deploy wherever your applications are - on premises, in the private cloud, in the public cloud, and edge infrastructure.
MinIO is designed to support modern application workload patterns where high performance distributed computing meets petabyte-scale storage requirements.
.. cond:: linux
MinIO is available under two server editions, each with their own distinct license:
This site documents Operations, Administration, and Development of MinIO deployments on Linux platforms for the latest stable version of MinIO: |minio-tag|.
.. grid:: 2
.. cond:: windows
.. grid-item-card:: MinIO Object Store (MinIO)
This site documents Operations, Administration, and Development of MinIO deployments on Windows platforms for the latest stable version of MinIO: |minio-tag|.
MinIO Object Store (MinIO) is licensed under `GNU Affero General Public License v3.0 <https://www.gnu.org/licenses/agpl-3.0.en.html?ref=docs>`__.
.. cond:: macos
MinIO features are available to the community as a stream of active development.
This site documents Operations, Administration, and Development of MinIO deployments on macOS platforms for the latest stable version of MinIO: |minio-tag|.
MinIO is community-focused, with best-effort support through the MinIO Community Slack Channel and the MinIO Github repository.
.. cond:: container
.. grid-item-card:: MinIO Enterprise Object Store (MinEOS)
This site documents Operations, Administration, and Development of MinIO deployments on Containers for the latest stable version of MinIO: |minio-tag|.
MinIO Enterprise Object Store (MinEOS) is licensed under the `MinIO Commercial License <https://min.io/pricing?jmp=docs>`__.
.. cond:: k8s and not (openshift or eks or gke or aks)
MinEOS is available to |SUBNET| Enterprise-Lite and Enterprise-Plus customers and includes exclusive support for the :minio-blog:`Enterprise Object Store feature suite <enterprise-object-store-overview/>`.
This site documents Operations, Administration, and Development of MinIO deployments on Kubernetes platform for the latest stable version of the MinIO Operator: |operator-version-stable|.
MinEOS includes |SUBNET| access for 24/7 L1 support from MinIO Engineering, with 4-hour or 1-hour SLAs available based on deployment size.
.. cond:: openshift
This site documents Operations, Administration, and Development of MinIO deployments on supported platforms for |minio-tag|.
MinIO Enterprise Object Storage (MinEOS) deployments can use this documentation as a baseline of features available in a current or upcoming release.
.. todo: More marketing/SEO below?
MinIO officially supports the following platforms:
This site documents Operations, Administration, and Development of MinIO deployments on Red Hat Kubernetes distributions for the latest stable version of the MinIO Operator: |operator-version-stable|.
.. important::
@ -84,193 +90,116 @@ MinIO is built to deploy anywhere - public or private cloud, baremetal infrastru
Support for deploying the MinIO Operator via the RedHat Marketplace or OperatorHub was removed in 2024.
MinIO AIStor fully supports installation via the Marketplace and OperatorHub onto enterprise RedHat Kubernetes distributions like OpenShift Container Platform (OCP).
|subnet| customers can open an issue for further clarification and instructions on migrating to `AIStor <https://min.io/product/aistor-overview?jmp=docs>`__.
- :ref:`Kubernetes (Upstream) <deploy-minio-kubernetes>`
- :ref:`RedHat Openshift <deploy-operator-openshift>`
- :ref:`SUSE Rancher <deploy-operator-rancher>`
- :ref:`Elastic Kubernetes Service <deploy-operator-eks>`
- :ref:`Google Kubernetes Engine <deploy-operator-gke>`
- :ref:`Azure Kubernetes Service <deploy-operator-aks>`
- :ref:`Red Hat Enterprise Linux <deploy-minio-rhel>`
- :ref:`Ubuntu Linux <deploy-minio-ubuntu>`
- :ref:`MacOS <deploy-minio-macos>`
- :ref:`Container <deploy-minio-container>`
- :ref:`Windows <deploy-minio-windows>`
.. cond:: eks
Quickstart
----------
This site documents Operations, Administration, and Development of MinIO deployments on `Amazon Elastic Kubernetes Service <https://aws.amazon.com/eks/>`__ for the latest stable version of the MinIO Operator: |operator-version-stable|.
.. tab-set::
.. cond:: gke
.. tab-item:: Sandbox
This site documents Operations, Administration, and Development of MinIO deployments on `Google Kubernetes Engine <https://cloud.google.com/kubernetes-engine>`__ for the latest stable version of the MinIO Operator: |operator-version-stable|.
MinIO maintains a sandbox instance of the community server at https://play.min.io.
You can use this instance for experimenting or evaluating the MinIO product on your local system.
.. cond:: aks
Follow the :mc:`mc` CLI :ref:`installation guide <mc-install>` to install the utility on your local host.
This site documents Operations, Administration, and Development of MinIO deployments on `Azure Kubernetes Service <https://azure.microsoft.com/en-us/products/kubernetes-service/#overview>`__ for the latest stable version of the MinIO Operator: |operator-version-stable|.
:mc:`mc` includes a pre-configured ``play`` alias for connecting to the sandbox.
For example, you can use the following commands to create a bucket and copy objects to ``play``:
.. cond:: not (eks or aks or gke)
.. code-block:: shell
:class: copyable
MinIO is released under dual license `GNU Affero General Public License v3.0 <https://www.gnu.org/licenses/agpl-3.0.en.html?ref=docs>`__ and `MinIO Commercial License <https://min.io/pricing?jmp=docs>`__.
Deployments registered through |SUBNET| use the commercial license and include access to 24/7 MinIO support.
mc mb play/mynewbucket
.. cond:: eks
mc cp /path/to/file play/mynewbucket/prefix/filename.extension
MinIO is released under dual license `GNU Affero General Public License v3.0 <https://www.gnu.org/licenses/agpl-3.0.en.html?ref=docs>`__ and `MinIO Commercial License <https://min.io/pricing?jmp=docs>`__.
Deploying MinIO through the :minio-web:`AWS Marketplace <product/multicloud-elastic-kubernetes-service>` includes the commercial license and access to 24/7 MinIO support through |SUBNET|.
mc stat play/mynewbucket/prefix/filename.extension
.. cond:: gke
.. important::
MinIO is released under dual license `GNU Affero General Public License v3.0 <https://www.gnu.org/licenses/agpl-3.0.en.html?ref=docs>`__ and `MinIO Commercial License <https://min.io/pricing?jmp=docs>`__.
Deploying MinIO through the :minio-web:`GKE Marketplace <product/multicloud-google-kubernetes-service>` includes the commercial license and access to 24/7 MinIO support through |SUBNET|.
MinIO's Play sandbox is an ephemeral public-facing deployment with well-known access credentials.
Any private, confidential, internal, secured, or other important data uploaded to Play is effectively made public.
Exercise caution and discretion in any data you upload to Play.
.. cond:: aks
.. tab-item:: Baremetal
MinIO is released under dual license `GNU Affero General Public License v3.0 <https://www.gnu.org/licenses/agpl-3.0.en.html?ref=docs>`__ and `MinIO Commercial License <https://min.io/pricing?jmp=docs>`__.
Deploying MinIO through the :minio-web:`AKS Marketplace <product/multicloud-azure-kubernetes-service>` includes the commercial license and access to 24/7 MinIO support through |SUBNET|.
1. Download the MinIO Server Process for your Operating System
You can get started exploring MinIO features using the :ref:`minio-console` and our ``play`` server at https://play.min.io.
``play`` is a *public* MinIO cluster running the latest stable MinIO server.
Any file uploaded to ``play`` should be considered public and non-protected.
For more about connecting to ``play``, see :ref:`MinIO Console play Login <minio-console-play-login>`.
Follow the instructions on the `MinIO Download Page <https://min.io/downloads?ref=docs>`__ for your operating system to download and install the :mc:`minio server` process.
.. cond:: linux
2. Create a folder for use with MinIO
.. include:: /includes/linux/quickstart.rst
For example, create a folder ``~/minio`` in Linux/MacOS or ``C:\minio`` in Windows.
.. cond:: macos
3. Start the MinIO Server
.. include:: /includes/macos/quickstart.rst
Run the :mc:`minio server` specifying the path to the directory and the :mc:`~minio server --console-address` parameter to set a static console listen path:
.. cond:: windows
.. code-block:: shell
:class: copyable
.. include:: /includes/windows/quickstart.rst
minio server ~/minio --console-address :9001
# For Windows, use minio.exe server ~/minio --console-address :9001
.. cond:: k8s
The output includes connection instructions for both :mc:`mc` and connecting to the Console using your browser.
.. include:: /includes/k8s/quickstart.rst
.. tab-item:: Kubernetes
.. cond:: container
Download `minio-dev.yaml <https://raw.githubusercontent.com/minio/docs/master/source/extra/examples/minio-dev.yaml>`__ to your host machine:
.. include:: /includes/container/quickstart.rst
.. code-block:: shell
:class: copyable
.. cond:: k8s
curl https://raw.githubusercontent.com/minio/docs/master/source/extra/examples/minio-dev.yaml -O
.. toctree::
:titlesonly:
:hidden:
The file describes two Kubernetes resources:
/operations/installation
/operations/install-deploy-manage/upgrade-minio-operator
/operations/deploy-manage-tenants
/operations/concepts
/operations/monitoring
/operations/external-iam
/operations/server-side-encryption
/operations/network-encryption
/operations/cert-manager
/operations/checklists
/operations/data-recovery
/operations/troubleshooting
/administration/minio-console
/administration/object-management
/administration/monitoring
/administration/identity-access-management
/administration/server-side-encryption
/administration/bucket-replication
/administration/batch-framework
/administration/concepts
- A new namespace ``minio-dev``, and
- A MinIO pod using a drive or volume on the Worker Node for serving data
.. cond:: windows
.. toctree::
:titlesonly:
:hidden:
/operations/installation
/operations/concepts
/operations/monitoring
/operations/external-iam
/operations/server-side-encryption
/operations/network-encryption
/operations/checklists
/operations/data-recovery
/operations/troubleshooting
/administration/minio-console
/administration/object-management
/administration/monitoring
/administration/identity-access-management
/administration/server-side-encryption
/administration/bucket-replication
/administration/batch-framework
/administration/concepts
.. cond:: linux or macos or container
.. toctree::
:titlesonly:
:hidden:
/operations/installation
/operations/manage-existing-deployments
/operations/concepts
/operations/monitoring
/operations/external-iam
/operations/server-side-encryption
/operations/network-encryption
/operations/checklists
/operations/data-recovery
/operations/troubleshooting
/administration/minio-console
/administration/object-management
/administration/monitoring
/administration/identity-access-management
/administration/server-side-encryption
/administration/bucket-replication
/administration/batch-framework
/administration/concepts
.. cond:: not (linux or k8s)
.. toctree::
:titlesonly:
:hidden:
Software Development Kits (SDK) <https://min.io/docs/minio/linux/developers/minio-drivers.html?ref=docs>
Security Token Service (STS) <https://min.io/docs/minio/linux/developers/security-token-service.html?ref=docs>
Object Lambda <https://min.io/docs/minio/linux/developers/transforms-with-object-lambda.html?ref=docs>
File Transfer Protocol <https://min.io/docs/minio/linux/developers/file-transfer-protocol.html?ref=docs>
MinIO Client <https://min.io/docs/minio/linux/reference/minio-mc.html?ref=docs>
MinIO Admin Client <https://min.io/docs/minio/linux/reference/minio-mc-admin.html?ref=docs>
S3 API Compatibility <https://min.io/docs/minio/linux/reference/s3-api-compatibility.html?ref=docs>
Integrations <https://min.io/docs/minio/linux/integrations/integrations.html?ref=docs>
.. cond:: linux
.. toctree::
:titlesonly:
:hidden:
/developers/minio-drivers
/developers/security-token-service
/developers/transforms-with-object-lambda
/developers/file-transfer-protocol
/reference/minio-mc
/reference/minio-mc-admin
/reference/minio-mc-deprecated
/reference/minio-server/minio-server
/reference/s3-api-compatibility
/integrations/integrations
.. cond:: k8s
.. toctree::
:titlesonly:
:hidden:
Software Development Kits (SDK) <https://min.io/docs/minio/linux/developers/minio-drivers.html?ref=docs>
/developers/sts-for-operator
Object Lambda <https://min.io/docs/minio/linux/developers/transforms-with-object-lambda.html?ref=docs>
/developers/file-transfer-protocol
MinIO Client <https://min.io/docs/minio/linux/reference/minio-mc.html?ref=docs>
MinIO Admin Client <https://min.io/docs/minio/linux/reference/minio-mc-admin.html?ref=docs>
S3 API Compatibility <https://min.io/docs/minio/linux/reference/s3-api-compatibility.html?ref=docs>
Integrations <https://min.io/docs/minio/linux/integrations/integrations.html?ref=docs>
/reference/operator-crd
/reference/operator-chart-values
/reference/tenant-chart-values
/reference/operator-environment-variables
Use ``kubectl port-forward`` to access the Pod, or create a service for the pod for which you can configure Ingress, Load Balancing, or similar Kubernetes-level networking.
.. toctree::
:titlesonly:
:hidden:
/operations/deployments/installation
/operations/replication/multi-site-replication
/operations/concepts
/operations/monitoring
/operations/external-iam
/operations/server-side-encryption
/operations/network-encryption
/operations/checklists
/operations/data-recovery
/operations/troubleshooting
/administration/minio-console
/administration/object-management
/administration/monitoring
/administration/identity-access-management
/administration/server-side-encryption
/administration/bucket-replication
/administration/batch-framework
/administration/concepts
/developers/minio-drivers
/developers/security-token-service
/developers/transforms-with-object-lambda
/developers/file-transfer-protocol
/reference/kubernetes
/reference/baremetal
/reference/s3-api-compatibility
/glossary
/integrations/integrations

View File

@ -43,13 +43,18 @@ Workloads that benefit from storing aged data on lower-cost hardware should inst
See our `Reference Hardware <https://min.io/product/reference-hardware#hardware?ref-docs>`__ page for a curated selection of servers and storage components from our hardware partners.
.. cond:: k8s
.. tab-set::
:class: parent
.. tab-item:: Kubernetes
:sync: k8s
.. include:: /includes/common/common-checklist.rst
:start-after: start-k8s-hardware-checklist
:end-before: end-k8s-hardware-checklist
.. cond:: not k8s
.. tab-item:: Baremetal
:sync: baremetal
.. include:: /includes/common/common-checklist.rst
:start-after: start-linux-hardware-checklist
@ -202,7 +207,14 @@ Storage
:start-after: start-exclusive-drive-access
:end-before: end-exclusive-drive-access
.. cond:: k8s
Recommended Storage Mediums
+++++++++++++++++++++++++++
.. tab-set::
:class: hidden
.. tab-item:: Kubernetes
:sync: k8s
MinIO recommends provisioning a storage class for each MinIO Tenant that meets the performance objectives for that tenant.
@ -214,30 +226,65 @@ Storage
MinIO limits the maximum usable size per PV to the smallest PV in the pool.
For example, if a pool has 15 10TB PVs and 1 1TB PV, MinIO limits the per-PV capacity to 1TB.
.. cond:: not k8s
Recommended Storage Mediums
+++++++++++++++++++++++++++
.. tab-item:: Baremetal
:sync: baremetal
MinIO recommends using flash-based storage (NVMe or SSD) for all workload types and scales.
Workloads that require high performance should prefer NVMe over SSD.
MinIO deployments using HDD-based storage are best suited as cold-tier targets for :ref:`Object Transition ("Tiering") <minio-lifecycle-management-tiering>` of aged data.
MinIO does not recommend HDD storage for production environments.
HDD storage typically does not provide the necessary performance to meet the expectations of modern workloads, and any cost efficiencies at scale are offset by the performance constraints of the medium.
Use Direct-Attached "Local" Storage (DAS)
+++++++++++++++++++++++++++++++++++++++++
Prefer Direct-Attached "Local" Storage (DAS)
++++++++++++++++++++++++++++++++++++++++++++
:abbr:`DAS (Direct-Attached Storage)`, such as locally-attached JBOD (Just a Bunch of Disks) arrays, provide significant performance and consistency advantages over networked (NAS, SAN, NFS) storage.
:abbr:`DAS (Direct-Attached Storage)`, such as locally-attached JBOD (Just a Bunch of Disks) arrays, provide significant performance and consistency advantages over networked (NAS, SAN, NFS) storage.
.. dropdown:: Network File System Volumes Break Consistency Guarantees
.. tab-set::
:class: hidden
.. tab-item:: Kubernetes
:sync: k8s
While MinIO Tenants can make use of remote Persistent Volume (PV) resources, the cost of performing I/O over the network typically constrains overall performance.
MinIO strongly recommends using CSIs which can provision storage attached to the worker node on which Kubernetes schedules your MinIO pods, such as :minio-docs:`MinIO DirectPV <directpv>`.
For all other cases, make every effort possible to select a CSI which presents the storage to MinIO as if it were a locally-attached filesystem.
CSIs which add layers of software or translations between MinIO and the OS-level storage access APIs necessarily increase the complexity of the system and can contribute to unexpected or undesired behavior.
.. tab-item:: Baremetal
:sync: baremetal
Configure the JBOD arrays without any RAID, pooling, or similar software-level layers, such that the storage is presented directly to MinIO.
For virtual machines or systems that require provisioning storage as a virtual volume, MinIO recommends using thick LUNs only.
.. dropdown:: Network File System Volumes Break Consistency Guarantees
:class-title: note
MinIO's strict **read-after-write** and **list-after-write** consistency model requires local drive filesystems.
MinIO cannot provide consistency guarantees if the underlying storage volumes are NFS or a similar network-attached storage volume.
Use XFS-Formatted Drives with Labels
++++++++++++++++++++++++++++++++++++
Use XFS-Formatted Drives with Consistent Mounting
+++++++++++++++++++++++++++++++++++++++++++++++++
.. tab-set::
.. tab-item:: Kubernetes
:sync: k8s
MinIO recommends formatting the drives underlying MinIO Persistent Volumes as ``xfs``.
If using a CSI, review the documentation for that CSI and ensure it supports specifying the ``xfs`` filesystem.
MinIO strongly recommends avoiding any CSI which formats drives as ``ext4``, ``btrfs`` or other filesystems.
MinIO expects all provisioned Persistent Volumes (PV) to be intended for its exclusive use, where the underlying storage medium guarantees access to the stored data at the assigned mount path.
Modifications to the underlying storage medium, including but not limited to external or third-party applications or the arbitrary re-mounting of locally-attached storage, may result in unexpected behavior or data loss.
.. tab-item:: Baremetal
:sync: baremetal
Format drives as XFS and present them to MinIO as a :abbr:`JBOD (Just a Bunch of Disks)` array with no RAID or other pooling configurations.
Using any other type of backing storage (SAN/NAS, ext4, RAID, LVM) typically results in a reduction in performance, reliability, predictability, and consistency.
@ -252,9 +299,6 @@ Storage
mkfs.xfs /dev/sdd -L MINIODRIVE3
mkfs.xfs /dev/sde -L MINIODRIVE4
Mount Drives using ``/etc/fstab``
+++++++++++++++++++++++++++++++++
MinIO **requires** that drives maintain their ordering at the mounted position across restarts.
MinIO **does not** support arbitrary migration of a drive with existing MinIO data to a new mount position, whether intentional or as the result of OS-level behavior.
@ -288,17 +332,29 @@ Storage
You should not use this option on systems with locally attached disks, as silencing drive errors prevents both MinIO and the OS from responding to those errors in a normal fashion.
Disable XFS Retry On Error
++++++++++++++++++++++++++
Disable XFS Retry On Error
++++++++++++++++++++++++++
MinIO **strongly recommends** disabling `retry-on-error <https://docs.kernel.org/admin-guide/xfs.html?highlight=xfs#error-handling>`__ behavior using the ``max_retries`` configuration for the following error classes:
MinIO **strongly recommends** disabling `retry-on-error <https://docs.kernel.org/admin-guide/xfs.html?highlight=xfs#error-handling>`__ behavior using the ``max_retries`` configuration for the following error classes:
- ``EIO`` Error when reading or writing
- ``ENOSPC`` Error no space left on device
- ``default`` All other errors
- ``EIO`` Error when reading or writing
- ``ENOSPC`` Error no space left on device
- ``default`` All other errors
The default ``max_retries`` setting typically directs the filesystem to retry-on-error indefinitely instead of propagating the error.
MinIO can handle XFS errors appropriately, such that the retry-on-error behavior introduces at most unnecessary latency or performance degradation.
The default ``max_retries`` setting typically directs the filesystem to retry-on-error indefinitely instead of propagating the error.
MinIO can handle XFS errors appropriately, such that the retry-on-error behavior introduces at most unnecessary latency or performance degradation.
.. tab-set::
:class: hidden
.. tab-item:: Kubernetes
:sync: k8s
Defer to the documentation for your preferred CSI or StorageClass on options for configuring filesystem-level settings.
.. tab-item:: Baremetal
:sync: baremetal
The following script iterates through all drives at the specified mount path and sets the XFS ``max_retries`` setting to ``0`` or "fail immediately on error" for the recommended error classes.
The script ignores any drives not mounted, either manually or through ``/etc/fstab``.
@ -328,16 +384,16 @@ Storage
@reboot /opt/minio/xfs-retry-settings.sh
Use Consistent Drive Type and Capacity
++++++++++++++++++++++++++++++++++++++
Use Consistent Drive Type and Capacity
++++++++++++++++++++++++++++++++++++++
Ensure a consistent drive type (NVMe, SSD, HDD) for the underlying storage in a MinIO deployment.
MinIO does not distinguish between storage types and does not support configuring "hot" or "warm" drives within a single deployment.
Mixing drive types typically results in performance degradation, as the slowest drives in the deployment become a bottleneck regardless of the capabilities of the faster drives.
Ensure a consistent drive type (NVMe, SSD, HDD) for the underlying storage in a MinIO deployment.
MinIO does not distinguish between storage types and does not support configuring "hot" or "warm" drives within a single deployment.
Mixing drive types typically results in performance degradation, as the slowest drives in the deployment become a bottleneck regardless of the capabilities of the faster drives.
Use the same capacity and type of drive across all nodes in each MinIO :ref:`server pool <minio-intro-server-pool>`.
MinIO limits the maximum usable size per drive to the smallest size in the deployment.
For example, if a deployment has 15 10TB drives and 1 1TB drive, MinIO limits the per-drive capacity to 1TB.
Use the same capacity and type of drive across all nodes in each MinIO :ref:`server pool <minio-intro-server-pool>`.
MinIO limits the maximum usable size per drive to the smallest size in the deployment.
For example, if a deployment has 15 10TB drives and 1 1TB drive, MinIO limits the per-drive capacity to 1TB.
Recommended Hardware Tests
--------------------------

View File

@ -68,20 +68,7 @@ MinIO Pre-requisites
MinIO Install
-------------
Install the MinIO server binary across all nodes, ensuring that each node uses the same version of that binary.
.. cond:: linux
See the :ref:`Multi Node Multi Drive deployment guide <minio-mnmd>` for more information.
.. cond:: container or macos or windows
See the :ref:`Single Node Single Drive deployment guide <minio-snsd>` for more information.
.. cond:: k8s
See the :ref:`Deploy MinIO Operator <minio-operator-installation>` and :ref:`Minio Tenant deployment guide <minio-k8s-deploy-minio-tenant>` for more information.
Install a matching version of MinIO across all nodes in the deployment.
Post Install Tasks
------------------

View File

@ -37,13 +37,9 @@ MinIO can deploy to three types of topologies:
#. :ref:`Multi Node Multi Drive <minio-mnmd>`, multiple MinIO servers with multiple mounted drives or volumes for data
.. cond:: linux
For Baremetal infrastructure, you can install and manage distributed MinIO deployments using Ansible, Terraform, or manual processes
For example, a production deployment using Ansible, Terraform, or manual processes
.. cond:: k8s
For example, a production deployment using Kubernetes to manage and deploy pods and their associated persistent volume claims.
For Kubernetes infrastructure, use the MinIO Operator to manage and deploy distributed MinIO Tenants.
How does a distributed MinIO deployment work?
---------------------------------------------
@ -109,13 +105,7 @@ Expansion consists of adding one or more :ref:`server pools <minio-intro-server-
Each server pool consists of dedicated nodes and storage that contribute to the overall capacity of the deployment.
Once you create a server pool you cannot change its size, but you can add or remove capacity at any time by adding or decommissioning pools.
.. cond:: linux
See :ref:`Expand a MinIO deployment <expand-minio-distributed>` for more information
.. cond:: k8s
See :ref:`Expand a MinIO Tenant <minio-k8s-expand-minio-tenant>` for more information.
See :ref:`Baremetal: Expand a MinIO deployment <expand-minio-distributed>` and :ref:`Kubernetes: Expand a MinIO Tenant <minio-k8s-expand-minio-tenant>` for more information on expansion in Baremetal and Kubernetes infrastructures respectively.
For deployments which have multiple server pools, you can :ref:`decommission <minio-decommissioning>` the older pools and migrate that data to the newer pools in the deployment.
Once started, decommissioning cannot be stopped.

View File

@ -39,7 +39,7 @@ command unmounts the drive at ``/dev/sdb``:
Remove the failed drive(s) from the node hardware and replace it with known
healthy drive(s). Replacement drives *must* meet the following requirements:
- :ref:`XFS formatted <deploy-minio-distributed-prereqs-storage>` and empty.
- :ref:`XFS formatted <minio-hardware-checklist-storage>` and empty.
- Same drive type (e.g. HDD, SSD, NVMe).
- Equal or greater performance.
- Equal or greater capacity.

View File

@ -1,50 +0,0 @@
.. _minio-installation:
===============================
Deploy and Manage MinIO Tenants
===============================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
The MinIO Kubernetes Operator supports deploying and managing MinIO Tenants onto your Kubernetes cluster through the Operator Console web interface.
The following tutorials provide steps for tenant management via the Operator Console and Kustomize:
.. list-table::
:stub-columns: 1
:widths: 40 60
:width: 100%
* - :ref:`minio-k8s-deploy-minio-tenant`
- Deploy a new MinIO Tenant onto the Kubernetes cluster.
* - :ref:`minio-k8s-modify-minio-tenant`
- Modify the configuration or topology settings of a MinIO Tenant.
* - :ref:`minio-k8s-upgrade-minio-tenant`
- Upgrade the MinIO Server version used by a MinIO Tenant.
* - :ref:`minio-k8s-expand-minio-tenant`
- Increase the available storage capacity of an existing MinIO Tenant.
* - :ref:`minio-k8s-delete-minio-tenant`
- Delete an existing MinIO Tenant.
* - :ref:`minio-site-replication-overview`
- Configure two or more MinIO Tenants as peers for MinIO Site Replication
.. toctree::
:titlesonly:
:hidden:
/operations/install-deploy-manage/deploy-minio-tenant
/operations/install-deploy-manage/modify-minio-tenant
/operations/install-deploy-manage/upgrade-minio-tenant
/operations/install-deploy-manage/expand-minio-tenant
/operations/install-deploy-manage/delete-minio-tenant
/operations/install-deploy-manage/multi-site-replication

View File

@ -0,0 +1,191 @@
.. _deploy-minio-container:
===========================
Deploy MinIO as a Container
===========================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
This page documents deploying MinIO as a Container onto any operating system that supports containerized processes.
This documentation assumes installation of Docker, Podman, or a similar runtime which supports the standard container image format.
MinIO images use `Red Hat Universal Base Image 9 Micro <https://catalog.redhat.com/software/container-stacks/detail/609560d9e2b160d361d24f98>`__.
Functionality and performance of the MinIO container may be constrained by the base OS.
The procedure includes guidance for deploying Single-Node Multi-Drive (SNMD) and Single-Node Single-Drive (SNSD) topologies in support of early development and evaluation environments.
.. important::
MinIO officially supports containerized Multi-Node Multi-Drive (MNMD) "Distributed" configurations on Kubernetes infrastructures through the MinIO Kubernetes Operator.
MinIO does not support nor provide instruction for deploying distributed clusters using Docker Swarm, Docker Compose, or any other orchestrated container environment.
Considerations
--------------
Review Checklists
~~~~~~~~~~~~~~~~~
Ensure you have reviewed our published Hardware, Software, and Security checklists before attempting this procedure.
Erasure Coding Parity
~~~~~~~~~~~~~~~~~~~~~
MinIO automatically determines the default :ref:`erasure coding <minio-erasure-coding>` configuration for the cluster based on the total number of nodes and drives in the topology.
You can configure the per-object :term:`parity` setting when you set up the cluster *or* let MinIO select the default (``EC:4`` for production-grade clusters).
Parity controls the relationship between object availability and storage on disk.
Use the MinIO `Erasure Code Calculator <https://min.io/product/erasure-code-calculator>`__ for guidance in selecting the appropriate erasure code parity level for your cluster.
While you can change erasure parity settings at any time, objects written with a given parity do **not** automatically update to the new parity settings.
Container Storage
~~~~~~~~~~~~~~~~~
This procedure assumes you mount one or more dedicated storage devices to the container to act as persistent storage for MinIO.
Data stored on ephemeral container paths is lost when the container restarts or is deleted.
Use any such paths at your own risk.
Procedure
---------
1. Start the Container
This procedure provides instructions for Podman and Docker in rootfull mode.
For rootless deployments, defer to documentation by each runtime for configuration and container startup.
For all other container runtimes, follow the documentation for that runtime and specify the equivalent options, parameters, or configurations.
.. tab-set::
.. tab-item:: Podman
The following command creates a folder in your home directory, then starts the MinIO container using Podman:
.. code-block:: shell
:class: copyable
mkdir -p ~/minio/data
podman run \
-p 9000:9000 \
-p 9001:9001 \
--name minio \
-v ~/minio/data:/data \
-e "MINIO_ROOT_USER=ROOTNAME" \
-e "MINIO_ROOT_PASSWORD=CHANGEME123" \
quay.io/minio/minio server /data --console-address ":9001"
The command binds ports ``9000`` and ``9001`` to the MinIO S3 API and Web Console respectively.
The local drive ``~/minio/data`` is mounted to the ``/data`` folder on the container.
You can modify the :envvar:`MINIO_ROOT_USER` and :envvar:`MINIO_ROOT_PASSWORD` variables to change the root login as needed.
For multi-drive deployments, bind each local drive or folder to a sequentially-numbered path on the container.
You can then modify the :mc:`minio server` startup to specify those paths:
.. code-block:: shell
:class: copyable
mkdir -p /mnt/drive-{1..4}
podman run \
-p 9000:9000 \
-p 9001:9001 \
--name minio \
-v /mnt/drive-1:/mnt/drive-1 \
-v /mnt/drive-2:/mnt/drive-2 \
-v /mnt/drive-3:/mnt/drive-3 \
-v /mnt/drive-4:/mnt/drive-4 \
-e "MINIO_ROOT_USER=ROOTNAME" \
-e "MINIO_ROOT_PASSWORD=CHANGEME123" \
quay.io/minio/minio server http://localhost:9000/mnt/drive-{1...4} --console-address ":9001"
For Windows hosts, specify the local folder path using Windows filesystem semantics ``C:\minio\:/data``.
.. tab-item:: Docker
The following command creates a folder in your home directory, then starts the MinIO container using Docker:
.. code-block:: shell
:class: copyable
mkdir -p ~/minio/data
docker run \
-p 9000:9000 \
-p 9001:9001 \
--name minio \
-v ~/minio/data:/data \
-e "MINIO_ROOT_USER=ROOTNAME" \
-e "MINIO_ROOT_PASSWORD=CHANGEME123" \
quay.io/minio/minio server /data --console-address ":9001"
The command binds ports ``9000`` and ``9001`` to the MinIO S3 API and Web Console respectively.
The local drive ``~/minio/data`` is mounted to the ``/data`` folder on the container.
You can modify the :envvar:`MINIO_ROOT_USER` and :envvar:`MINIO_ROOT_PASSWORD` variables to change the root login as needed.
For multi-drive deployments, bind each local drive or folder to a sequentially-numbered path on the container.
You can then modify the :mc:`minio server` startup to specify those paths:
.. code-block:: shell
:class: copyable
mkdir -p /mnt/drive-{1..4}
docker run \
-p 9000:9000 \
-p 9001:9001 \
--name minio \
-v /mnt/drive-1:/mnt/drive-1 \
-v /mnt/drive-2:/mnt/drive-2 \
-v /mnt/drive-3:/mnt/drive-3 \
-v /mnt/drive-4:/mnt/drive-4 \
-e "MINIO_ROOT_USER=ROOTNAME" \
-e "MINIO_ROOT_PASSWORD=CHANGEME123" \
quay.io/minio/minio server http://localhost:9000/mnt/drive-{1...4} --console-address ":9001"
For Windows hosts, specify the local folder path using Windows filesystem semantics ``C:\minio\:/data``.
2. Connect to the Deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. tab-set::
.. tab-item:: Console
Open your browser to http://localhost:9001 to open the :ref:`MinIO Console <minio-console>` login page.
Log in with the :guilabel:`MINIO_ROOT_USER` and :guilabel:`MINIO_ROOT_PASSWORD`
from the previous step.
.. image:: /images/minio-console/console-login.png
:width: 600px
:alt: MinIO Console Login Page
:align: center
You can use the MinIO Console for general administration tasks like Identity and Access Management, Metrics and Log Monitoring, or Server Configuration.
Each MinIO server includes its own embedded MinIO Console.
.. tab-item:: CLI
Follow the :ref:`installation instructions <mc-install>` for ``mc`` on your local host.
Run ``mc --version`` to verify the installation.
Once installed, create an alias for the MinIO deployment:
.. code-block:: shell
:class: copyable
mc alias set myminio http://localhost:9000 USERNAME PASSWORD
Change the hostname, username, and password to reflect your deployment.

View File

@ -0,0 +1,315 @@
.. _deploy-minio-macos:
=====================
Deploy MinIO on MacOS
=====================
.. default-domain:: minio
.. container:: extlinks-video
- `Object Storage Essentials <https://www.youtube.com/playlist?list=PLFOIsHSSYIK3WitnqhqfpeZ6fRFKHxIr7>`__
- `How to Connect to MinIO with JavaScript <https://www.youtube.com/watch?v=yUR4Fvx0D3E&list=PLFOIsHSSYIK3Dd3Y_x7itJT1NUKT5SxDh&index=5>`__
This page documents deploying MinIO onto Apple MacOS hosts.
MinIO officially supports macOS releases within Apple's service window, which is typically 3 years from initial release.
At the time of writing, that includes:
- macOS 14 (Sonoma) (**Recommended**)
- macOS 13 (Ventura)
- macOS 12 (Monterey)
MinIO *may* run on older or out-of-support macOS releases, with limited support or troubleshooting from either MinIO or Apple.
MinIO supports both Intel and ARM-based macOS hardware and provides distinct binaries for each architecture.
Ensure you download the correct binary as per the documentation for your host system.
The procedure includes guidance for deploying Single-Node Multi-Drive (SNMD) and Single-Node Single-Drive (SNSD) topologies in support of early development and evaluation environments.
MinIO does not officially support Multi-Node Multi-Drive (MNMD) "Distributed" configurations on MacOS hosts.
Considerations
--------------
Review Checklists
~~~~~~~~~~~~~~~~~
Ensure you have reviewed our published Hardware, Software, and Security checklists before attempting this procedure.
Erasure Coding Parity
~~~~~~~~~~~~~~~~~~~~~
MinIO automatically determines the default :ref:`erasure coding <minio-erasure-coding>` configuration for the cluster based on the total number of nodes and drives in the topology.
You can configure the per-object :term:`parity` setting when you set up the cluster *or* let MinIO select the default (``EC:4`` for production-grade clusters).
Parity controls the relationship between object availability and storage on disk.
Use the MinIO `Erasure Code Calculator <https://min.io/product/erasure-code-calculator>`__ for guidance in selecting the appropriate erasure code parity level for your cluster.
While you can change erasure parity settings at any time, objects written with a given parity do **not** automatically update to the new parity settings.
Procedure
---------
1. Download the MinIO Binary
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. tab-set::
.. tab-item:: Homebrew
Open a Terminal and run the following command to install the latest stable MinIO package using `Homebrew <https://brew.sh>`_.
.. code-block:: shell
:class: copyable
brew install minio/stable/minio
.. important::
If you previously installed the MinIO server using ``brew install minio``, then we recommend that you reinstall from ``minio/stable/minio`` instead.
.. code-block:: shell
:class: copyable
brew uninstall minio
brew install minio/stable/minio
.. tab-item:: Binary - arm64
Open a Terminal, then use the following commands to download the latest stable MinIO binary, set it to executable, and install it to the system ``$PATH``:
.. code-block:: shell
:class: copyable
curl -O https://dl.min.io/server/minio/release/darwin-arm64/minio
chmod +x ./minio
sudo mv ./minio /usr/local/bin/
.. tab-item:: Binary - amd64
Open a Terminal, then use the following commands to download the latest stable MinIO binary, set it to executable, and install it to the system ``$PATH``:
.. code-block:: shell
:class: copyable
curl -O https://dl.min.io/server/minio/release/darwin-amd64/minio
chmod +x ./minio
sudo mv ./minio /usr/local/bin/
2. Enable TLS Connectivity
~~~~~~~~~~~~~~~~~~~~~~~~~~
You can skip this step to deploy without TLS enabled.
MinIO strongly recommends *against* non-TLS deployments outside of early development.
Create or provide :ref:`Transport Layer Security (TLS) <minio-tls>` certificates to MinIO to automatically enable HTTPS-secured connections between the server and clients.
MinIO expects the default certificate names of ``private.key`` and ``public.crt`` for the private and public keys respectively.
Place the certificates in a dedicated directory:
.. code-block:: shell
:class: copyable
mkdir -p /opt/minio/certs
cp private.key /opt/minio/certs
cp public.crt /opt/minio/certs
MinIO verifies client certificates against the OS/System's default list of trusted Certificate Authorities.
To enable verification of third-party or internally-signed certificates, place the CA file in the ``/opt/minio/certs/CAs`` folder.
The CA file should include the full chain of trust from leaf to root to ensure successful verification.
For more specific guidance on configuring MinIO for TLS, including multi-domain support via Server Name Indication (SNI), see :ref:`minio-tls`.
.. dropdown:: Certificates for Early Development
For local testing or development environments, you can use the MinIO :minio-git:`certgen <certgen>` to mint self-signed certificates.
For example, the following command generates a self-signed certificate with a set of IP and DNS Subject Alternate Names (SANs) associated to the MinIO Server hosts:
.. code-block:: shell
certgen -host "localhost,minio-*.example.net"
Place the generated ``public.crt`` and ``private.key`` into the ``/opt/minio/certs`` directory to enable TLS for the MinIO deployment.
Applications can use the ``public.crt`` as a trusted Certificate Authority to allow connections to the MinIO deployment without disabling certificate validation.
3. Create the MinIO Environment File
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Create an environment file at ``/etc/default/minio``.
The MinIO service uses this file as the source of all :ref:`environment variables <minio-server-environment-variables>` used by MinIO *and* the ``minio.service`` file.
Modify the example to reflect your deployment topology.
.. tab-set::
.. tab-item:: Single-Node Multi-Drive
Use Single-Node Multi-Drive deployments in development and evaluation environments.
You can also use them for smaller storage workloads which can tolerate data loss or unavailability due to node downtime.
.. code-block:: shell
:class: copyable
# Set the volumes MinIO uses at startup
# The command uses MinIO expansion notation {x...y} to denote a
# sequential series.
#
# The following specifies a single host with 4 drives at the specified location
#
# The command includes the port that the MinIO server listens on
# (default 9000).
# If you run without TLS, change https -> http
MINIO_VOLUMES="https://minio1.example.net:9000/mnt/drive{1...4}/minio"
# Set all MinIO server command-line options
#
# The following explicitly sets the MinIO Console listen address to
# port 9001 on all network interfaces.
# The default behavior is dynamic port selection.
MINIO_OPTS="--console-address :9001 --certs-dir /opt/minio/certs"
# Set the root username.
# This user has unrestricted permissions to perform S3 and
# administrative API operations on any resource in the deployment.
#
# Defer to your organization's requirements for superadmin user name.
MINIO_ROOT_USER=minioadmin
# Set the root password
#
# Use a long, random, unique string that meets your organization's
# requirements for passwords.
MINIO_ROOT_PASSWORD=minio-secret-key-CHANGE-ME
.. tab-item:: Single-Node Single-Drive
Use Single-Node Single-Drive ("Standalone") deployments in early development and evaluation environments.
MinIO does not recommend Standalone deployments in production, as the loss of the node or its storage medium results in data loss.
.. code-block:: shell
:class: copyable
# Set the volume MinIO uses at startup
#
# The following specifies the drive or folder path
MINIO_VOLUMES="/mnt/drive1/minio"
# Set all MinIO server command-line options
#
# The following explicitly sets the MinIO Console listen address to
# port 9001 on all network interfaces.
# The default behavior is dynamic port selection.
MINIO_OPTS="--console-address :9001 --certs-dir /opt/minio/certs"
# Set the root username.
# This user has unrestricted permissions to perform S3 and
# administrative API operations on any resource in the deployment.
#
# Defer to your organization's requirements for superadmin user name.
MINIO_ROOT_USER=minioadmin
# Set the root password
#
# Use a long, random, unique string that meets your organization's
# requirements for passwords.
MINIO_ROOT_PASSWORD=minio-secret-key-CHANGE-ME
Specify any other :ref:`environment variables <minio-server-environment-variables>` or server command-line options as required by your deployment.
4. Start the MinIO Server
~~~~~~~~~~~~~~~~~~~~~~~~~
The following command starts the MinIO Server attached to the current terminal/shell window:
.. code-block:: shell
:class: copyable
export MINIO_CONFIG_ENV_FILE=/etc/default/minio
minio server --console-address :9001
The command output resembles the following:
.. code-block:: shell
MinIO Object Storage Server
Copyright: 2015-2024 MinIO, Inc.
License: GNU AGPLv3 - https://www.gnu.org/licenses/agpl-3.0.html
Version: RELEASE.2024-06-07T16-42-07Z (go1.22.4 linux/amd64)
API: https://minio-1.example.net:9000 https://203.0.113.10:9000 https://127.0.0.1:9000
RootUser: minioadmin
RootPass: minioadmin
WebUI: https://minio-1.example.net:9001 https://203.0.113.10:9001 https://127.0.0.1:9001
RootUser: minioadmin
RootPass: minioadmin
CLI: https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart
$ mc alias set 'myminio' 'https://minio-1.example.net:9000' 'minioadmin' 'minioadmin'
Docs: https://min.io/docs/minio/linux/index.html
Status: 1 Online, 0 Offline.
The ``API`` block lists the network interfaces and port on which clients can access the MinIO S3 API.
The ``WebUI`` block lists the network interfaces and port on which clients can access the MinIO Web Console.
To run the MinIO server process in the background or as a daemon, defer to your macOS documentation for best practices and procedures.
5. Connect to the Deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. tab-set::
.. tab-item:: Console
Open your browser and access any of the MinIO hostnames at port ``:9001`` to open the :ref:`MinIO Console <minio-console>` login page.
For example, ``https://minio1.example.com:9001``.
Log in with the :guilabel:`MINIO_ROOT_USER` and :guilabel:`MINIO_ROOT_PASSWORD`
from the previous step.
.. image:: /images/minio-console/console-login.png
:width: 600px
:alt: MinIO Console Login Page
:align: center
You can use the MinIO Console for general administration tasks like Identity and Access Management, Metrics and Log Monitoring, or Server Configuration.
Each MinIO server includes its own embedded MinIO Console.
.. tab-item:: CLI
Follow the :ref:`installation instructions <mc-install>` for ``mc`` on your local host.
Run ``mc --version`` to verify the installation.
If your MinIO deployment uses third-party or self-signed TLS certificates, copy the :abbr:`CA (Certificate Authority)` files to ``~/.mc/certs/CAs`` to allow ``mc`` to validate the deployment's TLS certificates.
Once installed, create an alias for the MinIO deployment:
.. code-block:: shell
:class: copyable
mc alias set myminio https://minio-1.example.net:9000 USERNAME PASSWORD
Change the hostname, username, and password to reflect your deployment.
The hostname can be any MinIO node in the deployment.
You can also specify the hostname of a load balancer, reverse proxy, or similar network control plane that handles connections to the deployment.
6. Next Steps
~~~~~~~~~~~~~
TODO

View File

@ -0,0 +1,438 @@
.. _deploy-minio-rhel:
============================
Deploy MinIO on RedHat Linux
============================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
This page documents deploying MinIO on RedHat Linux operating systems, including distributions that are binary-compatible with RHEL.
This page makes no distinction or special remarks between RHEL and those distributions, and guidance given for RHEL can typically be applied to those distributions.
MinIO strongly recommends that production deployments use RHEL versions in the **Full Support** or **Maintenance Support** phases of the Red Hat life cycle.
At the time of writing, that includes:
- RHEL 9.5+ (**Recommended**)
- RHEL 8.10+
Your organization should have the necessary service contracts with Red Hat to ensure end-to-end supportability of your deployments.
MinIO *may* run on versions of RHEL no longer supported by Red Hat, with limited support or troubleshooting from either MinIO or Red Hat.
The procedure focuses on production-grade Multi-Node Multi-Drive (MNMD) "Distributed" configurations.
|MNMD| deployments provide enterprise-grade performance, availability, and scalability and are the recommended topology for all production workloads.
The procedure includes guidance for deploying Single-Node Multi-Drive (SNMD) and Single-Node Single-Drive (SNSD) topologies in support of early development and evaluation environments.
Considerations
--------------
Review Checklists
~~~~~~~~~~~~~~~~~
Ensure you have reviewed our published Hardware, Software, and Security checklists before attempting this procedure.
Erasure Coding Parity
~~~~~~~~~~~~~~~~~~~~~
MinIO automatically determines the default :ref:`erasure coding <minio-erasure-coding>` configuration for the cluster based on the total number of nodes and drives in the topology.
You can configure the per-object :term:`parity` setting when you set up the cluster *or* let MinIO select the default (``EC:4`` for production-grade clusters).
Parity controls the relationship between object availability and storage on disk.
Use the MinIO `Erasure Code Calculator <https://min.io/product/erasure-code-calculator>`__ for guidance in selecting the appropriate erasure code parity level for your cluster.
While you can change erasure parity settings at any time, objects written with a given parity do **not** automatically update to the new parity settings.
Capacity-Based Planning
~~~~~~~~~~~~~~~~~~~~~~~
MinIO recommends planning storage capacity sufficient to store **at least** 2 years of data before reaching 70% usage.
Performing :ref:`server pool expansion <expand-minio-distributed>` more frequently or on a "just-in-time" basis generally indicates an architecture or planning issue.
For example, consider an application suite expected to produce at least 100 TiB of data per year and a 3 year target before expansion.
By ensuring the deployment has ~500TiB of usable storage up front, the cluster can safely meet the 70% threshold with additional buffer for growth in data storage output per year.
Consider using the MinIO `Erasure Code Calculator <https://min.io/product/erasure-code-calculator>`__ for guidance in planning capacity around specific erasure code settings.
Procedure
---------
1. Download the MinIO RPM
~~~~~~~~~~~~~~~~~~~~~~~~~
MinIO provides builds for the following architectures:
- AMD64
- ARM64
- PowerPC 64 LE
- S390X
Use the following commands to download the latest stable MinIO RPM for your host architecture and install it.
.. tab-set::
.. tab-item:: AMD64
.. code-block:: shell
:class: copyable
:substitutions:
wget |minio-rpm| -O minio.rpm
sudo dnf install minio.rpm
.. tab-item:: ARM64
.. code-block:: shell
:class: copyable
:substitutions:
wget |minio-rpm-arm64| -O minio.rpm
sudo dnf install minio.rpm
.. tab-item:: PPC64LE
.. code-block:: shell
:class: copyable
:substitutions:
wget |minio-rpm-ppc64le| -O minio.rpm
sudo dnf install minio.rpm
2. Review the ``systemd`` Service File
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``.rpm`` package installs the following `systemd <https://www.freedesktop.org/wiki/Software/systemd/>`__ service file to ``/usr/lib/systemd/system/minio.service``:
.. code-block:: shell
:class: copyable
[Unit]
Description=MinIO
Documentation=https://min.io/docs/minio/linux/index.html
Wants=network-online.target
After=network-online.target
AssertFileIsExecutable=/usr/local/bin/minio
[Service]
WorkingDirectory=/usr/local
User=minio-user
Group=minio-user
ProtectProc=invisible
EnvironmentFile=-/etc/default/minio
ExecStartPre=/bin/bash -c "if [ -z \"${MINIO_VOLUMES}\" ]; then echo \"Variable MINIO_VOLUMES not set in /etc/default/minio\"; exit 1; fi"
ExecStart=/usr/local/bin/minio server $MINIO_OPTS $MINIO_VOLUMES
# MinIO RELEASE.2023-05-04T21-44-30Z adds support for Type=notify (https://www.freedesktop.org/software/systemd/man/systemd.service.html#Type=)
# This may improve systemctl setups where other services use `After=minio.server`
# Uncomment the line to enable the functionality
# Type=notify
# Let systemd restart this service always
Restart=always
# Specifies the maximum file descriptor number that can be opened by this process
LimitNOFILE=65536
# Specifies the maximum number of threads this process can create
TasksMax=infinity
# Disable timeout logic and wait until process is stopped
TimeoutStopSec=infinity
SendSIGKILL=no
[Install]
WantedBy=multi-user.target
# Built for ${project.name}-${project.version} (${project.name})
3. Create a User and Group for MinIO
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``minio.service`` file runs as the ``minio-user`` User and Group by default.
You can create the user and group using the ``groupadd`` and ``useradd`` commands.
The following example creates the user, group, and sets permissions to access the folder paths intended for use by MinIO.
These commands typically require root (``sudo``) permissions.
.. code-block:: shell
:class: copyable
groupadd -r minio-user
useradd -M -r -g minio-user minio-user
The command above creates the user **without** a home directory, as is typical for system service accounts.
You **must** ``chown`` the drive paths you intend to use with MinIO.
If the ``minio-user`` user or group cannot read, write, or list contents of any drive, the MinIO process returns errors on startup.
For example, the following command sets ``minio-user:minio-user`` as the user-group owner of all drives at ``/mnt/drives-n`` where ``n`` is between 1 and 16 inclusive:
.. code-block:: shell
:class: copyable
chown -R minio-user:minio-user /mnt/drives-{1..16}
4. Enable TLS Connectivity
~~~~~~~~~~~~~~~~~~~~~~~~~~
Create or provide :ref:`Transport Layer Security (TLS) <minio-tls>` certificates to MinIO to automatically enable HTTPS-secured connections between the server and clients.
Place the certificates in a directory accessible by the ``minio-user`` user/group:
.. code-block:: shell
:class: copyable
mkdir -p /opt/minio/certs
chown -R minio-user:minio-user /opt/minio/certs
cp private.key /opt/minio/certs
cp public.crt /opt/minio/certs
For local testing or development environments, you can use the MinIO :minio-git:`certgen <certgen>` to mint self-signed certificates.
For example, the following command generates a self-signed certificate with a set of IP and DNS Subject Alternate Names (SANs) associated to the MinIO Server hosts:
.. code-block:: shell
certgen -host "localhost,minio-*.example.net"
Place the generated ``public.crt`` and ``private.key`` into the ``/opt/minio/certs`` directory to enable TLS for the MinIO deployment.
Applications can use the ``public.crt`` as a trusted Certificate Authority to allow connections to the MinIO deployment without disabling certificate validation.
When MinIO runs with TLS enabled, it also verifies connecting client certificates against the OS list of trusted Certificate Authorities.
To enable verification of third-party or internally-signed certificates, place the CA file in the ``/opt/minio/certs/CAs`` folder.
The CA file should include the full chain of trust from leaf to root to ensure successful verification.
For more specific guidance on configuring MinIO for TLS, including multi-domain support via Server Name Indication (SNI), see :ref:`minio-tls`.
You can optionally skip this step to deploy without TLS enabled. MinIO strongly recommends *against* non-TLS deployments outside of early development.
5. Create the MinIO Environment File
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Create an environment file at ``/etc/default/minio``.
The MinIO service uses this file as the source of all :ref:`environment variables <minio-server-environment-variables>` used by MinIO *and* the ``minio.service`` file.
Modify the example to reflect your deployment topology.
.. tab-set::
.. tab-item:: Multi-Node Multi-Drive
Use Multi-Node Multi-Drive ("Distributed") deployment topologies in production environments.
.. code-block:: shell
:class: copyable
# Set the hosts and volumes MinIO uses at startup
# The command uses MinIO expansion notation {x...y} to denote a
# sequential series.
#
# The following example covers four MinIO hosts
# with 4 drives each at the specified hostname and drive locations.
#
# The command includes the port that each MinIO server listens on
# (default 9000).
# If you run without TLS, change https -> http
MINIO_VOLUMES="https://minio{1...4}.example.net:9000/mnt/disk{1...4}/minio"
# Set all MinIO server command-line options
#
# The following explicitly sets the MinIO Console listen address to
# port 9001 on all network interfaces.
# The default behavior is dynamic port selection.
MINIO_OPTS="--console-address :9001 --certs-dir /opt/minio/certs"
# Set the root username.
# This user has unrestricted permissions to perform S3 and
# administrative API operations on any resource in the deployment.
#
# Defer to your organization's requirements for superadmin user name.
MINIO_ROOT_USER=minioadmin
# Set the root password
#
# Use a long, random, unique string that meets your organization's
# requirements for passwords.
MINIO_ROOT_PASSWORD=minio-secret-key-CHANGE-ME
.. tab-item:: Single-Node Multi-Drive
Use Single-Node Multi-Drive deployments in development and evaluation environments.
You can also use them for smaller storage workloads which can tolerate data loss or unavailability due to node downtime.
.. code-block:: shell
:class: copyable
# Set the volumes MinIO uses at startup
# The command uses MinIO expansion notation {x...y} to denote a
# sequential series.
#
# The following specifies a single host with 4 drives at the specified location
#
# The command includes the port that the MinIO server listens on
# (default 9000).
# If you run without TLS, change https -> http
MINIO_VOLUMES="https://minio1.example.net:9000/mnt/drive{1...4}/minio"
# Set all MinIO server command-line options
#
# The following explicitly sets the MinIO Console listen address to
# port 9001 on all network interfaces.
# The default behavior is dynamic port selection.
MINIO_OPTS="--console-address :9001 --certs-dir /opt/minio/certs"
# Set the root username.
# This user has unrestricted permissions to perform S3 and
# administrative API operations on any resource in the deployment.
#
# Defer to your organization's requirements for superadmin user name.
MINIO_ROOT_USER=minioadmin
# Set the root password
#
# Use a long, random, unique string that meets your organization's
# requirements for passwords.
MINIO_ROOT_PASSWORD=minio-secret-key-CHANGE-ME
.. tab-item:: Single-Node Single-Drive
Use Single-Node Single-Drive ("Standalone") deployments in early development and evaluation environments.
MinIO does not recommend Standalone deployments in production, as the loss of the node or its storage medium results in data loss.
.. code-block:: shell
:class: copyable
# Set the volume MinIO uses at startup
#
# The following specifies the drive or folder path
MINIO_VOLUMES="/mnt/drive1/minio"
# Set all MinIO server command-line options
#
# The following explicitly sets the MinIO Console listen address to
# port 9001 on all network interfaces.
# The default behavior is dynamic port selection.
MINIO_OPTS="--console-address :9001 --certs-dir /opt/minio/certs"
# Set the root username.
# This user has unrestricted permissions to perform S3 and
# administrative API operations on any resource in the deployment.
#
# Defer to your organization's requirements for superadmin user name.
MINIO_ROOT_USER=minioadmin
# Set the root password
#
# Use a long, random, unique string that meets your organization's
# requirements for passwords.
MINIO_ROOT_PASSWORD=minio-secret-key-CHANGE-ME
Specify any other :ref:`environment variables <minio-server-environment-variables>` or server command-line options as required by your deployment.
For distributed deployments, all nodes **must** have matching ``/etc/default/minio`` environment files.
Use a utility such as ``shasum -a 256 /etc/default/minio`` on each node to verify an exact match across all nodes.
6. Start the MinIO Deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Use ``systemctl start minio`` to start each node in the deployment.
You can track the status of the startup using ``journalctl -u minio`` on each node.
On successful startup, the MinIO process emits a summary of the deployment that resembles the following output:
.. code-block:: shell
MinIO Object Storage Server
Copyright: 2015-2024 MinIO, Inc.
License: GNU AGPLv3 - https://www.gnu.org/licenses/agpl-3.0.html
Version: RELEASE.2024-06-07T16-42-07Z (go1.22.4 linux/amd64)
API: https://minio-1.example.net:9000 https://203.0.113.10:9000 https://127.0.0.1:9000
RootUser: minioadmin
RootPass: minioadmin
WebUI: https://minio-1.example.net:9001 https://203.0.113.10:9001 https://127.0.0.1:9001
RootUser: minioadmin
RootPass: minioadmin
CLI: https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart
$ mc alias set 'myminio' 'https://minio-1.example.net:9000' 'minioadmin' 'minioadmin'
Docs: https://min.io/docs/minio/linux/index.html
Status: 16 Online, 0 Offline.
You may see increased log churn as the cluster starts up and synchronizes.
Common reasons for startup failure include:
- The MinIO process does not have read-write-list access to the specified drives
- The drives are not empty or contain non-MinIO data
- The drives are not formatted or mounted properly
- One or more hosts are not reachable over the network
Following our checklists typically mitigates the risk of encountering those or similar issues.
7. Connect to the Deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. tab-set::
.. tab-item:: Console
Open your browser and access any of the MinIO hostnames at port ``:9001`` to open the :ref:`MinIO Console <minio-console>` login page.
For example, ``https://minio1.example.com:9001``.
Log in with the :guilabel:`MINIO_ROOT_USER` and :guilabel:`MINIO_ROOT_PASSWORD`
from the previous step.
.. image:: /images/minio-console/console-login.png
:width: 600px
:alt: MinIO Console Login Page
:align: center
You can use the MinIO Console for general administration tasks like Identity and Access Management, Metrics and Log Monitoring, or Server Configuration.
Each MinIO server includes its own embedded MinIO Console.
.. tab-item:: CLI
Follow the :ref:`installation instructions <mc-install>` for ``mc`` on your local host.
Run ``mc --version`` to verify the installation.
If your MinIO deployment uses third-party or self-signed TLS certificates, copy the :abbr:`CA (Certificate Authority)` files to ``~/.mc/certs/CAs`` to allow ``mc`` to verify the deployment's TLS certificates.
Once installed, create an alias for the MinIO deployment:
.. code-block:: shell
:class: copyable
mc alias set myminio https://minio-1.example.net:9000 USERNAME PASSWORD
Change the hostname, username, and password to reflect your deployment.
The hostname can be any MinIO node in the deployment.
You can also specify the hostname of a load balancer, reverse proxy, or similar network control plane that handles connections to the deployment.
8. Next Steps
~~~~~~~~~~~~~
TODO

View File

@ -0,0 +1,421 @@
.. _deploy-minio-ubuntu:
============================
Deploy MinIO on Ubuntu Linux
============================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
.. container:: extlinks-video
- `Object Storage Essentials <https://www.youtube.com/playlist?list=PLFOIsHSSYIK3WitnqhqfpeZ6fRFKHxIr7>`__
- `How to Connect to MinIO with JavaScript <https://www.youtube.com/watch?v=yUR4Fvx0D3E&list=PLFOIsHSSYIK3Dd3Y_x7itJT1NUKT5SxDh&index=5>`__
This page documents deploying MinIO on Ubuntu Linux operating systems.
MinIO officially supports Ubuntu Long Term Support (LTS) releases in the **Standard** or **Ubuntu Pro** support phases of the Ubuntu life cycle.
MinIO strongly recommends only those releases that include the Linux 5.X kernel and above for best performance.
At the time of writing, that includes:
- Ubuntu 24.04+ LTS (Noble Numbat) (**Recommended**)
- Ubuntu 22.04+ LTS (Jammy Jellyfish)
- Ubuntu 20.04+ LTS (Focal Fossa)
- Ubuntu 18.04.5 LTS (Bionic Beaver) (**Ubuntu Pro Only**)
The above list assumes your organization has the necessary service contracts with Ubuntu to ensure end-to-end supportability throughout the release's lifespan.
MinIO *may* run on versions of Ubuntu that use older kernels, are out of support, or are in legacy support phases, with limited support or troubleshooting from either MinIO or Canonical.
The procedure focuses on production-grade Multi-Node Multi-Drive (MNMD) "Distributed" configurations.
|MNMD| deployments provide enterprise-grade performance, availability, and scalability and are the recommended topology for all production workloads.
The procedure includes guidance for deploying Single-Node Multi-Drive (SNMD) and Single-Node Single-Drive (SNSD) topologies in support of early development and evaluation environments.
Considerations
--------------
Review Checklists
~~~~~~~~~~~~~~~~~
Ensure you have reviewed our published Hardware, Software, and Security checklists before attempting this procedure.
Erasure Coding Parity
~~~~~~~~~~~~~~~~~~~~~
MinIO automatically determines the default :ref:`erasure coding <minio-erasure-coding>` configuration for the cluster based on the total number of nodes and drives in the topology.
You can configure the per-object :term:`parity` setting when you set up the cluster *or* let MinIO select the default (``EC:4`` for production-grade clusters).
Parity controls the relationship between object availability and storage on disk.
Use the MinIO `Erasure Code Calculator <https://min.io/product/erasure-code-calculator>`__ for guidance in selecting the appropriate erasure code parity level for your cluster.
While you can change erasure parity settings at any time, objects written with a given parity do **not** automatically update to the new parity settings.
Capacity-Based Planning
~~~~~~~~~~~~~~~~~~~~~~~
MinIO recommends planning storage capacity sufficient to store **at least** 2 years of data before reaching 70% usage.
Performing :ref:`server pool expansion <expand-minio-distributed>` more frequently or on a "just-in-time" basis generally indicates an architecture or planning issue.
For example, consider an application suite expected to produce at least 100 TiB of data per year and a 3 year target before expansion.
By ensuring the deployment has ~500TiB of usable storage up front, the cluster can safely meet the 70% threshold with additional buffer for growth in data storage output per year.
Consider using the MinIO `Erasure Code Calculator <https://min.io/product/erasure-code-calculator>`__ for guidance in planning capacity around specific erasure code settings.
Procedure
---------
1. Download the MinIO DEB
~~~~~~~~~~~~~~~~~~~~~~~~~
Use the following commands to download the latest stable MinIO DEB and install it.
.. code-block:: shell
:class: copyable
:substitutions:
wget |minio-deb| -O minio.deb
sudo dpkg -i minio.deb
2. Review the ``systemd`` Service File
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``.deb`` package installs the following `systemd <https://www.freedesktop.org/wiki/Software/systemd/>`__ service file to ``/usr/lib/systemd/system/minio.service``:
.. code-block:: shell
:class: copyable
[Unit]
Description=MinIO
Documentation=https://min.io/docs/minio/linux/index.html
Wants=network-online.target
After=network-online.target
AssertFileIsExecutable=/usr/local/bin/minio
[Service]
WorkingDirectory=/usr/local
User=minio-user
Group=minio-user
ProtectProc=invisible
EnvironmentFile=-/etc/default/minio
ExecStartPre=/bin/bash -c "if [ -z \"${MINIO_VOLUMES}\" ]; then echo \"Variable MINIO_VOLUMES not set in /etc/default/minio\"; exit 1; fi"
ExecStart=/usr/local/bin/minio server $MINIO_OPTS $MINIO_VOLUMES
# MinIO RELEASE.2023-05-04T21-44-30Z adds support for Type=notify (https://www.freedesktop.org/software/systemd/man/systemd.service.html#Type=)
# This may improve systemctl setups where other services use `After=minio.server`
# Uncomment the line to enable the functionality
# Type=notify
# Let systemd restart this service always
Restart=always
# Specifies the maximum file descriptor number that can be opened by this process
LimitNOFILE=65536
# Specifies the maximum number of threads this process can create
TasksMax=infinity
# Disable timeout logic and wait until process is stopped
TimeoutStopSec=infinity
SendSIGKILL=no
[Install]
WantedBy=multi-user.target
# Built for ${project.name}-${project.version} (${project.name})
3. Create a User and Group for MinIO
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``minio.service`` file runs as the ``minio-user`` User and Group by default.
You can create the user and group using the ``groupadd`` and ``useradd`` commands.
The following example creates the user, group, and sets permissions to access the folder paths intended for use by MinIO.
These commands typically require root (``sudo``) permissions.
.. code-block:: shell
:class: copyable
groupadd -r minio-user
useradd -M -r -g minio-user minio-user
The command above creates the user **without** a home directory, as is typical for system service accounts.
You **must** ``chown`` the drive paths you intend to use with MinIO.
If the ``minio-user`` user or group cannot read, write, or list contents of any drive, the MinIO process returns errors on startup.
For example, the following command sets ``minio-user:minio-user`` as the user-group owner of all drives at ``/mnt/drives-n`` where ``n`` is between 1 and 16 inclusive:
.. code-block:: shell
:class: copyable
chown -R minio-user:minio-user /mnt/drives-{1..16}
4. Enable TLS Connectivity
~~~~~~~~~~~~~~~~~~~~~~~~~~
You can skip this step to deploy without TLS enabled.
MinIO strongly recommends *against* non-TLS deployments outside of early development.
Create or provide :ref:`Transport Layer Security (TLS) <minio-tls>` certificates to MinIO to automatically enable HTTPS-secured connections between the server and clients.
MinIO expects the default certificate names of ``private.key`` and ``public.crt`` for the private and public keys respectively.
Place the certificates in a directory accessible by the ``minio-user`` user/group:
.. code-block:: shell
:class: copyable
mkdir -p /opt/minio/certs
chown -R minio-user:minio-user /opt/minio/certs
cp private.key /opt/minio/certs
cp public.crt /opt/minio/certs
MinIO verifies client certificates against the OS/System's default list of trusted Certificate Authorities.
To enable verification of third-party or internally-signed certificates, place the CA file in the ``/opt/minio/certs/CAs`` folder.
The CA file should include the full chain of trust from leaf to root to ensure successful verification.
For more specific guidance on configuring MinIO for TLS, including multi-domain support via Server Name Indication (SNI), see :ref:`minio-tls`.
.. dropdown:: Certificates for Early Development
For local testing or development environments, you can use the MinIO :minio-git:`certgen <certgen>` to mint self-signed certificates.
For example, the following command generates a self-signed certificate with a set of IP and DNS Subject Alternate Names (SANs) associated to the MinIO Server hosts:
.. code-block:: shell
certgen -host "localhost,minio-*.example.net"
Place the generated ``public.crt`` and ``private.key`` into the ``/opt/minio/certs`` directory to enable TLS for the MinIO deployment.
Applications can use the ``public.crt`` as a trusted Certificate Authority to allow connections to the MinIO deployment without disabling certificate validation.
5. Create the MinIO Environment File
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Create an environment file at ``/etc/default/minio``.
The MinIO service uses this file as the source of all :ref:`environment variables <minio-server-environment-variables>` used by MinIO *and* the ``minio.service`` file.
Modify the example to reflect your deployment topology.
.. tab-set::
.. tab-item:: Multi-Node Multi-Drive
Use Multi-Node Multi-Drive ("Distributed") deployment topologies in production environments.
.. code-block:: shell
:class: copyable
# Set the hosts and volumes MinIO uses at startup
# The command uses MinIO expansion notation {x...y} to denote a
# sequential series.
#
# The following example covers four MinIO hosts
# with 4 drives each at the specified hostname and drive locations.
#
# The command includes the port that each MinIO server listens on
# (default 9000).
# If you run without TLS, change https -> http
MINIO_VOLUMES="https://minio{1...4}.example.net:9000/mnt/disk{1...4}/minio"
# Set all MinIO server command-line options
#
# The following explicitly sets the MinIO Console listen address to
# port 9001 on all network interfaces.
# The default behavior is dynamic port selection.
MINIO_OPTS="--console-address :9001 --certs-dir /opt/minio/certs"
# Set the root username.
# This user has unrestricted permissions to perform S3 and
# administrative API operations on any resource in the deployment.
#
# Defer to your organization's requirements for superadmin user name.
MINIO_ROOT_USER=minioadmin
# Set the root password
#
# Use a long, random, unique string that meets your organization's
# requirements for passwords.
MINIO_ROOT_PASSWORD=minio-secret-key-CHANGE-ME
.. tab-item:: Single-Node Multi-Drive
Use Single-Node Multi-Drive deployments in development and evaluation environments.
You can also use them for smaller storage workloads which can tolerate data loss or unavailability due to node downtime.
.. code-block:: shell
:class: copyable
# Set the volumes MinIO uses at startup
# The command uses MinIO expansion notation {x...y} to denote a
# sequential series.
#
# The following specifies a single host with 4 drives at the specified location
#
# The command includes the port that the MinIO server listens on
# (default 9000).
# If you run without TLS, change https -> http
MINIO_VOLUMES="https://minio1.example.net:9000/mnt/drive{1...4}/minio"
# Set all MinIO server command-line options
#
# The following explicitly sets the MinIO Console listen address to
# port 9001 on all network interfaces.
# The default behavior is dynamic port selection.
MINIO_OPTS="--console-address :9001 --certs-dir /opt/minio/certs"
# Set the root username.
# This user has unrestricted permissions to perform S3 and
# administrative API operations on any resource in the deployment.
#
# Defer to your organization's requirements for superadmin user name.
MINIO_ROOT_USER=minioadmin
# Set the root password
#
# Use a long, random, unique string that meets your organization's
# requirements for passwords.
MINIO_ROOT_PASSWORD=minio-secret-key-CHANGE-ME
.. tab-item:: Single-Node Single-Drive
Use Single-Node Single-Drive ("Standalone") deployments in early development and evaluation environments.
MinIO does not recommend Standalone deployments in production, as the loss of the node or its storage medium results in data loss.
.. code-block:: shell
:class: copyable
# Set the volume MinIO uses at startup
#
# The following specifies the drive or folder path
MINIO_VOLUMES="/mnt/drive1/minio"
# Set all MinIO server command-line options
#
# The following explicitly sets the MinIO Console listen address to
# port 9001 on all network interfaces.
# The default behavior is dynamic port selection.
MINIO_OPTS="--console-address :9001 --certs-dir /opt/minio/certs"
# Set the root username.
# This user has unrestricted permissions to perform S3 and
# administrative API operations on any resource in the deployment.
#
# Defer to your organization's requirements for superadmin user name.
MINIO_ROOT_USER=minioadmin
# Set the root password
#
# Use a long, random, unique string that meets your organization's
# requirements for passwords.
MINIO_ROOT_PASSWORD=minio-secret-key-CHANGE-ME
Specify any other :ref:`environment variables <minio-server-environment-variables>` or server command-line options as required by your deployment.
For distributed deployments, all nodes **must** have matching ``/etc/default/minio`` environment files.
Use a utility such as ``shasum -a 256 /etc/default/minio`` on each node to verify an exact match across all nodes.
6. Start the MinIO Deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Use ``systemctl start minio`` to start each node in the deployment.
You can track the status of the startup using ``journalctl -u minio`` on each node.
On successful startup, the MinIO process emits a summary of the deployment that resembles the following output:
.. code-block:: shell
MinIO Object Storage Server
Copyright: 2015-2024 MinIO, Inc.
License: GNU AGPLv3 - https://www.gnu.org/licenses/agpl-3.0.html
Version: RELEASE.2024-06-07T16-42-07Z (go1.22.4 linux/amd64)
API: https://minio-1.example.net:9000 https://203.0.113.10:9000 https://127.0.0.1:9000
RootUser: minioadmin
RootPass: minioadmin
WebUI: https://minio-1.example.net:9001 https://203.0.113.10:9001 https://127.0.0.1:9001
RootUser: minioadmin
RootPass: minioadmin
CLI: https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart
$ mc alias set 'myminio' 'https://minio-1.example.net:9000' 'minioadmin' 'minioadmin'
Docs: https://min.io/docs/minio/linux/index.html
Status: 16 Online, 0 Offline.
You may see increased log churn as the cluster starts up and synchronizes.
Common reasons for startup failure include:
- The MinIO process does not have read-write-list access to the specified drives
- The drives are not empty or contain non-MinIO data
- The drives are not formatted or mounted properly
- One or more hosts are not reachable over the network
Following our checklists typically mitigates the risk of encountering those or similar issues.
7. Connect to the Deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. tab-set::
.. tab-item:: Console
Open your browser and access any of the MinIO hostnames at port ``:9001`` to open the :ref:`MinIO Console <minio-console>` login page.
For example, ``https://minio1.example.com:9001``.
Log in with the :guilabel:`MINIO_ROOT_USER` and :guilabel:`MINIO_ROOT_PASSWORD`
from the previous step.
.. image:: /images/minio-console/console-login.png
:width: 600px
:alt: MinIO Console Login Page
:align: center
You can use the MinIO Console for general administration tasks like Identity and Access Management, Metrics and Log Monitoring, or Server Configuration.
Each MinIO server includes its own embedded MinIO Console.
.. tab-item:: CLI
Follow the :ref:`installation instructions <mc-install>` for ``mc`` on your local host.
Run ``mc --version`` to verify the installation.
If your MinIO deployment uses third-party or self-signed TLS certificates, copy the :abbr:`CA (Certificate Authority)` files to ``~/.mc/certs/CAs`` to allow ``mc`` to verify the deployment's TLS certificates.
Once installed, create an alias for the MinIO deployment:
.. code-block:: shell
:class: copyable
mc alias set myminio https://minio-1.example.net:9000 USERNAME PASSWORD
Change the hostname, username, and password to reflect your deployment.
The hostname can be any MinIO node in the deployment.
You can also specify the hostname of a load balancer, reverse proxy, or similar network control plane that handles connections to the deployment.
8. Next Steps
~~~~~~~~~~~~~
TODO

View File

@ -0,0 +1,210 @@
.. _deploy-minio-windows:
=======================
Deploy MinIO on Windows
=======================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
.. container:: extlinks-video
- `Object Storage Essentials <https://www.youtube.com/playlist?list=PLFOIsHSSYIK3WitnqhqfpeZ6fRFKHxIr7>`__
- `How to Connect to MinIO with JavaScript <https://www.youtube.com/watch?v=yUR4Fvx0D3E&list=PLFOIsHSSYIK3Dd3Y_x7itJT1NUKT5SxDh&index=5>`__
This page documents deploying MinIO onto Microsoft Windows hosts.
MinIO officially supports Windows operating systems in the Active Support phase of the Microsoft Modern Lifecycle Policy.
At the time of writing, that includes:
- Windows Server 23H2 (**Recommended**)
- Windows Server 2022 LTSC
- Windows 11 Enterprise/Workstation 23H2
- Windows 11 Enterprise/Workstation 22H2
- Windows 10 Enterprise 21H2 (LTS)
- Windows 10 IoT 21H2 (LTS)
- Windows 10 Enterprise 22H2
MinIO *may* run on older or out-of-support Windows releases, with limited support or troubleshooting from either MinIO or Microsoft.
The procedure includes guidance for deploying Single-Node Multi-Drive (SNMD) and Single-Node Single-Drive (SNSD) topologies in support of early development and evaluation environments.
MinIO does not officially support Multi-Node Multi-Drive (MNMD) "Distributed" configurations on Windows hosts.
Considerations
--------------
Review Checklists
~~~~~~~~~~~~~~~~~
Ensure you have reviewed our published Hardware, Software, and Security checklists before attempting this procedure.
Erasure Coding Parity
~~~~~~~~~~~~~~~~~~~~~
MinIO automatically determines the default :ref:`erasure coding <minio-erasure-coding>` configuration for the cluster based on the total number of nodes and drives in the topology.
You can configure the per-object :term:`parity` setting when you set up the cluster *or* let MinIO select the default (``EC:4`` for production-grade clusters).
Parity controls the relationship between object availability and storage on disk.
Use the MinIO `Erasure Code Calculator <https://min.io/product/erasure-code-calculator>`__ for guidance in selecting the appropriate erasure code parity level for your cluster.
While you can change erasure parity settings at any time, objects written with a given parity do **not** automatically update to the new parity settings.
Procedure
---------
1. Download the MinIO Binary
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Download the MinIO executable from the following URL:
.. code-block:: shell
:class: copyable
https://dl.min.io/server/minio/release/windows-amd64/minio.exe
The next step includes instructions for running the executable.
You cannot run the executable from the Explorer or by double clicking the file.
Instead, you call the executable to launch the server.
2. Launch the MinIO Server
~~~~~~~~~~~~~~~~~~~~~~~~~~
In PowerShell or the Command Prompt, navigate to the location of the executable or add the path of the ``minio.exe`` file to the system ``$PATH``.
.. tab-set::
.. tab-item:: Multi-Drive
For Windows hosts with multiple drives, you can specify a sequential set of drives to use for configuring MinIO in the Single-Node Multi-Drive (SNMD) topology:
.. code-block::
:class: copyable
.\minio.exe server {D...G}:\minio --console-address :9001
The :mc:`minio server` process prints its output to the system console, similar to the following:
.. code-block:: shell
API: http://192.0.2.10:9000 http://127.0.0.1:9000
RootUser: minioadmin
RootPass: minioadmin
Console: http://192.0.2.10:9001 http://127.0.0.1:9001
RootUser: minioadmin
RootPass: minioadmin
Command-line: https://min.io/docs/minio/linux/reference/minio-mc.html
$ mc alias set myminio http://192.0.2.10:9000 minioadmin minioadmin
Documentation: https://min.io/docs/minio/linux/index.html
WARNING: Detected default credentials 'minioadmin:minioadmin', we recommend that you change these values with 'MINIO_ROOT_USER' and 'MINIO_ROOT_PASSWORD' environment variables.
The process is tied to the current PowerShell or Command Prompt window.
Closing the window stops the server and ends the process.
.. tab-item:: Single-Drive
Use this command to start a local MinIO instance in the ``C:\minio`` folder.
You can replace ``C:\minio`` with another drive or folder path on the local computer.
.. code-block::
:class: copyable
.\minio.exe server C:\minio --console-address :9001
The :mc:`minio server` process prints its output to the system console, similar to the following:
.. code-block:: shell
API: http://192.0.2.10:9000 http://127.0.0.1:9000
RootUser: minioadmin
RootPass: minioadmin
Console: http://192.0.2.10:9001 http://127.0.0.1:9001
RootUser: minioadmin
RootPass: minioadmin
Command-line: https://min.io/docs/minio/linux/reference/minio-mc.html
$ mc alias set myminio http://192.0.2.10:9000 minioadmin minioadmin
Documentation: https://min.io/docs/minio/linux/index.html
WARNING: Detected default credentials 'minioadmin:minioadmin', we recommend that you change these values with 'MINIO_ROOT_USER' and 'MINIO_ROOT_PASSWORD' environment variables.
The process is tied to the current PowerShell or Command Prompt window.
Closing the window stops the server and ends the process.
3. Connect your Browser to the MinIO Server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Access the :ref:`minio-console` by going to a browser (such as Microsoft Edge) and going to ``http://127.0.0.1:9001`` or one of the Console addresses specified in the :mc:`minio server` command's output.
For example, ``Console: http://192.0.2.10:9001 http://127.0.0.1:9001`` in the example output indicates two possible addresses to use for connecting to the Console.
While port ``9000`` is used for connecting to the API, MinIO automatically redirects browser access to the MinIO Console.
Log in to the Console with the ``RootUser`` and ``RootPass`` user credentials displayed in the output.
These default to ``minioadmin | minioadmin``.
.. image:: /images/minio-console/console-login.png
:width: 600px
:alt: MinIO Console displaying login screen
:align: center
You can use the MinIO Console for general administration tasks like Identity and Access Management, Metrics and Log Monitoring, or Server Configuration.
Each MinIO server includes its own embedded MinIO Console.
.. image:: /images/minio-console/minio-console.png
:width: 600px
:alt: MinIO Console displaying bucket start screen
:align: center
For more information, see the :ref:`minio-console` documentation.
4. `(Optional)` Install the MinIO Client
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The :ref:`MinIO Client <minio-client>` allows you to work with your MinIO deployment from Powershell.
Download the standalone MinIO client for Windows from the following link:
https://dl.min.io/client/mc/release/windows-amd64/mc.exe
Double click on the file to run it.
Or, run the following in the Command Prompt or PowerShell.
.. code-block::
:class: copyable
\path\to\mc.exe --help
Use :mc:`mc.exe alias set <mc alias set>` to quickly authenticate and connect to the MinIO deployment.
.. code-block:: shell
:class: copyable
mc.exe alias set local http://127.0.0.1:9000 minioadmin minioadmin
mc.exe admin info local
The :mc:`mc.exe alias set <mc alias set>` takes four arguments:
- The name of the alias
- The hostname or IP address and port of the MinIO server
- The Access Key for a MinIO :ref:`user <minio-users>`
- The Secret Key for a MinIO :ref:`user <minio-users>`
For additional details about this command, see :ref:`alias`.
5. Next Steps
~~~~~~~~~~~~~
ToDo

View File

@ -0,0 +1,25 @@
.. _deploy-minio-standalone:
========================
Install the MinIO Server
========================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
MinIO supports deploying onto baremetal infrastructure - physical machines or virtualized hosts - running Linux, MacOS, and Windows.
TODO conceptual information here
.. toctree::
:titlesonly:
:hidden:
/operations/deployments/baremetal-deploy-minio-on-redhat-linux
/operations/deployments/baremetal-deploy-minio-on-ubuntu-linux
/operations/deployments/baremetal-deploy-minio-as-a-container
/operations/deployments/baremetal-deploy-minio-on-macos
/operations/deployments/baremetal-deploy-minio-on-windows

View File

@ -221,15 +221,7 @@ Complete any planned hardware expansion prior to :ref:`decommissioning older har
1) Install the MinIO Binary on Each Node in the New Server Pool
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. cond:: linux
.. include:: /includes/linux/common-installation.rst
:start-after: start-install-minio-binary-desc
:end-before: end-install-minio-binary-desc
.. cond:: macos
.. include:: /includes/macos/common-installation.rst
.. include:: /includes/linux/common-installation.rst
:start-after: start-install-minio-binary-desc
:end-before: end-install-minio-binary-desc

View File

@ -20,11 +20,6 @@ Along with the deprecation announcement, MinIO also announced that the feature w
As of :minio-release:`RELEASE.2022-10-29T06-21-33Z`, the MinIO Gateway and the related filesystem mode code have been removed.
Deployments still using the `standalone` or `filesystem` MinIO modes that upgrade to MinIO Server :minio-release:`RELEASE.2022-10-29T06-21-33Z` or later receive an error when attempting to start MinIO.
.. cond:: linux
.. note::
For deployments running in a container, see the `Container - Migrate from Gateway or Filesystem Mode <https://min.io/docs/minio/container/operations/install-deploy-manage/migrate-fs-gateway.html>`__ tutorial instead.
Overview
--------
@ -68,7 +63,7 @@ Procedure
#. Create a new Single-Node Single-Drive MinIO deployment.
Refer to the :ref:`documentation for step-by-step instructions <deploy-minio-standalone>` for launching a new |SNSD| deployment.
Follow our :ref:`installation instructions <deploy-minio-standalone>` for your OS of choice and configure the installation as a Single-Node Single-Drive (SNSD) topology.
The location of the deployment can be any empty folder on the storage medium of your choice.
A new folder on the same drive can work for the new deployment as long as the existing deployment is not on the root of a drive.
@ -259,6 +254,5 @@ Procedure
#. Stop the server for both deployments.
#. Restart the new MinIO deployment with the ports used for the previous standalone deployment.
For more about starting the MinIO service, refer to step four in the deploy |SNSD| :ref:`documentation <deploy-minio-standalone>`.
Ensure you apply all environment variables and runtime configuration settings and validate the behavior of the new deployment.

View File

@ -15,18 +15,4 @@ Upgrade a MinIO Deployment
For deployments older than :minio-release:`RELEASE.2024-03-30T09-41-56Z` running with :ref:`AD/LDAP <minio-ldap-config-settings>` enabled, you **must** read through the release notes for :minio-release:`RELEASE.2024-04-18T19-09-19Z` before starting this procedure.
You must take the extra steps documented in the linked release as part of the upgrade.
.. cond:: linux
.. include:: /includes/linux/steps-upgrade-minio-deployment.rst
.. cond:: container
.. include:: /includes/container/steps-upgrade-minio-deployment.rst
.. cond:: windows
.. include:: /includes/windows/steps-upgrade-minio-deployment.rst
.. cond:: macos
.. include:: /includes/macos/steps-upgrade-minio-deployment.rst
.. include:: /includes/linux/steps-upgrade-minio-deployment.rst

View File

@ -0,0 +1,32 @@
.. _minio-baremetal:
.. _minio-installation-platform-support:
.. _deploy-minio-distributed-baremetal:
=========================
Deploy MinIO on Baremetal
=========================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
MinIO supports deploying onto baremetal infrastructure - physical machines or virtualized hosts - running Linux, MacOS, and Windows.
You can also deploy MinIO as a container onto supported Operating Systems.
- :ref:`Deploy MinIO onto RedHat Linux <deploy-minio-rhel>`
- :ref:`Deploy MinIO onto Ubuntu Linux <deploy-minio-ubuntu>`
- :ref:`Deploy MinIO onto Apple MacOS <deploy-minio-macos>`
- :ref:`Deploy MinIO as a Container <deploy-minio-container>`
- :ref:`Deploy MinIO onto Microsoft Windows <deploy-minio-windows>`
.. toctree::
:titlesonly:
:hidden:
/operations/deployments/baremetal-deploy-minio-server
/operations/deployments/baremetal-upgrade-minio-deployment
/operations/deployments/baremetal-expand-minio-deployment
/operations/deployments/baremetal-decommission-server-pool
/operations/deployments/baremetal-migrate-fs-gateway

View File

@ -0,0 +1,101 @@
.. _deploy-minio-distributed:
.. _minio-mnmd:
.. _minio-installation:
.. _minio-snmd:
.. _minio-snsd:
===========================
Installation and Management
===========================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 2
This section documents steps for installing and managing the AGPLv3-licensed Community MinIO Object Storage on :ref:`Kubernetes <minio-kubernetes>` and :ref:`Baremetal <minio-baremetal>` infrastructures.
.. meta::
:description: MinIO Deployment Topologies and Installation Instructions
:keywords: MinIO, Deploy, Architecture, Topology, Distributed, Replication, Install
.. container:: extlinks-video
- `Installing and Running MinIO on Linux <https://www.youtube.com/watch?v=74usXkZpNt8&list=PLFOIsHSSYIK1BnzVY66pCL-iJ30Ht9t1o>`__
- `Object Storage Essentials <https://www.youtube.com/playlist?list=PLFOIsHSSYIK3WitnqhqfpeZ6fRFKHxIr7>`__
- `How to Connect to MinIO with JavaScript <https://www.youtube.com/watch?v=yUR4Fvx0D3E&list=PLFOIsHSSYIK3Dd3Y_x7itJT1NUKT5SxDh&index=5>`__
MinIO is a software-defined high performance distributed object storage server.
You can run MinIO on consumer or enterprise-grade hardware and a variety
of operating systems and architectures.
All MinIO deployments implement :ref:`Erasure Coding <minio-erasure-coding>` backends.
You can deploy MinIO using one of the following topologies:
.. _minio-installation-comparison:
:ref:`Single-Node Single-Drive <minio-snsd>` (SNSD or "Standalone")
Local development and evaluation with no/limited reliability
:ref:`Single-Node Multi-Drive <minio-snmd>` (SNMD or "Standalone Multi-Drive")
Workloads with lower performance, scale, and capacity requirements
Drive-level reliability with configurable tolerance for loss of up to 1/2 all drives
Evaluation of multi-drive topologies and failover behavior.
:ref:`Multi-Node Multi-Drive <minio-mnmd>` (MNMD or "Distributed")
Enterprise-grade high-performance object storage
Multi Node/Drive level reliability with configurable tolerance for loss of up to 1/2 all nodes/drives
Primary storage for AI/ML, Distributed Query, Analytics, and other Data Lake components
Scalable for Petabyte+ workloads - both storage capacity and performance
Kubernetes
----------
MinIO provides a Kubernetes-native Operator framework for managing and deploying Tenants onto your managed infrastructure.
MinIO fully supports upstream Kubernetes and most flavors which inherit from the upstream as a base.
This includes, but is not limited to, RedHat OpenShift, SUSE Rancher, and VMware Tanzu.
MinIO also fully supports cloud-based Kubernetes engines such as Elastic Kubernetes Engine, Google Kubernetes Service, and Azure Kubernetes Service.
Select the link most appropriate for your Kubernetes infrastructure.
If your provider is not listed, use the Kubernetes Upstream documentation as a baseline and modify as needed based on your provider's guidance or divergence from upstream semantics and behavior.
- :ref:`Deploy MinIO on Kubernetes (Upstream) <deploy-operator-kubernetes>`
- :ref:`Deploy MinIO on Openshift Kubernetes <deploy-operator-openshift>`
- :ref:`Deploy MinIO on SUSE Rancher Kubernetes <deploy-operator-rancher>`
- :ref:`Deploy MinIO on Elastic Kubernetes Service <deploy-operator-eks>`
- :ref:`Deploy MinIO on Google Kubernetes Engine <deploy-operator-gke>`
- :ref:`Deploy MinIO on Azure Kubernetes Service <deploy-operator-aks>`
Baremetal
---------
MinIO supports deploying onto baremetal infrastructure - physical machines or virtualized hosts - running Linux, MacOS, and Windows.
You can also deploy MinIO as a container onto supported Operating Systems.
- :ref:`Deploy MinIO onto RedHat Linux <deploy-minio-rhel>`
- :ref:`Deploy MinIO onto Ubuntu Linux <deploy-minio-ubuntu>`
- :ref:`Deploy MinIO as a Container <deploy-minio-container>`
- :ref:`Deploy MinIO onto MacOS <deploy-minio-macos>`
- :ref:`Deploy MinIO onto Windows <deploy-minio-windows>`
.. important::
MinIO strongly recommends :minio-docs:`Linux (RHEL, Ubuntu) <minio/linux/index.html>` or :minio-docs:`Kubernetes (Upstream, OpenShift) <minio/kubernetes/upstream/index.html>` for long-term development and production environments.
MinIO provides no guarantee of support for :abbr:`SNMD (Single-Node Multi-Drive)` or :abbr:`MNMD (Multi-Node Multi-Drive)` topologies on MacOS, Windows, or Containerized deployments.
.. toctree::
:titlesonly:
:hidden:
/operations/deployments/kubernetes
/operations/deployments/baremetal

View File

@ -0,0 +1,262 @@
.. _deploy-operator-aks:
=================================================
Deploy MinIO Operator on Azure Kubernetes Service
=================================================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
Overview
--------
`Azure Kubernetes Service <https://azure.microsoft.com/en-us/products/kubernetes-service/#overview>`__ (AKS) is a highly available, secure, and fully managed Kubernetes service from Microsoft Azure.
The MinIO Kubernetes Operator supports deploying MinIO Tenants onto AKS infrastructure using the MinIO Operator Console or `kustomize <https://kustomize.io/>`__ for :minio-git:`YAML-defined deployments <operator/tree/master/examples/kustomization>`.
:minio-web:`Through the AKS Marketplace <product/multicloud-azure-kubernetes-service>`
MinIO maintains an `AKS Marketplace listing <https://azuremarketplace.microsoft.com/en-us/marketplace/apps/minio.minio-object-storage_v1dot1>`__ through which you can register your AKS cluster with |subnet|.
Any MinIO tenant you deploy through Marketplace-connected clusters can take advantage of SUBNET registration, including 24/7 access to MinIO engineers.
Using Kubernetes Kustomize
MinIO provides Kustomize templates for deploying the MinIO Operator onto Kubernetes infrastructure.
You can use Kustomize to install the Operator onto AKS infrastructure.
MinIO Operator installations and Tenants deployed through this path require manual subscription with MinIO SUBNET for licensing and support.
Using Kubernetes Helm
MinIO provides a Helm chart for deploying the MinIO Operator onto Kubernetes infrastructure.
See :ref:`minio-k8s-deploy-operator-helm` for instructions.
MinIO Operator installations and Tenants deployed through this path require manual subscription with MinIO SUBNET for licensing and support.
This page documents deploying the MinIO Operator through the CLI using Kustomize.
For instructions on deploying the MinIO Operator through the AKS Marketplace, see :minio-web:`Deploy MinIO through AKS <multicloud-azure-kubernetes-service/deploy>`.
This documentation assumes familiarity with all referenced Kubernetes and Azure Kubernetes Service concepts, utilities, and procedures.
While this documentation *may* provide guidance for configuring or deploying Kubernetes-related or Azure Kubernetes Service-related resources on a best-effort basis, it is not a replacement for the official :kube-docs:`Kubernetes Documentation <>`.
Prerequisites
-------------
AKS Cluster with Azure Virtual Machines
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This procedure assumes an existing :abbr:`AKS (Azure Kubernetes Service)` cluster with *at least* four Azure virtual machines (VMs).
The Azure VMs should have matching machine types and configurations to ensure predictable performance with MinIO.
MinIO provides :ref:`hardware guidelines <deploy-minio-distributed-recommendations>` for selecting the appropriate Azure VM type and size.
MinIO strongly recommends selecting VM instances with support for Premium SSDs and *at least* 25Gbps Network bandwidth as a baseline for performance.
For more complete information on Azure Virtual Machine types and Storage resources, see :azure-docs:`Sizes for virtual machines in Azure <virtual-machines/sizes>` and :azure-docs:`Azure managed disk types <virtual-machines/disks-types>`.
``kubectl`` Access to the AKS Cluster
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Ensure your host machine has a ``kubectl`` installation compatible with the target AKS cluster.
For guidance on connecting ``kubectl`` to AKS, see :aks-docs:`Install kubectl and configure cluster access <tutorial-kubernetes-deploy-cluster?tabs=azure-cli#connect-to-cluster-using-kubectl>`.
Procedure
---------
The following steps deploy Operator using Kustomize and a ``kustomization.yaml`` file from the MinIO Operator GitHub repository.
To install Operator using a Helm chart, see :ref:`Deploy Operator with Helm <minio-k8s-deploy-operator-helm>`.
The following procedure uses ``kubectl -k`` to install the Operator from the MinIO Operator GitHub repository.
``kubectl -k`` and ``kubectl --kustomize`` are aliases that perform the same command.
.. important::
If you use Kustomize to install the Operator, you must use Kustomize to manage or upgrade that installation.
Do not use ``kubectl krew``, a Helm chart, or similar methods to manage or upgrade a MinIO Operator installation deployed with Kustomize.
You can, however, use Kustomize to upgrade a previous version of Operator (5.0.14 or earlier) installed with the MinIO Kubernetes Plugin.
1. Install the latest version of Operator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: shell
:class: copyable
:substitutions:
kubectl apply -k "github.com/minio/operator?ref=v|operator-version-stable|"
The output resembles the following:
.. code-block:: shell
namespace/minio-operator created
customresourcedefinition.apiextensions.k8s.io/miniojobs.job.min.io created
customresourcedefinition.apiextensions.k8s.io/policybindings.sts.min.io created
customresourcedefinition.apiextensions.k8s.io/tenants.minio.min.io created
serviceaccount/console-sa created
serviceaccount/minio-operator created
clusterrole.rbac.authorization.k8s.io/console-sa-role created
clusterrole.rbac.authorization.k8s.io/minio-operator-role created
clusterrolebinding.rbac.authorization.k8s.io/console-sa-binding created
clusterrolebinding.rbac.authorization.k8s.io/minio-operator-binding created
configmap/console-env created
secret/console-sa-secret created
service/console created
service/operator created
service/sts created
deployment.apps/console created
deployment.apps/minio-operator created
2. Verify the Operator pods are running
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: shell
:class: copyable
kubectl get pods -n minio-operator
The output resembles the following:
.. code-block:: shell
NAME READY STATUS RESTARTS AGE
console-56c7d8bd89-485qh 1/1 Running 0 2m42s
minio-operator-6c758b8c45-nkhlx 1/1 Running 0 2m42s
minio-operator-6c758b8c45-dgd8n 1/1 Running 0 2m42s
In this example, the ``minio-operator`` pod is MinIO Operator and the ``console`` pod is the Operator Console.
You can modify your Operator deployment by applying kubectl patches.
You can find examples for common configurations in the `Operator GitHub repository <https://github.com/minio/operator/tree/master/examples/kustomization>`__.
3. *(Optional)* Configure access to the Operator Console service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Operator Console service does not automatically bind or expose itself for external access on the Kubernetes cluster.
You must instead configure a network control plane component, such as a load balancer or ingress, to grant that external access.
For testing purposes or short-term access, expose the Operator Console service through a NodePort using the following patch:
.. code-block:: shell
:class: copyable
kubectl patch service -n minio-operator console -p '
{
"spec": {
"ports": [
{
"name": "http",
"port": 9090,
"protocol": "TCP",
"targetPort": 9090,
"nodePort": 30090
},
{
"name": "https",
"port": 9443,
"protocol": "TCP",
"targetPort": 9443,
"nodePort": 30433
}
],
"type": "NodePort"
}
}'
The patch command should output ``service/console patched``.
You can now access the service through ports ``30433`` (HTTPS) or ``30090`` (HTTP) on any of your Kubernetes worker nodes.
4. Verify the Operator installation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Check the contents of the specified namespace (``minio-operator``) to ensure all pods and services have started successfully.
.. code-block:: shell
:class: copyable
kubectl get all -n minio-operator
The response should resemble the following:
.. code-block:: shell
NAME READY STATUS RESTARTS AGE
pod/console-56c7d8bd89-485qh 1/1 Running 0 5m20s
pod/minio-operator-6c758b8c45-nkhlx 1/1 Running 0 5m20s
pod/minio-operator-6c758b8c45-dgd8n 1/1 Running 0 5m20s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/operator ClusterIP 10.43.135.241 <none> 4221/TCP 5m20s
service/sts ClusterIP 10.43.117.251 <none> 4223/TCP 5m20s
service/console NodePort 10.43.235.38 <none> 9090:30090/TCP,9443:30433/TCP 5m20s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/console 1/1 1 1 5m20s
deployment.apps/minio-operator 2/2 2 2 5m20s
NAME DESIRED CURRENT READY AGE
replicaset.apps/console-56c7d8bd89 1 1 1 5m20s
replicaset.apps/minio-operator-6c758b8c45 2 2 2 5m20s
5. Retrieve the Operator Console JWT for login
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: shell
:class: copyable
kubectl apply -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
name: console-sa-secret
namespace: minio-operator
annotations:
kubernetes.io/service-account.name: console-sa
type: kubernetes.io/service-account-token
EOF
SA_TOKEN=$(kubectl -n minio-operator get secret console-sa-secret -o jsonpath="{.data.token}" | base64 --decode)
echo $SA_TOKEN
The output of this command is the JSON Web Token (JWT) login credential for Operator Console.
6. Log into the MinIO Operator Console
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. tab-set::
.. tab-item:: NodePort
:selected:
If you configured the service for access through a NodePort, specify the hostname of any worker node in the cluster with that port as ``HOSTNAME:NODEPORT`` to access the Console.
For example, a deployment configured with a NodePort of 30090 and the following ``InternalIP`` addresses can be accessed at ``http://172.18.0.5:30090``.
.. code-block:: shell
:class: copyable
kubectl get nodes -o custom-columns=IP:.status.addresses[:]
IP
map[address:172.18.0.5 type:InternalIP],map[address:k3d-MINIO-agent-3 type:Hostname]
map[address:172.18.0.6 type:InternalIP],map[address:k3d-MINIO-agent-2 type:Hostname]
map[address:172.18.0.2 type:InternalIP],map[address:k3d-MINIO-server-0 type:Hostname]
map[address:172.18.0.4 type:InternalIP],map[address:k3d-MINIO-agent-1 type:Hostname]
map[address:172.18.0.3 type:InternalIP],map[address:k3d-MINIO-agent-0 type:Hostname]
.. tab-item:: Ingress or Load Balancer
If you configured the ``svc/console`` service for access through ingress or a cluster load balancer, you can access the Console using the configured hostname and port.
.. tab-item:: Port Forwarding
You can use ``kubectl port-forward`` to temporarily forward ports for the Console:
.. code-block:: shell
:class: copyable
kubectl port-forward svc/console -n minio-operator 9090:9090
You can then use ``http://localhost:9090`` to access the MinIO Operator Console.
Once you access the Console, use the Console JWT to log in.
You can now :ref:`deploy and manage MinIO Tenants using the Operator Console <deploy-minio-distributed>`.

View File

@ -0,0 +1,268 @@
.. _deploy-operator-eks:
==========================================================
Deploy MinIO Operator on Amazon Elastic Kubernetes Service
==========================================================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
Overview
--------
:eks-docs:`Amazon Elastic Kubernetes Service <what-is-eks.html>` (EKS) is a managed Kubernetes service that lets you run Kubernetes on AWS without needing to install, operate, and maintain your own Kubernetes control plane.
The MinIO Kubernetes Operator supports deploying MinIO Tenants onto EKS infrastructure using the MinIO Operator Console or by using `kustomize <https://kustomize.io/>`__ for :minio-git:`YAML-defined deployments <operator/tree/master/examples/kustomization>`.
MinIO supports the following methods for installing the MinIO Operator onto your :abbr:`EKS (Elastic Kubernetes Service)` clusters:
:minio-web:`Through the AWS Marketplace <product/multicloud-elastic-kubernetes-service>`
MinIO maintains an `AWS Marketplace listing <https://aws.amazon.com/marketplace/pp/prodview-smchi7bcs4nn4>`__ through which you can register your EKS cluster with |subnet|.
Any tenant you deploy through Marketplace-connected clusters can take advantage of SUBNET registration, including 24/7 direct access to MinIO engineers.
Using Kubernetes Kustomize
MinIO provides Kustomize templates for deploying the MinIO Operator onto Kubernetes infrastructure.
You can use Kustomize to install the Operator onto EKS infrastructure.
MinIO Operator installations and Tenants deployed through this path require manual subscription with MinIO SUBNET for licensing and support.
Using Kubernetes Helm
MinIO provides a Helm chart for deploying the MinIO Operator onto Kubernetes infrastructure.
See :ref:`minio-k8s-deploy-operator-helm` for instructions.
MinIO Operator installations and Tenants deployed through this path require manual subscription with MinIO SUBNET for licensing and support.
This page documents deploying the MinIO Operator through the CLI using Kustomize.
For instructions on deploying the MinIO Operator through the AWS Marketplace, see :minio-web:`Deploy MinIO through EKS <product/multicloud-elastic-kubernetes-service/deploy>`.
This documentation assumes familiarity with all referenced Kubernetes and Elastic Kubernetes Service concepts, utilities, and procedures.
While this documentation *may* provide guidance for configuring or deploying Kubernetes-related or Elastic Kubernetes Service-related resources on a best-effort basis, it is not a replacement for the official :kube-docs:`Kubernetes Documentation <>`.
Prerequisites
-------------
In addition to the general :ref:`MinIO Operator prerequisites <minio-operator-prerequisites>`, your EKS cluster must also meet the following requirements:
EKS Cluster with EBS-Optimized EC2 Nodes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This procedure assumes an existing :abbr:`EKS (Elastic Kubernetes Service)` cluster with *at least* four EC2 nodes.
The EC2 nodes should have matching machine types and configurations to ensure predictable performance with MinIO.
MinIO provides :ref:`hardware guidelines <deploy-minio-distributed-recommendations>` for selecting the appropriate EC2 instance class and size.
MinIO strongly recommends selecting EBS-optimized instances with *at least* 25Gbps Network bandwidth as a baseline for performance.
For more complete information on the available EC2 and EBS resources, see `EC2 Instance Types <https://aws.amazon.com/ec2/instance-types/>`__ and `EBS Volume Types <https://aws.amazon.com/ebs/volume-types/>`__.
|subnet| customers should reach out to MinIO engineering as part of architecture planning for assistance in selecting the optimal instance and volume types for the target workload and performance goals.
``kubectl`` Access to the EKS Cluster
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Ensure your host machine has a ``kubectl`` installation compatible with the target EKS cluster.
For guidance on connecting ``kubectl`` to EKS, see :aws-docs:`Creating or updating a kubeconfig file for an Amazon EKS cluster <eks/latest/userguide/create-kubeconfig.html>`.
Your ``kubectl`` configuration must include authentication as a user with the correct permissions.
MinIO provides an example IAM policy for Marketplace-based installations in the MinIO Operator :minio-git:`github repository <marketplace/blob/master/eks/iam-policy.json>`.
You can use this policy as a baseline for manual Operator installations.
Procedure
---------
The following steps deploy Operator using Kustomize and a ``kustomization.yaml`` file from the MinIO Operator GitHub repository.
To install Operator using a Helm chart, see :ref:`Deploy Operator with Helm <minio-k8s-deploy-operator-helm>`.
The following procedure uses ``kubectl -k`` to install the Operator from the MinIO Operator GitHub repository.
``kubectl -k`` and ``kubectl --kustomize`` are aliases that perform the same command.
.. important::
If you use Kustomize to install the Operator, you must use Kustomize to manage or upgrade that installation.
Do not use ``kubectl krew``, a Helm chart, or similar methods to manage or upgrade a MinIO Operator installation deployed with Kustomize.
You can, however, use Kustomize to upgrade a previous version of Operator (5.0.14 or earlier) installed with the MinIO Kubernetes Plugin.
1. Install the latest version of Operator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: shell
:class: copyable
:substitutions:
kubectl apply -k "github.com/minio/operator?ref=v|operator-version-stable|"
The output resembles the following:
.. code-block:: shell
namespace/minio-operator created
customresourcedefinition.apiextensions.k8s.io/miniojobs.job.min.io created
customresourcedefinition.apiextensions.k8s.io/policybindings.sts.min.io created
customresourcedefinition.apiextensions.k8s.io/tenants.minio.min.io created
serviceaccount/console-sa created
serviceaccount/minio-operator created
clusterrole.rbac.authorization.k8s.io/console-sa-role created
clusterrole.rbac.authorization.k8s.io/minio-operator-role created
clusterrolebinding.rbac.authorization.k8s.io/console-sa-binding created
clusterrolebinding.rbac.authorization.k8s.io/minio-operator-binding created
configmap/console-env created
secret/console-sa-secret created
service/console created
service/operator created
service/sts created
deployment.apps/console created
deployment.apps/minio-operator created
2. Verify the Operator pods are running
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: shell
:class: copyable
kubectl get pods -n minio-operator
The output resembles the following:
.. code-block:: shell
NAME READY STATUS RESTARTS AGE
console-56c7d8bd89-485qh 1/1 Running 0 2m42s
minio-operator-6c758b8c45-nkhlx 1/1 Running 0 2m42s
minio-operator-6c758b8c45-dgd8n 1/1 Running 0 2m42s
In this example, the ``minio-operator`` pod is MinIO Operator and the ``console`` pod is the Operator Console.
You can modify your Operator deployment by applying kubectl patches.
You can find examples for common configurations in the `Operator GitHub repository <https://github.com/minio/operator/tree/master/examples/kustomization>`__.
3. *(Optional)* Configure access to the Operator Console service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Operator Console service does not automatically bind or expose itself for external access on the Kubernetes cluster.
You must instead configure a network control plane component, such as a load balancer or ingress, to grant that external access.
For testing purposes or short-term access, expose the Operator Console service through a NodePort using the following patch:
.. code-block:: shell
:class: copyable
kubectl patch service -n minio-operator console -p '
{
"spec": {
"ports": [
{
"name": "http",
"port": 9090,
"protocol": "TCP",
"targetPort": 9090,
"nodePort": 30090
},
{
"name": "https",
"port": 9443,
"protocol": "TCP",
"targetPort": 9443,
"nodePort": 30433
}
],
"type": "NodePort"
}
}'
The patch command should output ``service/console patched``.
You can now access the service through ports ``30433`` (HTTPS) or ``30090`` (HTTP) on any of your Kubernetes worker nodes.
4. Verify the Operator installation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Check the contents of the specified namespace (``minio-operator``) to ensure all pods and services have started successfully.
.. code-block:: shell
:class: copyable
kubectl get all -n minio-operator
The response should resemble the following:
.. code-block:: shell
NAME READY STATUS RESTARTS AGE
pod/console-56c7d8bd89-485qh 1/1 Running 0 5m20s
pod/minio-operator-6c758b8c45-nkhlx 1/1 Running 0 5m20s
pod/minio-operator-6c758b8c45-dgd8n 1/1 Running 0 5m20s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/operator ClusterIP 10.43.135.241 <none> 4221/TCP 5m20s
service/sts ClusterIP 10.43.117.251 <none> 4223/TCP 5m20s
service/console NodePort 10.43.235.38 <none> 9090:30090/TCP,9443:30433/TCP 5m20s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/console 1/1 1 1 5m20s
deployment.apps/minio-operator 2/2 2 2 5m20s
NAME DESIRED CURRENT READY AGE
replicaset.apps/console-56c7d8bd89 1 1 1 5m20s
replicaset.apps/minio-operator-6c758b8c45 2 2 2 5m20s
5. Retrieve the Operator Console JWT for login
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: shell
:class: copyable
kubectl apply -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
name: console-sa-secret
namespace: minio-operator
annotations:
kubernetes.io/service-account.name: console-sa
type: kubernetes.io/service-account-token
EOF
SA_TOKEN=$(kubectl -n minio-operator get secret console-sa-secret -o jsonpath="{.data.token}" | base64 --decode)
echo $SA_TOKEN
The output of this command is the JSON Web Token (JWT) login credential for Operator Console.
6. Log into the MinIO Operator Console
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. tab-set::
.. tab-item:: NodePort
:selected:
If you configured the service for access through a NodePort, specify the hostname of any worker node in the cluster with that port as ``HOSTNAME:NODEPORT`` to access the Console.
For example, a deployment configured with a NodePort of 30090 and the following ``InternalIP`` addresses can be accessed at ``http://172.18.0.5:30090``.
.. code-block:: shell
:class: copyable
kubectl get nodes -o custom-columns=IP:.status.addresses[:]
IP
map[address:172.18.0.5 type:InternalIP],map[address:k3d-MINIO-agent-3 type:Hostname]
map[address:172.18.0.6 type:InternalIP],map[address:k3d-MINIO-agent-2 type:Hostname]
map[address:172.18.0.2 type:InternalIP],map[address:k3d-MINIO-server-0 type:Hostname]
map[address:172.18.0.4 type:InternalIP],map[address:k3d-MINIO-agent-1 type:Hostname]
map[address:172.18.0.3 type:InternalIP],map[address:k3d-MINIO-agent-0 type:Hostname]
.. tab-item:: Ingress or Load Balancer
If you configured the ``svc/console`` service for access through ingress or a cluster load balancer, you can access the Console using the configured hostname and port.
.. tab-item:: Port Forwarding
You can use ``kubectl port-forward`` to temporarily forward ports for the Console:
.. code-block:: shell
:class: copyable
kubectl port-forward svc/console -n minio-operator 9090:9090
You can then use ``http://localhost:9090`` to access the MinIO Operator Console.
Once you access the Console, use the Console JWT to log in.
You can now :ref:`deploy and manage MinIO Tenants using the Operator Console <deploy-minio-distributed>`.

View File

@ -0,0 +1,261 @@
.. _deploy-operator-gke:
=================================================
Deploy MinIO Operator on Google Kubernetes Engine
=================================================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
Overview
--------
`Google Kubernetes Engine <https://cloud.google.com/kubernetes-engine?ref=minio-docs>`__ (GKE) offers a highly automated, secure, and fully managed Kubernetes platform.
The MinIO Kubernetes Operator supports deploying MinIO Tenants onto GKE infrastructure using the MinIO Operator Console or `kustomize <https://kustomize.io/>`__ for :minio-git:`YAML-defined deployments <operator/tree/master/examples/kustomization>`.
:minio-web:`Through the GKE Marketplace <product/multicloud-google-kubernetes-service>`
MinIO maintains a `GKE Marketplace listing <https://console.cloud.google.com/marketplace/product/minio-inc-public/minio-enterprise?pli=1&project=peak-essence-171622>`__ through which you can register your GKE cluster with |subnet|.
Any MinIO tenant you deploy through Marketplace-connected clusters can take advantage of SUBNET registration, including 24/7 direct access to MinIO engineers.
Using Kubernetes Kustomize
MinIO provides Kustomize templates for deploying the MinIO Operator onto Kubernetes infrastructure.
You can use Kustomize to install the Operator onto GKE infrastructure.
MinIO Operator installations and Tenants deployed through this path require manual subscription with MinIO SUBNET for licensing and support.
Using Kubernetes Helm
MinIO provides a Helm chart for deploying the MinIO Operator onto Kubernetes infrastructure.
See :ref:`minio-k8s-deploy-operator-helm` for instructions.
MinIO Operator installations and Tenants deployed through this path require manual subscription with MinIO SUBNET for licensing and support.
This page documents deploying the MinIO Operator through the CLI using Kustomize.
For instructions on deploying the MinIO Operator through the GKE Marketplace, see :minio-web:`Deploy MinIO through GKE <product/multicloud-google-kubernetes-service/deploy>`.
This documentation assumes familiarity with all referenced Kubernetes and Google Kubernetes Engine concepts, utilities, and procedures.
While this documentation *may* provide guidance for configuring or deploying Kubernetes-related or Google Kubernetes Engine-related resources on a best-effort basis, it is not a replacement for the official :kube-docs:`Kubernetes Documentation <>`.
Prerequisites
-------------
GKE Cluster with Compute Engine Nodes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This procedure assumes an existing :abbr:`GKE (Google Kubernetes Engine)` cluster with a MinIO Operator installation and *at least* four Compute Engine nodes.
The Compute Engine nodes should have matching machine types and configurations to ensure predictable performance with MinIO.
MinIO provides :ref:`hardware guidelines <deploy-minio-distributed-recommendations>` for selecting the appropriate Compute Engine machine type and size.
MinIO strongly recommends selecting instances with support for local SSDs and *at least* 25Gbps egress bandwidth as a baseline for performance.
For more complete information on the available Compute Engine and Persistent Storage resources, see :gcp-docs:`Machine families resources and comparison guide <general-purpose-machines>` and :gcp-docs:`Persistent disks <disks>`.
``kubectl`` Access to the GKE Cluster
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Ensure your host machine has a ``kubectl`` installation compatible with the target GKE cluster.
For guidance on connecting ``kubectl`` to GKE, see :gke-docs:`Install kubectl and configure cluster access <how-to/cluster-access-for-kubectl>`.
Procedure
---------
The following steps deploy Operator using Kustomize and a ``kustomization.yaml`` file from the MinIO Operator GitHub repository.
To install Operator using a Helm chart, see :ref:`Deploy Operator with Helm <minio-k8s-deploy-operator-helm>`.
The following procedure uses ``kubectl -k`` to install the Operator from the MinIO Operator GitHub repository.
``kubectl -k`` and ``kubectl --kustomize`` are aliases that perform the same command.
.. important::
If you use Kustomize to install the Operator, you must use Kustomize to manage or upgrade that installation.
Do not use ``kubectl krew``, a Helm chart, or similar methods to manage or upgrade a MinIO Operator installation deployed with Kustomize.
You can, however, use Kustomize to upgrade a previous version of Operator (5.0.14 or earlier) installed with the MinIO Kubernetes Plugin.
1. Install the latest version of Operator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: shell
:class: copyable
:substitutions:
kubectl apply -k "github.com/minio/operator?ref=v|operator-version-stable|"
The output resembles the following:
.. code-block:: shell
namespace/minio-operator created
customresourcedefinition.apiextensions.k8s.io/miniojobs.job.min.io created
customresourcedefinition.apiextensions.k8s.io/policybindings.sts.min.io created
customresourcedefinition.apiextensions.k8s.io/tenants.minio.min.io created
serviceaccount/console-sa created
serviceaccount/minio-operator created
clusterrole.rbac.authorization.k8s.io/console-sa-role created
clusterrole.rbac.authorization.k8s.io/minio-operator-role created
clusterrolebinding.rbac.authorization.k8s.io/console-sa-binding created
clusterrolebinding.rbac.authorization.k8s.io/minio-operator-binding created
configmap/console-env created
secret/console-sa-secret created
service/console created
service/operator created
service/sts created
deployment.apps/console created
deployment.apps/minio-operator created
2. Verify the Operator pods are running
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: shell
:class: copyable
kubectl get pods -n minio-operator
The output resembles the following:
.. code-block:: shell
NAME READY STATUS RESTARTS AGE
console-56c7d8bd89-485qh 1/1 Running 0 2m42s
minio-operator-6c758b8c45-nkhlx 1/1 Running 0 2m42s
minio-operator-6c758b8c45-dgd8n 1/1 Running 0 2m42s
In this example, the ``minio-operator`` pod is MinIO Operator and the ``console`` pod is the Operator Console.
You can modify your Operator deployment by applying kubectl patches.
You can find examples for common configurations in the `Operator GitHub repository <https://github.com/minio/operator/tree/master/examples/kustomization>`__.
3. *(Optional)* Configure access to the Operator Console service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Operator Console service does not automatically bind or expose itself for external access on the Kubernetes cluster.
You must instead configure a network control plane component, such as a load balancer or ingress, to grant that external access.
For testing purposes or short-term access, expose the Operator Console service through a NodePort using the following patch:
.. code-block:: shell
:class: copyable
kubectl patch service -n minio-operator console -p '
{
"spec": {
"ports": [
{
"name": "http",
"port": 9090,
"protocol": "TCP",
"targetPort": 9090,
"nodePort": 30090
},
{
"name": "https",
"port": 9443,
"protocol": "TCP",
"targetPort": 9443,
"nodePort": 30433
}
],
"type": "NodePort"
}
}'
The patch command should output ``service/console patched``.
You can now access the service through ports ``30433`` (HTTPS) or ``30090`` (HTTP) on any of your Kubernetes worker nodes.
4. Verify the Operator installation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Check the contents of the specified namespace (``minio-operator``) to ensure all pods and services have started successfully.
.. code-block:: shell
:class: copyable
kubectl get all -n minio-operator
The response should resemble the following:
.. code-block:: shell
NAME READY STATUS RESTARTS AGE
pod/console-56c7d8bd89-485qh 1/1 Running 0 5m20s
pod/minio-operator-6c758b8c45-nkhlx 1/1 Running 0 5m20s
pod/minio-operator-6c758b8c45-dgd8n 1/1 Running 0 5m20s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/operator ClusterIP 10.43.135.241 <none> 4221/TCP 5m20s
service/sts ClusterIP 10.43.117.251 <none> 4223/TCP 5m20s
service/console NodePort 10.43.235.38 <none> 9090:30090/TCP,9443:30433/TCP 5m20s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/console 1/1 1 1 5m20s
deployment.apps/minio-operator 2/2 2 2 5m20s
NAME DESIRED CURRENT READY AGE
replicaset.apps/console-56c7d8bd89 1 1 1 5m20s
replicaset.apps/minio-operator-6c758b8c45 2 2 2 5m20s
5. Retrieve the Operator Console JWT for login
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: shell
:class: copyable
kubectl apply -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
name: console-sa-secret
namespace: minio-operator
annotations:
kubernetes.io/service-account.name: console-sa
type: kubernetes.io/service-account-token
EOF
SA_TOKEN=$(kubectl -n minio-operator get secret console-sa-secret -o jsonpath="{.data.token}" | base64 --decode)
echo $SA_TOKEN
The output of this command is the JSON Web Token (JWT) login credential for Operator Console.
6. Log into the MinIO Operator Console
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. tab-set::
.. tab-item:: NodePort
:selected:
If you configured the service for access through a NodePort, specify the hostname of any worker node in the cluster with that port as ``HOSTNAME:NODEPORT`` to access the Console.
For example, a deployment configured with a NodePort of 30090 and the following ``InternalIP`` addresses can be accessed at ``http://172.18.0.5:30090``.
.. code-block:: shell
:class: copyable
kubectl get nodes -o custom-columns=IP:.status.addresses[:]
IP
map[address:172.18.0.5 type:InternalIP],map[address:k3d-MINIO-agent-3 type:Hostname]
map[address:172.18.0.6 type:InternalIP],map[address:k3d-MINIO-agent-2 type:Hostname]
map[address:172.18.0.2 type:InternalIP],map[address:k3d-MINIO-server-0 type:Hostname]
map[address:172.18.0.4 type:InternalIP],map[address:k3d-MINIO-agent-1 type:Hostname]
map[address:172.18.0.3 type:InternalIP],map[address:k3d-MINIO-agent-0 type:Hostname]
.. tab-item:: Ingress or Load Balancer
If you configured the ``svc/console`` service for access through ingress or a cluster load balancer, you can access the Console using the configured hostname and port.
.. tab-item:: Port Forwarding
You can use ``kubectl port-forward`` to temporarily forward ports for the Console:
.. code-block:: shell
:class: copyable
kubectl port-forward svc/console -n minio-operator 9090:9090
You can then use ``http://localhost:9090`` to access the MinIO Operator Console.
Once you access the Console, use the Console JWT to log in.
You can now :ref:`deploy and manage MinIO Tenants using the Operator Console <deploy-minio-distributed>`.

View File

@ -0,0 +1,244 @@
.. _deploy-minio-kubernetes:
.. _minio-operator-installation:
.. _minio-operator-installation-kustomize:
.. _deploy-operator-kubernetes:
.. _deploy-operator-kubernetes-kustomize:
===================================
Deploy MinIO Operator on Kubernetes
===================================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
.. container:: extlinks-video
- `Object Storage Essentials <https://www.youtube.com/playlist?list=PLFOIsHSSYIK3WitnqhqfpeZ6fRFKHxIr7>`__
- `How to Connect to MinIO with JavaScript <https://www.youtube.com/watch?v=yUR4Fvx0D3E&list=PLFOIsHSSYIK3Dd3Y_x7itJT1NUKT5SxDh&index=5>`__
This page documents installing the MinIO Kubernetes Operator onto Kubernetes infrastructure.
This procedure assumes an installation of Kubernetes Upstream, though the instructions may work for other flavors of Kubernetes.
The MinIO Operator installs a :kube-docs:`Custom Resource Definition (CRD) <concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions>` to support describing MinIO tenants as a Kubernetes :kube-docs:`object <concepts/overview/working-with-objects/kubernetes-objects/>`.
See the MinIO Operator :minio-git:`CRD Reference <operator/blob/master/docs/tenant_crd.adoc>` for complete documentation on the MinIO CRD.
Once you have installed the Kubernetes Operator, you can then deploy MinIO Tenants onto your Kubernetes worker nodes.
This documentation assumes familiarity with referenced Kubernetes concepts, utilities, and procedures.
While this documentation *may* provide guidance for configuring or deploying Kubernetes-related resources on a best-effort basis, it is not a replacement for the official :kube-docs:`Kubernetes Documentation <>`.
Considerations
--------------
Procedure
---------
The following steps deploy Operator using Kustomize and a ``kustomization.yaml`` file from the MinIO Operator GitHub repository.
To install Operator using a Helm chart, see :ref:`Deploy Operator with Helm <minio-k8s-deploy-operator-helm>`.
The following procedure uses ``kubectl -k`` to install the Operator from the MinIO Operator GitHub repository.
``kubectl -k`` and ``kubectl --kustomize`` are aliases that perform the same command.
.. important::
If you use Kustomize to install the Operator, you must use Kustomize to manage or upgrade that installation.
Do not use ``kubectl krew``, a Helm chart, or similar methods to manage or upgrade a MinIO Operator installation deployed with Kustomize.
You can, however, use Kustomize to upgrade a previous version of Operator (5.0.14 or earlier) installed with the MinIO Kubernetes Plugin.
1. Install the latest version of Operator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: shell
:class: copyable
:substitutions:
kubectl apply -k "github.com/minio/operator?ref=v|operator-version-stable|"
The output resembles the following:
.. code-block:: shell
namespace/minio-operator created
customresourcedefinition.apiextensions.k8s.io/miniojobs.job.min.io created
customresourcedefinition.apiextensions.k8s.io/policybindings.sts.min.io created
customresourcedefinition.apiextensions.k8s.io/tenants.minio.min.io created
serviceaccount/console-sa created
serviceaccount/minio-operator created
clusterrole.rbac.authorization.k8s.io/console-sa-role created
clusterrole.rbac.authorization.k8s.io/minio-operator-role created
clusterrolebinding.rbac.authorization.k8s.io/console-sa-binding created
clusterrolebinding.rbac.authorization.k8s.io/minio-operator-binding created
configmap/console-env created
secret/console-sa-secret created
service/console created
service/operator created
service/sts created
deployment.apps/console created
deployment.apps/minio-operator created
2. Verify the Operator pods are running
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: shell
:class: copyable
kubectl get pods -n minio-operator
The output resembles the following:
.. code-block:: shell
NAME READY STATUS RESTARTS AGE
console-56c7d8bd89-485qh 1/1 Running 0 2m42s
minio-operator-6c758b8c45-nkhlx 1/1 Running 0 2m42s
minio-operator-6c758b8c45-dgd8n 1/1 Running 0 2m42s
In this example, the ``minio-operator`` pod is MinIO Operator and the ``console`` pod is the Operator Console.
You can modify your Operator deployment by applying kubectl patches.
You can find examples for common configurations in the `Operator GitHub repository <https://github.com/minio/operator/tree/master/examples/kustomization>`__.
.. _minio-k8s-deploy-operator-access-console:
3. *(Optional)* Configure access to the Operator Console service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Operator Console service does not automatically bind or expose itself for external access on the Kubernetes cluster.
You must instead configure a network control plane component, such as a load balancer or ingress, to grant that external access.
For testing purposes or short-term access, expose the Operator Console service through a NodePort using the following patch:
.. code-block:: shell
:class: copyable
kubectl patch service -n minio-operator console -p '
{
"spec": {
"ports": [
{
"name": "http",
"port": 9090,
"protocol": "TCP",
"targetPort": 9090,
"nodePort": 30090
},
{
"name": "https",
"port": 9443,
"protocol": "TCP",
"targetPort": 9443,
"nodePort": 30433
}
],
"type": "NodePort"
}
}'
The patch command should output ``service/console patched``.
You can now access the service through ports ``30433`` (HTTPS) or ``30090`` (HTTP) on any of your Kubernetes worker nodes.
4. Verify the Operator installation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Check the contents of the specified namespace (``minio-operator``) to ensure all pods and services have started successfully.
.. code-block:: shell
:class: copyable
kubectl get all -n minio-operator
The response should resemble the following:
.. code-block:: shell
NAME READY STATUS RESTARTS AGE
pod/console-56c7d8bd89-485qh 1/1 Running 0 5m20s
pod/minio-operator-6c758b8c45-nkhlx 1/1 Running 0 5m20s
pod/minio-operator-6c758b8c45-dgd8n 1/1 Running 0 5m20s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/operator ClusterIP 10.43.135.241 <none> 4221/TCP 5m20s
service/sts ClusterIP 10.43.117.251 <none> 4223/TCP 5m20s
service/console NodePort 10.43.235.38 <none> 9090:30090/TCP,9443:30433/TCP 5m20s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/console 1/1 1 1 5m20s
deployment.apps/minio-operator 2/2 2 2 5m20s
NAME DESIRED CURRENT READY AGE
replicaset.apps/console-56c7d8bd89 1 1 1 5m20s
replicaset.apps/minio-operator-6c758b8c45 2 2 2 5m20s
5. Retrieve the Operator Console JWT for login
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: shell
:class: copyable
kubectl apply -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
name: console-sa-secret
namespace: minio-operator
annotations:
kubernetes.io/service-account.name: console-sa
type: kubernetes.io/service-account-token
EOF
SA_TOKEN=$(kubectl -n minio-operator get secret console-sa-secret -o jsonpath="{.data.token}" | base64 --decode)
echo $SA_TOKEN
The output of this command is the JSON Web Token (JWT) login credential for Operator Console.
6. Log into the MinIO Operator Console
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. tab-set::
.. tab-item:: NodePort
:selected:
If you configured the service for access through a NodePort, specify the hostname of any worker node in the cluster with that port as ``HOSTNAME:NODEPORT`` to access the Console.
For example, a deployment configured with a NodePort of 30090 and the following ``InternalIP`` addresses can be accessed at ``http://172.18.0.5:30090``.
.. code-block:: shell
:class: copyable
kubectl get nodes -o custom-columns=IP:.status.addresses[:]
IP
map[address:172.18.0.5 type:InternalIP],map[address:k3d-MINIO-agent-3 type:Hostname]
map[address:172.18.0.6 type:InternalIP],map[address:k3d-MINIO-agent-2 type:Hostname]
map[address:172.18.0.2 type:InternalIP],map[address:k3d-MINIO-server-0 type:Hostname]
map[address:172.18.0.4 type:InternalIP],map[address:k3d-MINIO-agent-1 type:Hostname]
map[address:172.18.0.3 type:InternalIP],map[address:k3d-MINIO-agent-0 type:Hostname]
.. tab-item:: Ingress or Load Balancer
If you configured the ``svc/console`` service for access through ingress or a cluster load balancer, you can access the Console using the configured hostname and port.
.. tab-item:: Port Forwarding
You can use ``kubectl port-forward`` to temporarily forward ports for the Console:
.. code-block:: shell
:class: copyable
kubectl port-forward svc/console -n minio-operator 9090:9090
You can then use ``http://localhost:9090`` to access the MinIO Operator Console.
Once you access the Console, use the Console JWT to log in.
You can now :ref:`deploy and manage MinIO Tenants using the Operator Console <deploy-minio-distributed>`.
.. toctree::
:titlesonly:
:hidden:
/operations/deployments/k8s-deploy-operator-helm-on-kubernetes

View File

@ -0,0 +1,326 @@
.. _deploy-operator-openshift:
=========================================
Deploy MinIO Operator on RedHat OpenShift
=========================================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
Red Hat® OpenShift® is an enterprise-ready Kubernetes container platform with full-stack automated operations to manage hybrid cloud, multi-cloud, and edge deployments.
OpenShift includes an enterprise-grade Linux operating system, container runtime, networking, monitoring, registry, and authentication and authorization solutions.
You can deploy the MinIO Kubernetes Operator through the :openshift-docs:`Red Hat® OpenShift® Container Platform 4.8+ <welcome/index.html>`.
You can deploy and manage MinIO Tenants through OpenShift after deploying the MinIO Operator.
This procedure includes instructions for the following deployment paths:
- Purchase and Deploy MinIO through the `RedHat Marketplace <https://marketplace.redhat.com/en-us/products/minio-hybrid-cloud-object-storage>`__.
- Deploy MinIO through the OpenShift `OperatorHub <https://operatorhub.io/operator/minio-operator>`__.
After deploying the MinIO Operator into your OpenShift cluster, you can create and manage MinIO Tenants through the :openshift-docs:`OperatorHub <operators/understanding/olm-understanding-operatorhub.html>` user interface.
This documentation assumes familiarity with all referenced Kubernetes and OpenShift concepts, utilities, and procedures.
While this documentation *may* provide guidance for configuring or deploying Kubernetes-related or OpenShift-related resources on a best-effort basis, it is not a replacement for the official :kube-docs:`Kubernetes Documentation <>` and :openshift-docs:`OpenShift Container Platform 4.8+ Documentation <welcome/index.html>`.
Prerequisites
-------------
In addition to the general :ref:`MinIO Operator prerequisites <minio-operator-prerequisites>`, your OpenShift cluster must also meet the following requirements:
RedHat OpenShift 4.8+
~~~~~~~~~~~~~~~~~~~~~
The MinIO Kubernetes Operator is available starting with `OpenShift 4.8+ <https://docs.openshift.com/container-platform/4.13/welcome/index.html>`__.
Red Hat Marketplace installation requires registration of the OpenShift cluster with the Marketplace for the necessary namespaces.
See `Register OpenShift cluster with Red Hat Marketplace <https://marketplace.redhat.com/en-us/documentation/clusters>`__ for complete instructions.
For older versions of OpenShift, use the generic :ref:`deploy-operator-kubernetes` procedure.
Administrator Access
~~~~~~~~~~~~~~~~~~~~
Installation of operators through the Red Hat Marketplace and the Operator Hub is restricted to OpenShift cluster administrators (``cluster-admin`` privileges).
This procedure requires logging into the Marketplace and/or OpenShift with an account that has those privileges.
OpenShift ``oc`` CLI
~~~~~~~~~~~~~~~~~~~~
:openshift-docs:`Download and Install <cli_reference/openshift_cli/getting-started-cli.html>` the OpenShift :abbr:`CLI (command-line interface)` ``oc`` for use in this procedure.
Pod Security Context Constraints
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The MinIO Operator deploys pods using the following default :kube-docs:`Security Context <tasks/configure-pod-container/security-context/>` per pod:
.. code-block:: yaml
:class: copyable
securityContext:
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
fsGroup: 1000
Certain OpenShift :openshift-docs:`Security Context Constraints </authentication/managing-security-context-constraints.html>` limit the allowed UID or GID for a pod such that MinIO cannot deploy the Tenant successfully.
Ensure that the Project in which the Operator deploys the Tenant has sufficient SCC settings that allow the default pod security context.
You can alternatively modify the tenant security context settings during deployment.
The following command returns the optimal value for the securityContext:
.. code-block:: shell
:class: copyable
oc get namespace <namespace> \
-o=jsonpath='{.metadata.annotations.openshift\.io/sa\.scc\.supplemental-groups}{"\n"}'
The command returns output similar to the following:
.. code-block:: shell
1056560000/10000
Take note of this value before the slash for use in this procedure.
Procedure
---------
1) Access the MinIO Operator Installation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Select the tab that corresponds to your preferred installation method:
.. tab-set::
.. tab-item:: Red Hat OperatorHub
Log into the OpenShift Web Console as a user with ``cluster-admin`` privileges.
From the :guilabel:`Administrator` panel, select :guilabel:`Operators`, then :guilabel:`OperatorHub`.
From the :guilabel:`OperatorHub` page, type "MinIO" into the :guilabel:`Filter` text entry. Select the :guilabel:`MinIO Operator` tile from the search list.
.. image:: /images/openshift/minio-openshift-select-minio.png
:align: center
:width: 90%
:class: no-scaled-link
:alt: From the OperatorHub, search for MinIO, then select the MinIO Tile.
Select the :guilabel:`MinIO Operator` tile, then click :guilabel:`Install` to begin the installation.
.. tab-item:: Red Hat Marketplace
Open the `MinIO Red Hat Marketplace listing <https://marketplace.redhat.com/en-us/products/minio-hybrid-cloud-object-storage>`__ in your browser.
Click :guilabel:`Login` to log in with your Red Hat Marketplace account.
After logging in, click :guilabel:`Purchase` to purchase the MinIO Operator for your account.
After completing the purchase, click :guilabel:`Workplace` from the top navigation and select :guilabel:`My Software`.
.. image:: /images/openshift/minio-openshift-marketplace-my-software.png
:align: center
:width: 90%
:class: no-scaled-link
:alt: From the Red Hat Marketplace, select Workplace, then My Software
Click :guilabel:`MinIO Hybrid Cloud Object Storage` and select :guilabel:`Install Operator` to start the Operator Installation procedure in OpenShift.
2) Configure and Deploy the Operator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The :guilabel:`Install Operator` page provides a walkthrough for configuring the MinIO Operator installation.
.. image:: /images/openshift/minio-openshift-operator-installation.png
:align: center
:width: 90%
:class: no-scaled-link
:alt: Complete the Operator Installation Walkthrough
- For :guilabel:`Update channel`, select any of the available options.
- For :guilabel:`Installation Mode`, select :guilabel:`All namespaces on the cluster`
- For :guilabel:`Installed Namespace`, select :guilabel:`openshift-operators`
- For :guilabel:`Approval Strategy`, select the approval strategy of your choice.
See the :openshift-docs:`Operator Installation Documentation <operators/admin/olm-adding-operators-to-cluster.html#olm-installing-from-operatorhub-using-web-console_olm-adding-operators-to-a-cluster>` :guilabel:`Step 5` for complete descriptions of each displayed option.
Click :guilabel:`Install` to start the installation procedure.
The web console displays a widget for tracking the installation progress.
.. image:: /images/openshift/minio-openshift-operator-installation-progress.png
:align: center
:width: 70%
:class: no-scaled-link
:alt: Wait for Installation to Complete.
Once installation completes, click :guilabel:`View Operator` to view the MinIO Operator page.
3) Configure TLS Certificates
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you have installed the MinIO Operator from Red Hat OperatorHub, the installation process also configures the :openshift-docs:`OpenShift Service CA Operator <security/certificate_types_descriptions/service-ca-certificates.html>`.
This Operator manages the TLS certificates required to access the MinIO Operator Console and Tenants.
It automatically renews and rotates the certificates 13 months before expiration.
No additional action is required.
For Operator installations deployed by other methods, configure the :openshift-docs:`Service CA certificates <security/certificate_types_descriptions/service-ca-certificates.html>` manually.
See the dropdowns below for details.
.. dropdown:: OpenShift Service CA Certificate configuration
To manually enable the ``service-ca`` Operator to manage TLS certificates:
#. Use the following :openshift-docs:`oc <cli_reference/openshift_cli/getting-started-cli.html>` command to edit the deployment:
.. code-block:: shell
:class: copyable
oc edit deployment minio-operator -n minio-operator
If needed, replace ``minio-operator`` with the name and namespace of your deployment.
``oc edit`` opens the deployment configuration file in an editor.
#. In the ``spec`` section, add the highlighted MinIO Operator :ref:`environment variables <minio-server-environment-variables>`:
.. code-block:: shell
:class: copyable
:emphasize-lines: 5-8
containers:
- args:
- controller
env:
- name: MINIO_CONSOLE_TLS_ENABLE
value: 'on'
- name: MINIO_OPERATOR_RUNTIME
value: OpenShift
#. In the ``volumes`` section, add the following volumes and volume mounts:
- ``sts-tls``
- ``openshift-service-ca``
- ``openshift-csr-signer-ca``
The added volume configuration resembles the following:
.. code-block:: shell
:class: copyable
volumes:
- name: sts-tls
projected:
sources:
- secret:
name: sts-tls
items:
- key: tls.crt
path: public.crt
- key: tls.key
path: private.key
optional: true
defaultMode: 420
- name: openshift-service-ca
configMap:
name: openshift-service-ca.crt
items:
- key: service-ca.crt
path: service-ca.crt
defaultMode: 420
optional: true
- name: openshift-csr-signer-ca
projected:
sources:
- secret:
name: openshift-csr-signer-ca
items:
- key: tls.crt
path: tls.crt
optional: true
defaultMode: 420
volumeMounts:
- name: openshift-service-ca
mountPath: /tmp/service-ca
- name: openshift-csr-signer-ca
mountPath: /tmp/csr-signer-ca
- name: sts-tls
mountPath: /tmp/sts
.. dropdown:: OpenShift Service CA Certificate for Helm deployments
For Helm deployments on OpenShift, add the following :ref:`environment variables <minio-server-environment-variables>` and volumes to the ``values.yaml`` in the Operator Helm chart before deploying.
The added YAML configuration for the ``operator`` pod resembles the following:
.. code-block::
:class: copyable
operator:
env:
- name: MINIO_OPERATOR_RUNTIME
value: "OpenShift"
- name: MINIO_CONSOLE_TLS_ENABLE
value: "on"
volumes:
- name: sts-tls
projected:
sources:
- secret:
name: sts-tls
items:
- key: tls.crt
path: public.crt
- key: tls.key
path: private.key
optional: true
defaultMode: 420
- name: openshift-service-ca
configMap:
name: openshift-service-ca.crt
items:
- key: service-ca.crt
path: service-ca.crt
defaultMode: 420
optional: true
- name: openshift-csr-signer-ca
projected:
sources:
- secret:
name: openshift-csr-signer-ca
items:
- key: tls.crt
path: tls.crt
optional: true
defaultMode: 420
volumeMounts:
- name: openshift-service-ca
mountPath: /tmp/service-ca
- name: openshift-csr-signer-ca
mountPath: /tmp/csr-signer-ca
- name: sts-tls
mountPath: /tmp/sts
4) Open the MinIO Operator Interface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can find the MinIO Operator Interface from the :guilabel:`Operators` left-hand navigation header:
1. Go to :guilabel:`Operators`, then :guilabel:`Installed Operators`.
2. For the :guilabel:`Project` dropdown, select :guilabel:`openshift-operators`.
3. Select :guilabel:`MinIO Operators` from the list of installed operators.
The :guilabel:`Status` column must read :guilabel:`Success` to access the Operator interface.
5) Next Steps
~~~~~~~~~~~~~
After deploying the MinIO Operator, you can create a new MinIO Tenant.
To deploy a MinIO Tenant using OpenShift, see :ref:`deploy-minio-tenant-redhat-openshift`.

View File

@ -0,0 +1,265 @@
.. _deploy-operator-rancher:
=====================================
Deploy MinIO Operator on SUSE Rancher
=====================================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
SUSE® Rancher® is a multi-cluster container management platform for organizations that deploy containerized workloads, orchestrated by Kubernetes.
Rancher can manage access, usage, infrastructure and applications across clusters, that are Cloud Native Computing Foundation (CNCF) conformant and certified, anywhere across edge, on-premise data centers, or cloud service providers.
Rancher supports MinIO as part of the `SUSE One Partner Solution Stack <https://www.suse.com/partners/>`__.
MinIO supports the following methods for installing the MinIO Operator onto SUSE Rancher-managed clusters:
SUSE Rancher Apps & Marketplace
You can deploy and manage the MinIO Operator through the SUSE Rancher Apps & Marketplace.
See `MinIO Object Storage for SUSE Rancher <https://documentation.suse.com/trd/kubernetes/html/gs_rancher_minio/index.html>`__ for a procedure on that installation path.
Using Kubernetes Kustomize
MinIO provides Kustomize templates for deploying the MinIO Operator onto Kubernetes infrastructure.
You can use Kustomize to install the Operator onto Rancher clusters.
MinIO Operator installations and Tenants deployed through this path require manual subscription with MinIO SUBNET for licensing and support.
Using Kubernetes Helm
MinIO provides a Helm chart for deploying the MinIO Operator onto Kubernetes infrastructure.
See :ref:`minio-k8s-deploy-operator-helm` for instructions.
MinIO Operator installations and Tenants deployed through this path require manual subscription with MinIO SUBNET for licensing and support.
This page documents deploying the MinIO Operator through the CLI using Kustomize.
This documentation assumes familiarity with all referenced Kubernetes and Rancher Kubernetes concepts, utilities, and procedures.
While this documentation *may* provide guidance for configuring or deploying Kubernetes-related or Rancher-related resources on a best-effort basis, it is not a replacement for the official :kube-docs:`Kubernetes Documentation <>`.
Prerequisites
-------------
In addition to the general :ref:`MinIO Operator prerequisites <minio-operator-prerequisites>`, your Rancher cluster must also meet the following requirements:
Existing Rancher Cluster
~~~~~~~~~~~~~~~~~~~~~~~~
This procedure assumes an existing Rancher cluster onto which you can deploy the MinIO Operator.
The Operator by default deploys pods and services with two replicas each and pod anti-affinity.
The Rancher cluster should therefore have at least two nodes available for scheduling Operator pods and services.
While these nodes *may* be the same nodes intended for use by MinIO Tenants, co-locating Operator and Tenant pods may increase the risk of service interruptions due to the loss of any one node.
``kubectl`` Access to the Rancher Cluster
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Ensure your host machine has a ``kubectl`` installation compatible with the target Rancher cluster.
For guidance on connecting ``kubectl`` to Rancher, see `Access Clusters with kubectl and kubeconfig <https://ranchermanager.docs.rancher.com/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig>`__.
Your ``kubectl`` configuration must include authentication as a user with sufficient permissions to deploy Kubernetes resources.
You can reference the MinIO Operator default :minio-git:`cluster role </operator/blob/master/resources/base/cluster-role.yaml>` for guidance on the Kubernetes APIs, resources, and verbs which the Operator uses.
Procedure
---------
The following steps deploy Operator using Kustomize and a ``kustomization.yaml`` file from the MinIO Operator GitHub repository.
To install Operator using a Helm chart, see :ref:`Deploy Operator with Helm <minio-k8s-deploy-operator-helm>`.
The following procedure uses ``kubectl -k`` to install the Operator from the MinIO Operator GitHub repository.
``kubectl -k`` and ``kubectl --kustomize`` are aliases that perform the same command.
.. important::
If you use Kustomize to install the Operator, you must use Kustomize to manage or upgrade that installation.
Do not use ``kubectl krew``, a Helm chart, or similar methods to manage or upgrade a MinIO Operator installation deployed with Kustomize.
You can, however, use Kustomize to upgrade a previous version of Operator (5.0.14 or earlier) installed with the MinIO Kubernetes Plugin.
1. Install the latest version of Operator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: shell
:class: copyable
:substitutions:
kubectl apply -k "github.com/minio/operator?ref=v|operator-version-stable|"
The output resembles the following:
.. code-block:: shell
namespace/minio-operator created
customresourcedefinition.apiextensions.k8s.io/miniojobs.job.min.io created
customresourcedefinition.apiextensions.k8s.io/policybindings.sts.min.io created
customresourcedefinition.apiextensions.k8s.io/tenants.minio.min.io created
serviceaccount/console-sa created
serviceaccount/minio-operator created
clusterrole.rbac.authorization.k8s.io/console-sa-role created
clusterrole.rbac.authorization.k8s.io/minio-operator-role created
clusterrolebinding.rbac.authorization.k8s.io/console-sa-binding created
clusterrolebinding.rbac.authorization.k8s.io/minio-operator-binding created
configmap/console-env created
secret/console-sa-secret created
service/console created
service/operator created
service/sts created
deployment.apps/console created
deployment.apps/minio-operator created
2. Verify the Operator pods are running
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: shell
:class: copyable
kubectl get pods -n minio-operator
The output resembles the following:
.. code-block:: shell
NAME READY STATUS RESTARTS AGE
console-56c7d8bd89-485qh 1/1 Running 0 2m42s
minio-operator-6c758b8c45-nkhlx 1/1 Running 0 2m42s
minio-operator-6c758b8c45-dgd8n 1/1 Running 0 2m42s
In this example, the ``minio-operator`` pod is MinIO Operator and the ``console`` pod is the Operator Console.
You can modify your Operator deployment by applying kubectl patches.
You can find examples for common configurations in the `Operator GitHub repository <https://github.com/minio/operator/tree/master/examples/kustomization>`__.
3. *(Optional)* Configure access to the Operator Console service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Operator Console service does not automatically bind or expose itself for external access on the Kubernetes cluster.
You must instead configure a network control plane component, such as a load balancer or ingress, to grant that external access.
For testing purposes or short-term access, expose the Operator Console service through a NodePort using the following patch:
.. code-block:: shell
:class: copyable
kubectl patch service -n minio-operator console -p '
{
"spec": {
"ports": [
{
"name": "http",
"port": 9090,
"protocol": "TCP",
"targetPort": 9090,
"nodePort": 30090
},
{
"name": "https",
"port": 9443,
"protocol": "TCP",
"targetPort": 9443,
"nodePort": 30433
}
],
"type": "NodePort"
}
}'
The patch command should output ``service/console patched``.
You can now access the service through ports ``30433`` (HTTPS) or ``30090`` (HTTP) on any of your Kubernetes worker nodes.
For Rancher clusters configured with an ingress controller or load balancer, you can alternatively expose the Console service through that component using the hostname and port it provides.
4. Verify the Operator installation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Check the contents of the specified namespace (``minio-operator``) to ensure all pods and services have started successfully.
.. code-block:: shell
:class: copyable
kubectl get all -n minio-operator
The response should resemble the following:
.. code-block:: shell
NAME READY STATUS RESTARTS AGE
pod/console-56c7d8bd89-485qh 1/1 Running 0 5m20s
pod/minio-operator-6c758b8c45-nkhlx 1/1 Running 0 5m20s
pod/minio-operator-6c758b8c45-dgd8n 1/1 Running 0 5m20s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/operator ClusterIP 10.43.135.241 <none> 4221/TCP 5m20s
service/sts ClusterIP 10.43.117.251 <none> 4223/TCP 5m20s
service/console NodePort 10.43.235.38 <none> 9090:30090/TCP,9443:30433/TCP 5m20s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/console 1/1 1 1 5m20s
deployment.apps/minio-operator 2/2 2 2 5m20s
NAME DESIRED CURRENT READY AGE
replicaset.apps/console-56c7d8bd89 1 1 1 5m20s
replicaset.apps/minio-operator-6c758b8c45 2 2 2 5m20s
5. Retrieve the Operator Console JWT for login
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: shell
:class: copyable
kubectl apply -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
name: console-sa-secret
namespace: minio-operator
annotations:
kubernetes.io/service-account.name: console-sa
type: kubernetes.io/service-account-token
EOF
SA_TOKEN=$(kubectl -n minio-operator get secret console-sa-secret -o jsonpath="{.data.token}" | base64 --decode)
echo $SA_TOKEN
The output of this command is the JSON Web Token (JWT) login credential for Operator Console.
6. Log into the MinIO Operator Console
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. tab-set::
.. tab-item:: NodePort
:selected:
If you configured the service for access through a NodePort, specify the hostname of any worker node in the cluster with that port as ``HOSTNAME:NODEPORT`` to access the Console.
For example, a deployment configured with a NodePort of 30090 and the following ``InternalIP`` addresses can be accessed at ``http://172.18.0.5:30090``.
.. code-block:: shell
:class: copyable
kubectl get nodes -o custom-columns=IP:.status.addresses[:]
IP
map[address:172.18.0.5 type:InternalIP],map[address:k3d-MINIO-agent-3 type:Hostname]
map[address:172.18.0.6 type:InternalIP],map[address:k3d-MINIO-agent-2 type:Hostname]
map[address:172.18.0.2 type:InternalIP],map[address:k3d-MINIO-server-0 type:Hostname]
map[address:172.18.0.4 type:InternalIP],map[address:k3d-MINIO-agent-1 type:Hostname]
map[address:172.18.0.3 type:InternalIP],map[address:k3d-MINIO-agent-0 type:Hostname]
.. tab-item:: Ingress or Load Balancer
If you configured the ``svc/console`` service for access through ingress or a cluster load balancer, you can access the Console using the configured hostname and port.
.. tab-item:: Port Forwarding
You can use ``kubectl port-forward`` to temporarily forward ports for the Console:
.. code-block:: shell
:class: copyable
kubectl port-forward svc/console -n minio-operator 9090:9090
You can then use ``http://localhost:9090`` to access the MinIO Operator Console.
Once you access the Console, use the Console JWT to log in.
You can now :ref:`deploy and manage MinIO Tenants using the Operator Console <deploy-minio-distributed>`.

View File

@ -43,7 +43,7 @@ You must meet the following requirements to install a MinIO Tenant with Helm:
This procedure assumes your Kubernetes cluster access grants you broad administrative permissions.
For more about Tenant installation requirements, including supported Kubernetes versions and TLS certificates, see the :ref:`Tenant deployment prerequisites <deploy-minio-distributed-prereqs-storage>`.
For more about Tenant installation requirements, including supported Kubernetes versions and TLS certificates, see the :ref:`Tenant deployment prerequisites <minio-hardware-checklist-storage>`.
This procedure assumes familiarity with the referenced Kubernetes concepts and utilities.
While this documentation may provide guidance for configuring or deploying Kubernetes-related resources on a best-effort basis, it is not a replacement for the official :kube-docs:`Kubernetes Documentation <>`.
@ -373,7 +373,7 @@ This method may support easier pre-configuration of the Tenant compared to the :
mc alias set myminio https://localhost:9000 minio minio123 --insecure
This example uses the non-TLS ``myminio-hl`` service, which requires :std:option:`--insecure <mc.--insecure>`.
This example uses the non-TLS ``myminio-hl`` service, which requires the ``--insecure`` option.
If you have a TLS cert configured, omit ``--insecure`` and use ``svc/minio`` instead.

View File

@ -0,0 +1,259 @@
.. _minio-k8s-deploy-minio-tenant:
.. _deploy-minio-tenant-redhat-openshift:
=====================
Deploy a MinIO Tenant
=====================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
This procedure documents deploying a MinIO Tenant using the MinIO Operator.
.. screenshot temporarily removed
.. image:: /images/k8s/operator-dashboard.png
:align: center
:width: 70%
:class: no-scaled-link
:alt: MinIO Operator Console
Deploying Single-Node topologies requires additional configurations not covered in this documentation.
You can alternatively use a simple Kubernetes YAML object to describe a Single-Node topology for local testing and evaluation as necessary.
MinIO does not recommend nor support single-node deployment topologies for production environments.
This documentation assumes familiarity with all referenced Kubernetes concepts, utilities, and procedures.
While this documentation *may* provide guidance for configuring or deploying Kubernetes-related resources on a best-effort basis, it is not a replacement for the official :kube-docs:`Kubernetes Documentation <>`.
.. _minio-k8s-deploy-minio-tenant-security:
Deploy a MinIO Tenant using Kustomize
-------------------------------------
The following procedure uses ``kubectl -k`` to deploy a MinIO Tenant using the ``base`` Kustomization template in the :minio-git:`MinIO Operator Github repository <operator/tree/master/examples/kustomization/base>`.
You can select a different base or pre-built template from the :minio-git:`repository <operator/tree/master/examples/kustomization/>` as your starting point, or build your own Kustomization resources using the :ref:`MinIO Custom Resource Documentation <minio-operator-crd>`.
.. important::
If you use Kustomize to deploy a MinIO Tenant, you must use Kustomize to manage or upgrade that deployment.
Do not use ``kubectl krew``, a Helm Chart, or similar methods to manage or upgrade the MinIO Tenant.
This procedure is not exhaustive of all possible configuration options available in the :ref:`Tenant CRD <minio-operator-crd>`.
It provides a baseline from which you can modify and tailor the Tenant to your requirements.
.. container:: procedure
#. Create a YAML object for the Tenant
Use the ``kubectl kustomize`` command to produce a YAML file containing all Kubernetes resources necessary to deploy the ``base`` Tenant:
.. code-block:: shell
:class: copyable
kubectl kustomize https://github.com/minio/operator/examples/kustomization/base/ > tenant-base.yaml
The command creates a single YAML file with multiple objects separated by the ``---`` line.
Open the file in your preferred editor.
The following steps reference each object based on its ``kind`` and ``metadata.name`` fields:
#. Configure the Tenant topology
The ``kind: Tenant`` object describes the MinIO Tenant.
The following fields share the ``spec.pools[0]`` prefix and control the number of servers, volumes per server, and storage class of all pods deployed in the Tenant:
.. list-table::
:header-rows: 1
:widths: 30 70
* - Field
- Description
* - ``servers``
- The number of MinIO pods to deploy in the Server Pool.
* - ``volumesPerServer``
- The number of persistent volumes to attach to each MinIO pod (``servers``).
The Operator generates ``volumesPerServer x servers`` Persistent Volume Claims for the Tenant.
* - ``volumeClaimTemplate.spec.storageClassName``
- The Kubernetes storage class to associate with the generated Persistent Volume Claims.
If no storage class exists matching the specified value *or* if the specified storage class cannot meet the requested number of PVCs or storage capacity, the Tenant may fail to start.
* - ``volumeClaimTemplate.spec.resources.requests.storage``
- The amount of storage to request for each generated PVC.
#. Configure Tenant Affinity or Anti-Affinity
The MinIO Operator supports the following Kubernetes Affinity and Anti-Affinity configurations:
- Node Affinity (``spec.pools[n].nodeAffinity``)
- Pod Affinity (``spec.pools[n].podAffinity``)
- Pod Anti-Affinity (``spec.pools[n].podAntiAffinity``)
MinIO recommends configuring Tenants with Pod Anti-Affinity to ensure that the Kubernetes scheduler does not schedule multiple pods on the same worker node.
If you have specific worker nodes on which you want to deploy the tenant, pass those node labels or filters to the ``nodeAffinity`` field to constrain the scheduler to place pods on those nodes.
#. Configure Network Encryption
The MinIO Tenant CRD provides the following fields from which you can configure tenant TLS network encryption:
.. list-table::
:header-rows: 1
:widths: 30 70
* - Field
- Description
* - ``tenant.certificate.requestAutoCert``
- Enable or disable MinIO :ref:`automatic TLS certificate generation <minio-tls>`
Defaults to ``true`` or enabled if omitted.
* - ``tenant.certificate.certConfig``
- Customize the behavior of :ref:`automatic TLS <minio-tls>`, if enabled.
* - ``tenant.certificate.externalCertSecret``
- Enable TLS for multiple hostnames via Server Name Indication (SNI)
Specify one or more Kubernetes secrets of type ``kubernetes.io/tls`` or ``cert-manager``.
* - ``tenant.certificate.externalCACertSecret``
- Enable validation of client TLS certificates signed by unknown, third-party, or internal Certificate Authorities (CA).
Specify one or more Kubernetes secrets of type ``kubernetes.io/tls`` containing the full chain of CA certificates for a given authority.
#. Configure MinIO Environment Variables
You can set MinIO Server environment variables using the ``tenant.configuration`` field.
.. list-table::
:header-rows: 1
:widths: 30 70
* - Field
- Description
* - ``tenant.configuration``
- Specify a Kubernetes opaque secret whose data payload ``config.env`` contains each MinIO environment variable you want to set.
The ``config.env`` data payload **must** be a base64-encoded string.
You can create a local file, set your environment variables, and then use ``cat LOCALFILE | base64`` to create the payload.
The YAML includes an object ``kind: Secret`` with ``metadata.name: storage-configuration`` that sets the root username, password, erasure parity settings, and enables Tenant Console.
Modify this as needed to reflect your Tenant requirements.
#. Review the Namespace
The YAML object ``kind: Namespace`` sets the default namespace for the Tenant to ``minio-tenant``.
You can change this value to create a different namespace for the Tenant.
You must change **all** ``metadata.namespace`` values in the YAML file to match the Namespace.
#. Deploy the Tenant
Use the ``kubectl apply -f`` command to deploy the Tenant.
.. code-block:: shell
:class: copyable
kubectl apply -f tenant-base.yaml
The command creates each of the resources specified in the YAML object at the configured namespace.
You can monitor the progress using the following command:
.. code-block:: shell
:class: copyable
watch kubectl get all -n minio-tenant
#. Expose the Tenant MinIO S3 API port
To test the MinIO Client :mc:`mc` from your local machine, forward the MinIO port and create an alias.
* Forward the Tenant's MinIO port:
.. code-block:: shell
:class: copyable
kubectl port-forward svc/MINIO_TENANT_NAME-hl 9000 -n MINIO_TENANT_NAMESPACE
* Create an alias for the Tenant service:
.. code-block:: shell
:class: copyable
mc alias set myminio https://localhost:9000 minio minio123 --insecure
You can use :mc:`mc mb` to create a bucket on the Tenant:
.. code-block:: shell
:class: copyable
mc mb myminio/mybucket --insecure
If you deployed your MinIO Tenant using TLS certificates minted by a trusted Certificate Authority (CA) you can omit the ``--insecure`` flag.
See :ref:`create-tenant-connect-tenant` for specific instructions.
.. _create-tenant-connect-tenant:
Connect to the Tenant
---------------------
The MinIO Operator creates services for the MinIO Tenant.
Use the ``kubectl get svc -n NAMESPACE`` command to review the deployed services.
For Kubernetes services which use a custom ``kubectl`` analog, you can substitute the name of that program.
.. code-block:: shell
:class: copyable
kubectl get svc -n minio-tenant-1
.. code-block:: shell
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
minio LoadBalancer 10.97.114.60 <pending> 443:30979/TCP 2d3h
TENANT-NAMESPACE-console LoadBalancer 10.106.103.247 <pending> 9443:32095/TCP 2d3h
TENANT-NAMESPACE-hl ClusterIP None <none> 9000/TCP 2d3h
- The ``minio`` service corresponds to the MinIO Tenant service.
Applications should use this service for performing operations against the MinIO Tenant.
- The ``*-console`` service corresponds to the :minio-git:`MinIO Console <console>`.
Administrators should use this service for accessing the MinIO Console and performing administrative operations on the MinIO Tenant.
The remaining services support Tenant operations and are not intended for consumption by users or administrators.
By default each service is visible only within the Kubernetes cluster.
Applications deployed inside the cluster can access the services using the ``CLUSTER-IP``.
Applications external to the Kubernetes cluster can access the services using the ``EXTERNAL-IP``.
This value is only populated for Kubernetes clusters configured for Ingress or a similar network access service.
Kubernetes provides multiple options for configuring external access to services.
See the Kubernetes documentation on :kube-docs:`Publishing Services (ServiceTypes) <concepts/services-networking/service/#publishing-services-service-types>` and :kube-docs:`Ingress <concepts/services-networking/ingress/>` for more complete information on configuring external access to services.
For specific flavors of Kubernetes, such as OpenShift or Rancher, defer to the service documentation on the preferred or available methods of exposing Services to internal or external access.
.. toctree::
:titlesonly:
:hidden:
/operations/deployments/k8s-deploy-minio-tenant-helm-on-kubernetes
/operations/deployments/k8s-upgrade-minio-tenant-on-kubernetes
/operations/deployments/k8s-expand-minio-tenant-on-kubernetes
/operations/deployments/k8s-modify-minio-tenant-on-kubernetes
/operations/deployments/k8s-delete-minio-tenant-on-kubernetes

View File

@ -46,16 +46,14 @@ Persistent Volumes
:start-after: start-exclusive-drive-access
:end-before: end-exclusive-drive-access
.. cond:: not eks
MinIO can use any Kubernetes :kube-docs:`Persistent Volume (PV) <concepts/storage/persistent-volumes>` that supports the :kube-docs:`ReadWriteOnce <concepts/storage/persistent-volumes/#access-modes>` access mode.
MinIO's consistency guarantees require the exclusive storage access that ``ReadWriteOnce`` provides.
MinIO can use any Kubernetes :kube-docs:`Persistent Volume (PV) <concepts/storage/persistent-volumes>` that supports the :kube-docs:`ReadWriteOnce <concepts/storage/persistent-volumes/#access-modes>` access mode.
MinIO's consistency guarantees require the exclusive storage access that ``ReadWriteOnce`` provides.
For Kubernetes clusters where nodes have Direct Attached Storage, MinIO strongly recommends using the `DirectPV CSI driver <https://min.io/directpv?ref=docs>`__.
DirectPV provides a distributed persistent volume manager that can discover, format, mount, schedule, and monitor drives across Kubernetes nodes.
DirectPV addresses the limitations of manually provisioning and monitoring :kube-docs:`local persistent volumes <concepts/storage/volumes/#local>`.
For Kubernetes clusters where nodes have Direct Attached Storage, MinIO strongly recommends using the `DirectPV CSI driver <https://min.io/directpv?ref=docs>`__.
DirectPV provides a distributed persistent volume manager that can discover, format, mount, schedule, and monitor drives across Kubernetes nodes.
DirectPV addresses the limitations of manually provisioning and monitoring :kube-docs:`local persistent volumes <concepts/storage/volumes/#local>`.
.. cond:: eks
.. note::
MinIO Tenants on EKS must use the :github:`EBS CSI Driver <kubernetes-sigs/aws-ebs-csi-driver>` to provision the necessary underlying persistent volumes.
MinIO strongly recommends using SSD-backed EBS volumes for best performance.
@ -117,27 +115,3 @@ The MinIO Operator supports expanding a MinIO Tenant by adding additional pools.
You can use the ``kubectl get events -n TENANT-NAMESPACE --watch`` to monitor the progress of expansion.
The MinIO Operator updates services to route connections appropriately across the new nodes.
If you use customized services, routes, ingress, or similar Kubernetes network components, you may need to update those components for the new pod hostname ranges.
.. Following link is intended for K8s only
.. _minio-decommissioning:
Decommission a Tenant Server Pool
----------------------------------
Decommissioning a server pool involves three steps:
1) Run the :mc-cmd:`mc admin decommission start` command against the Tenant
2) Wait until decommissioning completes
3) Modify the Tenant YAML to remove the decommissioned pool
When removing the Tenant pool, ensure the ``spec.pools.[n].name`` fields have values for all remaining pools.
.. include:: /includes/common-installation.rst
:start-after: start-pool-order-must-not-change
:end-before: end-pool-order-must-not-change
.. important::
You cannot reuse the same pool name or hostname sequence for a decommissioned pool.

View File

@ -0,0 +1,171 @@
.. _deploy-minio-operator:
=========================
MinIO Kubernetes Operator
=========================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
MinIO is a Kubernetes-native high performance object store with an S3-compatible API.
The MinIO Kubernetes Operator supports deploying MinIO Tenants onto private and public cloud infrastructures ("Hybrid" Cloud).
The MinIO Operator installs a :kube-docs:`Custom Resource Definition (CRD) <concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions>` to support describing MinIO tenants as a Kubernetes :kube-docs:`object <concepts/overview/working-with-objects/kubernetes-objects/>`.
The MinIO Operator exists in its own namespace.
Within the Operator's namespace, the MinIO Operator utilizes two pods:
- The Operator pod for the base Operator functions to deploy, manage, modify, and maintain tenants.
- Console pod for the Operator's Graphical User Interface, the Operator Console.
See the MinIO Operator :minio-git:`CRD Reference <operator/blob/master/docs/tenant_crd.adoc>` for complete documentation on the MinIO CRD.
.. _minio-operator-prerequisites:
Operator Prerequisites
----------------------
Kubernetes Version 1.21.0
~~~~~~~~~~~~~~~~~~~~~~~~~
.. important::
MinIO **strongly recommends** upgrading Production clusters running `End-Of-Life <https://kubernetes.io/releases/patch-releases/#non-active-branch-history>`__ Kubernetes APIs.
Starting with v5.0.0, MinIO **requires** Kubernetes 1.21.0 or later for both the infrastructure and the ``kubectl`` CLI tool.
.. versionadded:: Operator 5.0.6
For Kubernetes 1.25.0 and later, MinIO supports deploying in environments with the :kube-docs:`Pod Security admission (PSA) <concepts/security/pod-security-admission>` ``restricted`` policy enabled.
Kustomize and ``kubectl``
~~~~~~~~~~~~~~~~~~~~~~~~~
`Kustomize <https://kubernetes.io/docs/tasks/manage-kubernetes-objects/kustomization>`__ is a YAML-based templating tool that allows you to define Kubernetes resources in a declarative and repeatable fashion.
Kustomize is included with the :kube-docs:`kubectl <reference/kubectl>` command line tool.
This procedure assumes that your local host machine has both the matching version of ``kubectl`` for your Kubernetes cluster *and* the necessary access to that cluster to create new resources.
The `default MinIO Operator Kustomize template <https://github.com/minio/operator/blob/master/kustomization.yaml>`__ provides a starting point for customizing configurations for your local environment.
You can modify the default Kustomization file or apply your own `patches <https://datatracker.ietf.org/doc/html/rfc6902>`__ to customize the Operator deployment for your Kubernetes cluster.
.. _minio-k8s-deploy-operator-tls:
Kubernetes TLS Certificate API
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. versionchanged:: Operator v.5.0.0
The MinIO Operator manages TLS Certificate Signing Requests (CSR) using the Kubernetes ``certificates.k8s.io`` :kube-docs:`TLS certificate management API <tasks/tls/managing-tls-in-a-cluster/>` to create signed TLS certificates in the following circumstances:
- When ``autoCert`` is enabled.
- For the MinIO Console when the :envvar:`MINIO_CONSOLE_TLS_ENABLE` environment variable is set to ``on``.
- For :ref:`STS service <minio-security-token-service>` when :envvar:`OPERATOR_STS_ENABLED` environment variable is set to ``on``.
- For retrieving the health of the cluster.
The MinIO Operator reads certificates inside the ``operator-ca-tls`` secret and syncs this secret within the tenant namespace to trust private certificate authorities, such as when using cert-manager.
For any of these circumstances, the MinIO Operator *requires* that the Kubernetes ``kube-controller-manager`` configuration include the following :kube-docs:`configuration settings <reference/command-line-tools-reference/kube-controller-manager/#options>`:
- ``--cluster-signing-key-file`` - Specify the PEM-encoded RSA or ECDSA private key used to sign cluster-scoped certificates.
- ``--cluster-signing-cert-file`` - Specify the PEM-encoded x.509 Certificate Authority certificate used to issue cluster-scoped certificates.
The Kubernetes TLS API uses the CA signature algorithm for generating new TLS certificates.
MinIO recommends ECDSA (e.g. `NIST P-256 curve <https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf>`__) or EdDSA (e.g. :rfc:`Curve25519 <7748>`) TLS private keys/certificates due to their lower computation requirements compared to RSA.
See :ref:`minio-TLS-supported-cipher-suites` for a complete list of supported TLS Cipher Suites.
If the Kubernetes cluster is not configured to respond to a generated :abbr:`CSR (Certificate Signing Request)`, the Operator cannot complete initialization.
Some Kubernetes providers do not specify these configuration values by default.
To check whether the ``kube-controller-manager`` specifies the cluster signing key and certificate files, use the following command:
.. code-block:: shell
:class: copyable
kubectl get pod kube-controller-manager-$CLUSTERNAME-control-plane \
-n kube-system -o yaml
- Replace ``$CLUSTERNAME`` with the name of the Kubernetes cluster.
Confirm that the output contains the highlighted lines.
The output of the example command above may differ from the output in your terminal:
.. code-block:: shell
:emphasize-lines: 12,13
spec:
containers:
- command:
- kube-controller-manager
- --allocate-node-cidrs=true
- --authentication-kubeconfig=/etc/kubernetes/controller-manager.conf
- --authorization-kubeconfig=/etc/kubernetes/controller-manager.conf
- --bind-address=127.0.0.1
- --client-ca-file=/etc/kubernetes/pki/ca.crt
- --cluster-cidr=10.244.0.0/16
- --cluster-name=my-cluster-name
- --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
- --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
...
.. important::
The MinIO Operator automatically generates TLS certificates for all MinIO Tenant pods using the specified Certificate Authority (CA).
Clients external to the Kubernetes cluster must trust the Kubernetes cluster CA to connect to the MinIO Operator or MinIO Tenants.
Clients which cannot trust the Kubernetes cluster CA can disable TLS validation for connections to the MinIO Operator or a MinIO Tenant.
Alternatively, you can generate x.509 TLS certificates signed by a known and trusted CA and pass those certificates to MinIO Tenants.
See :ref:`minio-tls` for more complete documentation.
Operator Internals
------------------
Operator Namespace
~~~~~~~~~~~~~~~~~~
TODO
Tenant Namespace
~~~~~~~~~~~~~~~~
When you use the Operator to create a tenant, the tenant *must* have its own namespace.
Within that namespace, the Operator generates the pods required by the tenant configuration.
Each Tenant pod runs three containers:
- MinIO Container that runs all of the standard MinIO functions, equivalent to a basic MinIO installation on bare metal.
This container stores and retrieves objects in the provided mount points (persistent volumes).
- InitContainer that only exists during the launch of the pod to manage configuration secrets during startup.
Once startup completes, this container terminates.
- SideCar container that monitors configuration secrets for the tenant and updates them as they change.
This container also monitors for root credentials and creates an error if it does not find root credentials.
Starting with v5.0.6, the MinIO Operator supports custom :kube-docs:`init containers <concepts/workloads/pods/init-containers>` for additional pod initialization that may be required for your environment.
The tenant utilizes Persistent Volume Claims to talk to the Persistent Volumes that store the objects.
.. image:: /images/k8s/OperatorsComponent-Diagram.png
:width: 600px
:alt: A diagram of the namespaces and pods used by or maintained by the MinIO Operator.
:align: center
.. toctree::
:titlesonly:
:hidden:
/operations/deployments/k8s-deploy-minio-on-kubernetes-upstream
/operations/deployments/k8s-deploy-minio-on-red-hat-open-shift-kubernetes
/operations/deployments/k8s-deploy-minio-on-suse-rancher-kubernetes
/operations/deployments/k8s-deploy-minio-on-elastic-kubernetes-service
/operations/deployments/k8s-deploy-minio-on-google-kubernetes-engine
/operations/deployments/k8s-deploy-minio-on-azure-kubernetes-service
/operations/deployments/k8s-upgrade-minio-operator-kubernetes

View File

@ -0,0 +1,108 @@
===========================
MinIO Tenants on Kubernetes
===========================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
A MinIO Tenant consists of a complete set of Kubernetes resources deployed within a namespace that support the MinIO Object Storage service.
This documentation assumes a :ref:`MinIO Operator installation <deploy-minio-operator>` on the target Kubernetes infrastructure.
Prerequisites
-------------
Your Kubernetes infrastructure must meet the following pre-requisites for deploying MinIO Tenants.
MinIO Kubernetes Operator
~~~~~~~~~~~~~~~~~~~~~~~~~
The procedures on this page *require* a valid installation of the MinIO Kubernetes Operator and assume the local host has a matching installation of the MinIO Kubernetes Operator.
This procedure assumes the latest stable Operator, version |operator-version-stable|.
See :ref:`deploy-operator-kubernetes` for complete documentation on deploying the MinIO Operator.
Worker Nodes with Local Storage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MinIO **strongly recommends** deploying Tenants onto Kubernetes worker nodes with locally attached storage.
The Worker Nodes should meet MinIO's :ref:`hardware checklist <minio-hardware-checklist>` for production environments.
Avoid colocating MinIO Tenants onto worker nodes that host other high-performance software. Where colocation is necessary, configure the appropriate limits and constraints to guarantee MinIO access to the necessary compute and storage resources.
.. _deploy-minio-tenant-pv:
Persistent Volumes
~~~~~~~~~~~~~~~~~~
.. include:: /includes/common-admonitions.rst
:start-after: start-exclusive-drive-access
:end-before: end-exclusive-drive-access
MinIO can typically use any Kubernetes :kube-docs:`Persistent Volume (PV) <concepts/storage/persistent-volumes>` that supports the :kube-docs:`ReadWriteOnce <concepts/storage/persistent-volumes/#access-modes>` access mode.
MinIO's consistency guarantees require the exclusive storage access that ``ReadWriteOnce`` provides.
Additionally, MinIO recommends setting a reclaim policy of ``Retain`` for the PVC :kube-docs:`StorageClass <concepts/storage/storage-classes>`.
Where possible, configure the Storage Class, CSI, or other provisioner underlying the PV to format volumes as XFS to ensure best performance.
For Kubernetes clusters where nodes have Direct Attached Storage, MinIO strongly recommends using the `DirectPV CSI driver <https://min.io/directpv?ref=docs>`__.
DirectPV provides a distributed persistent volume manager that can discover, format, mount, schedule, and monitor drives across Kubernetes nodes.
DirectPV addresses the limitations of manually provisioning and monitoring :kube-docs:`local persistent volumes <concepts/storage/volumes/#local>`.
For Tenants deploying onto Amazon Elastic, Azure, or Google Kubernetes, select the tabs below for specific guidance on PV configuration:
.. tab-set::
.. tab-item:: Amazon EKS
MinIO Tenants on EKS must use the :github:`EBS CSI Driver <kubernetes-sigs/aws-ebs-csi-driver>` to provision the necessary underlying persistent volumes.
MinIO strongly recommends using SSD-backed EBS volumes for best performance.
MinIO strongly recommends deploying EBS-based PVs with the XFS filesystem.
Create a StorageClass for the MinIO EBS PVs and set the ``csi.storage.k8s.io/fstype`` `parameter <https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/parameters.md>`__ to ``xfs`` .
MinIO recommends the following :github:`EBS volume types <kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/parameters.md>`:
- ``io2`` (Provisioned IOPS SSD) **Preferred**
- ``io1`` (Provisioned IOPS SSD)
- ``gp3`` (General Purpose SSD)
- ``gp2`` (General Purpose SSD)
For more information on EBS resources, see `EBS Volume Types <https://aws.amazon.com/ebs/volume-types/>`__.
For more information on StorageClass Parameters, see `StorageClass Parameters <https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/parameters.md>`__.
   .. tab-item:: Google GKE
MinIO Tenants on GKE should use the :gke-docs:`Compute Engine Persistent Disk CSI Driver <how-to/persistent-volumes/gce-pd-csi-driver>` to provision the necessary underlying persistent volumes.
MinIO recommends the following :gke-docs:`GKE CSI Driver <how-to/persistent-volumes/gce-pd-csi-driver>` storage classes:
- ``standard-rwo`` (Balanced Persistent SSD)
- ``premium-rwo`` (Performance Persistent SSD)
MinIO strongly recommends SSD-backed disk types for best performance.
For more information on GKE disk types, see :gcp-docs:`Persistent Disks <disks>`.
.. tab-item:: Azure AKS
MinIO Tenants on AKS should use the :azure-docs:`Azure Disks CSI driver <azure-disk-csi>` to provision the necessary underlying persistent volumes.
MinIO recommends the following :aks-docs:`AKS CSI Driver <azure-disk-csi>` storage classes:
- ``managed-csi`` (Standard SSD)
- ``managed-csi-premium`` (Premium SSD)
MinIO strongly recommends SSD-backed disk types for best performance.
For more information on AKS disk types, see :azure-docs:`Azure disk types <virtual-machines/disk-types>`.
.. toctree::
:titlesonly:
:hidden:
/operations/deployments/k8s-deploy-minio-tenant-on-kubernetes
/operations/deployments/k8s-modify-minio-tenant-on-kubernetes
/operations/deployments/k8s-upgrade-minio-tenant-on-kubernetes
/operations/deployments/k8s-expand-minio-tenant-on-kubernetes
/operations/deployments/k8s-delete-minio-tenant-on-kubernetes

View File

@ -0,0 +1,78 @@
.. _minio-k8s-modify-minio-tenant:
.. _minio-k8s-modify-minio-tenant-security:
=====================
Modify a MinIO Tenant
=====================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
You can modify tenants after deployment to change mutable configuration settings.
See :ref:`minio-operator-crd` for a complete description of available settings in the MinIO Custom Resource Definition.
The method for modifying the Tenant depends on how you deployed the tenant:
.. tab-set::
.. tab-item:: Kustomize
:sync: kustomize
For Kustomize-deployed Tenants, you can modify the base Kustomization resources and apply them using ``kubectl apply -k`` against the directory containing the ``kustomization.yaml`` object.
.. code-block:: shell
kubectl apply -k ~/kustomization/TENANT-NAME/
Modify the path to the Kustomization directory to match your local configuration.
.. tab-item:: Helm
:sync: helm
For Helm-deployed Tenants, you can modify the base ``values.yaml`` and upgrade the Tenant using the chart:
.. code-block:: shell
helm upgrade TENANT-NAME minio-operator/tenant -f values.yaml -n TENANT-NAMESPACE
The command above assumes use of the MinIO Operator Chart repository.
If you installed the Chart manually or by using a different repository name, specify that chart or name in the command.
Replace ``TENANT-NAME`` and ``TENANT-NAMESPACE`` with the name and namespace of the Tenant, respectively.
You can use ``helm list -n TENANT-NAMESPACE`` to validate the Tenant name.
Add Trusted Certificate Authorities
The MinIO Tenant validates the TLS certificate presented by each connecting client against the host system's trusted root certificate store.
The MinIO Operator can attach additional third-party Certificate Authorities (CA) to the Tenant to allow validation of client TLS certificates signed by those CAs.
To customize the trusted CAs mounted to each Tenant MinIO pod, enable the :guilabel:`Custom Certificates` switch.
Select the :guilabel:`Add CA Certificate +` button to add third party CA certificates.
If the MinIO Tenant cannot match an incoming client's TLS certificate issuer against either the container OS's trust store *or* an explicitly attached CA, MinIO rejects the connection as invalid.
Manage Tenant Pools
-------------------
Specify Runtime Class
~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: Console 0.23.1
When adding a new pool or modifying an existing pool for a tenant, you can specify the :kube-docs:`Runtime Class Name <concepts/containers/runtime-class/>` for pools to use.
.. Following link is intended for K8s only
Decommission a Tenant Server Pool
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MinIO Operator 4.4.13 and later support decommissioning a server pool in a Tenant.
Specifically, you can follow the :minio-docs:`Decommission a Server pool <minio/linux/operations/install-deploy-manage/decommission-server-pool.html>` procedure to remove the pool from the tenant, then edit the tenant YAML to drop the pool from the StatefulSet.
When removing the Tenant pool, ensure the ``spec.pools.[n].name`` fields have values for all remaining pools.
.. include:: /includes/common-installation.rst
:start-after: start-pool-order-must-not-change
:end-before: end-pool-order-must-not-change

View File

@ -179,7 +179,7 @@ Procedure
"value": "cluster.local"
}
],
"image": "minio/operator:v|operator-version-stable|",
"image": "minio/operator:v5.0.x",
"imagePullPolicy": "IfNotPresent",
"name": "minio-operator"
}
@ -227,15 +227,6 @@ Procedure
kubectl get pod -l 'name=minio-operator' -n minio-operator -o json | jq '.items[0].spec.containers'
#. *(Optional)* Connect to the Operator Console
.. include:: /includes/common/common-k8s-connect-operator-console-no-plugin.rst
#. Retrieve the Operator Console JWT for login
To continue upgrading to |operator-version-stable|, see :ref:`minio-k8s-upgrade-minio-operator`.
.. include:: /includes/common/common-k8s-operator-console-jwt.rst
.. tab-item:: Upgrade using Helm
@ -271,6 +262,34 @@ Procedure
NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
operator minio-operator 1 2023-11-01 15:49:54.539724775 -0400 EDT deployed operator-5.0.x v5.0.x
You can also introspect the operator pods directly to determine the installed version.
The following example uses the ``jq`` tool to filter the necessary information from ``kubectl``:
.. code-block:: shell
:class: copyable
kubectl get pod -l 'name=minio-operator' -n minio-operator -o json | jq '.items[0].spec.containers'
The output resembles the following:
.. code-block:: json
:emphasize-lines: 8-10
:substitutions:
{
"env": [
{
"name": "CLUSTER_DOMAIN",
"value": "cluster.local"
}
],
"image": "minio/operator:v5.0.x",
"imagePullPolicy": "IfNotPresent",
"name": "minio-operator"
}
If your local host does not have the ``jq`` utility installed, you can run the first part of the command and locate the ``spec.containers`` section of the output.
#. Update the Operator Repository
Use ``helm repo update minio-operator`` to update the MinIO Operator repo.
@ -315,11 +334,13 @@ Procedure
#. Validate the Operator upgrade
.. include:: /includes/common/common-k8s-connect-operator-console-no-plugin.rst
You can check the new Operator version with the same ``kubectl`` command used previously:
#. Retrieve the Operator Console JWT for login
.. code-block:: shell
:class: copyable
kubectl get pod -l 'name=minio-operator' -n minio-operator -o json | jq '.items[0].spec.containers'
.. include:: /includes/common/common-k8s-operator-console-jwt.rst
.. _minio-k8s-upgrade-minio-operator-to-4.5.8:
@ -397,8 +418,6 @@ You can then upgrade from release 4.5.8 to 5.0.15.
You can check the Operator version by reviewing the object specification for an Operator Pod using a previous step.
.. include:: /includes/common/common-k8s-connect-operator-console.rst
.. _minio-k8s-upgrade-minio-operator-4.2.2-procedure:
Upgrade MinIO Operator 4.0.0 through 4.2.2 to 4.2.3

View File

@ -0,0 +1,48 @@
.. _minio-kubernetes:
==========================
Deploy MinIO on Kubernetes
==========================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
MinIO is a Kubernetes-native high performance object store with an S3-compatible API.
The MinIO Kubernetes Operator supports deploying MinIO Tenants onto private and public cloud infrastructures ("Hybrid" Cloud).
All documentation assumes familiarity with referenced Kubernetes concepts, utilities, and procedures.
While MinIO documentation *may* provide guidance for configuring or deploying Kubernetes-related resources on a best-effort basis, it is not a replacement for the official :kube-docs:`Kubernetes Documentation <>`.
MinIO Operator Architecture
---------------------------
.. todo: image of architecture
MinIO Operator
--------------
The MinIO Operator is a first-party Kubernetes-native operator that manages the deployment of MinIO Tenants onto Kubernetes infrastructure.
The Operator provides MinIO-centric functionality around Tenant management, including support for configuring all core MinIO features.
You can interact with the Operator through the MinIO :kube-docs:`Custom Resource Definition (CRD) <concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions>`, or through the Operator Console UI.
The CRD provides a highly customizable entry point for using tools like Kustomize for deploying Tenants.
You can also use the MinIO Operator Console, a rich web-based UI that has complete support for deploying and configuring MinIO Tenants.
.. important::
The MinIO Operator Console UI is deprecated and removed in MinIO Operator 6.0.0.
You can continue to use standard Kubernetes approaches for MinIO Tenant management, such as Kustomize templates, Helm Charts, and ``kubectl`` commands for introspecting Tenant namespaces and resources.
.. toctree::
:titlesonly:
:hidden:
/operations/deployments/k8s-minio-operator
/operations/deployments/k8s-minio-tenants

View File

@ -46,12 +46,11 @@ For example, consider a claim with the following key-value assignment:
The specified policy claim directs MinIO to attach the policies with names matching ``readwrite_data``, ``read_analytics``, and ``read_logs`` to the authenticated user.
.. cond:: linux or container or macos or windows
You can set a custom policy claim using the
:envvar:`MINIO_IDENTITY_OPENID_CLAIM_NAME` environment variable
*or* by using :mc-cmd:`mc admin config set` to set the
:mc-conf:`identity_openid claim_name <identity_openid.claim_name>` setting.
You can set a custom policy claim using the
:envvar:`MINIO_IDENTITY_OPENID_CLAIM_NAME` environment variable
*or* by using :mc-cmd:`mc admin config set` to set the
:mc-conf:`identity_openid claim_name <identity_openid.claim_name>` setting.
See :ref:`minio-external-identity-management-openid-access-control` for more information on mapping MinIO policies to an OIDC-managed identity.
@ -77,12 +76,11 @@ Querying the Active Directory / LDAP Service
MinIO queries the configured Active Directory / LDAP server to verify the credentials specified by the application and optionally return a list of groups in which the user has membership.
This process, called Lookup-Bind mode, uses an AD/LDAP user with minimal permissions, only sufficient to authenticate with the AD/LDAP server for user and group lookups.
.. cond:: linux or container or macos or windows
The following tabs provide a reference of the environment variables and
configuration settings required for enabling Lookup-Bind mode.
The following tabs provide a reference of the environment variables and
configuration settings required for enabling Lookup-Bind mode.
.. tab-set::
.. tab-set::
.. tab-item:: Environment Variable
@ -137,15 +135,9 @@ Group Lookup
MinIO supports querying the Active Directory / LDAP server for a list of groups in which the authenticated user has membership.
MinIO attempts to match existing :ref:`policies <minio-policy>` to each group DN and assigns each matching policy to the authenticated user.
.. cond:: k8s
The following tabs provide a reference of the environment variables and configuration settings required for enabling group lookups:
The MinIO Operator Console provides the necessary fields for configuring Group Lookup as part of configuring AD/LDAP identity management for new or existing MinIO Tenants.
.. cond:: linux or container or macos or windows
The following tabs provide a reference of the environment variables and configuration settings required for enabling group lookups:
.. tab-set::
.. tab-set::
.. tab-item:: Environment Variable

View File

@ -18,72 +18,47 @@ MinIO supports configuring a single Active Directory / LDAP Connect for external
The procedure on this page provides instructions for:
.. cond:: k8s
.. tab-set::
:class: parent-tab
.. tab-item:: Kubernetes
:sync: k8s
For MinIO Tenants deployed using the :ref:`MinIO Kubernetes Operator <minio-kubernetes>`, this procedure covers:
- Configuring a MinIO Tenant to use an external AD/LDAP provider
- Accessing the Tenant Console using AD/LDAP Credentials.
- Using the MinIO ``AssumeRoleWithLDAPIdentity`` Security Token Service (STS) API to generate temporary credentials for use by applications.
.. cond:: linux or macos or container or windows
.. tab-item:: Baremetal
:sync: baremetal
For MinIO deployments on baremetal infrastructure, this procedure covers:
- Configuring a MinIO cluster for an external AD/LDAP provider.
- Accessing the MinIO Console using AD/LDAP credentials.
- Using the MinIO ``AssumeRoleWithLDAPIdentity`` Security Token Service (STS) API to generate temporary credentials for use by applications.
This procedure is generic for AD/LDAP services.
See the documentation for the AD/LDAP provider of your choice for specific instructions or procedures on configuration of user identities.
Prerequisites
-------------
.. cond:: k8s
Access to MinIO Cluster
~~~~~~~~~~~~~~~~~~~~~~~
MinIO Kubernetes Operator
~~~~~~~~~~~~~~~~~~~~~~~~~
.. tab-set::
:class: hidden
.. include:: /includes/k8s/common-operator.rst
:start-after: start-requires-operator-plugin
:end-before: end-requires-operator-plugin
.. tab-item:: Kubernetes
:sync: k8s
Active Directory / LDAP Compatible IDentity Provider
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You must have access to the MinIO Operator Console web UI.
You can either expose the MinIO Operator Console service using your preferred Kubernetes routing component, or use temporary port forwarding to expose the Console service port on your local machine.
This procedure assumes an existing Active Directory or LDAP service.
Instructions on configuring AD/LDAP are out of scope for this procedure.
.. cond:: k8s
- For AD/LDAP deployments within the same Kubernetes cluster as the MinIO Tenant, you can use Kubernetes service names to allow the MinIO Tenant to establish connectivity to the AD/LDAP service.
- For AD/LDAP deployments external to the Kubernetes cluster, you must ensure the cluster supports routing communications between Kubernetes services and pods and the external network.
This may require configuration or deployment of additional Kubernetes network components and/or enabling access to the public internet.
MinIO requires read-only access credentials with which it :ref:`binds <minio-external-identity-management-ad-ldap-lookup-bind>` to perform authenticated user and group queries.
Ensure each AD/LDAP user and group intended for use with MinIO has a corresponding :ref:`policy <minio-external-identity-management-ad-ldap-access-control>` on the MinIO deployment.
An AD/LDAP user with no assigned policy *and* with membership in groups with no assigned policy has no permission to access any action or resource on the MinIO cluster.
.. cond:: k8s
MinIO Tenant
~~~~~~~~~~~~
This procedure assumes your Kubernetes cluster has sufficient resources to :ref:`deploy a new MinIO Tenant <minio-k8s-deploy-minio-tenant>`.
You can also use this procedure as guidance for modifying an existing MinIO Tenant to enable AD/LDAP Identity Management.
.. cond:: linux or container or macos or windows
MinIO Deployment
~~~~~~~~~~~~~~~~
This procedure assumes an existing MinIO cluster running the :minio-git:`latest stable MinIO version <minio/releases/latest>`.
Defer to the :ref:`minio-installation` for more complete documentation on new MinIO deployments.
This procedure *may* work as expected for older versions of MinIO.
.. cond:: linux or container or macos or windows
Install and Configure ``mc`` with Access to the MinIO Cluster
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. tab-item:: Baremetal
:sync: baremetal
This procedure uses :mc:`mc` for performing operations on the MinIO cluster.
Install ``mc`` on a machine with network access to the cluster.
@ -91,153 +66,51 @@ An AD/LDAP user with no assigned policy *and* with membership in groups with no
This procedure assumes a configured :mc:`alias <mc alias>` for the MinIO cluster.
.. Lightly modeled after the SSE tutorials
Active Directory / LDAP Compatible IDentity Provider
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. cond:: k8s
This procedure assumes an existing Active Directory or LDAP service.
Instructions on configuring AD/LDAP are out of scope for this procedure.
.. _minio-external-identity-management-ad-ldap-configure:
.. tab-set::
:class: hidden
.. tab-item:: Kubernetes
:sync: k8s
- For AD/LDAP deployments within the same Kubernetes cluster as the MinIO Tenant, you can use Kubernetes service names to allow the MinIO Tenant to establish connectivity to the AD/LDAP service.
- For AD/LDAP deployments external to the Kubernetes cluster, you must ensure the cluster supports routing communications between Kubernetes services and pods and the external network.
This may require configuration or deployment of additional Kubernetes network components and/or enabling access to the public internet.
.. tab-item:: Baremetal
:sync: baremetal
The MinIO deployment must have bidirectional network connectivity to the target AD / LDAP service.
MinIO requires read-only access credentials with which it :ref:`binds <minio-external-identity-management-ad-ldap-lookup-bind>` to perform authenticated user and group queries.
Ensure each AD/LDAP user and group intended for use with MinIO has a corresponding :ref:`policy <minio-external-identity-management-ad-ldap-access-control>` on the MinIO deployment.
An AD/LDAP user with no assigned policy *and* with membership in groups with no assigned policy has no permission to access any action or resource on the MinIO cluster.
.. _minio-external-identity-management-ad-ldap-configure:
Configure MinIO with Active Directory or LDAP External Identity Management
--------------------------------------------------------------------------
.. tab-set::
:class: hidden
.. tab-item:: Kubernetes
:sync: k8s
.. include:: /includes/k8s/steps-configure-ad-ldap-external-identity-management.rst
.. Doing this the quick and dirty way. Need to revise later to be proper full includes via stepfiles
.. tab-item:: Baremetal
:sync: baremetal
.. cond:: linux or container or macos or windows
.. _minio-external-identity-management-ad-ldap-configure:
.. include:: /includes/baremetal/steps-configure-ad-ldap-external-identity-management.rst
Procedure
---------
1) Set the Active Directory / LDAP Configuration Settings
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Configure the AD/LDAP provider using one of the following:
* MinIO Client
* Environment variables
All methods require starting/restarting the MinIO deployment to apply changes.
The following tabs provide a quick reference for the available configuration methods:
.. tab-set::
.. tab-item:: MinIO Client
MinIO supports specifying the AD/LDAP provider settings using :mc:`mc idp ldap` commands.
For distributed deployments, the :mc:`mc idp ldap` command applies the configuration to all nodes in the deployment.
The following example code sets *all* configuration settings related to configuring an AD/LDAP provider for external identity management.
The minimum *required* settings are:
- :mc-conf:`server_addr <identity_ldap.server_addr>`
- :mc-conf:`lookup_bind_dn <identity_ldap.lookup_bind_dn>`
- :mc-conf:`lookup_bind_password <identity_ldap.lookup_bind_password>`
- :mc-conf:`user_dn_search_base_dn <identity_ldap.user_dn_search_base_dn>`
- :mc-conf:`user_dn_search_filter <identity_ldap.user_dn_search_filter>`
.. code-block:: shell
:class: copyable
mc idp ldap add ALIAS \
server_addr="ldaps.example.net:636" \
lookup_bind_dn="CN=xxxxx,OU=xxxxx,OU=xxxxx,DC=example,DC=net" \
lookup_bind_password="xxxxxxxx" \
user_dn_search_base_dn="DC=example,DC=net" \
user_dn_search_filter="(&(objectCategory=user)(sAMAccountName=%s))" \
group_search_filter="(&(objectClass=group)(member=%d))" \
group_search_base_dn="ou=MinIO Users,dc=example,dc=net" \
enabled="true" \
tls_skip_verify="off" \
server_insecure=off \
server_starttls="off" \
srv_record_name="" \
comment="Test LDAP server"
For more complete documentation on these settings, see :mc:`mc idp ldap`.
.. tab-item:: Environment Variables
MinIO supports specifying the AD/LDAP provider settings using :ref:`environment variables <minio-server-envvar-external-identity-management-ad-ldap>`.
The :mc:`minio server` process applies the specified settings on its next startup.
For distributed deployments, specify these settings across all nodes in the deployment using the *same* values.
Any differences in server configurations between nodes will result in startup or configuration failures.
The following example code sets *all* environment variables related to configuring an AD/LDAP provider for external identity management. The minimum *required* variables are:
- :envvar:`MINIO_IDENTITY_LDAP_SERVER_ADDR`
- :envvar:`MINIO_IDENTITY_LDAP_LOOKUP_BIND_DN`
- :envvar:`MINIO_IDENTITY_LDAP_LOOKUP_BIND_PASSWORD`
- :envvar:`MINIO_IDENTITY_LDAP_USER_DN_SEARCH_BASE_DN`
- :envvar:`MINIO_IDENTITY_LDAP_USER_DN_SEARCH_FILTER`
.. code-block:: shell
:class: copyable
export MINIO_IDENTITY_LDAP_SERVER_ADDR="ldaps.example.net:636"
export MINIO_IDENTITY_LDAP_LOOKUP_BIND_DN="CN=xxxxx,OU=xxxxx,OU=xxxxx,DC=example,DC=net"
export MINIO_IDENTITY_LDAP_USER_DN_SEARCH_BASE_DN="dc=example,dc=net"
export MINIO_IDENTITY_LDAP_USER_DN_SEARCH_FILTER="(&(objectCategory=user)(sAMAccountName=%s))"
export MINIO_IDENTITY_LDAP_LOOKUP_BIND_PASSWORD="xxxxxxxxx"
export MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER="(&(objectClass=group)(member=%d))"
export MINIO_IDENTITY_LDAP_GROUP_SEARCH_BASE_DN="ou=MinIO Users,dc=example,dc=net"
export MINIO_IDENTITY_LDAP_TLS_SKIP_VERIFY="off"
export MINIO_IDENTITY_LDAP_SERVER_INSECURE="off"
export MINIO_IDENTITY_LDAP_SERVER_STARTTLS="off"
export MINIO_IDENTITY_LDAP_SRV_RECORD_NAME=""
export MINIO_IDENTITY_LDAP_COMMENT="LDAP test server"
For complete documentation on these variables, see :ref:`minio-server-envvar-external-identity-management-ad-ldap`
When providing an AD/LDAP group search filter, configure a filter that returns the minimum number of relevant groups for the purpose of supporting authentication.
Filters that return large group assignments increase the size of associated calls and resources.
Functions sensitive to large request or response bodies may exhibit unexpected behaviors as a result.
2) Restart the MinIO Deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You must restart the MinIO deployment to apply the configuration changes.
For MinIO Client and environment variable configuration, use the :mc-cmd:`mc admin service restart` command to restart the deployment:
.. code-block:: shell
:class: copyable
mc admin service restart ALIAS
Replace ``ALIAS`` with the :ref:`alias <alias>` of the deployment to restart.
3) Generate S3-Compatible Temporary Credentials using AD/LDAP Credentials
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MinIO requires clients to authenticate using :s3-api:`AWS Signature Version 4 protocol <sig-v4-authenticating-requests.html>` with support for the deprecated Signature Version 2 protocol.
Specifically, clients must present a valid access key and secret key to access any S3 or MinIO administrative API, such as ``PUT``, ``GET``, and ``DELETE`` operations.
Applications can generate temporary access credentials as-needed using the :ref:`minio-sts-assumerolewithldapidentity` Security Token Service (STS) API endpoint and AD/LDAP user credentials.
MinIO provides an example Go application :minio-git:`ldap.go <minio/blob/master/docs/sts/ldap.go>` that manages this workflow.
.. code-block:: shell
POST https://minio.example.net?Action=AssumeRoleWithLDAPIdentity
&LDAPUsername=USERNAME
&LDAPPassword=PASSWORD
&Version=2011-06-15
&Policy={}
- Replace the ``LDAPUsername`` with the username of the AD/LDAP user.
- Replace the ``LDAPPassword`` with the password of the AD/LDAP user.
- Replace the ``Policy`` with an inline URL-encoded JSON :ref:`policy <minio-policy>` that further restricts the permissions associated with the temporary credentials.
Omit to use the :ref:`policy whose name matches <minio-external-identity-management-ad-ldap-access-control>` the Distinguished Name (DN) of the AD/LDAP user.
The API response consists of an XML document containing the access key, secret key, session token, and expiration date.
Applications can use the access key and secret key to access and perform operations on MinIO.
See the :ref:`minio-sts-assumerolewithldapidentity` for reference documentation.
Disable a Configured Active Directory / LDAP Connection

View File

@ -16,28 +16,33 @@ Overview
This procedure configures MinIO to use `Keycloak <https://www.keycloak.org/>`__ as an external IDentity Provider (IDP) for authentication of users via the OpenID Connect (OIDC) protocol.
This procedure specifically covers the following steps:
This page has procedures for configuring OIDC for MinIO deployments in Kubernetes and Baremetal infrastructures.
.. cond:: k8s
Select the tab corresponding to your infrastructure to switch between instruction sets.
.. tab-set::
:class: parent-tab
.. tab-item:: Kubernetes
:sync: k8s
For MinIO Tenants deployed using the :ref:`MinIO Kubernetes Operator <minio-kubernetes>`, this procedure covers:
- Configure Keycloak for use with MinIO authentication and authorization
- Configure a new or existing MinIO Tenant to use Keycloak as the OIDC provider
- Create policies to control access of Keycloak-authenticated users
- Log into the MinIO Tenant Console using SSO and a Keycloak-managed identity
- Generate temporary S3 access credentials using the ``AssumeRoleWithWebIdentity`` Security Token Service (STS) API
.. cond:: linux or macos or windows
.. tab-item:: Baremetal
:sync: baremetal
For MinIO deployments on baremetal infrastructure, this procedure covers:
- Configure Keycloak for use with MinIO authentication and authorization
- Configure a new or existing MinIO cluster to use Keycloak as the OIDC provider
- Create policies to control access of Keycloak-authenticated users
- Generate temporary S3 access credentials using the ``AssumeRoleWithWebIdentity`` Security Token Service (STS) API
.. cond:: container
- Deploy a Keycloak and MinIO Container
- Configure Keycloak for use with MinIO authentication and authorization
- Configure MinIO to use Keycloak as the OIDC provider
- Create policies to control access of Keycloak-authenticated users
- Log into the MinIO Console using SSO and a Keycloak-managed identity
- Generate temporary S3 access credentials using the ``AssumeRoleWithWebIdentity`` Security Token Service (STS) API
This procedure was written and tested against Keycloak ``21.0.0``.
@ -47,80 +52,68 @@ This procedure assumes you have prior experience with Keycloak and have reviewed
Prerequisites
-------------
.. cond:: k8s
Keycloak Deployment and Realm Configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MinIO Kubernetes Operator
~~~~~~~~~~~~~~~~~~~~~~~~~
This procedure assumes an existing Keycloak deployment to which you have administrative access.
Specifically, you must have permission to create and configure Realms, Clients, Client Scopes, Realm Roles, Users, and Groups on the Keycloak deployment.
.. include:: /includes/k8s/common-operator.rst
:start-after: start-requires-operator-plugin
:end-before: end-requires-operator-plugin
.. tab-set::
:class: hidden
MinIO Tenant
~~~~~~~~~~~~
This procedure assumes your Kubernetes cluster has sufficient resources to :ref:`deploy a new MinIO Tenant <minio-k8s-deploy-minio-tenant>`.
You can also use this procedure as guidance for modifying an existing MinIO Tenant to enable Keycloak Identity Management.
.. cond:: linux or container or macos or windows
MinIO Deployment
~~~~~~~~~~~~~~~~
This procedure assumes an existing MinIO cluster running the :minio-git:`latest stable MinIO version <minio/releases/latest>`.
Refer to the :ref:`minio-installation` for more complete documentation on new MinIO deployments.
This procedure *may* work as expected for older versions of MinIO.
.. cond:: not container
Keycloak Deployment and Realm Configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This procedure assumes an existing Keycloak deployment to which you have administrative access.
Specifically, you must have permission to create and configure Realms, Clients, Client Scopes, Realm Roles, Users, and Groups on the Keycloak deployment.
.. cond:: k8s
.. tab-item:: Kubernetes
:sync: k8s
For Keycloak deployments within the same Kubernetes cluster as the MinIO Tenant, this procedure assumes bidirectional access between the Keycloak and MinIO pods/services.
For Keycloak deployments external to the Kubernetes cluster, this procedure assumes an existing Ingress, Load Balancer, or similar Kubernetes network control component that manages network access to and from the MinIO Tenant.
.. cond:: not k8s
This procedure assumes bidirectional access between the Keycloak and MinIO deployments.
.. tab-item:: Baremetal
:sync: baremetal
Install and Configure ``mc`` with Access to the MinIO Cluster
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The MinIO deployment must have bidirectional access to the target OIDC service.
This procedure uses :mc:`mc` for performing operations on the MinIO cluster.
Install ``mc`` on a machine with network access to the cluster.
Ensure each user identity intended for use with MinIO has the appropriate :ref:`claim <minio-external-identity-management-openid-access-control>` configured such that MinIO can associate a :ref:`policy <minio-policy>` to the authenticated user.
An OpenID user with no assigned policy has no permission to access any action or resource on the MinIO cluster.
.. cond:: k8s
Your local host must have access to the MinIO Tenant, such as through Ingress, a Load Balancer, or a similar Kubernetes network control component.
Access to MinIO Cluster
~~~~~~~~~~~~~~~~~~~~~~~
See the ``mc`` :ref:`Installation Quickstart <mc-install>` for instructions on downloading and installing ``mc``.
.. tab-set::
:class: hidden
This procedure assumes a configured :mc:`alias <mc alias>` for the MinIO cluster.
.. tab-item:: Kubernetes
:sync: k8s
You must have access to the MinIO Operator Console web UI.
You can either expose the MinIO Operator Console service using your preferred Kubernetes routing component, or use temporary port forwarding to expose the Console service port on your local machine.
.. tab-item:: Baremetal
:sync: baremetal
This procedure uses :mc:`mc` for performing operations on the MinIO cluster.
Install ``mc`` on a machine with network access to the cluster.
See the ``mc`` :ref:`Installation Quickstart <mc-install>` for instructions on downloading and installing ``mc``.
This procedure assumes a configured :mc:`alias <mc alias>` for the MinIO cluster.
.. _minio-external-identity-management-keycloak-configure:
Configure MinIO for Keycloak Identity Management
------------------------------------------------
.. cond:: linux or macos or windows
.. tab-set::
.. include:: /includes/linux/steps-configure-keycloak-identity-management.rst
.. cond:: k8s
.. tab-item:: Kubernetes
:sync: k8s
.. include:: /includes/k8s/steps-configure-keycloak-identity-management.rst
.. cond:: container
.. tab-item:: Baremetal
:sync: baremetal
.. include:: /includes/container/steps-configure-keycloak-identity-management.rst
.. include:: /includes/baremetal/steps-configure-keycloak-identity-management.rst
Enable the Keycloak Admin REST API
----------------------------------

View File

@ -8,21 +8,33 @@ Configure MinIO for Authentication using OpenID
.. contents:: Table of Contents
:local:
:depth: 2
:depth: 1
Overview
--------
MinIO supports using an OpenID Connect (OIDC) compatible IDentity Provider (IDP) such as Okta, KeyCloak, Dex, Google, or Facebook for external management of user identities.
The procedure on this page provides instructions for:
.. cond:: k8s
This page has procedures for configuring OIDC for MinIO deployments in Kubernetes and Baremetal infrastructures.
Select the tab corresponding to your infrastructure to switch between instruction sets.
.. tab-set::
:class: parent-tab
.. tab-item:: Kubernetes
:sync: k8s
For MinIO Tenants deployed using the :ref:`MinIO Kubernetes Operator <minio-kubernetes>`, this procedure covers:
- Configuring a MinIO Tenant to use an external OIDC provider.
- Accessing the Tenant Console using OIDC Credentials.
- Using the MinIO ``AssumeRoleWithWebIdentity`` Security Token Service (STS) API to generate temporary credentials for use by applications.
.. cond:: linux or container or macos or windows
.. tab-item:: Baremetal
:sync: baremetal
For MinIO deployments on baremetal infrastructure, this procedure covers:
- Configuring a MinIO cluster for an external OIDC provider.
- Logging into the cluster using the MinIO Console and OIDC credentials.
@ -34,54 +46,46 @@ Defer to the documentation for the OIDC provider of your choice for specific ins
Prerequisites
-------------
.. cond:: k8s
MinIO Kubernetes Operator
~~~~~~~~~~~~~~~~~~~~~~~~~
.. include:: /includes/k8s/common-operator.rst
:start-after: start-requires-operator-plugin
:end-before: end-requires-operator-plugin
OpenID-Connect (OIDC) Compatible IDentity Provider
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This procedure assumes an existing OIDC provider such as Okta, KeyCloak, Dex, Google, or Facebook.
Instructions on configuring these services are out of scope for this procedure.
.. cond:: k8s
.. tab-set::
:class: hidden
.. tab-item:: Kubernetes
:sync: k8s
- For OIDC services within the same Kubernetes cluster as the MinIO Tenant, you can use Kubernetes service names to allow the MinIO Tenant to establish connectivity to the OIDC service.
- For OIDC services external to the Kubernetes cluster, you must ensure the cluster supports routing communications between Kubernetes services and pods and the external network.
This may require configuration or deployment of additional Kubernetes network components and/or enabling access to the public internet.
.. tab-item:: Baremetal
:sync: baremetal
The MinIO deployment must have bidirectional network connectivity to the target OIDC service.
Ensure each user identity intended for use with MinIO has the appropriate :ref:`claim <minio-external-identity-management-openid-access-control>` configured such that MinIO can associate a :ref:`policy <minio-policy>` to the authenticated user.
An OpenID user with no assigned policy has no permission to access any action or resource on the MinIO cluster.
.. cond:: k8s
MinIO Tenant
~~~~~~~~~~~~
Access to MinIO Cluster
~~~~~~~~~~~~~~~~~~~~~~~
This procedure assumes your Kubernetes cluster has sufficient resources to :ref:`deploy a new MinIO Tenant <minio-k8s-deploy-minio-tenant>`.
.. tab-set::
:class: hidden
You can also use this procedure as guidance for modifying an existing MinIO Tenant to enable OIDC Identity Management.
.. tab-item:: Kubernetes
:sync: k8s
.. cond:: linux or container or macos or windows
You must have access to the MinIO Operator Console web UI.
You can either expose the MinIO Operator Console service using your preferred Kubernetes routing component, or use temporary port forwarding to expose the Console service port on your local machine.
MinIO Deployment
~~~~~~~~~~~~~~~~
This procedure assumes an existing MinIO cluster running the :minio-git:`latest stable MinIO version <minio/releases/latest>`.
Defer to the :ref:`minio-installation` for more complete documentation on new MinIO deployments.
This procedure *may* work as expected for older versions of MinIO.
.. cond:: linux or container or macos or windows
Install and Configure ``mc`` with Access to the MinIO Cluster
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. tab-item:: Baremetal
:sync: baremetal
This procedure uses :mc:`mc` for performing operations on the MinIO cluster.
Install ``mc`` on a machine with network access to the cluster.
@ -89,181 +93,21 @@ An OpenID user with no assigned policy has no permission to access any action or
This procedure assumes a configured :mc:`alias <mc alias>` for the MinIO cluster.
.. Lightly modeled after the SSE tutorials
.. _minio-external-identity-management-openid-configure:
.. cond:: k8s
Configure MinIO with OpenID External Identity Management
--------------------------------------------------------
.. _minio-external-identity-management-openid-configure:
.. tab-set::
:class: hidden
.. tab-item:: Kubernetes
:sync: k8s
.. include:: /includes/k8s/steps-configure-openid-external-identity-management.rst
.. tab-item:: Baremetal
:sync: baremetal
.. Doing this the quick and dirty way. Need to revise later to be proper full includes via stepfiles
.. include:: /includes/baremetal/steps-configure-openid-external-identity-management.rst
.. cond:: linux or container or macos or windows
.. _minio-external-identity-management-openid-configure:
Procedure
---------
1) Set the OpenID Configuration Settings
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can configure the :abbr:`OIDC (OpenID Connect)` provider using either
environment variables *or* server runtime configuration settings. Both
methods require starting/restarting the MinIO deployment to apply changes. The
following tabs provide a quick reference of all required and optional
environment variables and configuration settings respectively:
.. tab-set::
.. tab-item:: Environment Variables
MinIO supports specifying the :abbr:`OIDC (OpenID Connect)` provider
settings using :ref:`environment variables
<minio-server-envvar-external-identity-management-openid>`. The
:mc:`minio server` process applies the specified settings on its next
startup. For distributed deployments, specify these settings across all
nodes in the deployment using the *same* values consistently.
The following example code sets *all* environment variables related to
configuring an :abbr:`OIDC (OpenID Connect)` provider for external
identity management. The minimum *required* variable is
:envvar:`MINIO_IDENTITY_OPENID_CONFIG_URL`:
.. code-block:: shell
:class: copyable
export MINIO_IDENTITY_OPENID_CONFIG_URL="https://openid-provider.example.net/.well-known/openid-configuration"
export MINIO_IDENTITY_OPENID_CLIENT_ID="<string>"
export MINIO_IDENTITY_OPENID_CLIENT_SECRET="<string>"
export MINIO_IDENTITY_OPENID_CLAIM_NAME="<string>"
export MINIO_IDENTITY_OPENID_CLAIM_PREFIX="<string>"
export MINIO_IDENTITY_OPENID_SCOPES="<string>"
export MINIO_IDENTITY_OPENID_REDIRECT_URI="<string>"
export MINIO_IDENTITY_OPENID_COMMENT="<string>"
Replace the ``MINIO_IDENTITY_OPENID_CONFIG_URL`` with the URL endpoint of
the :abbr:`OIDC (OpenID Connect)` provider discovery document.
For complete documentation on these variables, see
:ref:`minio-server-envvar-external-identity-management-openid`
.. tab-item:: Configuration Settings
MinIO supports specifying the :abbr:`OIDC (OpenID Connect)` provider
settings using :mc-conf:`configuration settings <identity_openid>`. The
:mc:`minio server` process applies the specified settings on its next
startup. For distributed deployments, the :mc:`mc admin config`
command applies the configuration to all nodes in the deployment.
The following example code sets *all* configuration settings related to
configuring an :abbr:`OIDC (OpenID Connect)` provider for external
identity management. The minimum *required* setting is
:mc-conf:`identity_openid config_url <identity_openid.config_url>`:
.. code-block:: shell
:class: copyable
mc admin config set ALIAS/ identity_openid \
config_url="https://openid-provider.example.net/.well-known/openid-configuration" \
client_id="<string>" \
client_secret="<string>" \
claim_name="<string>" \
claim_prefix="<string>" \
scopes="<string>" \
redirect_uri="<string>" \
comment="<string>"
Replace the ``config_url`` with the URL endpoint of the
:abbr:`OIDC (OpenID Connect)` provider discovery document.
For more complete documentation on these settings, see
:mc-conf:`identity_openid`.
2) Restart the MinIO Deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You must restart the MinIO deployment to apply the configuration changes.
Use the :mc-cmd:`mc admin service restart` command to restart the deployment.
.. code-block:: shell
:class: copyable
mc admin service restart ALIAS
Replace ``ALIAS`` with the :ref:`alias <alias>` of the deployment to
restart.
3) Use the MinIO Console to Log In with OIDC Credentials
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The MinIO Console supports the full workflow of authenticating to the
:abbr:`OIDC (OpenID Connect)` provider, generating temporary credentials using
the MinIO :ref:`minio-sts-assumerolewithwebidentity` Security Token Service
(STS) endpoint, and logging the user into the MinIO deployment.
Starting in :minio-release:`RELEASE.2021-07-08T01-15-01Z`, the MinIO Console is
embedded in the MinIO server. You can access the Console by opening the root URL
for the MinIO cluster. For example, ``https://minio.example.net:9000``.
From the Console, click :guilabel:`BUTTON` to begin the OpenID authentication
flow.
Once logged in, you can perform any action for which the authenticated
user is :ref:`authorized
<minio-external-identity-management-openid-access-control>`.
You can also create :ref:`access keys <minio-idp-service-account>` for
supporting applications which must perform operations on MinIO. Access Keys
are long-lived credentials which inherit their privileges from the parent user.
The parent user can further restrict those privileges while creating the access
key.
4) Generate S3-Compatible Temporary Credentials using OIDC Credentials
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MinIO requires clients authenticate using :s3-api:`AWS Signature Version 4
protocol <sig-v4-authenticating-requests.html>` with support for the deprecated
Signature Version 2 protocol. Specifically, clients must present a valid access
key and secret key to access any S3 or MinIO administrative API, such as
``PUT``, ``GET``, and ``DELETE`` operations.
Applications can generate temporary access credentials as-needed using the
:ref:`minio-sts-assumerolewithwebidentity` Security Token Service (STS)
API endpoint and the JSON Web Token (JWT) returned by the
:abbr:`OIDC (OpenID Connect)` provider.
The application must provide a workflow for logging into the
:abbr:`OIDC (OpenID Connect)` provider and retrieving the
JSON Web Token (JWT) associated to the authentication session. Defer to the
provider documentation for obtaining and parsing the JWT token after successful
authentication. MinIO provides an example Go application
:minio-git:`web-identity.go <minio/blob/master/docs/sts/web-identity.go>` with
an example of managing this workflow.
Once the application retrieves the JWT token, use the
``AssumeRoleWithWebIdentity`` endpoint to generate the temporary credentials:
.. code-block:: shell
:class: copyable
POST https://minio.example.net?Action=AssumeRoleWithWebIdentity
&WebIdentityToken=TOKEN
&Version=2011-06-15
&DurationSeconds=86400
&Policy=Policy
- Replace the ``TOKEN`` with the JWT token returned in the previous step.
- Replace the ``DurationSeconds`` with the duration in seconds until the temporary credentials expire. The example above specifies a period of ``86400`` seconds, or 24 hours.
- Replace the ``Policy`` with an inline URL-encoded JSON :ref:`policy <minio-policy>` that further restricts the permissions associated to the temporary credentials.
Omit to use the policy associated to the OpenID user :ref:`policy claim <minio-external-identity-management-openid-access-control>`.
The API response consists of an XML document containing the
access key, secret key, session token, and expiration date. Applications
can use the access key and secret key to access and perform operations on
MinIO.
See the :ref:`minio-sts-assumerolewithwebidentity` for reference documentation.

View File

@ -1,67 +0,0 @@
.. _minio-snmd:
=====================================
Deploy MinIO: Single-Node Multi-Drive
=====================================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 2
The procedures on this page cover deploying MinIO in a Single-Node Multi-Drive (SNMD) configuration.
|SNMD| deployments provide drive-level reliability and failover/recovery with performance and scaling limitations imposed by the single node.
.. cond:: linux or macos or windows
For production environments, MinIO strongly recommends deploying with the :ref:`Multi-Node Multi-Drive (Distributed) <minio-mnmd>` topology for enterprise-grade performance, availability, and scalability.
.. cond:: container
For production environments, MinIO strongly recommends using the MinIO Kubernetes Operator to deploy Multi-Node Multi-Drive (MNMD) or "Distributed" Tenants.
Prerequisites
-------------
Storage Requirements
~~~~~~~~~~~~~~~~~~~~
.. |deployment| replace:: deployment
.. include:: /includes/common-installation.rst
:start-after: start-storage-requirements-desc
:end-before: end-storage-requirements-desc
.. include:: /includes/common-admonitions.rst
:start-after: start-exclusive-drive-access
:end-before: end-exclusive-drive-access
Memory Requirements
~~~~~~~~~~~~~~~~~~~
.. versionchanged:: RELEASE.2024-01-28T22-35-53Z
MinIO pre-allocates 2GiB of system memory at startup.
MinIO recommends a *minimum* of 32GiB of memory per host.
See :ref:`minio-hardware-checklist-memory` for more guidance on memory allocation in MinIO.
.. _deploy-minio-standalone-multidrive:
Deploy Single-Node Multi-Drive MinIO
------------------------------------
The following procedure deploys MinIO consisting of a single MinIO server and multiple drives or storage volumes.
.. cond:: linux
.. include:: /includes/linux/steps-deploy-minio-single-node-multi-drive.rst
.. cond:: macos
.. include:: /includes/macos/steps-deploy-minio-single-node-multi-drive.rst
.. cond:: container
.. include:: /includes/container/steps-deploy-minio-single-node-multi-drive.rst

View File

@ -1,129 +0,0 @@
.. _minio-snsd:
======================================
Deploy MinIO: Single-Node Single-Drive
======================================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 2
The procedures on this page cover deploying MinIO in a Single-Node Single-Drive (SNSD) configuration for early development and evaluation.
|SNSD| deployments use a zero-parity erasure coded backend that provides no added reliability or availability beyond what the underlying storage volume implements.
These deployments are best suited for local testing and evaluation, or for small-scale data workloads that do not have availability or performance requirements.
.. cond:: container
For extended development or production environments in orchestrated environments, use the MinIO Kubernetes Operator to deploy a Tenant on multiple worker nodes.
.. cond:: linux
For extended development or production environments, deploy MinIO in a :ref:`Multi-Node Multi-Drive (Distributed) <minio-mnmd>` topology.
.. important::
:minio-release:`RELEASE.2022-10-29T06-21-33Z` fully removes the `deprecated Gateway/Filesystem <https://blog.min.io/deprecation-of-the-minio-gateway/>`__ backends.
MinIO returns an error if it starts up and detects existing Filesystem backend files.
To migrate from an FS-backend deployment, use :mc:`mc mirror` or :mc:`mc cp` to copy your data over to a new MinIO |SNSD| deployment.
You should also recreate any necessary users, groups, policies, and bucket configurations on the |SNSD| deployment.
.. _minio-snsd-pre-existing-data:
Pre-Existing Data
-----------------
MinIO startup behavior depends on the contents of the specified storage volume or path.
The server checks for both MinIO-internal backend data and the structure of existing folders and files.
The following table lists the possible storage volume states and MinIO behavior:
.. list-table::
:header-rows: 1
:widths: 40 60
* - Storage Volume State
- Behavior
* - Empty with **no** files, folders, or MinIO backend data
- MinIO starts in |SNSD| mode and creates the zero-parity backend
* - Existing |SNSD| zero-parity objects and MinIO backend data
- MinIO resumes in |SNSD| mode
* - Existing filesystem folders, files, but **no** MinIO backend data
- MinIO returns an error and does not start
* - Existing filesystem folders, files, and legacy "FS-mode" backend data
- MinIO returns an error and does not start
.. versionchanged:: RELEASE.2022-10-29T06-21-33Z
Prerequisites
-------------
Storage Requirements
~~~~~~~~~~~~~~~~~~~~
The following requirements summarize the :ref:`minio-hardware-checklist-storage` section of MinIO's hardware recommendations:
Use Local Storage
Direct-Attached Storage (DAS) has significant performance and consistency advantages over networked storage (:abbr:`NAS (Network Attached Storage)`, :abbr:`SAN (Storage Area Network)`, :abbr:`NFS (Network File Storage)`).
MinIO strongly recommends flash storage (NVMe, SSD) for primary or "hot" data.
Use XFS-Formatting for Drives
MinIO strongly recommends provisioning XFS formatted drives for storage.
MinIO uses XFS as part of internal testing and validation suites, providing additional confidence in performance and behavior at all scales.
Persist Drive Mounting and Mapping Across Reboots
Use ``/etc/fstab`` to ensure consistent drive-to-mount mapping across node reboots.
Non-Linux Operating Systems should use the equivalent drive mount management tool.
.. include:: /includes/common-admonitions.rst
:start-after: start-exclusive-drive-access
:end-before: end-exclusive-drive-access
Memory Requirements
~~~~~~~~~~~~~~~~~~~
.. versionchanged:: RELEASE.2024-01-28T22-35-53Z
MinIO pre-allocates 2GiB of system memory at startup.
MinIO recommends a *minimum* of 32GiB of memory per host.
See :ref:`minio-hardware-checklist-memory` for more guidance on memory allocation in MinIO.
.. _deploy-minio-standalone:
Deploy Single-Node Single-Drive MinIO
-------------------------------------
The following procedure deploys MinIO consisting of a single MinIO server and a single drive or storage volume.
.. admonition:: Network File System Volumes Break Consistency Guarantees
:class: note
MinIO's strict **read-after-write** and **list-after-write** consistency
model requires local drive filesystems.
MinIO cannot provide consistency guarantees if the underlying storage
volumes are NFS or a similar network-attached storage volume.
.. cond:: linux
.. include:: /includes/linux/steps-deploy-minio-single-node-single-drive.rst
.. cond:: macos
.. include:: /includes/macos/steps-deploy-minio-single-node-single-drive.rst
.. cond:: container
.. include:: /includes/container/steps-deploy-minio-single-node-single-drive.rst
.. cond:: windows
.. include:: /includes/windows/steps-deploy-minio-single-node-single-drive.rst

View File

@ -1,127 +0,0 @@
:orphan:
.. _minio-operator-console:
======================
MinIO Operator Console
======================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 2
.. warning::
MinIO Operator 6.0.0 deprecates and removes the Operator Console.
You can use either Kustomization or Helm to manage and deploy MinIO Tenants.
This page provides a historical view of the Operator Console, and will receive no further updates or corrections.
The Operator Console provides a rich user interface for deploying and
managing MinIO Tenants on Kubernetes infrastructure. Installing the
MinIO :ref:`Kubernetes Operator <deploy-operator-kubernetes>` automatically
installs and configures the Operator Console.
.. screenshot temporarily removed
.. image:: /images/k8s/operator-dashboard.png
:align: center
:width: 70%
:class: no-scaled-link
:alt: MinIO Operator Console
This page summarizes the functions available with the MinIO Operator Console.
.. _minio-operator-console-connect:
Connect to the Operator Console
-------------------------------
.. include:: /includes/common/common-k8s-connect-operator-console.rst
Tenant Management
-----------------
The MinIO Operator Console supports deploying, managing, and monitoring MinIO Tenants on the Kubernetes cluster.
.. screenshot temporarily removed
.. image:: /images/k8s/operator-dashboard.png
:align: center
:width: 70%
:class: no-scaled-link
:alt: MinIO Operator Console
You can :ref:`deploy a MinIO Tenant <minio-k8s-deploy-minio-tenant>` through the Operator Console.
The Operator Console automatically detects MinIO Tenants deployed on the cluster when provisioned through:
- Operator Console
- Helm
- Kustomize
Select a listed tenant to open an in-browser view of that tenant's MinIO Console.
You can use this view to directly manage, modify, expand, upgrade, and delete the tenant through the Operator UI.
.. versionadded:: Operator 5.0.0
You can download a Log Report for a tenant from the Pods summary screen.
The report downloads as ``<tenant-name>-report.zip``.
The ZIP archive contains status, events, and log information for each pool on the deployment.
The archive also includes a summary yaml file describing the deployment.
|subnet| users relying on the commercial license should register the MinIO tenants to their SUBNET account, which can be done through the Operator Console.
Tenant Registration
-------------------
|subnet| users relying on the commercial license should register the MinIO tenants to their SUBNET account, which can be done through the Operator Console.
.. screenshot temporarily removed
.. image:: /images/k8s/operator-console-register.png
:align: center
:width: 70%
:class: no-scaled-link
:alt: MinIO Operator Console Register Screen
#. Select the :guilabel:`Register` tab
#. Enter the :guilabel:`API Key`
You can obtain the key from |SUBNET| through the Console by selecting :guilabel:`Get from SUBNET`.
TLS Certificate Renewal
-----------------------
Operator 4.5.4 or later
~~~~~~~~~~~~~~~~~~~~~~~
Operator versions 4.5.4 and later automatically renew a tenant's certificates when the duration of the certificate has reached 80% of its life.
For example, a tenant certificate was issued on January 1, 2023, and set to expire on December 31, 2023.
80% of the 1 year life of the certificate comes on day 292, or October 19, 2023.
On that date, Operator automatically renews the tenant's certificate.
Operator 4.3.3 to 4.5.3
~~~~~~~~~~~~~~~~~~~~~~~
Operator versions 4.3.3 through 4.5.3 automatically renew tenant certificates after they reach 48 hours before expiration.
For a certificate that expires on December 31, 2023, Operator renews the certificate on December 29 or December 30, within 48 hours of the expiration.
Operator 4.3.2 or earlier
~~~~~~~~~~~~~~~~~~~~~~~~~
Operator versions 4.3.2 and earlier do not automatically renew certificates.
You must renew the tenant certificates on these releases separately.
Review Your MinIO License
-------------------------
To review which license you are using and the features available through different license options, select the :guilabel:`License` tab.
MinIO supports two licenses: `AGPLv3 Open Source <https://opensource.org/licenses/AGPL-3.0>`__ or a `MinIO Commercial License <https://min.io/pricing?ref=docs>`__.
Subscribers to |SUBNET| use MinIO under a commercial license.
You can also :guilabel:`Subscribe` from the License screen.

View File

@ -1,47 +0,0 @@
.. _minio-k8s-modify-minio-tenant:
.. _minio-k8s-modify-minio-tenant-security:
=====================
Modify a MinIO Tenant
=====================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
You can modify tenants after deployment to change mutable configuration settings.
See :ref:`minio-operator-crd` for a complete description of available settings in the MinIO Custom Resource Definition.
The method for modifying the Tenant depends on how you deployed the tenant:
.. tab-set::
.. tab-item:: Kustomize
:sync: kustomize
For Kustomize-deployed Tenants, you can modify the base Kustomization resources and apply them using ``kubectl apply -k`` against the directory containing the ``kustomization.yaml`` object.
.. code-block:: shell
kubectl apply -k ~/kustomization/TENANT-NAME/
Modify the path to the Kustomization directory to match your local configuration.
.. tab-item:: Helm
:sync: helm
For Helm-deployed Tenants, you can modify the base ``values.yaml`` and upgrade the Tenant using the chart:
.. code-block:: shell
helm upgrade TENANT-NAME minio-operator/tenant -f values.yaml -n TENANT-NAMESPACE
The command above assumes use of the MinIO Operator Chart repository.
If you installed the Chart manually or by using a different repository name, specify that chart or name in the command.
Replace ``TENANT-NAME`` and ``TENANT-NAMESPACE`` with the name and namespace of the Tenant, respectively.
You can use ``helm list -n TENANT-NAMESPACE`` to validate the Tenant name.
See :ref:`minio-tenant-chart-values` for more complete documentation on the available Chart fields.

View File

@ -1,42 +0,0 @@
.. _minio-manage:
=================================
Manage Existing MinIO Deployments
=================================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
Management of an existing MinIO deployment typically falls into the following categories:
Expansion
Increase the total storage capacity of the MinIO Deployment by adding a Server Pool
Upgrade
Test and deploy the latest stable version of MinIO to take advantage of new features, fixes, and performance improvements.
Decommission
Drain data from an older storage pool in preparation for removing it from the deployment
.. cond:: not (linux or k8s)
.. toctree::
:titlesonly:
:hidden:
/operations/install-deploy-manage/upgrade-minio-deployment
/operations/install-deploy-manage/migrate-fs-gateway
.. cond:: linux or k8s
.. toctree::
:titlesonly:
:hidden:
/operations/install-deploy-manage/expand-minio-deployment
/operations/install-deploy-manage/upgrade-minio-deployment
/operations/install-deploy-manage/decommission-server-pool
/operations/install-deploy-manage/migrate-fs-gateway

View File

@ -168,11 +168,8 @@ Use the :mc:`mc admin prometheus generate` command to generate the scrape config
This can be any single node, or a load balancer/proxy which handles connections to the MinIO nodes.
.. cond:: k8s
For Prometheus deployments in the same cluster as the MinIO Tenant, you can specify the service DNS name for the ``minio`` service.
For Prometheus deployments external to the cluster, you must specify an ingress or load balancer endpoint configured to route connections to and from the MinIO Tenant.
For MinIO Tenants on Kubernetes infrastructure, when using a Prometheus cluster in that same cluster you can specify the service DNS name for the ``minio`` service.
You can otherwise specify the ingress or load balancer endpoint configured to route connections to and from the MinIO Tenant.
2) Restart Prometheus with the Updated Configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

View File

@ -109,16 +109,8 @@ MinIO provides the following scraping endpoints, relative to the base URL:
For a complete list of metrics for each endpoint, see :ref:`Available version 3 metrics <minio-metrics-and-alerts-available-metrics>`.
.. cond:: k8s
The MinIO Operator supports deploying a per-tenant Prometheus instance configured to support metrics and visualization.
If you deploy the Tenant with this feature disabled *but* still want the historical metric views, you can instead configure an external Prometheus service to scrape the Tenant metrics.
Once configured, you can update the Tenant to query that Prometheus service to retrieve metric data:
.. cond:: linux or container or macos or windows
To enable historical data visualization in MinIO Console, set the following environment variables on each node in the MinIO deployment:
To enable historical data visualization in MinIO Console, set the following environment variables on each node in the MinIO deployment:
- Set :envvar:`MINIO_PROMETHEUS_URL` to the URL of the Prometheus service
- Set :envvar:`MINIO_PROMETHEUS_JOB_ID` to the unique job ID assigned to the collected metrics

View File

@ -30,9 +30,7 @@ The procedure on this page documents the following:
These instructions use :ref:`version 2 metrics. <minio-metrics-v2>`
For more about metrics API versions, see :ref:`Metrics and alerts. <minio-metrics-and-alerts>`
.. cond:: k8s
This procedure assumes all necessary network control components, such as Ingress or Load Balancers, to facilitate access between the MinIO Tenant and the InfluxDB service.
For MinIO Deployments on Kubernetes, this procedure assumes all necessary network control components, such as Ingress or Load Balancers, to facilitate access between the MinIO Tenant and the InfluxDB service.
Configure InfluxDB to Collect and Alert using MinIO Metrics
-----------------------------------------------------------
@ -47,8 +45,6 @@ Configure InfluxDB to Collect and Alert using MinIO Metrics
- You cannot enable authenticated access to the MinIO metrics endpoint via the InfluxDB UI
- You cannot set a tag for collected metrics (e.g. ``url_tag``) for uniquely identifying the metrics for a given MinIO deployment
.. cond:: k8s
The Telegraf Prometheus plugin also supports Kubernetes-specific features, such as scraping the ``minio`` service for a given MinIO Tenant.
Configuring Telegraf is out of scope for this procedure.

View File

@ -1,4 +1,6 @@
.. _minio-tls:
.. _minio-TLS-third-party-ca:
.. _minio-tls-user-generated:
========================
Network Encryption (TLS)
@ -8,9 +10,7 @@ Network Encryption (TLS)
.. contents:: Table of Contents
:local:
:depth: 1
MinIO supports Transport Layer Security (TLS) 1.2+ encryption of incoming and outgoing traffic.
:depth: 2
.. admonition:: SSL is Deprecated
:class: note
@ -18,38 +18,36 @@ MinIO supports Transport Layer Security (TLS) 1.2+ encryption of incoming and ou
TLS is the successor to Secure Socket Layer (SSL) encryption.
SSL is fully `deprecated <https://tools.ietf.org/html/rfc7568>`__ as of June 30th, 2018.
.. _minio-tls-user-generated:
Overview
--------
Enabling TLS
------------
MinIO supports Transport Layer Security (TLS) 1.2+ encryption of incoming and outgoing traffic.
MinIO can automatically detect certificates specified to either a default or custom search path and enable TLS for all connections.
MinIO supports Server Name Indication (SNI) requests from clients, where MinIO attempts to locate the appropriate TLS certificate for the hostname specified by the client.
.. cond:: not k8s
.. todo: add an image
The sections below describe how to enable TLS for MinIO.
You may use TLS certificates from a well-known Certificate Authority, an internal or private CA, or self-signed certs.
MinIO requires *at minimum* a single default TLS certificate and can support multiple TLS certificates in support of SNI connectivity.
MinIO uses the TLS Subject Alternate Name (SAN) list to determine which certificate to return to the client.
If MinIO cannot find a TLS certificate whose SAN covers the client-requested hostname, MinIO uses the default certificate and attempts to establish the handshake.
Before beginning, note these important points:
You can specify a single TLS certificate which covers all possible SANs for which the MinIO deployment accepts connections.
- Configure TLS on each node.
- Ensure certs are readable by the user who runs the MinIO Server process.
- Update :envvar:`MINIO_VOLUMES` and any needed services or apps to use an ``HTTPS`` URL.
This configuration requires the least configuration, but necessarily exposes all hostnames configured in the TLS SAN to connecting clients.
Depending on your TLS configuration, this may include internal or private SAN domains.
.. cond:: k8s
You can instead specify multiple TLS certificates separated by domain(s) with a single default certificate for any non-matching hostname requests.
This configuration requires more configuration, but only exposes those hostnames configured in the returned TLS SAN array.
For Kubernetes clusters with a valid :ref:`TLS Cluster Signing Certificate <minio-k8s-deploy-operator-tls>`,
the MinIO Kubernetes Operator can automatically generate TLS certificates while :ref:`deploying <minio-k8s-deploy-minio-tenant-security>` or :ref:`modifying <minio-k8s-modify-minio-tenant-security>` a MinIO Tenant.
The TLS certificate generation process is as follows:
.. _minio-tls-kubernetes:
- The Operator generates a Certificate Signing Request (CSR) associated to the Tenant.
The :abbr:`CSR (Certificate Signing Request)` includes the appropriate DNS Subject Alternate Names (SANs) for the Tenant services and pods.
MinIO TLS on Kubernetes
-----------------------
The Operator then waits for :abbr:`CSR (Certificate Signing Request)` approval
The MinIO Kubernetes Operator provides three approaches for configuring TLS on MinIO Tenants:
- The :abbr:`CSR (Certificate Signing Request)` waits pending approval.
The Kubernetes TLS API can automatically approve the :abbr:`CSR (Certificate Signing Request)` if properly configured.
Otherwise, a cluster administrator must manually approve the :abbr:`CSR (Certificate Signing Request)` before Kubernetes can generate the necessary certificates.
- The Operator applies the generated TLS Certificates to each MinIO Pod in the Tenant.
Automatic TLS using Cluster Signing API
For Kubernetes clusters with a valid :ref:`TLS Cluster Signing Certificate <minio-k8s-deploy-operator-tls>`, the MinIO Kubernetes Operator can automatically generate TLS certificates while :ref:`deploying <minio-k8s-deploy-minio-tenant-security>` or :ref:`modifying <minio-k8s-modify-minio-tenant-security>` a MinIO Tenant.
The Kubernetes TLS API uses the Kubernetes cluster Certificate Authority (CA) signature algorithm when generating new TLS certificates.
See :ref:`minio-TLS-supported-cipher-suites` for a complete list of MinIO's supported TLS Cipher Suites and recommended signature algorithms.
@ -64,19 +62,69 @@ Enabling TLS
If you have a custom Subject Alternative Name (SAN) certificate that is *not* also a wildcard cert, the TLS certificate SAN **must** apply to the hostname for its parent node.
Without a wildcard, the SAN must match exactly to be able to connect to the tenant.
Certificate Management with cert-manager
----------------------------------------
cert-manager Certificate Management
The MinIO Operator supports using `cert-manager <https://cert-manager.io/>`__ as a full replacement for its built-in automatic certificate management *or* user-driven manual certificate management.
For instructions for deploying the MinIO Operator and tenants using cert-manager, refer to the :ref:`cert-manager page <minio-certmanager>`.
Manual Certificate Management
The Tenant CRD spec ``spec.externalCertsSecret`` supports specifying either ``opaque`` or ``kubernetes.io/tls`` type :kube-docs:`secrets <concepts/configuration/secret/#secret-types>` containing the ``private.key`` and ``public.crt`` to use for TLS.
.. cond:: linux
You can specify multiple certificates to support Tenants which have multiple assigned hostnames.
The MinIO Server searches for TLS keys and certificates for each node and uses those credentials for enabling TLS.
The search location depends on your MinIO configuration:
.. tab-set::
Self-signed, Internal, Private Certificates, and Public CAs with Intermediate Certificates
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If deploying MinIO Tenants with certificates minted by a non-global or non-public Certificate Authority, *or* if using a global CA that requires the use of intermediate certificates, you must provide those CAs to the Operator to ensure it can trust those certificates.
The Operator may log warnings related to TLS cert validation for Tenants deployed with untrusted certificates.
The following procedure attaches a secret containing the ``public.crt`` of the Certificate Authority to the MinIO Operator.
You can specify multiple CAs in a single certificate, as long as you maintain the ``BEGIN`` and ``END`` delimiters as-is.
1. Create the ``operator-ca-tls`` secret
The following creates a Kubernetes secret in the MinIO Operator namespace (``minio-operator``).
.. code-block:: shell
:class: copyable
kubectl create secret generic operator-ca-tls \
--from-file=public.crt -n minio-operator
The ``public.crt`` file must correspond to a valid TLS certificate containing one or more CA definitions.
2. Restart the Operator
Once created, you must restart the Operator to load the new CAs:
.. code-block:: shell
:class: copyable
kubectl rollout restart deployments.apps/minio-operator -n minio-operator
Third-Party Certificate Authorities
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The MinIO Kubernetes Operator can automatically attach third-party Certificate Authorities when :ref:`deploying <minio-k8s-deploy-minio-tenant-security>` or :ref:`modifying <minio-k8s-modify-minio-tenant-security>` a MinIO Tenant.
You can add, update, or remove CAs from the tenant at any time.
You must restart the MinIO Tenant for the changes to the configured CAs to apply.
The Operator places the specified CAs on each MinIO Server pod such that all pods have a consistent set of trusted CAs.
If the MinIO Server cannot match an incoming client's TLS certificate issuer against any of the available CAs, the server rejects the connection as invalid.
.. _minio-tls-baremetal:
MinIO TLS on Baremetal
----------------------
The MinIO Server searches for TLS keys and certificates for each node and uses those credentials for enabling TLS.
MinIO automatically enables TLS upon discovery and validation of certificates.
The search location depends on your MinIO configuration:
.. tab-set::
.. tab-item:: Default Path
@ -104,333 +152,69 @@ Enabling TLS
The user running the MinIO service *must* have read and write permissions to this directory.
Place the TLS certificates for the default domain (e.g. ``minio.example.net``) in the ``/certs`` directory, with the private key as ``private.key`` and public certificate as ``public.crt``.
For example:
For distributed MinIO deployments, each node in the deployment must have matching TLS certificate configurations.
.. code-block:: shell
Self-signed, Internal, Private Certificates, and Public CAs with Intermediate Certificates
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/path/to/certs
private.key
public.crt
If using Certificates signed by a non-global or non-public Certificate Authority, *or* if using a global CA that requires the use of intermediate certificates, you must provide those CAs to the MinIO Server.
If the MinIO server does not have the necessary CAs, it may return warnings or errors related to TLS validation when connecting to other services.
You can use the MinIO :minio-git:`certgen <certgen>` to mint self-signed certificates for evaluating MinIO with TLS enabled.
For example, the following command generates a self-signed certificate with a set of IP and DNS Subject Alternate Names (SANs) associated to the MinIO Server hosts:
Place the CA certificates in the ``/certs/CAs`` folder.
The root path for this folder depends on whether you use the default certificate path *or* a custom certificate path (:mc-cmd:`minio server --certs-dir` or ``-S``)
.. code-block:: shell
certgen -host "localhost,minio-*.example.net"
Place the generated ``public.crt`` and ``private.key`` into the ``/path/to/certs`` directory to enable TLS for the MinIO deployment.
Applications can use the ``public.crt`` as a trusted Certificate Authority to allow connections to the MinIO deployment without disabling certificate validation.
If you are reconfiguring an existing deployment that did not previously have TLS enabled, update :envvar:`MINIO_VOLUMES` to specify ``https`` instead of ``http``.
You may also need to update URLs used by applications or clients.
.. cond:: container
Start the MinIO container with the :mc-cmd:`minio/minio:latest server --certs-dir <minio server --certs-dir>` parameter and specify the path to a directory in which MinIO searches for certificates.
You must mount a local host volume to that path when starting the container to ensure the MinIO Server can access the necessary certificates.
Place the TLS certificates for the default domain (e.g. ``minio.example.net``) in the specified directory, with the private key as ``private.key`` and public certificate as ``public.crt``.
For example:
.. code-block:: shell
/opts/certs
private.key
public.crt
You can use the MinIO :minio-git:`certgen <certgen>` to mint self-signed certificates for evaluating MinIO with TLS enabled.
For example, the following command generates a self-signed certificate with a set of IP and DNS SANs associated to the MinIO Server hosts:
.. code-block:: shell
certgen -host "localhost,minio-*.example.net"
You may need to start the container and set a ``--hostname`` that matches the TLS certificate DNS SAN.
Move the certificates to the local host machine path that the container mounts to its ``--certs-dir`` path.
When the MinIO container starts, the server searches the specified location for certificates and uses them to enable TLS.
Applications can use the ``public.crt`` as a trusted Certificate Authority to allow connections to the MinIO deployment without disabling certificate validation.
If you are reconfiguring an existing deployment that did not previously have TLS enabled, update :envvar:`MINIO_VOLUMES` to specify ``https`` instead of ``http``.
You may also need to update URLs used by applications or clients.
.. cond:: macos
The MinIO server searches the following directory for TLS keys and certificates:
.. code-block:: shell
${HOME}/.minio/certs
For deployments started with a custom TLS directory :mc-cmd:`minio server --certs-dir`, use that directory instead of the defaults.
Place the TLS certificates for the default domain (e.g. ``minio.example.net``) in the ``/certs`` directory, with the private key as ``private.key`` and public certificate as ``public.crt``.
For example:
.. code-block:: shell
${HOME}/.minio/certs
private.key
public.crt
Where ``${HOME}`` is the home directory of the user running the MinIO Server process.
You may need to create the ``${HOME}/.minio/certs`` directory if it does not exist.
You can use the MinIO :minio-git:`certgen <certgen>` to mint self-signed certificates for evaluating MinIO with TLS enabled.
For example, the following command generates a self-signed certificate with a set of IP and DNS SANs associated to the MinIO Server hosts:
.. code-block:: shell
certgen -host "localhost,minio-*.example.net"
Place the generated ``public.crt`` and ``private.key`` into the ``/.minio/certs`` directory to enable TLS for the MinIO deployment.
Applications can use the ``public.crt`` as a trusted Certificate Authority to allow connections to the MinIO deployment without disabling certificate validation.
If you are reconfiguring an existing deployment that did not previously have TLS enabled, update :envvar:`MINIO_VOLUMES` to specify ``https`` instead of ``http``.
You may also need to update URLs used by applications or clients.
.. cond:: windows
The MinIO server searches the following directory for TLS keys and certificates:
.. code-block:: shell
%%USERPROFILE%%\.minio\certs
For deployments started with a custom TLS directory :mc-cmd:`minio server --certs-dir`, use that directory instead of the defaults.
Place the TLS certificates for the default domain (e.g. ``minio.example.net``) in the ``/certs`` directory, with the private key as ``private.key`` and public certificate as ``public.crt``.
For example:
.. code-block:: shell
%%USERPROFILE%%\.minio\certs
private.key
public.crt
Where ``%%USERPROFILE%%`` is the location of the `User Profile folder <https://docs.microsoft.com/en-us/windows/deployment/usmt/usmt-recognized-environment-variables>`__ of the user running the MinIO Server process.
You can use the MinIO :minio-git:`certgen <certgen>` to mint self-signed certificates for evaluating MinIO with TLS enabled.
For example, the following command generates a self-signed certificate with a set of IP and DNS SANs associated to the MinIO Server hosts:
.. code-block:: shell
certgen.exe -host "localhost,minio-*.example.net"
Place the generated ``public.crt`` and ``private.key`` into the ``\.minio\certs`` directory to enable TLS for the MinIO deployment.
Applications can use the ``public.crt`` as a trusted Certificate Authority to allow connections to the MinIO deployment without disabling certificate validation.
If you are reconfiguring an existing deployment that did not previously have TLS enabled, update :envvar:`MINIO_VOLUMES` to specify ``https`` instead of ``http``.
You may also need to update URLs used by applications or clients.
.. cond:: k8s
Supported Secret Types
~~~~~~~~~~~~~~~~~~~~~~
MinIO supports three types of :kube-docs:`secrets in Kubernetes <concepts/configuration/secret/#secret-types>`.
#. ``opaque``
Using ``private.key`` and ``public.crt`` files.
#. ``tls``
Using ``tls.key`` and ``tls.crt`` files.
#. `cert-manager <https://cert-manager.io/>`__ 1.7.x or later
Running on Kubernetes 1.21 or later.
.. note::
For the best support of *tls* or *cert-manager* secrets, upgrade to Operator version 5.0.10 or later.
Multiple Domain-Based TLS Certificates
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. cond:: k8s
The MinIO Operator supports attaching user-specified TLS certificates when :ref:`deploying <minio-k8s-deploy-minio-tenant-security>` or :ref:`modifying <minio-k8s-modify-minio-tenant-security>` the MinIO Tenant.
These custom certificates support `Server Name Indication (SNI) <https://en.wikipedia.org/wiki/Server_Name_Indication>`__, where the MinIO server identifies which certificate to use based on the hostname specified by the connecting client.
For example, you can generate certificates signed by your organization's preferred Certificate Authority (CA) and attach those to the MinIO Tenant.
Applications which trust that :abbr:`CA (Certificate Authority)` can connect to the MinIO Tenant and fully validate the Tenant TLS certificates.
.. cond:: linux
The MinIO server supports multiple TLS certificates, where the server uses `Server Name Indication (SNI) <https://en.wikipedia.org/wiki/Server_Name_Indication>`__ to identify which certificate to use when responding to a client request.
When a client connects using a specific hostname, MinIO uses :abbr:`SNI (Server Name Indication)` to select the appropriate TLS certificate for that hostname.
For example, consider a MinIO deployment reachable through the following hostnames:
- ``https://minio.example.net`` (default TLS certificates)
- ``https://s3.example.net``
- ``https://minio.internal-example.net``
Place the certificates in the ``/certs`` folder, creating a subfolder in ``/certs`` for each additional domain for which MinIO should present TLS certificates.
While MinIO has no requirements for folder names, consider creating subfolders whose name matches the domain to improve human readability.
Place the TLS private and public key for that domain in the subfolder.
The root path for this folder depends on whether you use the default certificate path *or* a custom certificate path (:mc-cmd:`minio server --certs-dir` or ``-S``).
.. tab-set::
.. tab-set::
.. tab-item:: Default Certificate Path
.. code-block:: shell
${HOME}/.minio/certs
private.key
public.crt
s3-example.net/
private.key
public.crt
internal-example.net/
private.key
public.crt
mv myCA.crt ${HOME}/.minio/certs/CAs
.. tab-item:: Custom Certificate Path
The following example assumes the MinIO Server was started with ``--certs dir | -S /opt/minio/certs``:
The following example assumes the MinIO Server was started with ``--certs dir /opt/minio/certs``:
.. code-block:: shell
/opt/minio/certs
private.key
public.crt
s3-example.net/
private.key
public.crt
internal-example.net/
private.key
public.crt
mv myCA.crt /opt/minio/certs/CAs/
While you can have a single TLS certificate that covers all hostnames with multiple Subject Alternative Names (SANs), this would reveal the ``internal-example.net`` and ``s3-example.net`` hostnames to any client which inspects the server certificate.
Using a TLS certificate per hostname better protects each individual hostname from discovery.
The individual TLS certificate SANs **must** apply to the hostname for their respective parent node.
For a self-signed certificate, the Certificate Authority is typically the private key used to sign the cert.
If the client-specified hostname or IP address does not match any of the configured TLS certificates, the connection typically fails with a certificate validation error.
For certificates signed by an internal, private, or other non-global Certificate Authority, use the same CA that signed the cert.
A non-global CA must include the full chain of trust from the intermediate certificate to the root.
If the provided file is not an X.509 certificate, MinIO ignores it and may return errors for validating certificates signed by that CA.
.. cond:: container
Third-Party Certificate Authorities
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The MinIO server supports multiple TLS certificates, where the server uses `Server Name Indication (SNI) <https://en.wikipedia.org/wiki/Server_Name_Indication>`__ to identify which certificate to use when responding to a client request.
When a client connects using a specific hostname, MinIO uses :abbr:`SNI (Server Name Indication)` to select the appropriate TLS certificate for that hostname.
The MinIO Server validates the TLS certificate presented by each connecting client against the host system's trusted root certificate store.
For example, consider a MinIO deployment reachable through the following hostnames:
Place the CA certificates in the ``/certs/CAs`` folder.
The root path for this folder depends on whether you use the default certificate path *or* a custom certificate path (:mc-cmd:`minio server --certs-dir` or ``-S``)
- ``https://minio.example.net`` (default TLS certificates)
- ``https://s3.example.net``
- ``https://minio.internal-example.net``
.. tab-set::
Start the MinIO container with the :mc-cmd:`minio/minio:latest server --certs-dir <minio server --certs-dir>` parameter and specify the path to a directory in which MinIO searches for certificates.
You must mount a local host volume to that path when starting the container to ensure the MinIO Server can access the necessary certificates.
Place the TLS certificates for the default domain (e.g. ``minio.example.net``) in the specified directory, with the private key as ``private.key`` and public certificate as ``public.crt``.
For other hostnames, create a subfolder whose name matches the domain to improve human readability.
Place the TLS private and public key for that domain in the subfolder.
For example:
.. tab-item:: Default Certificate Path
.. code-block:: shell
/opts/certs
private.key
public.crt
s3-example.net/
private.key
public.crt
internal-example.net/
private.key
public.crt
mv myCA.crt ${HOME}/certs/CAs
When the MinIO container starts, the server searches the mounted location ``/opts/certs`` for certificates and uses them to enable TLS.
MinIO serves clients connecting to the container using a supported hostname with the associated certificates.
Applications can use the ``public.crt`` as a trusted Certificate Authority to allow connections to the MinIO deployment without disabling certificate validation.
.. tab-item:: Custom Certificate Path
While you can have a single TLS certificate that covers all hostnames with multiple Subject Alternative Names (SANs), this would reveal the ``internal-example.net`` and ``s3-example.net`` hostnames to any client which inspects the server certificate.
Using one TLS certificate per hostname better protects each individual hostname from discovery.
The individual TLS certificate SANs **must** apply to the hostname for their respective parent node.
If the client-specified hostname or IP address does not match any of the configured TLS certificates, the connection typically fails with a certificate validation error.
.. cond:: macos
The MinIO server supports multiple TLS certificates, where the server uses `Server Name Indication (SNI) <https://en.wikipedia.org/wiki/Server_Name_Indication>`__ to identify which certificate to use when responding to a client request.
When a client connects using a specific hostname, MinIO uses SNI to select the appropriate TLS certificate for that hostname.
For example, consider a MinIO deployment reachable through the following hostnames:
- ``https://minio.example.net`` (default TLS certificates)
- ``https://s3.example.net``
- ``https://minio.internal-example.net``
Create a subfolder in ``/certs`` for each additional domain for which MinIO should present TLS certificates.
While MinIO has no requirements for folder names, consider creating subfolders whose name matches the domain to improve human readability.
Place the TLS private and public key for that domain in the subfolder.
For example:
The following example assumes the MinIO Server was started with ``--certs dir /opt/minio/certs``:
.. code-block:: shell
${HOME}/.minio/certs
private.key
public.crt
s3-example.net/
private.key
public.crt
internal-example.net/
private.key
public.crt
mv myCA.crt /opt/minio/certs/CAs/
While you can have a single TLS certificate that covers all hostnames with multiple Subject Alternative Names (SANs), this would reveal the ``internal-example.net`` and ``s3-example.net`` hostnames to any client which inspects the server certificate.
Using a TLS certificate per hostname better protects each individual hostname from discovery.
The individual TLS certificate SANs **must** apply to the hostname for their respective parent node.
If the client-specified hostname or IP address does not match any of the configured TLS certificates, the connection typically fails with a certificate validation error.
.. cond:: windows
The MinIO server supports multiple TLS certificates, where the server uses `Server Name Indication (SNI) <https://en.wikipedia.org/wiki/Server_Name_Indication>`__ to identify which certificate to use when responding to a client request.
When a client connects using a specific hostname, MinIO uses SNI to select the appropriate TLS certificate for that hostname.
For example, consider a MinIO deployment reachable through the following hostnames:
- ``https://minio.example.net`` (default TLS certificates)
- ``https://s3.example.net``
- ``https://minio.internal-example.net``
Create a subfolder in ``/certs`` for each additional domain for which MinIO should present TLS certificates.
While MinIO has no requirements for folder names, consider creating subfolders whose name matches the domain to improve human readability.
Place the TLS private and public key for that domain in the subfolder.
For example:
.. code-block:: shell
%%USERPROFILE%%\.minio\certs
private.key
public.crt
s3-example.net\
private.key
public.crt
internal-example.net\
private.key
public.crt
While you can have a single TLS certificate that covers all hostnames with multiple Subject Alternative Names (SANs), this would reveal the ``internal-example.net`` and ``s3-example.net`` hostnames to any client which inspects the server certificate.
Using a TLS certificate per hostname better protects each individual hostname from discovery.
The individual TLS certificate SANs **must** apply to the hostname for their respective parent node.
If the client-specified hostname or IP address does not match any of the configured TLS certificates, the connection typically fails with a certificate validation error.
Place the certificate file for each CA into the ``/CAs`` subdirectory.
Ensure all hosts in the MinIO deployment have a consistent set of trusted CAs in that directory.
If the MinIO Server cannot match an incoming client's TLS certificate issuer against any of the available CAs, the server rejects the connection as invalid.
.. _minio-TLS-supported-cipher-suites:
@ -458,7 +242,8 @@ MinIO supports the following TLS 1.2 and 1.3 cipher suites as supported by `Go <
- ``TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256``
- ``TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384``
.. _minio-TLS-third-party-ca:
.. toctree::
:hidden:
Third-Party Certificate Authorities
-----------------------------------
@ -624,3 +409,6 @@ Self-signed, Internal, Private Certificates, and Public CAs with Intermediate Ce
:class: copyable
kubectl rollout restart deployments.apps/minio-operator -n minio-operator
/operations/network-encryption/enable-minio-tls
/operations/network-encryption/enable-multiple-domain-minio-tls
/operations/cert-manager

View File

@ -0,0 +1,256 @@
====================
Enable TLS for MinIO
====================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
MinIO supports Transport Layer Security (TLS) 1.2+ encryption of incoming and outgoing traffic.
.. tab-set::
:class: parent
.. tab-item:: Kubernetes
:sync: k8s
The MinIO Operator supports the following approaches to enabling TLS on a MinIO Tenant:
- Automatic TLS provisioning using Kubernetes Cluster Signing Certificates
- User-specified TLS using Kubernetes secrets
- Certmanager-managed TLS certificates
.. tab-item:: Baremetal
:sync: baremetal
MinIO automatically detects TLS certificates in the configured or default directory and starts with TLS enabled.
This procedure documents enabling TLS for a single domain in MinIO.
For instructions on TLS for multiple domains, see TODO
Prerequisites
-------------
Access to MinIO Cluster
~~~~~~~~~~~~~~~~~~~~~~~
.. tab-set::
:class: hidden
.. tab-item:: Kubernetes
:sync: k8s
You must have access to the Kubernetes cluster, with administrative permissions associated to your ``kubectl`` configuration.
This procedure assumes your permission set extends sufficiently to support deployment or modification of MinIO-associated resources on the Kubernetes cluster, including but not limited to pods, statefulsets, replicasets, deployments, and secrets.
.. tab-item:: Baremetal
:sync: baremetal
This procedure uses :mc:`mc` for performing operations on the MinIO cluster.
Install ``mc`` on a machine with network access to the cluster.
See the ``mc`` :ref:`Installation Quickstart <mc-install>` for instructions on downloading and installing ``mc``.
This procedure assumes a configured :mc:`alias <mc alias>` for the MinIO cluster.
This procedure also assumes SSH or similar shell-level access with administrative permissions to each MinIO host server.
TLS Certificates
~~~~~~~~~~~~~~~~
Provision the necessary TLS certificates with a :ref:`supported cipher suite <minio-TLS-supported-cipher-suites>` for use by MinIO.
.. tab-set::
:class: hidden
.. tab-item:: Kubernetes
:sync: k8s
See :ref:`minio-tls-kubernetes` for more complete guidance on the supported Tenant TLS configurations.
.. tab-item:: Baremetal
:sync: baremetal
Provision certificates using your preferred path, such as through your organization's internal Certificate Authority or by using a well-known global provider such as DigiCert or Verisign.
You can create self-signed certificates using ``openssl`` or the MinIO :minio-git:`certgen <certgen>` tool.
For example, the following command generates a self-signed certificate with a set of IP and DNS Subject Alternate Names (SANs) associated to the MinIO Server hosts:
.. code-block:: shell
certgen -host "localhost,minio-*.example.net"
See :ref:`minio-tls-baremetal` for more complete guidance on certificate generation and placement.
Procedure
---------
.. tab-set::
:class: hidden
.. tab-item:: Kubernetes
:sync: k8s
The MinIO Operator supports three methods of TLS certificate management on MinIO Tenants:
- MinIO automatic TLS certificate generation
- ``cert-manager`` managed TLS certificates
- User managed TLS certificates
You can use any combination of the above methods to enable and configure TLS.
MinIO strongly recommends using ``cert-manager`` for user-specified certificates for a streamlined management and renewal process.
You can also deploy MinIO Tenants without TLS enabled.
.. tab-set::
.. tab-item:: MinIO Auto-TLS
The following steps apply to both new and existing MinIO Deployments using ``Kustomize``:
1. Review the :ref:`Tenant CRD <minio-operator-crd>` ``TenantSpec.requestAutoCert`` and ``TenantSpec.certConfig`` fields.
For existing MinIO Tenants, review the Kustomize resources used to create the Tenant and introspect those fields and their current configuration, if any.
2. Create or Modify your Tenant YAML to set the values of ``requestAutoCert`` and ``certConfig`` as necessary.
For example:
.. code-block:: yaml
spec:
requestAutoCert: true
certConfig:
commonName: "CN=MinioTenantCommonName"
organizationName: "O=MyOrganizationName"
dnsNames:
- '*.minio-tenant.domain.tld'
See the :minio-git:`Kustomize Tenant base YAML <operator/blob/master/examples/kustomization/base/tenant.yaml>` for a baseline template for guidance in creating or modifying your Tenant resource.
3. Apply the new Kustomization template
Once you apply the changes, the MinIO Operator automatically redeploys the Tenant with the updated configuration.
.. tab-item:: CertManager
The following steps apply to both new and existing MinIO Deployments using ``Kustomize``:
1. Review the :ref:`Tenant CRD <minio-operator-crd>` ``TenantSpec.externalCertSecret`` field
For existing MinIO Tenants, review the Kustomize resources used to create the Tenant and introspect that field's current configuration, if any.
2. Create or Modify your Tenant YAML to reference the appropriate ``cert-manager`` resource.
For example, the following Tenant YAML fragment references a cert-manager resource ``myminio-tls``:
.. code-block:: yaml
apiVersion: minio.min.io/v2
kind: Tenant
metadata:
name: myminio
namespace: minio-tenant
spec:
## Disable default tls certificates.
requestAutoCert: false
## Use certificates generated by cert-manager.
externalCertSecret:
- name: myminio-tls
type: cert-manager.io/v1
3. Apply the new Kustomization Template
Once you apply the changes, the MinIO Operator automatically redeploys the Tenant with the updated configuration.
.. tab-item:: User-Managed
The following steps apply to both new and existing MinIO deployments using ``Kustomize``:
1. Review the :ref:`Tenant CRD <minio-operator-crd>` ``TenantSpec.externalCertSecret`` field.
For existing MinIO Tenants, review the Kustomize resources used to create the Tenant and introspect that field's current configuration, if any.
2. Create or modify your Tenant YAML to reference a secret of type ``kubernetes.io/tls``:
For example, the following Tenant YAML fragment references a TLS secret which covers the domain on which the MinIO Tenant accepts connections.
.. code-block:: yaml
apiVersion: minio.min.io/v2
kind: Tenant
metadata:
name: myminio
namespace: minio-tenant
spec:
## Disable default tls certificates.
requestAutoCert: false
## Use user-provided certificates from a Kubernetes TLS secret.
externalCertSecret:
- name: domain-certificate
type: kubernetes.io/tls
3. Apply the new Kustomization Template
Once you apply the changes, the MinIO Operator automatically redeploys the Tenant with the updated configuration.
.. tab-item:: Baremetal
:sync: baremetal
The MinIO Server searches for TLS keys and certificates for each node and uses those credentials for enabling TLS.
MinIO automatically enables TLS upon discovery and validation of certificates.
The search location depends on your MinIO configuration:
.. tab-set::
.. tab-item:: Default Path
By default, the MinIO server looks for the TLS keys and certificates for each node in the following directory:
.. code-block:: shell
${HOME}/.minio/certs
Where ``${HOME}`` is the home directory of the user running the MinIO Server process.
You may need to create the ``${HOME}/.minio/certs`` directory if it does not exist.
For ``systemd`` managed deployments this must correspond to the ``USER`` running the MinIO process.
If that user has no home directory, use the :guilabel:`Custom Path` option instead.
.. tab-item:: Custom Path
You can specify a path for the MinIO server to search for certificates using the :mc-cmd:`minio server --certs-dir` or ``-S`` parameter.
For example, the following command fragment directs the MinIO process to use the ``/opt/minio/certs`` directory for TLS certificates.
.. code-block:: shell
minio server --certs-dir /opt/minio/certs ...
The user running the MinIO service *must* have read and write permissions to this directory.
Place the TLS certificates for the default domain (e.g. ``minio.example.net``) in the ``/certs`` directory, with the private key as ``private.key`` and public certificate as ``public.crt``.
For example:
.. code-block:: shell
/path/to/certs
private.key
public.crt
You can use the MinIO :minio-git:`certgen <certgen>` to mint self-signed certificates for evaluating MinIO with TLS enabled.
For example, the following command generates a self-signed certificate with a set of IP and DNS Subject Alternate Names (SANs) associated to the MinIO Server hosts:
.. code-block:: shell
certgen -host "localhost,minio-*.example.net"
Place the generated ``public.crt`` and ``private.key`` into the ``/path/to/certs`` directory to enable TLS for the MinIO deployment.
Applications can use the ``public.crt`` as a trusted Certificate Authority to allow connections to the MinIO deployment without disabling certificate validation.
If you are reconfiguring an existing deployment that did not previously have TLS enabled, update :envvar:`MINIO_VOLUMES` to specify ``https`` instead of ``http``.
You may also need to update URLs used by applications or clients.

View File

@ -0,0 +1,266 @@
====================================
Enable Multiple Domain TLS for MinIO
====================================
.. default-domain:: minio
.. contents:: Table of Contents
:local:
:depth: 1
MinIO supports Transport Layer Security (TLS) 1.2+ encryption of incoming and outgoing traffic.
.. tab-set::
:class: parent
.. tab-item:: Kubernetes
:sync: k8s
The MinIO Operator supports the following approaches to enabling TLS on a MinIO Tenant:
- Automatic TLS provisioning using Kubernetes Cluster Signing Certificates
- User-specified TLS using Kubernetes secrets
- Certmanager-managed TLS certificates
The MinIO Operator supports attaching user-specified TLS certificates when :ref:`deploying <minio-k8s-deploy-minio-tenant-security>` or :ref:`modifying <minio-k8s-modify-minio-tenant-security>` the MinIO Tenant.
These custom certificates support `Server Name Indication (SNI) <https://en.wikipedia.org/wiki/Server_Name_Indication>`__, where the MinIO server identifies which certificate to use based on the hostname specified by the connecting client.
For example, you can generate certificates signed by your organization's preferred Certificate Authority (CA) and attach those to the MinIO Tenant.
Applications which trust that :abbr:`CA (Certificate Authority)` can connect to the MinIO Tenant and fully validate the Tenant TLS certificates.
.. tab-item:: Baremetal
:sync: baremetal
MinIO automatically detects TLS certificates in the configured or default directory and starts with TLS enabled.
The MinIO server supports multiple TLS certificates, where the server uses `Server Name Indication (SNI) <https://en.wikipedia.org/wiki/Server_Name_Indication>`__ to identify which certificate to use when responding to a client request.
When a client connects using a specific hostname, MinIO uses :abbr:`SNI (Server Name Indication)` to select the appropriate TLS certificate for that hostname.
This procedure documents enabling TLS for multiple domains in MinIO.
For instructions on enabling TLS for a single domain, see :ref:`minio-tls-kubernetes` or :ref:`minio-tls-baremetal`.
Prerequisites
-------------
Access to MinIO Cluster
~~~~~~~~~~~~~~~~~~~~~~~
.. tab-set::
:class: hidden
.. tab-item:: Kubernetes
:sync: k8s
You must have access to the Kubernetes cluster, with administrative permissions associated to your ``kubectl`` configuration.
This procedure assumes your permission set extends sufficiently to support deployment or modification of MinIO-associated resources on the Kubernetes cluster, including but not limited to pods, statefulsets, replicasets, deployments, and secrets.
.. tab-item:: Baremetal
:sync: baremetal
This procedure uses :mc:`mc` for performing operations on the MinIO cluster.
Install ``mc`` on a machine with network access to the cluster.
See the ``mc`` :ref:`Installation Quickstart <mc-install>` for instructions on downloading and installing ``mc``.
This procedure assumes a configured :mc:`alias <mc alias>` for the MinIO cluster.
This procedure also assumes SSH or similar shell-level access with administrative permissions to each MinIO host server.
TLS Certificates
~~~~~~~~~~~~~~~~
Provision the necessary TLS certificates with a :ref:`supported cipher suite <minio-TLS-supported-cipher-suites>` for use by MinIO.
.. tab-set::
:class: hidden
.. tab-item:: Kubernetes
:sync: k8s
See :ref:`minio-tls-kubernetes` for more complete guidance on the supported Tenant TLS configurations.
.. tab-item:: Baremetal
:sync: baremetal
Provision certificates using your preferred path, such as through your organization's internal Certificate Authority or by using a well-known global provider such as Digicert or Verisign.
You can create self-signed certificates using ``openssl`` or the MinIO :minio-git:`certgen <certgen>` tool.
For example, the following command generates a self-signed certificate with a set of IP and DNS Subject Alternate Names (SANs) associated to the MinIO Server hosts:
.. code-block:: shell
certgen -host "localhost,minio-*.example.net"
See :ref:`minio-tls-baremetal` for more complete guidance on certificate generation and placement.
Procedure
---------
.. tab-set::
:class: hidden
.. tab-item:: Kubernetes
:sync: k8s
The MinIO Operator supports three methods of TLS certificate management on MinIO Tenants:
- MinIO automatic TLS certificate generation
- User-specified TLS certificates
- ``cert-manager`` managed TLS certificates
You can also deploy MinIO Tenants without TLS enabled.
.. tab-set::
.. tab-item:: MinIO Auto-TLS
The following steps apply to both new and existing MinIO Deployments using ``Kustomize``:
1. Review the :ref:`Tenant CRD <minio-operator-crd>` ``TenantSpec.requestAutoCert`` and ``TenantSpec.certConfig`` fields.
For existing MinIO Tenants, review the Kustomize resources used to create the Tenant and introspect those fields and their current configuration, if any.
2. Create or Modify your Tenant YAML to set the values of ``requestAutoCert`` and ``certConfig`` as necessary.
For example:
.. code-block:: yaml
spec:
requestAutoCert: true
certConfig:
commonName: "CN=MinioTenantCommonName"
organizationName: "O=MyOrganizationName"
dnsNames:
- 'minio-tenant.domain.tld'
- '*.kubernete.cluster.dns.path.tld'
The ``spec.certConfig.dnsNames`` should contain a list of :abbr:`SAN (Subject Alternate Names)` the TLS certificate covers.
See the :minio-git:`Kustomize Tenant base YAML <operator/blob/master/examples/kustomization/base/tenant.yaml>` for a baseline template for guidance in creating or modifying your Tenant resource.
3. Apply the new Kustomization template
Once you apply the changes, the MinIO Operator automatically redeploys the Tenant with the updated configuration.
.. tab-item:: CertManager
The following steps apply to both new and existing MinIO Deployments using ``Kustomize``:
1. Review the :ref:`Tenant CRD <minio-operator-crd>` ``TenantSpec.externalCertSecret`` field
For existing MinIO Tenants, review the Kustomize resources used to create the Tenant and introspect that field's current configuration, if any.
2. Create or Modify your Tenant YAML to reference the appropriate ``cert-manager`` resources.
For example, the following Tenant YAML fragment references a cert-manager resource ``myminio-tls``:
.. code-block:: yaml
apiVersion: minio.min.io/v2
kind: Tenant
metadata:
name: myminio
namespace: minio-tenant
spec:
## Disable default tls certificates.
requestAutoCert: false
## Use certificates generated by cert-manager.
externalCertSecret:
- name: default-domain
type: cert-manager.io/v1
- name: internal-domain
type: cert-manager.io/v1
- name: external-domain
type: cert-manager.io/v1
3. Apply the new Kustomization Template
Once you apply the changes, the MinIO Operator automatically redeploys the Tenant with the updated configuration.
.. tab-item:: User-Specified
The following steps apply to both new and existing MinIO deployments using ``Kustomize``:
1. Review the :ref:`Tenant CRD <minio-operator-crd>` ``TenantSpec.externalCertSecret`` field.
For existing MinIO Tenants, review the Kustomize resources used to create the Tenant and introspect that field's current configuration, if any.
2. Create or modify your Tenant YAML to reference a secret of type ``kubernetes.io/tls``:
For example, the following Tenant YAML fragment references two TLS secrets for each domain for which the MinIO Tenant accepts connections:
.. code-block:: yaml
apiVersion: minio.min.io/v2
kind: Tenant
metadata:
name: myminio
namespace: minio-tenant
spec:
## Disable default tls certificates.
requestAutoCert: false
## Use user-specified TLS certificate secrets.
externalCertSecret:
- name: domain-certificate-1
type: kubernetes.io/tls
- name: domain-certificate-2
type: kubernetes.io/tls
3. Apply the new Kustomization Template
Once you apply the changes, the MinIO Operator automatically redeploys the Tenant with the updated configuration.
.. tab-item:: Baremetal
:sync: baremetal
The MinIO Server searches for TLS keys and certificates for each node and uses those credentials for enabling TLS.
MinIO automatically enables TLS upon discovery and validation of certificates.
The search location depends on your MinIO configuration:
.. tab-set::
.. tab-item:: Default Path
:sync: baremetal-default
By default, the MinIO server looks for the TLS keys and certificates for each node in the following directory:
.. code-block:: shell
${HOME}/.minio/certs
Where ``${HOME}`` is the home directory of the user running the MinIO Server process.
You may need to create the ``${HOME}/.minio/certs`` directory if it does not exist.
For ``systemd`` managed deployments this must correspond to the ``USER`` running the MinIO process.
If that user has no home directory, use the :guilabel:`Custom Path` option instead.
.. tab-item:: Custom Path
:sync: baremetal-custom
You can specify a path for the MinIO server to search for certificates using the :mc-cmd:`minio server --certs-dir` or ``-S`` parameter.
For example, the following command fragment directs the MinIO process to use the ``/opt/minio/certs`` directory for TLS certificates.
.. code-block:: shell
minio server --certs-dir /opt/minio/certs ...
The user running the MinIO service *must* have read and write permissions to this directory.
Place the certificates in the ``/certs`` folder, creating a subfolder in ``/certs`` for each additional domain for which MinIO should present TLS certificates.
While MinIO has no requirements for folder names, consider creating subfolders whose name matches the domain to improve human readability.
Place the TLS private and public key for that domain in the subfolder.
.. code-block:: shell
/path/to/certs
private.key
public.crt
s3-example.net/
private.key
public.crt
internal-example.net/
private.key
public.crt

View File

@ -26,10 +26,8 @@ Deployments using an external IDP must use the same configuration across sites.
For more information on site replication architecture and deployment concepts, see :ref:`Deployment Architecture: Replicated MinIO Deployments <minio-deployment-architecture-replicated>`.
.. cond:: macos or windows or container
MinIO does not recommend using |platform| hosts for site replication outside of early development, evaluation, or general experimentation.
For production, use :minio-docs:`Linux <minio/linux/operations/install-deploy-manage/multi-site-replication.html>` or :minio-docs:`Kubernetes <minio/kubernetes/upstream/operations/install-deploy-manage/multi-site-replication.html>`
MinIO does not recommend using MacOS, Windows, or non-orchestrated Containerized deployments for site replication outside of early development, evaluation, or general experimentation.
For production, use :minio-docs:`Linux <minio/linux/operations/install-deploy-manage/multi-site-replication.html>` or :minio-docs:`Kubernetes <minio/kubernetes/upstream/operations/install-deploy-manage/multi-site-replication.html>`
Overview
--------

View File

@ -27,7 +27,27 @@ Server-Side Object Encryption with KES
.. Conditionals to handle the slight divergences in procedures between platforms.
.. cond:: linux
.. tab-set::
:class: parent
.. tab-item:: Kubernetes
:sync: k8s
This procedure assumes you have access to a Kubernetes cluster with an active MinIO Operator installation.
For instructions on running KES, see the :kes-docs:`KES docs <tutorials/getting-started/>`.
As part of this procedure, you will:
#. Create or modify a MinIO deployment with support for |SSE| using |KES|.
Defer to the :ref:`Deploy Distributed MinIO <minio-mnmd>` tutorial for guidance on production-ready MinIO deployments.
#. Use the MinIO Operator Console to create or manage a MinIO Tenant.
#. Access the :guilabel:`Encryption` settings for that tenant and configure |SSE| using a :kes-docs:`supported Key Management System <#supported-kms-targets>`.
#. Create a new |EK| for use with |SSE|.
#. Configure automatic bucket-default :ref:`SSE-KMS <minio-encryption-sse-kms>`.
.. tab-item:: Baremetal
:sync: baremetal
This procedure provides guidance for deploying MinIO configured to use KES and enable :ref:`Server Side Encryption <minio-sse-data-encryption>`.
For instructions on running KES, see the :kes-docs:`KES docs <tutorials/getting-started/>`.
@ -41,57 +61,6 @@ Server-Side Object Encryption with KES
#. Configure automatic bucket-default :ref:`SSE-KMS <minio-encryption-sse-kms>`
.. cond:: macos or windows
This procedure assumes a single local host machine running the MinIO and KES processes.
For instructions on running KES, see the :kes-docs:`KES docs <tutorials/getting-started/>`.
.. note::
For production orchestrated environments, use the MinIO Kubernetes Operator to deploy a tenant with |SSE| enabled and configured for use with your |KMS|.
For production baremetal environments, see the `MinIO on Linux documentation <https://min.io/docs/minio/linux/operations/server-side-encryption.html>`__ for tutorials on configuring MinIO with KES and your |KMS|.
As part of this procedure, you will:
#. Create a new |EK| for use with |SSE|.
#. Deploy a MinIO server in :ref:`Single-Node Single-Drive mode <minio-snsd>` configured to use the |KES| container for supporting |SSE|.
#. Configure automatic bucket-default :ref:`SSE-KMS <minio-encryption-sse-kms>`.
.. cond:: container
This procedure assumes that you use a single host machine to run both the MinIO and KES containers.
For instructions on running KES, see the :kes-docs:`KES docs <tutorials/getting-started/>`.
As part of this procedure, you will:
#. Create a new |EK| for use with |SSE|.
#. Deploy a MinIO Server container in :ref:`Single-Node Single-Drive mode <minio-snsd>` configured to use the |KES| container for supporting |SSE|.
#. Configure automatic bucket-default :ref:`SSE-KMS <minio-encryption-sse-kms>`.
For production orchestrated environments, use the MinIO Kubernetes Operator to deploy a tenant with |SSE| enabled and configured for use with your |KMS|.
For production baremetal environments, see the `MinIO on Linux documentation <https://min.io/docs/minio/linux/operations/server-side-encryption.html>`__ for tutorials on configuring MinIO with KES and your |KMS|.
.. cond:: k8s
This procedure assumes you have access to a Kubernetes cluster with an active MinIO Operator installation.
For instructions on running KES, see the :kes-docs:`KES docs <tutorials/getting-started/>`.
As part of this procedure, you will:
#. Use the MinIO Operator Console to create or manage a MinIO Tenant.
#. Access the :guilabel:`Encryption` settings for that tenant and configure |SSE| using a :kes-docs:`supported Key Management System <#supported-kms-targets>`.
#. Create a new |EK| for use with |SSE|.
#. Configure automatic bucket-default :ref:`SSE-KMS <minio-encryption-sse-kms>`.
For production baremetal environments, see the `MinIO on Linux documentation <https://min.io/docs/minio/linux/operations/server-side-encryption.html>`__ for tutorials on configuring MinIO with KES and your |KMS|.
.. important::
.. include:: /includes/common/common-minio-kes.rst
@ -101,28 +70,55 @@ Server-Side Object Encryption with KES
Prerequisites
-------------
.. cond:: k8s
Access to MinIO Cluster
~~~~~~~~~~~~~~~~~~~~~~~
MinIO Kubernetes Operator
~~~~~~~~~~~~~~~~~~~~~~~~~
.. tab-set::
:class: hidden
.. include:: /includes/k8s/common-operator.rst
:start-after: start-requires-operator-plugin
:end-before: end-requires-operator-plugin
.. tab-item:: Kubernetes
:sync: k8s
See :ref:`deploy-operator-kubernetes` for complete documentation on deploying the MinIO Operator.
You must have access to the Kubernetes cluster, with administrative permissions associated to your ``kubectl`` configuration.
This procedure assumes your permission set extends sufficiently to support deployment or modification of MinIO-associated resources on the Kubernetes cluster, including but not limited to pods, statefulsets, replicasets, deployments, and secrets.
.. tab-item:: Baremetal
:sync: baremetal
This procedure uses :mc:`mc` for performing operations on the MinIO cluster.
Install ``mc`` on a machine with network access to the cluster.
See the ``mc`` :ref:`Installation Quickstart <mc-install>` for instructions on downloading and installing ``mc``.
This procedure assumes a configured :mc:`alias <mc alias>` for the MinIO cluster.
.. _minio-sse-vault-prereq-vault:
Ensure KES Access to a Supported KMS Target
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. cond:: linux or macos or windows or container
.. tab-set::
:class: hidden
.. tab-item:: Kubernetes
:sync: k8s
This procedure assumes an existing :kes-docs:`supported KMS installation <#supported-kms-targets>` accessible from the Kubernetes cluster.
- For deployments within the same Kubernetes cluster as the MinIO Tenant, you can use Kubernetes service names to allow the MinIO Tenant to establish connectivity to the target KMS service.
- For deployments external to the Kubernetes cluster, you must ensure the cluster supports routing communications between Kubernetes services and pods and the external network.
This may require configuration or deployment of additional Kubernetes network components and/or enabling access to the public internet.
Defer to the documentation for your chosen KMS solution for guidance on deployment and configuration.
.. tab-item:: Baremetal
:sync: baremetal
This procedure assumes an existing KES installation connected to a supported |KMS| installation, both accessible from the local host.
Refer to the installation instructions for your :kes-docs:`supported KMS target <#supported-kms-targets>` to deploy KES and connect it to a KMS solution.
.. admonition:: KES Operations Require Unsealed Target
.. admonition:: KES Operations Require Unsealed Target
:class: important
Some supported |KMS| targets allow you to seal or unseal the vault instance.
@ -133,12 +129,6 @@ Ensure KES Access to a Supported KMS Target
See the documentation for your chosen |KMS| solution for more information on whether unsealing may be required.
.. cond:: k8s
.. include:: /includes/k8s/common-minio-kes.rst
:start-after: start-kes-prereq-hashicorp-vault-desc
:end-before: end-kes-prereq-hashicorp-vault-desc
Refer to the configuration instruction in the :kes-docs:`KES documentation <>` for your chosen supported |KMS|:
- :kes-docs:`AWS Secrets Manager <integrations/aws-secrets-manager/>`
@ -149,76 +139,25 @@ Refer to the configuration instruction in the :kes-docs:`KES documentation <>` f
- :kes-docs:`HashiCorp Vault <integrations/hashicorp-vault-keystore/>`
- :kes-docs:`Thales CipherTrust Manager (formerly Gemalto KeySecure) <integrations/thales-ciphertrust/>`
Procedure
---------
.. cond:: linux or macos or windows
This procedure provides instructions for configuring and enabling Server-Side Encryption using your selected `supported KMS solution <https://min.io/docs/kes/#supported-kms-targets>`__ in production environments.
Specifically, this procedure assumes the following:
Deploy or Ensure Access to a MinIO Deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- An existing production-grade KMS target
- One or more KES servers connected to the KMS target
- One or more hosts for a new or existing MinIO deployment
.. include:: /includes/common/common-minio-kes.rst
:start-after: start-kes-new-existing-minio-deployment-desc
:end-before: end-kes-new-existing-minio-deployment-desc
.. tab-set::
:class: hidden
.. cond:: container
Install Podman or a Similar Container Management Interface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. include:: /includes/container/common-deploy.rst
:start-after: start-common-prereq-container-management-interface
:end-before: end-common-prereq-container-management-interface
.. The included file has the correct header structure.
There are slight divergences between platforms so this ends up being easier compared to cascading conditionals to handle little nitty-gritty differences.
.. |namespace| replace:: minio-kes-vault
.. cond:: container
.. |kescertpath| replace:: ~/minio-kes-vault/certs
.. |kesconfigpath| replace:: ~/minio-kes-vault/config
.. |kesconfigcertpath| replace:: /certs/
.. |miniocertpath| replace:: ~/minio-kes-vault/certs
.. |minioconfigpath| replace:: ~/minio-kes-vault/config
.. |miniodatapath| replace:: ~/minio-kes-vault/minio
.. include:: /includes/container/steps-configure-minio-kes-hashicorp.rst
.. cond:: linux
.. |kescertpath| replace:: /opt/kes/certs
.. |kesconfigpath| replace:: /opt/kes/config
.. |kesconfigcertpath| replace:: /opt/kes/certs/
.. |miniocertpath| replace:: /opt/minio/certs
.. |minioconfigpath| replace:: /opt/minio/config
.. |miniodatapath| replace:: ~/minio
.. include:: /includes/linux/steps-configure-minio-kes-hashicorp.rst
.. cond:: macos
.. |kescertpath| replace:: ~/minio-kes-vault/certs
.. |kesconfigpath| replace:: ~/minio-kes-vault/config
.. |kesconfigcertpath| replace:: ~/minio-kes-vault/certs
.. |miniocertpath| replace:: ~/minio-kes-vault/certs
.. |minioconfigpath| replace:: ~/minio-kes-vault/config
.. |miniodatapath| replace:: ~/minio-kes-vault/minio
.. include:: /includes/macos/steps-configure-minio-kes-hashicorp.rst
.. cond:: k8s
.. tab-item:: Kubernetes
:sync: k8s
.. include:: /includes/k8s/steps-configure-minio-kes-hashicorp.rst
.. cond:: windows
.. tab-item:: Baremetal
:sync: baremetal
.. |kescertpath| replace:: C:\\minio-kes-vault\\certs
.. |kesconfigpath| replace:: C:\\minio-kes-vault\\config
.. |kesconfigcertpath| replace:: C:\\minio-kes-vault\\certs\\
.. |miniocertpath| replace:: C:\\minio-kes-vault\\certs
.. |minioconfigpath| replace:: C:\\minio-kes-vault\\config
.. |miniodatapath| replace:: C:\\minio-kes-vault\\minio
.. include:: /includes/windows/steps-configure-minio-kes-hashicorp.rst
.. Procedure for K8s only, for adding KES to an existing Tenant
.. include:: /includes/linux/steps-configure-minio-kes-hashicorp.rst

View File

@ -0,0 +1,15 @@
===================
Baremetal Reference
===================
.. default-domain:: minio
This page acts as an index for MinIO Baremetal references.
.. toctree::
:titlesonly:
/reference/minio-mc
/reference/minio-mc-admin
/reference/minio-server/minio-server
/reference/minio-mc-deprecated

View File

@ -0,0 +1,15 @@
====================
Kubernetes Reference
====================
.. default-domain:: minio
This page acts as an index for MinIO-specific Kubernetes references.
.. toctree::
:titlesonly:
/reference/operator-crd
/reference/operator-environment-variables
/reference/operator-chart-values
/reference/tenant-chart-values

View File

@ -105,4 +105,4 @@ Binary Compression
:mc-cmd:`mc admin update` compresses the binary before sending to all nodes in the deployment.
This feature does not apply to :ref:`systemctl managed deployments <minio-upgrade-systemctl>`.
This feature does not apply to :ref:`systemctl managed deployments <minio-baremetal>`.

View File

@ -114,7 +114,7 @@ For a :ref:`third-party <minio-external-identity-management>` identity service s
View Policies from Group Membership
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Use :mc-cmd:`mc admin user info` with :std:option:`--json <mc.--json>` to view the policies inherited from a user's :ref:`group memberships <minio-groups>`:
Use :mc-cmd:`mc admin user info` with :std:option:`--json <mc.--json>` to view the policies inherited from a user's :ref:`group memberships <minio-groups>`:
.. code-block:: shell
:class: copyable

View File

@ -148,7 +148,7 @@ Use :mc:`mc ilm rule ls` to list a bucket's lifecycle management rules:
Show Policy Modification Time
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Use :mc:`mc ilm rule ls` with :std:option:`--json <mc.--json>` to show the time the policy for a bucket was last updated.
Use :mc:`mc ilm rule ls` with :std:option:`--json <mc.--json>` to show the time the policy for a bucket was last updated.
.. code-block:: shell
:class: copyable

View File

@ -137,6 +137,13 @@ Parameters
- :mc-cmd:`~mc stat --rewind`
- :mc-cmd:`~mc stat --recursive`
Global Flags
~~~~~~~~~~~~
.. include:: /includes/common-minio-mc.rst
:start-after: start-minio-mc-globals
:end-before: end-minio-mc-globals
Examples
--------

View File

@ -1,235 +0,0 @@
tag: linux
excludes:
- 'operations/install-deploy-manage/deploy-minio-tenant.rst'
- 'operations/install-deploy-manage/modify-minio-tenant.rst'
- 'operations/install-deploy-manage/deploy-operator-helm.rst'
- 'operations/install-deploy-manage/deploy-operator-kustomize.rst'
- 'operations/install-deploy-manage/expand-minio-tenant.rst'
- 'operations/install-deploy-manage/upgrade-minio-tenant.rst'
- 'operations/install-deploy-manage/upgrade-minio-operator.rst'
- 'operations/install-deploy-manage/delete-minio-tenant.rst'
- 'operations/install-deploy-manage/minio-operator-console.rst'
- 'operations/install-deploy-manage/deploy-minio-tenant-helm.rst'
- 'operations/install-deploy-manage/deploy-operator-kustomize.rst'
- 'operations/deploy-manage-tenants.rst'
- 'operations/cert-manager.rst'
- 'operations/cert-manager/cert-manager-operator.rst'
- 'operations/cert-manager/cert-manager-tenants.rst'
- 'developers/sts-for-operator.rst'
- 'reference/kubectl-minio-plugin.rst'
- 'reference/kubectl-minio-plugin/kubectl-minio-delete.rst'
- 'reference/kubectl-minio-plugin/kubectl-minio-init.rst'
- 'reference/kubectl-minio-plugin/kubectl-minio-proxy.rst'
- 'reference/kubectl-minio-plugin/kubectl-minio-tenant-create.rst'
- 'reference/kubectl-minio-plugin/kubectl-minio-tenant-delete.rst'
- 'reference/kubectl-minio-plugin/kubectl-minio-tenant-expand.rst'
- 'reference/kubectl-minio-plugin/kubectl-minio-tenant-info.rst'
- 'reference/kubectl-minio-plugin/kubectl-minio-tenant-list.rst'
- 'reference/kubectl-minio-plugin/kubectl-minio-tenant-report.rst'
- 'reference/kubectl-minio-plugin/kubectl-minio-tenant-upgrade.rst'
- 'reference/kubectl-minio-plugin/kubectl-minio-tenant.rst'
- 'reference/kubectl-minio-plugin/kubectl-minio-version.rst'
- 'reference/operator-crd.rst'
- 'reference/operator-chart-values.rst'
- 'reference/operator-environment-variables.rst'
- 'reference/tenant-chart-values.rst'
---
tag: macos
excludes:
- 'operations/install-deploy-manage/deploy-minio-multi-node-multi-drive.rst'
- 'operations/install-deploy-manage/expand-minio-deployment.rst'
- 'operations/install-deploy-manage/decommission-server-pool.rst'
- 'operations/install-deploy-manage/deploy-minio-tenant.rst'
- 'operations/install-deploy-manage/deploy-operator-helm.rst'
- 'operations/install-deploy-manage/deploy-operator-kustomize.rst'
- 'operations/install-deploy-manage/modify-minio-tenant.rst'
- 'operations/install-deploy-manage/expand-minio-tenant.rst'
- 'operations/install-deploy-manage/upgrade-minio-tenant.rst'
- 'operations/install-deploy-manage/upgrade-minio-operator.rst'
- 'operations/install-deploy-manage/delete-minio-tenant.rst'
- 'operations/install-deploy-manage/minio-operator-console.rst'
- 'operations/install-deploy-manage/deploy-minio-tenant-helm.rst'
- 'operations/install-deploy-manage/deploy-operator-kustomize.rst'
- 'operations/deploy-manage-tenants.rst'
- 'operations/cert-manager.rst'
- 'operations/cert-manager/cert-manager-operator.rst'
- 'operations/cert-manager/cert-manager-tenants.rst'
- 'reference/kubectl-minio-plugin*'
- 'reference/minio-server*'
- 'reference/minio-mc*'
- 'reference/deprecated/*'
- 'reference/operator-crd.rst'
- 'reference/operator-chart-values.rst'
- 'reference/operator-environment-variables.rst'
- 'reference/tenant-chart-values.rst'
- 'reference/s3-api-compatibility.rst'
- 'developers/*'
- 'integrations/*'
---
tag: windows
excludes:
- 'operations/install-deploy-manage/expand-minio-deployment.rst'
- 'operations/install-deploy-manage/upgrade-minio-deployment.rst'
- 'operations/install-deploy-manage/decommission-server-pool.rst'
- 'operations/install-deploy-manage/migrate-fs-gateway.rst'
- 'operations/manage-existing-deployments.rst'
- 'operations/install-deploy-manage/deploy-minio-single-node-multi-drive.rst'
- 'operations/install-deploy-manage/deploy-minio-multi-node-multi-drive.rst'
- 'operations/install-deploy-manage/deploy-operator-helm.rst'
- 'operations/install-deploy-manage/deploy-operator-kustomize.rst'
- 'operations/install-deploy-manage/multi-site-replication.rst'
- 'operations/install-deploy-manage/deploy-minio-tenant.rst'
- 'operations/install-deploy-manage/modify-minio-tenant.rst'
- 'operations/install-deploy-manage/expand-minio-tenant.rst'
- 'operations/install-deploy-manage/upgrade-minio-tenant.rst'
- 'operations/install-deploy-manage/upgrade-minio-operator.rst'
- 'operations/install-deploy-manage/delete-minio-tenant.rst'
- 'operations/install-deploy-manage/minio-operator-console.rst'
- 'operations/install-deploy-manage/deploy-minio-tenant-helm.rst'
- 'operations/install-deploy-manage/deploy-operator-kustomize.rst'
- 'operations/deploy-manage-tenants.rst'
- 'operations/cert-manager.rst'
- 'operations/cert-manager/cert-manager-operator.rst'
- 'operations/cert-manager/cert-manager-tenants.rst'
- 'reference/kubectl-minio-plugin*'
- 'reference/minio-server*'
- 'reference/minio-mc*'
- 'reference/deprecated/*'
- 'reference/operator-crd.rst'
- 'reference/operator-chart-values.rst'
- 'reference/operator-environment-variables.rst'
- 'reference/tenant-chart-values.rst'
- 'reference/s3-api-compatibility.rst'
- 'developers/*'
- 'integrations/*'
---
tag: container
excludes:
- 'operations/install-deploy-manage/deploy-minio-tenant.rst'
- 'operations/install-deploy-manage/deploy-operator-helm.rst'
- 'operations/install-deploy-manage/deploy-operator-kustomize.rst'
- 'operations/install-deploy-manage/modify-minio-tenant.rst'
- 'operations/install-deploy-manage/expand-minio-tenant.rst'
- 'operations/install-deploy-manage/upgrade-minio-tenant.rst'
- 'operations/install-deploy-manage/upgrade-minio-operator.rst'
- 'operations/install-deploy-manage/delete-minio-tenant.rst'
- 'operations/install-deploy-manage/minio-operator-console.rst'
- 'operations/install-deploy-manage/deploy-minio-multi-node-multi-drive.rst'
- 'operations/install-deploy-manage/multi-site-replication.rst'
- 'operations/install-deploy-manage/decommission-server-pool.rst'
- 'operations/install-deploy-manage/expand-minio-deployment.rst'
- 'operations/deploy-manage-tenants.rst'
- 'operations/cert-manager.rst'
- 'operations/cert-manager/cert-manager-operator.rst'
- 'operations/cert-manager/cert-manager-tenants.rst'
- 'operations/install-deploy-manage/deploy-minio-tenant-helm.rst'
- 'operations/install-deploy-manage/deploy-operator-kustomize.rst'
- 'reference/kubectl-minio-plugin*'
- 'reference/minio-server*'
- 'reference/minio-mc*'
- 'reference/deprecated/*'
- 'reference/s3-api-compatibility.rst'
- 'reference/operator-crd.rst'
- 'reference/operator-chart-values.rst'
- 'reference/operator-environment-variables.rst'
- 'reference/tenant-chart-values.rst'
- 'developers/*'
- 'integrations/*'
---
tag: k8s
excludes:
- 'operations/install-deploy-manage/deploy-minio-single-node-single-drive.rst'
- 'operations/install-deploy-manage/deploy-minio-single-node-multi-drive.rst'
- 'operations/install-deploy-manage/deploy-minio-multi-node-multi-drive.rst'
- 'operations/install-deploy-manage/upgrade-minio-deployment.rst'
- 'operations/install-deploy-manage/expand-minio-deployment.rst'
- 'operations/install-deploy-manage/decommission-server-pool.rst'
- 'operations/install-deploy-manage/migrate-fs-gateway.rst'
- 'operations/manage-existing-deployments.rst'
- 'reference/minio-server*'
- 'reference/minio-mc*'
- 'reference/deprecated/*'
- 'reference/s3-api-compatibility.rst'
- 'developers/dotnet/*'
- 'developers/go/*'
- 'developers/haskell/*'
- 'developers/java/*'
- 'developers/javascript/*'
- 'developers/python/*'
- 'developers/security-token-service/*'
- 'developers/minio-drivers.rst'
- 'developers/security-token-service.rst'
- 'developers/transforms-with-object-lambda.rst'
- 'integrations/*'
---
tag: openshift
excludes:
- 'operations/install-deploy-manage/deploy-minio-single-node-single-drive.rst'
- 'operations/install-deploy-manage/deploy-minio-single-node-multi-drive.rst'
- 'operations/install-deploy-manage/deploy-minio-multi-node-multi-drive.rst'
- 'operations/install-deploy-manage/deploy-operator-helm.rst'
- 'operations/install-deploy-manage/upgrade-minio-deployment.rst'
- 'operations/install-deploy-manage/expand-minio-deployment.rst'
- 'operations/install-deploy-manage/decommission-server-pool.rst'
- 'operations/install-deploy-manage/migrate-fs-gateway.rst'
- 'operations/manage-existing-deployments.rst'
- 'reference/minio-server*'
- 'reference/minio-mc*'
- 'reference/deprecated/*'
- 'reference/s3-api-compatibility.rst'
- 'developers/*'
- 'integrations/*'
---
tag: eks
excludes:
- 'operations/install-deploy-manage/deploy-minio-single-node-single-drive.rst'
- 'operations/install-deploy-manage/deploy-minio-single-node-multi-drive.rst'
- 'operations/install-deploy-manage/deploy-minio-multi-node-multi-drive.rst'
- 'operations/install-deploy-manage/deploy-operator-helm.rst'
- 'operations/install-deploy-manage/upgrade-minio-deployment.rst'
- 'operations/install-deploy-manage/expand-minio-deployment.rst'
- 'operations/install-deploy-manage/decommission-server-pool.rst'
- 'operations/install-deploy-manage/migrate-fs-gateway.rst'
- 'operations/manage-existing-deployments.rst'
- 'reference/minio-server*'
- 'reference/minio-mc*'
- 'reference/deprecated/*'
- 'reference/s3-api-compatibility.rst'
- 'developers/*'
- 'integrations/*'
---
tag: gke
excludes:
- 'operations/install-deploy-manage/deploy-minio-single-node-single-drive.rst'
- 'operations/install-deploy-manage/deploy-minio-single-node-multi-drive.rst'
- 'operations/install-deploy-manage/deploy-minio-multi-node-multi-drive.rst'
- 'operations/install-deploy-manage/deploy-operator-helm.rst'
- 'operations/install-deploy-manage/upgrade-minio-deployment.rst'
- 'operations/install-deploy-manage/expand-minio-deployment.rst'
- 'operations/install-deploy-manage/decommission-server-pool.rst'
- 'operations/install-deploy-manage/migrate-fs-gateway.rst'
- 'operations/manage-existing-deployments.rst'
- 'reference/minio-server*'
- 'reference/minio-mc*'
- 'reference/deprecated/*'
- 'reference/s3-api-compatibility.rst'
- 'developers/*'
- 'integrations/*'
---
tag: aks
excludes:
- 'operations/install-deploy-manage/deploy-minio-single-node-single-drive.rst'
- 'operations/install-deploy-manage/deploy-minio-single-node-multi-drive.rst'
- 'operations/install-deploy-manage/deploy-minio-multi-node-multi-drive.rst'
- 'operations/install-deploy-manage/deploy-operator-helm.rst'
- 'operations/install-deploy-manage/upgrade-minio-deployment.rst'
- 'operations/install-deploy-manage/expand-minio-deployment.rst'
- 'operations/install-deploy-manage/decommission-server-pool.rst'
- 'operations/install-deploy-manage/migrate-fs-gateway.rst'
- 'operations/manage-existing-deployments.rst'
- 'reference/minio-server*'
- 'reference/minio-mc*'
- 'reference/deprecated/*'
- 'reference/s3-api-compatibility.rst'
- 'developers/*'
- 'integrations/*'

17
stage.sh Executable file
View File

@ -0,0 +1,17 @@
#!/bin/bash
# Stage the locally built docs to the staging host over rsync/ssh.
# Expects a staging.env file in the working directory defining SSHPORT,
# SSHKEY, STAGEUSER, STAGEHOST, STAGEPORT, and STAGEPROJECT.
set -e

# Fail early with a clear message instead of a bare `source` error.
if [ ! -f staging.env ]; then
    echo "staging.env not found in $(pwd); create it with your staging settings" >&2
    exit 1
fi
source staging.env

# Current git branch; the Sphinx output lives under build/<branch>/mindocs.
BRANCH=$(git rev-parse --symbolic-full-name --abbrev-ref HEAD)

function main() {
    echo "Staging to $STAGEHOST:"
    # NOTE: --mkpath (create missing path components on the remote) requires
    # rsync >= 3.2.3 on the client.
    rsync --mkpath -rv --delete -e "ssh -i $SSHKEY -p $SSHPORT" \
        build/"$BRANCH"/mindocs/* "$STAGEUSER@$STAGEHOST:/var/www/html/$STAGEPROJECT/$BRANCH"
    echo "Staging complete"
    echo "Staged to http://$STAGEHOST:$STAGEPORT/$STAGEPROJECT/$BRANCH/html/index.html"
}
main "$@"

6
staging.env Normal file
View File

@ -0,0 +1,6 @@
# Settings consumed by stage.sh (sourced as a shell script) to rsync the
# built docs to the staging host and print the staged URL.
# SSH port used for the rsync transport.
SSHPORT=22
# Remote user the rsync/ssh connection authenticates as.
STAGEUSER=docs
# Address of the staging host (rsync target and host in the staged URL).
STAGEHOST=35.224.151.164
# HTTP port used when printing the staged URL.
STAGEPORT=80
STAGEPROJECT=community-docs # use your repo name here
# Private key presented via `ssh -i`; must be authorized on the staging host.
SSHKEY=~/.ssh/minio_docs_ed25519

69
sync-minio-version.sh Executable file
View File

@ -0,0 +1,69 @@
#!/bin/bash
# Fetch the latest MinIO Server release metadata and substitute the real
# download URLs / release tag for the placeholder tokens in source/conf.py
# (MINIOLATEST, DEBURL, RPMURL, MINIOURL, and the ARM64/PPC64LE variants).
set -e

# BSD sed (Darwin) requires an explicit empty backup suffix for in-place
# edits; GNU sed takes bare -i. Resolve that difference once, up front.
kname=$(uname -s)
case "${kname}" in
    "Darwin")
        SEDARGS=(-i "")
        ;;
    *)
        SEDARGS=(-i)
        ;;
esac

# replace PLACEHOLDER VALUE -- substitute every occurrence in source/conf.py.
function replace() {
    sed "${SEDARGS[@]}" "s|$1|$2|g" source/conf.py
}

function main() {
    # Always start from a fresh copy of the downloads manifest.
    if test -f /tmp/downloads-minio.json; then
        rm /tmp/downloads-minio.json
    fi
    curl --retry 10 -Ls https://min.io/assets/downloads-minio.json -o /tmp/downloads-minio.json
    if test -f /tmp/downloads-minio.json; then
        echo "Populated downloads-minio.json from latest, proceeding"
    else
        echo "Failed to download downloads-minio.json, exiting" >&2
        exit 1
    fi

    # NOTE(review): jq is intentionally run without -r here, so each value
    # keeps its surrounding double quotes -- presumably the conf.py
    # placeholders expect that; verify before changing.
    # AMD64 arch
    MINIOAMD64=$(jq '.Linux."MinIO Server".amd64.Binary.download' /tmp/downloads-minio.json)
    DEB=$(jq '.Linux."MinIO Server".amd64.DEB.download' /tmp/downloads-minio.json)
    RPM=$(jq '.Linux."MinIO Server".amd64.RPM.download' /tmp/downloads-minio.json)
    # ARM64 arch
    MINIOARM64=$(jq '.Linux."MinIO Server".arm64.Binary.download' /tmp/downloads-minio.json)
    DEBARM64=$(jq '.Linux."MinIO Server".arm64.DEB.download' /tmp/downloads-minio.json)
    RPMARM64=$(jq '.Linux."MinIO Server".arm64.RPM.download' /tmp/downloads-minio.json)
    # ppc64le arch
    MINIOPPC64LE=$(jq '.Linux."MinIO Server".ppc64le.Binary.download' /tmp/downloads-minio.json)
    DEBPPC64LE=$(jq '.Linux."MinIO Server".ppc64le.DEB.download' /tmp/downloads-minio.json)
    RPMPPC64LE=$(jq '.Linux."MinIO Server".ppc64le.RPM.download' /tmp/downloads-minio.json)

    # Latest release tag, extracted from the GitHub "latest" redirect URL.
    MINIO=$(curl --retry 10 -Ls -o /dev/null -w "%{url_effective}" https://github.com/minio/minio/releases/latest | sed "s/https:\/\/github.com\/minio\/minio\/releases\/tag\///")

    # Substitution order matches the original script; no placeholder is a
    # substring of a later one, so each edit touches only its own token.
    replace MINIOLATEST "${MINIO}"
    replace DEBURL "${DEB}"
    replace RPMURL "${RPM}"
    replace MINIOURL "${MINIOAMD64}"
    replace DEBARM64URL "${DEBARM64}"
    replace RPMARM64URL "${RPMARM64}"
    replace MINIOARM64URL "${MINIOARM64}"
    replace DEBPPC64LEURL "${DEBPPC64LE}"
    replace RPMPPC64LEURL "${RPMPPC64LE}"
    replace MINIOPPC64LEURL "${MINIOPPC64LE}"
}
main