Ceilometer Configuration Options

Ceilometer Sample Configuration File

Configure Ceilometer by editing /etc/ceilometer/ceilometer.conf.

No config file is provided with the source code; it is created during installation. If no configuration file was installed, one can easily be created by running:

oslo-config-generator \
    --config-file=/etc/ceilometer/ceilometer-config-generator.conf \
    --output-file=/etc/ceilometer/ceilometer.conf
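
As a quick orientation before the full sample below, the setting most deployments adapt first is the messaging transport. A minimal sketch, assuming a hypothetical RabbitMQ broker at controller.example.org with the password RABBIT_PASS:

[DEFAULT]
transport_url = rabbit://openstack:RABBIT_PASS@controller.example.org:5672/

Credentials for talking to other OpenStack services are set in the [service_credentials] section; an illustrative example follows that section in the sample below.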

The following is a sample Ceilometer configuration for adaptation and use. It is auto-generated from Ceilometer when this documentation is built, and can also be viewed in file form.

[DEFAULT]

#
# From ceilometer
#

# Inspector to use for inspecting the hypervisor layer. Known inspectors are
# libvirt, hyperv, vsphere and xenapi. (string value)
#hypervisor_inspector = libvirt

# Libvirt domain type. (string value)
# Possible values:
# kvm - <No description provided>
# lxc - <No description provided>
# qemu - <No description provided>
# uml - <No description provided>
# xen - <No description provided>
#libvirt_type = kvm

# Override the default libvirt URI (which is dependent on libvirt_type).
# (string value)
#libvirt_uri =

# Swift reseller prefix. Must match reseller_prefix in proxy-server.conf.
# (string value)
#reseller_prefix = AUTH_

# Configuration file for pipeline definition. (string value)
#pipeline_cfg_file = pipeline.yaml

# Configuration file for event pipeline definition. (string value)
#event_pipeline_cfg_file = event_pipeline.yaml

# Configuration file for polling definition. (string value)
#cfg_file = polling.yaml

# Workload partitioning group prefix. Use only if you want to run multiple
# polling agents with different config files. Each sub-group of the agent
# pool that shares a partitioning_group_prefix should load a disjoint subset
# of pollsters. (string value)
#partitioning_group_prefix = <None>

# Batch size of samples to send to the notification agent. Set to 0 to
# disable batching. (integer value)
#batch_size = 50

# List of directories with YAML files used to create pollsters. (multi valued)
#pollsters_definitions_dirs = /etc/ceilometer/pollsters.d

# Source for samples emitted on this instance. (string value)
#sample_source = openstack

# List of metadata prefixes reserved for metering use. (list value)
#reserved_metadata_namespace = metering.

# Limit on length of reserved metadata values. (integer value)
#reserved_metadata_length = 256

# List of metadata keys reserved for metering use. These keys are in addition
# to the ones included in the namespace. (list value)
#reserved_metadata_keys =

# Path to the rootwrap configuration file to use for running commands as root
# (string value)
#rootwrap_config = /etc/ceilometer/rootwrap.conf

# Name of this node, which must be valid in an AMQP key. Can be an opaque
# identifier. For ZeroMQ only, must be a valid host name, FQDN, or IP address.
# (host address value)
#
# This option has a sample default set, which means that
# its actual default value may vary from the one documented
# below.
#host = <your_hostname>

# Timeout in seconds for HTTP requests. Set to None to disable the timeout.
# (integer value)
#http_timeout = 600

# Maximum number of parallel requests for services to handle at the same time.
# (integer value)
# Minimum value: 1
#max_parallel_requests = 64

#
# From oslo.log
#

# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
# Note: This option can be changed without restarting.
#debug = false

# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
# files, see the Python logging module documentation. Note that when logging
# configuration files are used then all logging configuration is set in the
# configuration file and other logging configuration options are ignored (for
# example, log-date-format). (string value)
# Note: This option can be changed without restarting.
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append = <None>

# Defines the format string for %%(asctime)s in log records. Default:
# %(default)s . This option is ignored if log_config_append is set. (string
# value)
#log_date_format = %Y-%m-%d %H:%M:%S

# (Optional) Name of log file to send logging output to. If no default is set,
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file = <None>

# (Optional) The base directory used for relative log_file paths. This option
# is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir = <None>

# Uses a logging handler designed to watch the file system. When the log file
# is moved or removed, this handler opens a new log file with the specified
# path instantaneously. It makes sense only if the log_file option is
# specified and the platform is Linux. This option is ignored if
# log_config_append is set.
# (boolean value)
#watch_log_file = false

# Use syslog for logging. Existing syslog format is DEPRECATED and will be
# changed later to honor RFC5424. This option is ignored if log_config_append
# is set. (boolean value)
#use_syslog = false

# Enable journald for logging. If running in a systemd environment you may
# wish to enable journal support. Doing so will use the journal native
# protocol, which includes structured metadata in addition to log messages.
# This option is ignored if log_config_append is set. (boolean value)
#use_journal = false

# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER

# Use JSON formatting for logging. This option is ignored if log_config_append
# is set. (boolean value)
#use_json = false

# Log output to standard error. This option is ignored if log_config_append is
# set. (boolean value)
#use_stderr = false

# Log output to Windows Event Log. (boolean value)
#use_eventlog = false

# The amount of time before the log files are rotated. This option is ignored
# unless log_rotation_type is set to "interval". (integer value)
#log_rotate_interval = 1

# Rotation interval type. The time of the last file change (or the time when
# the service was started) is used when scheduling the next rotation. (string
# value)
# Possible values:
# Seconds - <No description provided>
# Minutes - <No description provided>
# Hours - <No description provided>
# Days - <No description provided>
# Weekday - <No description provided>
# Midnight - <No description provided>
#log_rotate_interval_type = days

# Maximum number of rotated log files. (integer value)
#max_logfile_count = 30

# Log file maximum size in MB. This option is ignored if "log_rotation_type" is
# not set to "size". (integer value)
#max_logfile_size_mb = 200

# Log rotation type. (string value)
# Possible values:
# interval - Rotate logs at predefined time intervals.
# size - Rotate logs once they reach a predefined size.
# none - Do not rotate log files.
#log_rotation_type = none

# Format string to use for log messages with context. Used by
# oslo_log.formatters.ContextFormatter (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s

# Format string to use for log messages when context is undefined. Used by
# oslo_log.formatters.ContextFormatter (string value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s

# Additional data to append to log message when logging level for the message
# is DEBUG. Used by oslo_log.formatters.ContextFormatter (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d

# Prefix each line of exception output with this format. Used by
# oslo_log.formatters.ContextFormatter (string value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s

# Defines the format string for %(user_identity)s that is used in
# logging_context_format_string. Used by oslo_log.formatters.ContextFormatter
# (string value)
#logging_user_identity_format = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s

# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,oslo_policy=INFO,dogpile.core.dogpile=INFO

# Enables or disables publication of error events. (boolean value)
#publish_errors = false

# The format for an instance that is passed with the log message. (string
# value)
#instance_format = "[instance: %(uuid)s] "

# The format for an instance UUID that is passed with the log message. (string
# value)
#instance_uuid_format = "[instance: %(uuid)s] "

# Interval, number of seconds, of log rate limiting. (integer value)
#rate_limit_interval = 0

# Maximum number of logged messages per rate_limit_interval. (integer value)
#rate_limit_burst = 0

# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG
# or empty string. Logs with level greater or equal to rate_limit_except_level
# are not filtered. An empty string means that all levels are filtered. (string
# value)
#rate_limit_except_level = CRITICAL

# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false

#
# From oslo.messaging
#

# Size of RPC connection pool. (integer value)
# Minimum value: 1
#rpc_conn_pool_size = 30

# The pool size limit for connections expiration policy (integer value)
#conn_pool_min_size = 2

# The time-to-live in sec of idle connections in the pool (integer value)
#conn_pool_ttl = 1200

# Size of executor thread pool when executor is threading or eventlet. (integer
# value)
# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
#executor_thread_pool_size = 64

# Seconds to wait for a response from a call. (integer value)
#rpc_response_timeout = 60

# The network address and optional user credentials for connecting to the
# messaging backend, in URL format. The expected format is:
#
# driver://[user:pass@]host:port[,[userN:passN@]hostN:portN]/virtual_host?query
#
# Example: rabbit://rabbitmq:password@127.0.0.1:5672//
#
# For full details on the fields in the URL see the documentation of
# oslo_messaging.TransportURL at
# https://docs.openstack.org/oslo.messaging/latest/reference/transport.html
# (string value)
#transport_url = rabbit://

# The default exchange under which topics are scoped. May be overridden by an
# exchange name specified in the transport_url option. (string value)
#control_exchange = openstack

# Add an endpoint to answer ping calls. The endpoint is named
# oslo_rpc_server_ping. (boolean value)
#rpc_ping_enabled = false

#
# From oslo.service.service
#

# Enable eventlet backdoor.  Acceptable values are 0, <port>, and
# <start>:<end>, where 0 results in listening on a random tcp port number;
# <port> results in listening on the specified port number (and not enabling
# backdoor if that port is in use); and <start>:<end> results in listening on
# the smallest unused port number within the specified range of port numbers.
# The chosen port is displayed in the service's log file. (string value)
#backdoor_port = <None>

# Enable eventlet backdoor, using the provided path as a unix socket that can
# receive connections. This option is mutually exclusive with 'backdoor_port'
# in that only one should be provided. If both are provided then the existence
# of this option overrides the usage of that option. Inside the path {pid} will
# be replaced with the PID of the current process. (string value)
#backdoor_socket = <None>

# Enables or disables logging values of all registered options when starting a
# service (at DEBUG level). (boolean value)
#log_options = true

# Specify a timeout after which a gracefully shut down server will exit. A
# zero value means endless wait. (integer value)
#graceful_shutdown_timeout = 60


[compute]

#
# From ceilometer
#

# Ceilometer offers many methods to discover the instances running on a
# compute node:
# * naive: poll nova to get all instances
# * workload_partitioning: poll nova to get instances of the compute node
# * libvirt_metadata: get instances from libvirt metadata but without
#   instance metadata (recommended for the Gnocchi backend) (string value)
# Possible values:
# naive - <No description provided>
# workload_partitioning - <No description provided>
# libvirt_metadata - <No description provided>
#instance_discovery_method = libvirt_metadata

# New instances will be discovered periodically based on this option (in
# seconds). By default, the agent discovers instances according to the
# pipeline polling interval. If this option is greater than 0, the instance
# list to poll will be updated based on this option's interval. Measurements
# relating to the instances will match the intervals defined in the pipeline.
# This option is only used for agent polling of the Nova API, so it will work
# only when 'instance_discovery_method' is set to 'naive'. (integer value)
# Minimum value: 0
#resource_update_interval = 0

# The expiry, in seconds, after which the instance resource cache is fully
# refreshed. Because an instance may be migrated to another host, the legacy
# instance information in the local cache needs to be cleaned up by fully
# refreshing the cache. The minimum should be the value of
# resource_update_interval. This option is only used for agent polling of the
# Nova API, so it will work only when 'instance_discovery_method' is set to
# 'naive'. (integer value)
# Minimum value: 0
#resource_cache_expiry = 3600
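
# Example (hypothetical values): poll the Nova API for new instances every
# 5 minutes and fully refresh the cache every two hours; these options only
# take effect with the 'naive' discovery method.
#instance_discovery_method = naive
#resource_update_interval = 300
#resource_cache_expiry = 7200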


[coordination]

#
# From ceilometer
#

# The backend URL to use for distributed coordination. If left empty, per-
# deployment central agent and per-host compute agent won't do workload
# partitioning and will only function correctly if a single instance of that
# service is running. (string value)
#backend_url = <None>

# Number of seconds between checks to see if group membership has changed
# (floating point value)
#check_watchers = 10.0
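
# Example (hypothetical backend): enable workload partitioning by pointing
# all agents at a shared tooz coordination backend such as Redis, via
# backend_url.
#backend_url = redis://controller.example.org:6379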


[event]

#
# From ceilometer
#

# Configuration file for event definitions. (string value)
#definitions_cfg_file = event_definitions.yaml

# Drop notifications if no event definition matches. (Otherwise, we convert
# them with just the default traits) (boolean value)
#drop_unmatched_notifications = false

# Store the raw notification for select priority levels (info and/or error). By
# default, raw details are not captured. (multi valued)
#store_raw =


[hardware]

#
# From ceilometer
#

# URL scheme to use for hardware nodes. (string value)
#url_scheme = snmp://

# SNMPd user name of all nodes running in the cloud. (string value)
#readonly_user_name = ro_snmp_user

# SNMPd v3 authentication password of all the nodes running in the cloud.
# (string value)
#readonly_user_password = password

# SNMPd v3 authentication algorithm of all the nodes running in the cloud
# (string value)
# Possible values:
# md5 - <No description provided>
# sha - <No description provided>
#readonly_user_auth_proto = <None>

# SNMPd v3 encryption algorithm of all the nodes running in the cloud (string
# value)
# Possible values:
# des - <No description provided>
# aes128 - <No description provided>
# 3des - <No description provided>
# aes192 - <No description provided>
# aes256 - <No description provided>
#readonly_user_priv_proto = <None>

# SNMPd v3 encryption password of all the nodes running in the cloud. (string
# value)
#readonly_user_priv_password = <None>

# Name of the control plane TripleO network. (string value)
#tripleo_network_name = ctlplane

# Configuration file for defining hardware snmp meters. (string value)
#meter_definitions_file = snmp.yaml
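
# Example (hypothetical credentials): poll bare-metal nodes over SNMPv3
# using SHA authentication and AES-128 encryption.
#readonly_user_name = ro_snmp_user
#readonly_user_password = SNMP_AUTH_PASS
#readonly_user_auth_proto = sha
#readonly_user_priv_proto = aes128
#readonly_user_priv_password = SNMP_PRIV_PASS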


[ipmi]

#
# From ceilometer
#

# Number of retries upon Intel Node Manager initialization failure (integer
# value)
#node_manager_init_retry = 3

# Tolerance of IPMI/NM polling failures before disabling this pollster. A
# negative value indicates retrying forever. (integer value)
#polling_retry = 3


[meter]

#
# From ceilometer
#

# List of directories to find files defining meter notifications. (multi valued)
#meter_definitions_dirs = /etc/ceilometer/meters.d


[monasca]

#
# From ceilometer
#

# Version of Monasca client to use while publishing. (string value)
#clientapi_version = 2_0

# Enable paging through the Monasca API result set. (boolean value)
#enable_api_pagination = false

# Monasca static and dynamic field mappings (string value)
#monasca_mappings = /etc/ceilometer/monasca_field_definitions.yaml

# The name of the control plane (string value)
#control_plane = None

# The name of the cluster (string value)
#cluster = None

# The name of the cloud (string value)
#cloud_name = None

# Indicates whether samples are published in a batch. (boolean value)
#batch_mode = true

# Maximum number of samples in a batch. (integer value)
#batch_count = 1000

# Maximum time interval (seconds) after which samples are published in a batch.
# (integer value)
#batch_timeout = 15

# Frequency of checking if batch criteria is met. (integer value)
#batch_polling_interval = 5

# Indicates whether the publisher retries publishing a sample in case of
# failure. Only a few error cases are queued for a retry. (boolean value)
#retry_on_failure = false

# Maximum number of retry attempts on a publishing failure to Monasca API.
# (integer value)
#batch_max_retries = 3

# When turned on, archives metrics to the file system when publishing to
# Monasca fails or the metric publish exhausts its retry attempts. (boolean
# value)
#archive_on_failure = false

# File of metrics that failed to publish to Monasca. These include metrics that
# failed to publish on first attempt and failed metrics that maxed out their
# retries. (string value)
#archive_path = mon_pub_failures.txt

# Maximum number of retry attempts when connecting to the Monasca API.
# (integer value)
#client_max_retries = 3

# Frequency of retry attempts when connecting to the Monasca API. (integer value)
#client_retry_interval = 60


[notification]

#
# From ceilometer
#

# Acknowledge message when event persistence fails. (boolean value)
#ack_on_event_error = true

# Messaging URLs to listen for notifications. Example:
# rabbit://user:pass@host1:port1[,user:pass@hostN:portN]/virtual_host
# (DEFAULT/transport_url is used if empty). This is useful when you have
# dedicated messaging nodes for each service, for example, all nova
# notifications go to rabbit-nova:5672, while all cinder notifications go to
# rabbit-cinder:5672. (multi valued)
#messaging_urls =
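
# Example (hypothetical hosts and credentials): listen on dedicated
# per-service brokers as described above.
#messaging_urls = rabbit://ceilometer:RABBIT_PASS@rabbit-nova:5672/
#messaging_urls = rabbit://ceilometer:RABBIT_PASS@rabbit-cinder:5672/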

# Number of notification messages to wait before publishing them. Batching is
# advised when transformations are applied in pipeline. (integer value)
# Minimum value: 1
#batch_size = 100

# Number of workers for the notification service. The default value is 1.
# (integer value)
# Minimum value: 1
# Deprecated group/name - [DEFAULT]/notification_workers
#workers = 1

# Select which pipeline managers to enable in order to generate data. (multi valued)
#pipelines = meter
#pipelines = event

# Exchange names to listen to for notifications. (multi valued)
# Deprecated group/name - [DEFAULT]/http_control_exchanges
#notification_control_exchanges = nova
#notification_control_exchanges = glance
#notification_control_exchanges = neutron
#notification_control_exchanges = cinder
#notification_control_exchanges = heat
#notification_control_exchanges = keystone
#notification_control_exchanges = sahara
#notification_control_exchanges = trove
#notification_control_exchanges = zaqar
#notification_control_exchanges = swift
#notification_control_exchanges = ceilometer
#notification_control_exchanges = magnum
#notification_control_exchanges = dns
#notification_control_exchanges = ironic
#notification_control_exchanges = aodh


[oslo_concurrency]

#
# From oslo.concurrency
#

# Enables or disables inter-process locks. (boolean value)
#disable_process_locking = false

# Directory to use for lock files.  For security, the specified directory
# should only be writable by the user running the processes that need locking.
# Defaults to environment variable OSLO_LOCK_PATH. If external locks are used,
# a lock path must be set. (string value)
#lock_path = <None>


[oslo_messaging_amqp]

#
# From oslo.messaging
#

# Name for the AMQP container. Must be globally unique. Defaults to a
# generated UUID. (string value)
#container_name = <None>

# Timeout for inactive connections (in seconds) (integer value)
#idle_timeout = 0

# Debug: dump AMQP frames to stdout (boolean value)
#trace = false

# Attempt to connect via SSL. If no other ssl-related parameters are given, it
# will use the system's CA-bundle to verify the server's certificate. (boolean
# value)
#ssl = false

# CA certificate PEM file used to verify the server's certificate (string
# value)
#ssl_ca_file =

# Self-identifying certificate PEM file for client authentication (string
# value)
#ssl_cert_file =

# Private key PEM file used to sign ssl_cert_file certificate (optional)
# (string value)
#ssl_key_file =

# Password for decrypting ssl_key_file (if encrypted) (string value)
#ssl_key_password = <None>

# By default SSL checks that the name in the server's certificate matches the
# hostname in the transport_url. In some configurations it may be preferable to
# use the virtual hostname instead, for example if the server uses the Server
# Name Indication TLS extension (rfc6066) to provide a certificate per virtual
# host. Set ssl_verify_vhost to True if the server's SSL certificate uses the
# virtual host name instead of the DNS name. (boolean value)
#ssl_verify_vhost = false

# Space separated list of acceptable SASL mechanisms (string value)
#sasl_mechanisms =

# Path to directory that contains the SASL configuration (string value)
#sasl_config_dir =

# Name of configuration file (without .conf suffix) (string value)
#sasl_config_name =

# SASL realm to use if no realm present in username (string value)
#sasl_default_realm =

# Seconds to pause before attempting to re-connect. (integer value)
# Minimum value: 1
#connection_retry_interval = 1

# Increase the connection_retry_interval by this many seconds after each
# unsuccessful failover attempt. (integer value)
# Minimum value: 0
#connection_retry_backoff = 2

# Maximum limit for connection_retry_interval + connection_retry_backoff
# (integer value)
# Minimum value: 1
#connection_retry_interval_max = 30

# Time to pause between re-connecting an AMQP 1.0 link that failed due to a
# recoverable error. (integer value)
# Minimum value: 1
#link_retry_delay = 10

# The maximum number of attempts to re-send a reply message which failed due to
# a recoverable error. (integer value)
# Minimum value: -1
#default_reply_retry = 0

# The deadline for an rpc reply message delivery. (integer value)
# Minimum value: 5
#default_reply_timeout = 30

# The deadline for an rpc cast or call message delivery. Only used when caller
# does not provide a timeout expiry. (integer value)
# Minimum value: 5
#default_send_timeout = 30

# The deadline for a sent notification message delivery. Only used when caller
# does not provide a timeout expiry. (integer value)
# Minimum value: 5
#default_notify_timeout = 30

# The duration to schedule a purge of idle sender links. Detach link after
# expiry. (integer value)
# Minimum value: 1
#default_sender_link_timeout = 600

# Indicates the addressing mode used by the driver.
# Permitted values:
# 'legacy'   - use legacy non-routable addressing
# 'routable' - use routable addresses
# 'dynamic'  - use legacy addresses if the message bus does not support routing
# otherwise use routable addressing (string value)
#addressing_mode = dynamic

# Enable virtual host support for those message buses that do not natively
# support virtual hosting (such as qpidd). When set to true the virtual host
# name will be added to all message bus addresses, effectively creating a
# private 'subnet' per virtual host. Set to False if the message bus supports
# virtual hosting using the 'hostname' field in the AMQP 1.0 Open performative
# as the name of the virtual host. (boolean value)
#pseudo_vhost = true

# address prefix used when sending to a specific server (string value)
#server_request_prefix = exclusive

# address prefix used when broadcasting to all servers (string value)
#broadcast_prefix = broadcast

# address prefix when sending to any server in group (string value)
#group_request_prefix = unicast

# Address prefix for all generated RPC addresses (string value)
#rpc_address_prefix = openstack.org/om/rpc

# Address prefix for all generated Notification addresses (string value)
#notify_address_prefix = openstack.org/om/notify

# Appended to the address prefix when sending a fanout message. Used by the
# message bus to identify fanout messages. (string value)
#multicast_address = multicast

# Appended to the address prefix when sending to a particular RPC/Notification
# server. Used by the message bus to identify messages sent to a single
# destination. (string value)
#unicast_address = unicast

# Appended to the address prefix when sending to a group of consumers. Used by
# the message bus to identify messages that should be delivered in a round-
# robin fashion across consumers. (string value)
#anycast_address = anycast

# Exchange name used in notification addresses.
# Exchange name resolution precedence:
# Target.exchange if set
# else default_notification_exchange if set
# else control_exchange if set
# else 'notify' (string value)
#default_notification_exchange = <None>

# Exchange name used in RPC addresses.
# Exchange name resolution precedence:
# Target.exchange if set
# else default_rpc_exchange if set
# else control_exchange if set
# else 'rpc' (string value)
#default_rpc_exchange = <None>

# Window size for incoming RPC Reply messages. (integer value)
# Minimum value: 1
#reply_link_credit = 200

# Window size for incoming RPC Request messages (integer value)
# Minimum value: 1
#rpc_server_credit = 100

# Window size for incoming Notification messages (integer value)
# Minimum value: 1
#notify_server_credit = 100

# Send messages of this type pre-settled.
# Pre-settled messages will not receive acknowledgement
# from the peer. Note well: pre-settled messages may be
# silently discarded if the delivery fails.
# Permitted values:
# 'rpc-call' - send RPC Calls pre-settled
# 'rpc-reply' - send RPC Replies pre-settled
# 'rpc-cast' - Send RPC Casts pre-settled
# 'notify'   - Send Notifications pre-settled
#  (multi valued)
#pre_settled = rpc-cast
#pre_settled = rpc-reply


[oslo_messaging_kafka]

#
# From oslo.messaging
#

# Max fetch bytes of Kafka consumer (integer value)
#kafka_max_fetch_bytes = 1048576

# Default timeout(s) for Kafka consumers (floating point value)
#kafka_consumer_timeout = 1.0

# DEPRECATED: Pool Size for Kafka Consumers (integer value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Driver no longer uses connection pool.
#pool_size = 10

# DEPRECATED: The pool size limit for connections expiration policy (integer
# value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Driver no longer uses connection pool.
#conn_pool_min_size = 2

# DEPRECATED: The time-to-live in sec of idle connections in the pool (integer
# value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Driver no longer uses connection pool.
#conn_pool_ttl = 1200

# Group id for Kafka consumer. Consumers in one group will coordinate message
# consumption (string value)
#consumer_group = oslo_messaging_consumer

# Upper bound on the delay for KafkaProducer batching in seconds (floating
# point value)
#producer_batch_timeout = 0.0

# Size of batch for the producer async send (integer value)
#producer_batch_size = 16384

# The compression codec for all data generated by the producer. If not set,
# compression will not be used. Note that the allowed values of this depend on
# the kafka version (string value)
# Possible values:
# none - <No description provided>
# gzip - <No description provided>
# snappy - <No description provided>
# lz4 - <No description provided>
# zstd - <No description provided>
#compression_codec = none

# Enable asynchronous consumer commits (boolean value)
#enable_auto_commit = false

# The maximum number of records returned in a poll call (integer value)
#max_poll_records = 500

# Protocol used to communicate with brokers (string value)
# Possible values:
# PLAINTEXT - <No description provided>
# SASL_PLAINTEXT - <No description provided>
# SSL - <No description provided>
# SASL_SSL - <No description provided>
#security_protocol = PLAINTEXT

# Mechanism when security protocol is SASL (string value)
#sasl_mechanism = PLAIN

# CA certificate PEM file used to verify the server certificate (string value)
#ssl_cafile =

# Client certificate PEM file used for authentication. (string value)
#ssl_client_cert_file =

# Client key PEM file used for authentication. (string value)
#ssl_client_key_file =

# Client key password file used for authentication. (string value)
#ssl_client_key_password =


[oslo_messaging_notifications]

#
# From oslo.messaging
#

# The driver(s) to handle sending notifications. Possible values are
# messaging, messagingv2, routing, log, test, noop. (multi valued)
# Deprecated group/name - [DEFAULT]/notification_driver
#driver =

# A URL representing the messaging driver to use for notifications. If not set,
# we fall back to the same configuration used for RPC. (string value)
# Deprecated group/name - [DEFAULT]/notification_transport_url
#transport_url = <None>

# AMQP topic used for OpenStack notifications. (list value)
# Deprecated group/name - [rpc_notifier2]/topics
# Deprecated group/name - [DEFAULT]/notification_topics
#topics = notifications

# The maximum number of attempts to re-send a notification message which failed
# to be delivered due to a recoverable error. 0 - No retry, -1 - indefinite
# (integer value)
#retry = -1


[oslo_messaging_rabbit]

#
# From oslo.messaging
#

# Use durable queues in AMQP. If rabbit_quorum_queue is enabled, queues will be
# durable and this value will be ignored. (boolean value)
#amqp_durable_queues = false

# Auto-delete queues in AMQP. (boolean value)
#amqp_auto_delete = false

# Connect over SSL. (boolean value)
# Deprecated group/name - [oslo_messaging_rabbit]/rabbit_use_ssl
#ssl = false

# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
# distributions. (string value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_version
#ssl_version =

# SSL key file (valid only if SSL enabled). (string value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_keyfile
#ssl_key_file =

# SSL cert file (valid only if SSL enabled). (string value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_certfile
#ssl_cert_file =

# SSL certification authority file (valid only if SSL enabled). (string value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_ca_certs
#ssl_ca_file =

# Global toggle for enforcing the OpenSSL FIPS mode. This feature requires
# Python support. This is available in Python 3.9 in all environments and may
# have been backported to older Python versions on select environments. If the
# Python executable used does not support OpenSSL FIPS mode, an exception will
# be raised. (boolean value)
#ssl_enforce_fips_mode = false

# Run the health check heartbeat thread through a native python thread by
# default. If this option is equal to False then the health check heartbeat
# will inherit the execution model from the parent process. For example if the
# parent process has monkey patched the stdlib by using eventlet/greenlet then
# the heartbeat will be run through a green thread. This option should be set
# to True only for the wsgi services. (boolean value)
#heartbeat_in_pthread = false

# How long to wait (in seconds) before reconnecting in response to an AMQP
# consumer cancel notification. (floating point value)
# Minimum value: 0.0
# Maximum value: 4.5
#kombu_reconnect_delay = 1.0

# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will not
# be used. This option may not be available in future versions. (string value)
#kombu_compression = <None>

# How long to wait for a missing client before abandoning sending it its
# replies. This value should not be longer than rpc_response_timeout.
# (integer value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout
#kombu_missing_consumer_retry_timeout = 60

# Determines how the next RabbitMQ node is chosen in case the one we are
# currently connected to becomes unavailable. Takes effect only if more than
# one RabbitMQ node is provided in config. (string value)
# Possible values:
# round-robin - <No description provided>
# shuffle - <No description provided>
#kombu_failover_strategy = round-robin

# The RabbitMQ login method. (string value)
# Possible values:
# PLAIN - <No description provided>
# AMQPLAIN - <No description provided>
# EXTERNAL - <No description provided>
# RABBIT-CR-DEMO - <No description provided>
#rabbit_login_method = AMQPLAIN

# How frequently to retry connecting with RabbitMQ. (integer value)
#rabbit_retry_interval = 1

# How long to backoff for between retries when connecting to RabbitMQ. (integer
# value)
#rabbit_retry_backoff = 2

# Maximum interval of RabbitMQ connection retries. Default is 30 seconds.
# (integer value)
#rabbit_interval_max = 30

# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this
# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring
# is no longer controlled by the x-ha-policy argument when declaring a queue.
# If you just want to make sure that all queues (except those with auto-
# generated names) are mirrored across all nodes, run: "rabbitmqctl set_policy
# HA '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value)
#rabbit_ha_queues = false

# Use quorum queues in RabbitMQ (x-queue-type: quorum). The quorum queue is a
# modern queue type for RabbitMQ implementing a durable, replicated FIFO queue
# based on the Raft consensus algorithm. It is available as of RabbitMQ 3.8.0.
# If set, this option conflicts with HA queues (``rabbit_ha_queues``), also
# known as mirrored queues; in other words, HA queues should be disabled.
# Quorum queues are durable by default, so the amqp_durable_queues option is
# ignored when this option is enabled. (boolean value)
#rabbit_quorum_queue = false

# Each time a message is redelivered to a consumer, a counter is incremented.
# Once the redelivery count exceeds the delivery limit, the message gets
# dropped or dead-lettered (if a DLX exchange has been configured). Used only
# when rabbit_quorum_queue is enabled. The default of 0 means no limit is set.
# (integer value)
#rabbit_quorum_delivery_limit = 0

# By default all messages are maintained in memory; if a quorum queue grows in
# length it can put memory pressure on a cluster. This option can limit the
# number of messages in the quorum queue. Used only when rabbit_quorum_queue
# is enabled. The default of 0 means no limit is set. (integer value)
# Deprecated group/name - [oslo_messaging_rabbit]/rabbit_quroum_max_memory_length
#rabbit_quorum_max_memory_length = 0

# By default all messages are maintained in memory; if a quorum queue grows in
# length it can put memory pressure on a cluster. This option can limit the
# number of memory bytes used by the quorum queue. Used only when
# rabbit_quorum_queue is enabled. The default of 0 means no limit is set.
# (integer value)
# Deprecated group/name - [oslo_messaging_rabbit]/rabbit_quroum_max_memory_bytes
#rabbit_quorum_max_memory_bytes = 0

# Positive integer representing duration in seconds for queue TTL (x-expires).
# Queues which are unused for the duration of the TTL are automatically
# deleted. The parameter affects only reply and fanout queues. (integer value)
# Minimum value: 1
#rabbit_transient_queues_ttl = 1800

# Specifies the number of messages to prefetch. Setting to zero allows
# unlimited messages. (integer value)
#rabbit_qos_prefetch_count = 0

# Number of seconds after which the Rabbit broker is considered down if
# heartbeat's keep-alive fails (0 disables heartbeat). (integer value)
#heartbeat_timeout_threshold = 60

# How many times during the heartbeat_timeout_threshold to check the
# heartbeat. (integer value)
#heartbeat_rate = 2

# DEPRECATED: (DEPRECATED) Enable/disable the RabbitMQ mandatory flag for
# direct send. The direct send is used for replies, so the
# MessageUndeliverable exception is raised when the client queue does not
# exist. The MessageUndeliverable exception is used to loop for a timeout to
# give the sender a chance to recover. This flag is deprecated and it will no
# longer be possible to deactivate this functionality. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Mandatory flag no longer deactivable.
#direct_mandatory_flag = true

# Enable the x-cancel-on-ha-failover flag so that the RabbitMQ server will
# cancel and notify consumers when a queue is down. (boolean value)
#enable_cancel_on_failover = false


[polling]

#
# From ceilometer
#

# Configuration file for polling definition. (string value)
#cfg_file = polling.yaml

# Workload partitioning group prefix. Use only if you want to run multiple
# polling agents with different config files. Each sub-group of the agent
# pool that shares a partitioning_group_prefix should load a disjoint subset
# of pollsters. (string value)
#partitioning_group_prefix = <None>

# Batch size of samples to send to the notification agent. Set to 0 to
# disable batching. (integer value)
#batch_size = 50

# List of directories with YAML files used to create pollsters. (multi valued)
#pollsters_definitions_dirs = /etc/ceilometer/pollsters.d


[publisher]

#
# From ceilometer
#

# Secret value for signing messages. Leave the value empty if signing is not
# required, to avoid computational overhead. (string value)
# Deprecated group/name - [DEFAULT]/metering_secret
# Deprecated group/name - [publisher_rpc]/metering_secret
# Deprecated group/name - [publisher]/metering_secret
#telemetry_secret = change this for valid signing
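
# Example: set a site-specific secret (hypothetical value) so samples are
# signed, or leave the value empty to disable signing:
#telemetry_secret = SITE_SPECIFIC_SECRET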


[publisher_notifier]

#
# From ceilometer
#

# DEPRECATED: The topic that ceilometer uses for metering notifications.
# (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#metering_topic = metering

# DEPRECATED: The topic that ceilometer uses for event notifications. (string
# value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#event_topic = event

# The driver that ceilometer uses for metering notifications. (string value)
# Deprecated group/name - [publisher_notifier]/metering_driver
#telemetry_driver = messagingv2


[rgw_admin_credentials]

#
# From ceilometer
#

# Access key for Radosgw Admin. (string value)
#access_key = <None>

# Secret key for Radosgw Admin. (string value)
#secret_key = <None>


[rgw_client]

#
# From ceilometer
#

# Whether RGW uses implicit tenants or not. (boolean value)
#implicit_tenants = false


[service_credentials]

#
# From ceilometer-auth
#

# Authentication type to load (string value)
# Deprecated group/name - [service_credentials]/auth_plugin
#auth_type = <None>

# Config Section from which to load plugin specific options (string value)
#auth_section = <None>

# Authentication URL (string value)
#auth_url = <None>

# Scope for system operations (string value)
#system_scope = <None>

# Domain ID to scope to (string value)
#domain_id = <None>

# Domain name to scope to (string value)
#domain_name = <None>

# Project ID to scope to (string value)
# Deprecated group/name - [service_credentials]/tenant_id
#project_id = <None>

# Project name to scope to (string value)
# Deprecated group/name - [service_credentials]/tenant_name
#project_name = <None>

# Domain ID containing project (string value)
#project_domain_id = <None>

# Domain name containing project (string value)
#project_domain_name = <None>

# ID of the trust to use as a trustee (string value)
#trust_id = <None>

# Optional domain ID to use with v3 and v2 parameters. It will be used for both
# the user and project domain in v3 and ignored in v2 authentication. (string
# value)
#default_domain_id = <None>

# Optional domain name to use with v3 API and v2 parameters. It will be used
# for both the user and project domain in v3 and ignored in v2 authentication.
# (string value)
#default_domain_name = <None>

# User id (string value)
#user_id = <None>

# Username (string value)
# Deprecated group/name - [service_credentials]/user_name
#username = <None>

# User's domain id (string value)
#user_domain_id = <None>

# User's domain name (string value)
#user_domain_name = <None>

# User's password (string value)
#password = <None>

# Region name to use for OpenStack service endpoints. (string value)
# Deprecated group/name - [DEFAULT]/os_region_name
#region_name = <None>

# Type of endpoint in Identity service catalog to use for communication with
# OpenStack services. (string value)
# Possible values:
# public - <No description provided>
# internal - <No description provided>
# admin - <No description provided>
# auth - <No description provided>
# publicURL - <No description provided>
# internalURL - <No description provided>
# adminURL - <No description provided>
# Deprecated group/name - [service_credentials]/os_endpoint_type
#interface = public
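
# Example (hypothetical endpoint and credentials): authenticate against
# Keystone with the password plugin, as commonly shown in install guides.
#auth_type = password
#auth_url = http://controller.example.org:5000/v3
#project_domain_name = Default
#user_domain_name = Default
#project_name = service
#username = ceilometer
#password = CEILOMETER_PASS
#interface = internalURL
#region_name = RegionOne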


[service_types]

#
# From ceilometer
#

# Glance service type. (string value)
#glance = image

# Neutron service type. (string value)
#neutron = network

# Neutron load balancer version. (string value)
# Possible values:
# v1 - <No description provided>
# v2 - <No description provided>
#neutron_lbaas_version = v2

# Nova service type. (string value)
#nova = compute

# Radosgw service type. (string value)
#radosgw = <None>

# Swift service type. (string value)
#swift = object-store

# Cinder service type. (string value)
# Deprecated group/name - [service_types]/cinderv2
#cinder = volumev3


[vmware]

#
# From ceilometer
#

# IP address of the VMware vSphere host. (host address value)
#host_ip = 127.0.0.1

# Port of the VMware vSphere host. (port value)
# Minimum value: 0
# Maximum value: 65535
#host_port = 443

# Username of VMware vSphere. (string value)
#host_username =

# Password of VMware vSphere. (string value)
#host_password =

# CA bundle file to use in verifying the vCenter server certificate. (string
# value)
#ca_file = <None>

# If true, the vCenter server certificate is not verified. If false, then the
# default CA truststore is used for verification. This option is ignored if
# "ca_file" is set. (boolean value)
#insecure = false

# Number of times a VMware vSphere API may be retried. (integer value)
#api_retry_count = 10

# Sleep time in seconds for polling an ongoing async task. (floating point
# value)
#task_poll_interval = 0.5

# Optional vim service WSDL location, e.g. http://<server>/vimService.wsdl.
# Optional override of the default location for bug workarounds. (string
# value)
#wsdl_location = <None>


[xenapi]

#
# From ceilometer
#

# URL for connection to XenServer/Xen Cloud Platform. (string value)
#connection_url = <None>

# Username for connection to XenServer/Xen Cloud Platform. (string value)
#connection_username = root

# Password for connection to XenServer/Xen Cloud Platform. (string value)
#connection_password = <None>