[DEFAULT] # # From cotyledon # # Enables or disables logging values of all registered options when starting a # service (at DEBUG level) (boolean value) # Note: This option can be changed without restarting. #log_options = true # Specify a timeout after which a gracefully shutdown server will exit. Zero # value means endless wait (integer value) # Note: This option can be changed without restarting. #graceful_shutdown_timeout = 60 # # From mistral.config # # Specifies which mistral server to start by the launch script (list value) #server = all # Logger name for pretty workflow trace output (string value) #workflow_trace_log_name = workflow_trace # Authentication type (valid options: keystone, keycloak-oidc) (string value) #auth_type = keystone # The name of the scheduler implementation used in the system (string value) # Possible values: # legacy - # default - #scheduler_type = legacy # The JavaScript implementation to be used by the std.javascript action to # evaluate scripts (string value) # Possible values: # pyv8 - # v8eval - # py_mini_racer - #js_implementation = pyv8 # Window of seconds to determine whether the given token is about to expire # (integer value) #expiration_token_duration = 30 # # From oslo.log # # If set to true, the logging level will be set to DEBUG instead of the default # INFO level (boolean value) # Note: This option can be changed without restarting. #debug = false # The name of a logging configuration file. This file is appended to any # existing logging configuration files. For details about logging configuration # files, see the Python logging module documentation. Note that when logging # configuration files are used then all logging configuration is set in the # configuration file and other logging configuration options are ignored (for # example, log-date-format) (string value) # Note: This option can be changed without restarting. 
# Deprecated group/name - [DEFAULT]/log_config #log_config_append = # Defines the format string for %%(asctime)s in log records. Default: # %(default)s . This option is ignored if log_config_append is set (string # value) #log_date_format = %Y-%m-%d %H:%M:%S # (Optional) Name of log file to send logging output to. If no default is set, # logging will go to stderr as defined by use_stderr. This option is ignored if # log_config_append is set (string value) # Deprecated group/name - [DEFAULT]/logfile #log_file = # (Optional) The base directory used for relative log_file paths. This option # is ignored if log_config_append is set (string value) # Deprecated group/name - [DEFAULT]/logdir #log_dir = # Use syslog for logging. Existing syslog format is DEPRECATED and will be # changed later to honor RFC5424. This option is ignored if log_config_append is # set (boolean value) #use_syslog = false # Enable journald for logging. If running in a systemd environment you may wish # to enable journal support. Doing so will use the journal native protocol which # includes structured metadata in addition to log messages.This option is # ignored if log_config_append is set (boolean value) #use_journal = false # Syslog facility to receive log lines. This option is ignored if # log_config_append is set (string value) #syslog_log_facility = LOG_USER # Use JSON formatting for logging. This option is ignored if log_config_append # is set (boolean value) #use_json = false # Log output to standard error. This option is ignored if log_config_append is # set (boolean value) #use_stderr = false # (Optional) Set the 'color' key according to log levels. This option takes # effect only when logging to stderr or stdout is used. This option is ignored # if log_config_append is set (boolean value) #log_color = false # The amount of time before the log files are rotated. 
This option is ignored # unless log_rotation_type is set to "interval" (integer value) #log_rotate_interval = 1 # Rotation interval type. The time of the last file change (or the time when the # service was started) is used when scheduling the next rotation (string value) # Possible values: # Seconds - # Minutes - # Hours - # Days - # Weekday - # Midnight - #log_rotate_interval_type = days # Maximum number of rotated log files (integer value) #max_logfile_count = 30 # Log file maximum size in MB. This option is ignored if "log_rotation_type" is # not set to "size" (integer value) #max_logfile_size_mb = 200 # Log rotation type (string value) # Possible values: # interval - Rotate logs at predefined time intervals. # size - Rotate logs once they reach a predefined size. # none - Do not rotate log files. #log_rotation_type = none # Format string to use for log messages with context. Used by # oslo_log.formatters.ContextFormatter (string value) #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s # Format string to use for log messages when context is undefined. Used by # oslo_log.formatters.ContextFormatter (string value) #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s # Additional data to append to log message when logging level for the message is # DEBUG. Used by oslo_log.formatters.ContextFormatter (string value) #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d # Prefix each line of exception output with this format. Used by # oslo_log.formatters.ContextFormatter (string value) #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s # Defines the format string for %(user_identity)s that is used in # logging_context_format_string. 
Used by oslo_log.formatters.ContextFormatter # (string value) #logging_user_identity_format = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s # List of package logging levels in logger=LEVEL pairs. This option is ignored # if log_config_append is set (list value) #default_log_levels = amqp=WARN,boto=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,oslo_policy=INFO,dogpile.core.dogpile=INFO # Enables or disables publication of error events (boolean value) #publish_errors = false # The format for an instance that is passed with the log message (string value) #instance_format = "[instance: %(uuid)s] " # The format for an instance UUID that is passed with the log message (string # value) #instance_uuid_format = "[instance: %(uuid)s] " # Interval, number of seconds, of log rate limiting (integer value) #rate_limit_interval = 0 # Maximum number of logged messages per rate_limit_interval (integer value) #rate_limit_burst = 0 # Log level name used by rate limiting. Logs with level greater or equal to # rate_limit_except_level are not filtered. 
An empty string means that all # levels are filtered (string value) # Possible values: # CRITICAL - # ERROR - # INFO - # WARNING - # DEBUG - # '' - #rate_limit_except_level = CRITICAL # Enables or disables fatal status of deprecations (boolean value) #fatal_deprecations = false # # From oslo.messaging # # Size of executor thread pool when executor is threading or eventlet (integer # value) # Deprecated group/name - [DEFAULT]/rpc_thread_pool_size #executor_thread_pool_size = 64 # Seconds to wait for a response from a call (integer value) #rpc_response_timeout = 60 # The network address and optional user credentials for connecting to the # messaging backend, in URL format. The expected format is. For more # information, refer to the documentation. (string value) #transport_url = rabbit:// # The default exchange under which topics are scoped. May be overridden by an # exchange name specified in the transport_url option (string value) #control_exchange = openstack # Add an endpoint to answer to ping calls. Endpoint is named # oslo_rpc_server_ping (boolean value) #rpc_ping_enabled = false [action_heartbeat] # # From mistral.config # # The maximum amount of missed heartbeats to be allowed. If set to 0 then this # feature is disabled. See check_interval for more details (integer value) # Minimum value: 0 #max_missed_heartbeats = 15 # How often (in seconds) action executions are checked. For example when # check_interval is 10, check action executions every 10 seconds. When the # checker runs it will transit all running action executions to error if the # last heartbeat received is older than 10 * max_missed_heartbeats seconds. If # set to 0 then this feature is disabled (integer value) # Minimum value: 0 #check_interval = 20 # The maximum number of action executions processed during one iteration of # action execution heartbeat checker. 
If set to 0 then there is no limit # (integer value) # Minimum value: 0 #batch_size = 10 # The first heartbeat is handled differently, to provide a grace period in case # there is no available executor to handle the action execution. For example # when first_heartbeat_timeout = 3600, wait 3600 seconds before closing the # action executions that never received a heartbeat (integer value) # Minimum value: 0 #first_heartbeat_timeout = 3600 [action_logging] # # From mistral.config # # If this value is set to True then HTTP action response body will be hidden in # logs (boolean value) #hide_response_body = false # If this value is set to True then HTTP action request body will be hidden in # logs (boolean value) #hide_request_body = false # List of sensitive headers that should be hidden in logs (list value) #sensitive_headers = [action_providers] # # From mistral.config # # Allowlist with action providers that is allowed to be loaded from the entry # point "mistral.action.providers", if empty all action providers will be # allowed unless denylist is set (list value) #allowlist = # Denylist with action providers that is not allowed to be loaded from the entry # point "mistral.action.providers", allowlist takes precedence, if empty all # action providers will be allowed (list value) #denylist = [api] # # From mistral.config # # Mistral API server host (host address value) #host = 0.0.0.0 # Mistral API server port (port value) # Minimum value: 0 # Maximum value: 65535 #port = 8989 # Enables the ability to delete action_execution which has no relationship with # workflows (boolean value) #allow_action_execution_deletion = false # Enable the integrated stand-alone API to service requests via HTTPS instead of # HTTP (boolean value) #enable_ssl_api = false # DEPRECATED: Number of workers for Mistral API service default is equal to the # number of CPUs available if that can be determined, else a default worker # count of 1 is returned (integer value) # This option is 
deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option has become useless and mistral-api will start only one # worker now. If you want more workers, consider starting mistral-wsgi-api with # a wsgi server instead. #api_workers = # Defines in what cases Mistral will be validating the syntax of workflow YAML # definitions. If 'enabled' is set the service will be validating the syntax but # only if it's not explicitly turned off in the API request. 'disabled' disables # validation for all API requests. 'mandatory' enables validation for all API # requests (string value) # Possible values: # enabled - # mandatory - # disabled - #validation_mode = mandatory # Enable API for exposing info json about current Mistral build (boolean value) #enable_info_endpoint = false # Specify the path to info json file which will be exposed via /info endpoint # (string value) #info_json_file_path = info.json [context_versioning] # # From mistral.config # # If this value is set to True then Mistral will use versioning of context to # improve results of context merging. This feature fixes some bugs with context # merging but also slows down Mistral performance (boolean value) #enabled = true # If this value is set to True then Mistral will use md5 hashing for version # keys to ensure these keys will be the same size. Disabling hashing could be # useful for debug purposes, but avoid this in production, because it leads to # excessive memory consumption (boolean value) #hash_version_keys = true [cors] # # From oslo.middleware.cors # # Indicate whether this resource may be shared with the domain received in the # requests "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing # slash. Example: https://horizon.example.com (list value) #allowed_origin = # Indicate that the actual request can include user credentials (boolean value) #allow_credentials = true # Indicate which headers are safe to expose to the API. 
Defaults to HTTP Simple # Headers (list value) #expose_headers = X-Auth-Token,X-Subject-Token,X-Service-Token,X-Project-Id,X-User-Name,X-Project-Name # Maximum cache age of CORS preflight requests (integer value) #max_age = 3600 # Indicate which methods can be used during the actual request (list value) #allow_methods = GET,PUT,POST,DELETE,PATCH # Indicate which header field names may be used during the actual request (list # value) #allow_headers = X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-Project-Id,X-User-Name,X-Project-Name [cron_trigger] # # From mistral.config # # If this value is set to False then the subsystem of cron triggers is disabled. # Disabling cron triggers increases system performance (boolean value) #enabled = true # This setting defines how frequently Mistral checks for cron triggers that need # execution. By default this is every second which can lead to high system load. # Increasing the number will reduce the load but also limit the minimum # frequency. 
For example, a cron trigger can be configured to run every second # but if the execution_interval is set to 60, it will only run once per minute # (integer value) # Minimum value: 1 #execution_interval = 1 [database] # # From oslo.db # # If True, SQLite uses synchronous mode (boolean value) #sqlite_synchronous = true # The back end to use for the database (string value) #backend = sqlalchemy # The SQLAlchemy connection string to use to connect to the database (string # value) #connection = # The SQLAlchemy connection string to use to connect to the slave database # (string value) #slave_connection = # The SQLAlchemy asyncio connection string to use to connect to the database # (string value) #asyncio_connection = # The SQLAlchemy asyncio connection string to use to connect to the slave # database (string value) #asyncio_slave_connection = # Whether or not to assume a reader context needs to guarantee it can read data # committed by a writer assuming replication lag is present; defaults to True. # When False, a reader context works the same as async_reader and will select # the slave database if present. When using a galera cluster, this can be set to # False only if you set mysql_wsrep_sync_wait to 1 (this will guarantee that the # reader will wait until writesets are committed).Note that this may incur a # performance degradation within the galera cluster. Note also that this # parameter has no effect if you do not set any slave_connection (boolean value) #synchronous_reader = true # The SQL mode to be used for MySQL sessions. This option, including the # default, overrides any server-set SQL mode. To use whatever SQL mode is set by # the server configuration, set this to no value. Example: mysql_sql_mode= # (string value) #mysql_sql_mode = TRADITIONAL # For Galera only, configure wsrep_sync_wait causality checks on new # connections. 
Default is None, meaning don't configure any setting (integer # value) #mysql_wsrep_sync_wait = # Connections which have been present in the connection pool longer than this # number of seconds will be replaced with a new one the next time they are # checked out from the pool (integer value) #connection_recycle_time = 3600 # Maximum number of SQL connections to keep open in a pool. Setting a value of 0 # indicates no limit (integer value) #max_pool_size = 5 # Maximum number of database connection retries during startup. Set to -1 to # specify an infinite retry count (integer value) #max_retries = 10 # Interval between retries of opening a SQL connection (integer value) #retry_interval = 10 # If set, use this value for max_overflow with SQLAlchemy (integer value) #max_overflow = 50 # Verbosity of SQL debugging information: 0=None, 100=Everything (integer value) # Minimum value: 0 # Maximum value: 100 #connection_debug = 0 # Add Python stack traces to SQL as comment strings (boolean value) #connection_trace = false # If set, use this value for pool_timeout with SQLAlchemy (integer value) #pool_timeout = # Enable the experimental use of database reconnect on connection lost (boolean # value) #use_db_reconnect = false # Seconds between retries of a database transaction (integer value) #db_retry_interval = 1 # If True, increases the interval between retries of a database operation up to # db_max_retry_interval (boolean value) #db_inc_retry_interval = true # If db_inc_retry_interval is set, the maximum seconds between retries of a # database operation (integer value) #db_max_retry_interval = 10 # Maximum retries in case of connection error or deadlock error before error is # raised. 
Set to -1 to specify an infinite retry count (integer value) #db_max_retries = 20 # Optional URL parameters to append onto the connection URL at connect time; # specify as param1=value1&param2=value2& (string value) #connection_parameters = [engine] # # From mistral.config # # Mistral engine plugin (string value) #engine = default # Name of the engine node. This can be an opaque identifier. It is not # necessarily a hostname, FQDN, or IP address (host address value) #host = 0.0.0.0 # The message topic that the engine listens on (string value) #topic = mistral_engine # The version of the engine (string value) #version = 1.0 # The default maximum size in KB of large text fields of runtime execution # objects. Use -1 for no limit (integer value) #execution_field_size_limit_kb = 1024 # A number of seconds since the last update of a task execution in RUNNING state # after which Mistral will start checking its integrity, meaning that if all # associated actions/workflows are finished its state will be restored # automatically. If this property is set to a negative value Mistral will never # be doing this check (integer value) #execution_integrity_check_delay = 20 # A number of task executions in RUNNING state that the execution integrity # checker can process in a single iteration (integer value) # Minimum value: 1 #execution_integrity_check_batch_size = 5 # A number of seconds that indicates how long action definitions should be # stored in the local cache (integer value) #action_definition_cache_time = 60 # Enables starting subworkflows via RPC. Use "False" to start subworkflow within # the same engine instance. Use "True" to start subworkflow via RPC to improve # load balancing in case of several engine instances (boolean value) #start_subworkflows_via_rpc = false # Merge strategy of data inside workflow execution. 
(replace, merge) (string # value) # Possible values: # replace - # merge - #merge_strategy = replace [event_engine] # # From mistral.config # # Name of the event engine node. This can be an opaque identifier. It is not # necessarily a hostname, FQDN, or IP address (host address value) #host = 0.0.0.0 # Name of the event engine's listener pool. This can be an opaque identifier. It # is used for identifying the group of event engine listeners in oslo.messaging # (host address value) #listener_pool_name = events # The message topic that the event engine listens on (string value) #topic = mistral_event_engine # Configuration file for event definitions (string value) #event_definitions_cfg_file = /etc/mistral/event_definitions.yaml [execution_expiration_policy] # # From mistral.config # # How often will the executions be evaluated (in minutes). For example for value # 120 the interval will be 2 hours (every 2 hours). Note that only final state # executions will be removed: ( SUCCESS / ERROR / CANCELLED ) (integer value) #evaluation_interval = # Evaluate from which time remove executions in minutes. For example when # older_than = 60, remove all executions that finished a 60 minutes ago or more. # Minimum value is 1 (integer value) #older_than = # The maximum number of finished workflow executions to be stored. For example # when max_finished_executions = 100, only the 100 latest finished executions # will be preserved. This means that even unexpired executions are eligible for # deletion, to decrease the number of executions in the database. The default # value is 0. If it is set to 0, this constraint won't be applied (integer # value) #max_finished_executions = 0 # Size of batch of expired executions to be deleted.The default value is 0. 
If # it is set to 0, size of batch is total number of expired executions that is # going to be deleted (integer value) #batch_size = 0 # The states that the expiration policy will filter out and will not # delete.Valid values are, [['CANCELLED', 'ERROR', 'SUCCESS']] (list value) #ignored_states = [executor] # # From mistral.config # # Type of executor. Use local to run the executor within the engine server. Use # remote if the executor is launched as a separate server to run action # executions (string value) # Possible values: # local - # remote - #type = remote # Name of the executor node. This can be an any string name/identifier. It is # not necessarily a hostname, FQDN, or IP address. It is also related to the # "target" attribute of tasks defined in a workflow text. If "target" is defined # for a task then the action of the task will be sent to one of the executors # that have the same value in the "host" property (host address value) #host = 0.0.0.0 # The message topic that the executor listens on (string value) #topic = mistral_executor # The version of the executor (string value) #version = 1.0 [healthcheck] # # From mistral.config # # Enable the health check endpoint at /healthcheck. Note that this is # unauthenticated. More information is available at # https://docs.openstack.org/oslo.middleware/latest/reference/healthcheck_plugins.html # (boolean value) #enabled = false # # From oslo.middleware.healthcheck # # Show more detailed information as part of the response. Security note: # Enabling this option may expose sensitive details about the service being # monitored. Be sure to verify that it will not violate your security policies # (boolean value) #detailed = false # Additional backends that can perform health checks and report that information # back as part of a request (list value) #backends = # A list of network addresses to limit source ip allowed to access healthcheck # information. 
Any request from ip outside of these network addresses are # ignored (list value) #allowed_source_ranges = # Ignore requests with proxy headers (boolean value) #ignore_proxied_requests = false # Check the presence of a file to determine if an application is running on a # port. Used by DisableByFileHealthcheck plugin (string value) #disable_by_file_path = # Check the presence of a file based on a port to determine if an application is # running on a port. Expects a "port:path" list of strings. Used by # DisableByFilesPortsHealthcheck plugin (list value) #disable_by_file_paths = # Check the presence of files. Used by EnableByFilesHealthcheck plugin (list # value) #enable_by_file_paths = [keycloak_oidc] # # From mistral.config # # Keycloak base url (e.g. https://my.keycloak:8443/auth) (string value) #auth_url = # Required if identity server requires client certificate (string value) #certfile = # Required if identity server requires client certificate (string value) #keyfile = # A PEM encoded Certificate Authority to use when verifying HTTPs connections. # Defaults to system CAs (string value) #cafile = # If True, SSL/TLS certificate verification is disabled (boolean value) #insecure = false # Endpoint against which authorization will be performed (string value) #user_info_endpoint_url = /realms/%s/protocol/openid-connect/userinfo # URL to get the public key for a particular realm (string value) #public_cert_url = /realms/%s/protocol/openid-connect/certs # Keycloak issuer(iss) url. Example: https://ip_add:port/auth/realms/%s (string # value) #keycloak_iss = [keystone_authtoken] # # From keystonemiddleware.auth_token # # Complete "public" Identity API endpoint. This endpoint should not be an # "admin" endpoint, as it should be accessible by all end users. Unauthenticated # clients are redirected to this endpoint to authenticate. Although this # endpoint should ideally be unversioned, client support in the wild varies. 
If # you're using a versioned v2 endpoint here, then this should *not* be the same # endpoint the service user utilizes for validating tokens, because normal end # users may not be able to reach that endpoint (string value) # Deprecated group/name - [keystone_authtoken]/auth_uri #www_authenticate_uri = # DEPRECATED: Complete "public" Identity API endpoint. This endpoint should not # be an "admin" endpoint, as it should be accessible by all end users. # Unauthenticated clients are redirected to this endpoint to authenticate. # Although this endpoint should ideally be unversioned, client support in the # wild varies. If you're using a versioned v2 endpoint here, then this should # *not* be the same endpoint the service user utilizes for validating tokens, # because normal end users may not be able to reach that endpoint. This option # is deprecated in favor of www_authenticate_uri and will be removed in the S # release (string value) # This option is deprecated for removal since Queens. # Its value may be silently ignored in the future. # Reason: The auth_uri option is deprecated in favor of www_authenticate_uri and # will be removed in the S release. #auth_uri = # API version of the Identity API endpoint (string value) #auth_version = # Interface to use for the Identity API endpoint. Valid values are "public", # "internal" (default) or "admin" (string value) #interface = internal # Do not handle authorization requests within the middleware, but delegate the # authorization decision to downstream WSGI components (boolean value) #delay_auth_decision = false # Request timeout value for communicating with Identity API server (integer # value) #http_connect_timeout = # How many times are we trying to reconnect when communicating with Identity API # Server (integer value) #http_request_max_retries = 3 # Request environment key where the Swift cache object is stored. 
When # auth_token middleware is deployed with a Swift cache, use this option to have # the middleware share a caching backend with swift. Otherwise, use the # ``memcached_servers`` option instead (string value) #cache = # Required if identity server requires client certificate (string value) #certfile = # Required if identity server requires client certificate (string value) #keyfile = # A PEM encoded Certificate Authority to use when verifying HTTPs connections. # Defaults to system CAs (string value) #cafile = # Verify HTTPS connections (boolean value) #insecure = false # The region in which the identity server can be found (string value) #region_name = # Optionally specify a list of memcached server(s) to use for caching. If left # undefined, tokens will instead be cached in-process (list value) # Deprecated group/name - [keystone_authtoken]/memcache_servers #memcached_servers = # In order to prevent excessive effort spent validating tokens, the middleware # caches previously-seen tokens for a configurable duration (in seconds). Set to # -1 to disable caching completely (integer value) #token_cache_time = 300 # (Optional) If defined, indicate whether token data should be authenticated or # authenticated and encrypted. If MAC, token data is authenticated (with HMAC) # in the cache. If ENCRYPT, token data is encrypted and authenticated in the # cache. If the value is not one of these options or empty, auth_token will # raise an exception on initialization (string value) # Possible values: # None - # MAC - # ENCRYPT - #memcache_security_strategy = None # (Optional, mandatory if memcache_security_strategy is defined) This string is # used for key derivation (string value) #memcache_secret_key = # (Optional) Global toggle for TLS usage when communicating with the caching # servers (boolean value) #memcache_tls_enabled = false # (Optional) Path to a file of concatenated CA certificates in PEM format # necessary to establish the caching server's authenticity. 
If tls_enabled is # False, this option is ignored (string value) #memcache_tls_cafile = # (Optional) Path to a single file in PEM format containing the client's # certificate as well as any number of CA certificates needed to establish the # certificate's authenticity. This file is only required when client side # authentication is necessary. If tls_enabled is False, this option is ignored # (string value) #memcache_tls_certfile = # (Optional) Path to a single file containing the client's private key in. # Otherwise the private key will be taken from the file specified in # tls_certfile. If tls_enabled is False, this option is ignored (string value) #memcache_tls_keyfile = # (Optional) Set the available ciphers for sockets created with the TLS context. # It should be a string in the OpenSSL cipher list format. If not specified, all # OpenSSL enabled ciphers will be available (string value) #memcache_tls_allowed_ciphers = # (Optional) Number of seconds memcached server is considered dead before it is # tried again (integer value) #memcache_pool_dead_retry = 300 # (Optional) Maximum total number of open connections to every memcached server # (integer value) #memcache_pool_maxsize = 10 # (Optional) Socket timeout in seconds for communicating with a memcached server # (integer value) #memcache_pool_socket_timeout = 3 # (Optional) Number of seconds a connection to memcached is held unused in the # pool before it is closed (integer value) #memcache_pool_unused_timeout = 60 # (Optional) Number of seconds that an operation will wait to get a memcached # client connection from the pool (integer value) #memcache_pool_conn_get_timeout = 10 # (Optional) Use the advanced (eventlet safe) memcached client pool (boolean # value) #memcache_use_advanced_pool = true # (Optional) Indicate whether to set the X-Service-Catalog header. 
If False, # middleware will not ask for service catalog on token validation and will not # set the X-Service-Catalog header (boolean value) #include_service_catalog = true # Used to control the use and type of token binding. Can be set to: "disabled" # to not check token binding. "permissive" (default) to validate binding # information if the bind type is of a form known to the server and ignore it if # not. "strict" like "permissive" but if the bind type is unknown the token will # be rejected. "required" any form of token binding is needed to be allowed. # Finally the name of a binding method that must be present in tokens (string # value) #enforce_token_bind = permissive # A choice of roles that must be present in a service token. Service tokens are # allowed to request that an expired token can be used and so this check should # tightly control that only actual services should be sending this token. Roles # here are applied as an ANY check so any role in this list must be present. For # backwards compatibility reasons this currently only affects the allow_expired # check (list value) #service_token_roles = service # For backwards compatibility reasons we must let valid service tokens pass that # don't pass the service_token_roles check as valid. Setting this true will # become the default in a future release and should be enabled if possible # (boolean value) #service_token_roles_required = false # The name or type of the service as it appears in the service catalog. 
This is # used to validate tokens that have restricted access rules (string value) #service_type = # Enable the SASL (Simple Authentication and Security Layer) if the SASL_enable # is true, else disable (boolean value) #memcache_sasl_enabled = false # The user name for SASL (string value) #memcache_username = # The password for SASL (string value) #memcache_password = # Authentication type to load (string value) # Deprecated group/name - [keystone_authtoken]/auth_plugin #auth_type = # Config Section from which to load plugin specific options (string value) #auth_section = [legacy_action_provider] # # From mistral.config # # If True, enables loading actions configured in the entry point # "mistral.actions" (boolean value) #load_action_plugins = true # If True, enables loading actions from action generators configured in the # entry point "mistral.generators" (boolean value) #load_action_generators = true # If True, then the legacy action provider loads only the actions delivered by # the Mistral project out of the box plugged in with the entry point # "mistral.actions". This property is needed mostly for testing (boolean value) #only_builtin_actions = false # Allowlist with actions that are allowed to be loaded from the entry point # "mistral.actions", if empty all actions will be allowed (list value) #allowlist = # Denylist with actions that are not allowed to be loaded from the entry point # "mistral.actions", allowlist takes precedence, if empty all actions will be # allowed (list value) #denylist = [notifier] # # From mistral.config # # Type of notifier. Use local to run the notifier within the engine server. Use # remote if the notifier is launched as a separate server to process events # (string value) # Possible values: # local - # remote - #type = remote # Name of the notifier node. This can be an opaque identifier. 
It is not # necessarily a hostname, FQDN, or IP address (string value) #host = 0.0.0.0 # The message topic that the notifier server listens on (string value) #topic = mistral_notifier # List of publishers to publish notification (list value) #notify = [oslo_concurrency] # # From oslo.concurrency # # Enables or disables inter-process locks (boolean value) #disable_process_locking = false # Directory to use for lock files. For security, the specified directory should # only be writable by the user running the processes that need locking. Defaults # to environment variable OSLO_LOCK_PATH. If external locks are used, a lock # path must be set (string value) #lock_path = [oslo_messaging_kafka] # # From oslo.messaging # # Max fetch bytes of Kafka consumer (integer value) #kafka_max_fetch_bytes = 1048576 # Default timeout(s) for Kafka consumers (floating point value) #kafka_consumer_timeout = 1.0 # Group id for Kafka consumer. Consumers in one group will coordinate message # consumption (string value) #consumer_group = oslo_messaging_consumer # Upper bound on the delay for KafkaProducer batching in seconds (floating point # value) #producer_batch_timeout = 0.0 # Size of batch for the producer async send (integer value) #producer_batch_size = 16384 # The compression codec for all data generated by the producer. If not set, # compression will not be used. 
Note that the allowed values of this depend on # the kafka version (string value) # Possible values: # none - # gzip - # snappy - # lz4 - # zstd - #compression_codec = none # Enable asynchronous consumer commits (boolean value) #enable_auto_commit = false # The maximum number of records returned in a poll call (integer value) #max_poll_records = 500 # Protocol used to communicate with brokers (string value) # Possible values: # PLAINTEXT - # SASL_PLAINTEXT - # SSL - # SASL_SSL - #security_protocol = PLAINTEXT # Mechanism when security protocol is SASL (string value) #sasl_mechanism = PLAIN # CA certificate PEM file used to verify the server certificate (string value) #ssl_cafile = # Client certificate PEM file used for authentication (string value) #ssl_client_cert_file = # Client key PEM file used for authentication (string value) #ssl_client_key_file = # Client key password file used for authentication (string value) #ssl_client_key_password = [oslo_messaging_notifications] # # From oslo.messaging # # The Drivers(s) to handle sending notifications. Possible values are messaging, # messagingv2, routing, log, test, noop (multi valued) #driver = # A URL representing the messaging driver to use for notifications. If not set, # we fall back to the same configuration used for RPC (string value) #transport_url = # AMQP topic used for OpenStack notifications (list value) #topics = notifications # The maximum number of attempts to re-send a notification message which failed # to be delivered due to a recoverable error. 0 - No retry, -1 - indefinite # (integer value) #retry = -1 [oslo_messaging_rabbit] # # From oslo.messaging # # Use durable queues in AMQP. 
If rabbit_quorum_queue is enabled, queues will be # durable and this value will be ignored (boolean value) #amqp_durable_queues = false # Auto-delete queues in AMQP (boolean value) #amqp_auto_delete = false # Size of RPC connection pool (integer value) # Minimum value: 1 #rpc_conn_pool_size = 30 # The pool size limit for connections expiration policy (integer value) #conn_pool_min_size = 2 # The time-to-live in sec of idle connections in the pool (integer value) #conn_pool_ttl = 1200 # Connect over SSL (boolean value) #ssl = false # SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and # SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some # distributions (string value) #ssl_version = # SSL key file (valid only if SSL enabled) (string value) #ssl_key_file = # SSL cert file (valid only if SSL enabled) (string value) #ssl_cert_file = # SSL certification authority file (valid only if SSL enabled) (string value) #ssl_ca_file = # DEPRECATED: Global toggle for enforcing the OpenSSL FIPS mode. This feature # requires Python support. This is available in Python 3.9 in all environments # and may have been backported to older Python versions on select environments. # If the Python executable used does not support OpenSSL FIPS mode, an exception # will be raised (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: FIPS_mode_set API was removed in OpenSSL 3.0.0. This option has no # effect now. #ssl_enforce_fips_mode = false # DEPRECATED: (DEPRECATED) It is recommended not to use this option anymore. Run # the health check heartbeat thread through a native python thread by default. # If this option is equal to False then the health check heartbeat will inherit # the execution model from the parent process. For example if the parent process # has monkey patched the stdlib by using eventlet/greenlet then the heartbeat # will be run through a green thread. 
This option should be set to True only for # the wsgi services (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: The option is related to Eventlet which will be removed. In addition # this has never worked as expected with services using eventlet for core # service framework. #heartbeat_in_pthread = false # How long to wait (in seconds) before reconnecting in response to an AMQP # consumer cancel notification (floating point value) # Minimum value: 0.0 # Maximum value: 4.5 #kombu_reconnect_delay = 1.0 # Random time to wait for when reconnecting in response to an AMQP consumer # cancel notification (floating point value) # Minimum value: 0.0 #kombu_reconnect_splay = 0.0 # EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will not # be used. This option may not be available in future versions (string value) #kombu_compression = # How long to wait a missing client before abandoning to send it its replies. # This value should not be longer than rpc_response_timeout (integer value) # Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout #kombu_missing_consumer_retry_timeout = 60 # Determines how the next RabbitMQ node is chosen in case the one we are # currently connected to becomes unavailable. 
Takes effect only if more than one # RabbitMQ node is provided in config (string value) # Possible values: # round-robin - # shuffle - #kombu_failover_strategy = round-robin # The RabbitMQ login method (string value) # Possible values: # PLAIN - # AMQPLAIN - # EXTERNAL - # RABBIT-CR-DEMO - #rabbit_login_method = AMQPLAIN # How frequently to retry connecting with RabbitMQ (integer value) # Minimum value: 1 #rabbit_retry_interval = 1 # How long to backoff for between retries when connecting to RabbitMQ (integer # value) # Minimum value: 0 #rabbit_retry_backoff = 2 # Maximum interval of RabbitMQ connection retries (integer value) # Minimum value: 1 #rabbit_interval_max = 30 # Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this # option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring # is no longer controlled by the x-ha-policy argument when declaring a queue. If # you just want to make sure that all queues (except those with auto-generated # names) are mirrored across all nodes, run: "rabbitmqctl set_policy HA # '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value) #rabbit_ha_queues = false # Use quorum queues in RabbitMQ (x-queue-type: quorum). The quorum queue is a # modern queue type for RabbitMQ implementing a durable, replicated FIFO queue # based on the Raft consensus algorithm. It is available as of RabbitMQ 3.8.0. # If set this option will conflict with the HA queues (``rabbit_ha_queues``) aka # mirrored queues, in other words the HA queues should be disabled. Quorum # queues are also durable by default so the amqp_durable_queues option is # ignored when this option is enabled (boolean value) #rabbit_quorum_queue = false # Use quorum queues for transients queues in RabbitMQ. 
Enabling this option will # then make sure those queues are also using quorum kind of rabbit queues, which # are HA by default (boolean value) #rabbit_transient_quorum_queue = false # Each time a message is redelivered to a consumer, a counter is incremented. # Once the redelivery count exceeds the delivery limit the message gets dropped # or dead-lettered (if a DLX exchange has been configured) Used only when # rabbit_quorum_queue is enabled, Default 0 which means don't set a limit # (integer value) #rabbit_quorum_delivery_limit = 0 # By default all messages are maintained in memory if a quorum queue grows in # length it can put memory pressure on a cluster. This option can limit the # number of messages in the quorum queue. Used only when rabbit_quorum_queue is # enabled, Default 0 which means don't set a limit (integer value) #rabbit_quorum_max_memory_length = 0 # By default all messages are maintained in memory if a quorum queue grows in # length it can put memory pressure on a cluster. This option can limit the # number of memory bytes used by the quorum queue. Used only when # rabbit_quorum_queue is enabled, Default 0 which means don't set a limit # (integer value) #rabbit_quorum_max_memory_bytes = 0 # Positive integer representing duration in seconds for queue TTL (x-expires). # Queues which are unused for the duration of the TTL are automatically deleted. # The parameter affects only reply and fanout queues. Setting 0 as value will # disable the x-expires. If doing so, make sure you have a rabbitmq policy to # delete the queues or your deployment will create an infinite number of queues # over time. In case rabbit_stream_fanout is set to True, this option will # control data retention policy (x-max-age) for messages in the fanout queue # rather than the queue duration itself. 
So the oldest data in the stream queue # will be discarded from it once reaching TTL. Setting to 0 will disable x-max- # age for stream which makes the stream grow indefinitely filling up the diskspace # (integer value) # Minimum value: 0 #rabbit_transient_queues_ttl = 1800 # Specifies the number of messages to prefetch. Setting to zero allows unlimited # messages (integer value) #rabbit_qos_prefetch_count = 0 # Number of seconds after which the Rabbit broker is considered down if # heartbeat's keep-alive fails (0 disables heartbeat) (integer value) #heartbeat_timeout_threshold = 60 # How many times during the heartbeat_timeout_threshold we check the heartbeat # (integer value) #heartbeat_rate = 3 # DEPRECATED: (DEPRECATED) Enable/Disable the RabbitMQ mandatory flag for direct # send. The direct send is used as reply, so the MessageUndeliverable exception # is raised in case the client queue does not exist. MessageUndeliverable # exception will be used to loop for a timeout to give the sender a chance to # recover. This flag is deprecated and it will not be possible to deactivate this # functionality anymore (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: Mandatory flag no longer deactivable. #direct_mandatory_flag = true # Enable x-cancel-on-ha-failover flag so that rabbitmq server will cancel and # notify consumers when the queue is down (boolean value) #enable_cancel_on_failover = false # Should we use consistent queue names or random ones (boolean value) #use_queue_manager = false # Hostname used by queue manager. Defaults to the value returned by # socket.gethostname() (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. 
#hostname = node1.example.com # Process name used by queue manager (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #processname = nova-api # Use stream queues in RabbitMQ (x-queue-type: stream). Streams are a new # persistent and replicated data structure ("queue type") in RabbitMQ which # models an append-only log with non-destructive consumer semantics. It is # available as of RabbitMQ 3.9.0. If set this option will replace all fanout # queues with only one stream queue (boolean value) #rabbit_stream_fanout = false [oslo_middleware] # # From oslo.middleware.http_proxy_to_wsgi # # Whether the application is behind a proxy or not. This determines if the # middleware should parse the headers or not (boolean value) #enable_proxy_headers_parsing = false [oslo_policy] # # From oslo.policy # # DEPRECATED: This option controls whether or not to enforce scope when # evaluating policies. If ``True``, the scope of the token used in the request # is compared to the ``scope_types`` of the policy being enforced. If the scopes # do not match, an ``InvalidScope`` exception will be raised. If ``False``, a # message will be logged informing operators that policies are being invoked # with mismatching scope (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This configuration was added temporarily to facilitate a smooth # transition to the new RBAC. OpenStack will always enforce scope checks. This # configuration option is deprecated and will be removed in the 2025.2 cycle. #enforce_scope = true # This option controls whether or not to use old deprecated defaults when # evaluating policies. If ``True``, the old deprecated defaults are not going to # be evaluated. This means if any existing token is allowed for old defaults but # is disallowed for new defaults, it will be disallowed. 
It is encouraged to # enable this flag along with the ``enforce_scope`` flag so that you can get the # benefits of new defaults and ``scope_type`` together. If ``False``, the # deprecated policy check string is logically OR'd with the new policy check # string, allowing for a graceful upgrade experience between releases with new # policies, which is the default behavior (boolean value) #enforce_new_defaults = true # The relative or absolute path of a file that maps roles to permissions for a # given service. Relative paths must be specified in relation to the # configuration file setting this option (string value) #policy_file = policy.yaml # Default rule. Enforced when a requested rule is not found (string value) #policy_default_rule = default # Directories where policy configuration files are stored. They can be relative # to any directory in the search path defined by the config_dir option, or # absolute paths. The file defined by policy_file must exist for these # directories to be searched. 
Missing or empty directories are ignored (multi # valued) #policy_dirs = policy.d # Content Type to send and receive data for REST based policy check (string # value) # Possible values: # application/x-www-form-urlencoded - # application/json - #remote_content_type = application/x-www-form-urlencoded # server identity verification for REST based policy check (boolean value) #remote_ssl_verify_server_crt = false # Absolute path to ca cert file for REST based policy check (string value) #remote_ssl_ca_crt_file = # Absolute path to client cert for REST based policy check (string value) #remote_ssl_client_crt_file = # Absolute path client key file REST based policy check (string value) #remote_ssl_client_key_file = # Timeout in seconds for REST based policy check (floating point value) # Minimum value: 0 #remote_timeout = 60 [pecan] # # From mistral.config # # Pecan root controller (string value) #root = mistral.api.controllers.root.RootController # A list of modules where pecan will search for applications (list value) #modules = mistral.api # Enables the ability to display tracebacks in the browser and interactively # debug during development (boolean value) #debug = false # Enables user authentication in pecan (boolean value) #auth_enable = true [profiler] # # From mistral.config # # # Enable the profiling for all services on this node. For more information, # refer to the documentation. (boolean value) # Deprecated group/name - [profiler]/profiler_enabled #enabled = false # # Enable SQL requests profiling in services. For more information, refer to the # documentation. (boolean value) #trace_sqlalchemy = false # # Enable python requests package profiling. For more information, refer to the # documentation. (boolean value) #trace_requests = false # # Secret key(s) to use for encrypting context data for performance profiling. # For more information, refer to the documentation. (string value) #hmac_keys = SECRET_KEY # # Connection string for a notifier backend. 
For more information, refer to the # documentation. (string value) #connection_string = messaging:// # # Document type for notification indexing in elasticsearch. # (string value) #es_doc_type = notification # # This parameter is a time value parameter (for example: es_scroll_time=2m), # indicating for how long the nodes that participate in the search will maintain # relevant resources in order to continue and support it. # (string value) #es_scroll_time = 2m # # Elasticsearch splits large requests in batches. This parameter defines # maximum size of each batch (for example: es_scroll_size=10000). # (integer value) #es_scroll_size = 10000 # # Redissentinel provides a timeout option on the connections. # This parameter defines that timeout (for example: socket_timeout=0.1). # (floating point value) #socket_timeout = 0.1 # # Redissentinel uses a service name to identify a master redis service. # This parameter defines the name (for example: # ``sentinal_service_name=mymaster``). # (string value) #sentinel_service_name = mymaster # # Enable filter traces that contain error/exception to a separated place. For # more information, refer to the documentation. (boolean value) #filter_error_trace = false # Logger name for the osprofiler trace output (string value) #profiler_log_name = profiler_trace [scheduler] # # From mistral.config # # Fixed part of the delay between scheduler iterations, in seconds. Full delay # is defined as a sum of "fixed_delay" and a random delay limited by # "random_delay" (floating point value) # Minimum value: 0.1 #fixed_delay = 1 # Max value of the random part of the delay between scheduler iterations, in # seconds. Full delay is defined as a sum of "fixed_delay" and a random delay # limited by this property (floating point value) # Minimum value: 0 #random_delay = 0 # The max number of delayed calls will be selected during a scheduler iteration. 
# If this property equals None then there is no restriction on selection # (integer value) # Minimum value: 1 #batch_size = # Defines how soon (in seconds) a scheduled job captured for processing becomes # eligible for capturing by other schedulers again. This option is needed to # prevent situations when a scheduler instance captured a job and failed while # processing and so this job can never be processed again because it is marked # as captured (floating point value) # Minimum value: 1 #captured_job_timeout = 30 # Time period given to a scheduler to process a scheduled job locally before it # becomes eligible for processing by other scheduler instances.For example, a # job needs to run at 12:00:00. When a scheduler starts processing it has 60 # seconds (or other configured value) to complete the job. If the scheduler did # not complete the job within this period it most likely means that the # scheduler process crashed. In this case another scheduler instance will pick # it up from the Job Store, but not earlier than 12:01:00 and try to process it # (floating point value) # Minimum value: 1 #pickup_job_after = 60 [ssl] # # From oslo.service.sslutils # # DEPRECATED: CA certificate file to use to verify connecting clients (string # value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: The 'ca_file' option is deprecated and will be removed in a future # release. #ca_file = # DEPRECATED: Certificate file to use when starting the server securely (string # value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: The 'cert_file' option is deprecated and will be removed in a future # release. #cert_file = # DEPRECATED: Private key file to use when starting the server securely (string # value) # This option is deprecated for removal. # Its value may be silently ignored in the future. 
# Reason: The 'key_file' option is deprecated and will be removed in a future # release. #key_file = # DEPRECATED: SSL version to use (valid only if SSL enabled). Valid values are # TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some # distributions (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: The 'version' option is deprecated and will be removed in a future # release. #version = # DEPRECATED: Sets the list of available ciphers. value should be a string in # the OpenSSL cipher list format (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: The 'ciphers' option is deprecated and will be removed in a future # release. #ciphers = [yaql] # # From mistral.config # # Limit iterators by the given number of elements. When set, each time any # function declares its parameter to be iterator, that iterator is modified to # not produce more than a given number of items. If not set (or set to -1) the # result data is allowed to contain endless iterators that would cause errors if # the result where to be serialized (integer value) # Minimum value: -1 #limit_iterators = -1 # The memory usage quota (in bytes) for all data produced by the expression (or # any part of it). -1 means no limitation (integer value) # Minimum value: -1 #memory_quota = -1 # Enables input data conversion for YAQL expressions. If set to True, YAQL will # convert mutable data structures (lists, dicts, sets) into their immutable # versions. That will allow them to work with some constructs that require # hashable types even if elements are not hashable. For example, it will be # possible to put dicts into a set. Although it conflicts with the base # principles of such collections (e.g. we cannot put a non-hashable type into a # set just because otherwise it will not work correctly) the YAQL library itself # allows this. 
Disabling input data conversion may give significant performance # boost if the input data for an expression is large (boolean value) #convert_input_data = true # Enables output data conversion for YAQL expressions. If set to False, it is # possible that YAQL will generate an output that will not be JSON-serializable. # For example, if an expression has ".toSet()" in the end to convert a list into # a set. It does not mean though that such functions cannot be used, they can # still be used in expressions but the user has to keep in mind what type a # result will be, whereas if the value of this property is True YAQL will convert # the result to a JSON-compatible type (boolean value) #convert_output_data = true # When set to True, yaql converts all tuples in the expression result to lists. # It works only if "convert_output_data" is set to True (boolean value) #convert_tuples_to_lists = true # When set to True, yaql converts all sets in the expression result to lists. # Otherwise the produced result may contain sets that are not JSON-serializable. # It works only if "convert_output_data" is set to True (boolean value) #convert_sets_to_lists = false # When set to True, dictionaries are considered to be iterable and iteration # over dictionaries produces their keys (as in Python and yaql 0.2) (boolean # value) #iterable_dicts = false # Allows one to configure keyword/mapping symbol. Ability to pass named # arguments can be disabled altogether if empty string is provided (string # value) #keyword_operator = => # Enables or disables delegate expression parsing (boolean value) #allow_delegates = false