  • Ansible service installation example

    Installing a Redis master/slave pair with Ansible

       After its first startup, Redis caches the settings from the configuration file in the data directory rather than re-reading the configuration file each time. Because of this, if you temporarily change an item in the configuration file you must also delete the data directory, otherwise the change will not take effect.


      Then restart Redis and test it with redis-cli.


      Delete the directory configured as Redis's dir (the data directory), then restart Redis.
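
      These manual steps can also be expressed as tasks. Below is a minimal sketch in the same style as the playbook that follows, assuming the unpacked tarball provides pre-built binaries under src/ and that redis_port, redis_password, redis_path_data and taishi_dir are defined in group_vars (both assumptions, not shown in the original):

    - name: Remove the cached data dir so config changes take effect
      file: path="{{ redis_path_data }}" state=absent

    - name: Restart redis with the updated config
      shell: "{{ taishi_dir }}/redis/redis-6.2.1/src/redis-server {{ taishi_dir }}/redis/redis-6.2.1/redis.conf --daemonize yes"

    - name: Check the instance answers PING via redis-cli
      shell: "{{ taishi_dir }}/redis/redis-6.2.1/src/redis-cli -p {{ redis_port }} -a {{ redis_password }} ping"
      register: redis_ping
      failed_when: "'PONG' not in redis_ping.stdout"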


     Redis master node installation

    - name: Create redis dir
      file: path="{{ taishi_dir }}/redis" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: redis-master
    
    - name: Copy redis-6.2.1.tar.gz to {{ groups["redis-master"][0] }}
      copy: src=../../common/packages/redis/redis-6.2.1.tar.gz dest=/tmp
      tags: redis-master
    
    - name: Install redis for {{ groups["redis-master"][0] }}
      unarchive: src="/tmp/redis-6.2.1.tar.gz" dest="{{ taishi_dir }}/redis"  copy=no mode=0755
      tags: redis-master
    
    - name: "Copy the redis.conf"
      template: src=redis.conf.j2  dest="{{ taishi_dir }}/redis/redis-6.2.1/redis.conf" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: redis-master
    
    - name: "create redis logs file"
      file: path="{{ taishi_dir }}/logs/redis.log" state=touch owner={{ taishi_user }} group={{ taishi_user }}
      tags: redis-master
    
    - name: "chown redis dir to {{ taishi_user }}"
      file: path="{{ taishi_dir }}/redis" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: redis-master
    
    - name: Create redis data dir
      file: path="{{ redis_path_data }}" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: redis-master
    tasks/main.yml
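    The tasks above reference variables and an inventory group that are defined elsewhere. A hypothetical group_vars file and inventory that would satisfy them, with purely illustrative values:

    # group_vars/all.yml (illustrative values, adjust to your environment)
    taishi_user: taishi
    taishi_dir: /opt/taishi
    redis_port: 6379
    redis_password: "changeme"
    redis_path_data: "{{ taishi_dir }}/redis/data"

    # inventory.yml (hypothetical hosts for the two groups)
    all:
      children:
        redis-master:
          hosts:
            192.168.1.10:
        redis-slave:
          hosts:
            192.168.1.11:

    The template rendered by the "Copy the redis.conf" task follows.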
    ## Generated by install_server.sh ##
    # Redis configuration file example.
    #
    # Note that in order to read the configuration file, Redis must be
    # started with the file path as first argument:
    #
    # ./redis-server /path/to/redis.conf
    
    # Note on units: when memory size is needed, it is possible to specify
    # it in the usual form of 1k 5GB 4M and so forth:
    #
    # 1k => 1000 bytes
    # 1kb => 1024 bytes
    # 1m => 1000000 bytes
    # 1mb => 1024*1024 bytes
    # 1g => 1000000000 bytes
    # 1gb => 1024*1024*1024 bytes
    #
    # units are case insensitive so 1GB 1Gb 1gB are all the same.
    
    ################################## INCLUDES ###################################
    
    # Include one or more other config files here.  This is useful if you
    # have a standard template that goes to all Redis servers but also need
    # to customize a few per-server settings.  Include files can include
    # other files, so use this wisely.
    #
    # Note that option "include" won't be rewritten by command "CONFIG REWRITE"
    # from admin or Redis Sentinel. Since Redis always uses the last processed
    # line as value of a configuration directive, you'd better put includes
    # at the beginning of this file to avoid overwriting config change at runtime.
    #
    # If instead you are interested in using includes to override configuration
    # options, it is better to use include as the last line.
    #
    # include /path/to/local.conf
    # include /path/to/other.conf
    
    ################################## NETWORK #####################################
    
    # By default, if no "bind" configuration directive is specified, Redis listens
    # for connections from all the network interfaces available on the server.
    # It is possible to listen to just one or multiple selected interfaces using
    # the "bind" configuration directive, followed by one or more IP addresses.
    #
    # Examples:
    #
    bind 0.0.0.0
    # bind 127.0.0.1 ::1
    #
    # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
    # internet, binding to all the interfaces is dangerous and will expose the
    # instance to everybody on the internet. So by default we uncomment the
    # following bind directive, that will force Redis to listen only on
    # the IPv4 loopback interface address (this means Redis will be able to
    # accept connections only from clients running on the same computer it
    # is running on).
    #
    # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
    # JUST COMMENT OUT THE FOLLOWING LINE.
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #bind 127.0.0.1
    
    # Protected mode is a layer of security protection, in order to avoid that
    # Redis instances left open on the internet are accessed and exploited.
    #
    # When protected mode is on and if:
    #
    # 1) The server is not binding explicitly to a set of addresses using the
    #    "bind" directive.
    # 2) No password is configured.
    #
    # The server only accepts connections from clients connecting from the
    # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
    # sockets.
    #
    # By default protected mode is enabled. You should disable it only if
    # you are sure you want clients from other hosts to connect to Redis
    # even if no authentication is configured, nor a specific set of interfaces
    # are explicitly listed using the "bind" directive.
    protected-mode yes
    
    # Accept connections on the specified port, default is 6379 (IANA #815344).
    # If port 0 is specified Redis will not listen on a TCP socket.
    port {{ redis_port }}
    
    # TCP listen() backlog.
    #
    # In high requests-per-second environments you need a high backlog in order
    # to avoid slow client connection issues. Note that the Linux kernel
    # will silently truncate it to the value of /proc/sys/net/core/somaxconn so
    # make sure to raise both the value of somaxconn and tcp_max_syn_backlog
    # in order to get the desired effect.
    tcp-backlog 511
    
    # Unix socket.
    #
    # Specify the path for the Unix socket that will be used to listen for
    # incoming connections. There is no default, so Redis will not listen
    # on a unix socket when not specified.
    #
    # unixsocket /tmp/redis.sock
    # unixsocketperm 700
    
    # Close the connection after a client is idle for N seconds (0 to disable)
    timeout 0
    
    # TCP keepalive.
    #
    # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
    # of communication. This is useful for two reasons:
    #
    # 1) Detect dead peers.
    # 2) Force network equipment in the middle to consider the connection to be
    #    alive.
    #
    # On Linux, the specified value (in seconds) is the period used to send ACKs.
    # Note that to close the connection the double of the time is needed.
    # On other kernels the period depends on the kernel configuration.
    #
    # A reasonable value for this option is 300 seconds, which is the new
    # Redis default starting with Redis 3.2.1.
    tcp-keepalive 300
    
    ################################# GENERAL #####################################
    
    # By default Redis does not run as a daemon. Use 'yes' if you need it.
    # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
    daemonize no
    
    # If you run Redis from upstart or systemd, Redis can interact with your
    # supervision tree. Options:
    #   supervised no      - no supervision interaction
    #   supervised upstart - signal upstart by putting Redis into SIGSTOP mode
    #   supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
    #   supervised auto    - detect upstart or systemd method based on
    #                        UPSTART_JOB or NOTIFY_SOCKET environment variables
    # Note: these supervision methods only signal "process is ready."
    #       They do not enable continuous liveness pings back to your supervisor.
    supervised no
    
    # If a pid file is specified, Redis writes it where specified at startup
    # and removes it at exit.
    #
    # When the server runs non daemonized, no pid file is created if none is
    # specified in the configuration. When the server is daemonized, the pid file
    # is used even if not specified, defaulting to "/var/run/redis.pid".
    #
    # Creating a pid file is best effort: if Redis is not able to create it
    # nothing bad happens, the server will start and run normally.
    pidfile /var/run/redis_6379.pid
    
    # Specify the server verbosity level.
    # This can be one of:
    # debug (a lot of information, useful for development/testing)
    # verbose (many rarely useful info, but not a mess like the debug level)
    # notice (moderately verbose, what you want in production probably)
    # warning (only very important / critical messages are logged)
    loglevel notice
    
    # Specify the log file name. Also the empty string can be used to force
    # Redis to log on the standard output. Note that if you use standard
    # output for logging but daemonize, logs will be sent to /dev/null
    logfile {{ taishi_dir }}/logs/redis.log
    
    # To enable logging to the system logger, just set 'syslog-enabled' to yes,
    # and optionally update the other syslog parameters to suit your needs.
    # syslog-enabled no
    
    # Specify the syslog identity.
    # syslog-ident redis
    
    # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
    # syslog-facility local0
    
    # Set the number of databases. The default database is DB 0, you can select
    # a different one on a per-connection basis using SELECT <dbid> where
    # dbid is a number between 0 and 'databases'-1
    databases 16
    
    ################################ SNAPSHOTTING  ################################
    #
    # Save the DB on disk:
    #
    #   save <seconds> <changes>
    #
    #   Will save the DB if both the given number of seconds and the given
    #   number of write operations against the DB occurred.
    #
    #   In the example below the behaviour will be to save:
    #   after 900 sec (15 min) if at least 1 key changed
    #   after 300 sec (5 min) if at least 10 keys changed
    #   after 60 sec if at least 10000 keys changed
    #
    #   Note: you can disable saving completely by commenting out all "save" lines.
    #
    #   It is also possible to remove all the previously configured save
    #   points by adding a save directive with a single empty string argument
    #   like in the following example:
    #
    #   save ""
    
    save 900 1
    save 300 10
    save 60 10000
    
    # By default Redis will stop accepting writes if RDB snapshots are enabled
    # (at least one save point) and the latest background save failed.
    # This will make the user aware (in a hard way) that data is not persisting
    # on disk properly, otherwise chances are that no one will notice and some
    # disaster will happen.
    #
    # If the background saving process will start working again Redis will
    # automatically allow writes again.
    #
    # However if you have setup your proper monitoring of the Redis server
    # and persistence, you may want to disable this feature so that Redis will
    # continue to work as usual even if there are problems with disk,
    # permissions, and so forth.
    stop-writes-on-bgsave-error yes
    
    # Compress string objects using LZF when dump .rdb databases?
    # By default it's set to 'yes' as it's almost always a win.
    # If you want to save some CPU in the saving child set it to 'no' but
    # the dataset will likely be bigger if you have compressible values or keys.
    rdbcompression yes
    
    # Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
    # This makes the format more resistant to corruption but there is a performance
    # hit to pay (around 10%) when saving and loading RDB files, so you can disable it
    # for maximum performance.
    #
    # RDB files created with checksum disabled have a checksum of zero that will
    # tell the loading code to skip the check.
    rdbchecksum yes
    
    # The filename where to dump the DB
    dbfilename dump.rdb
    
    # The working directory.
    #
    # The DB will be written inside this directory, with the filename specified
    # above using the 'dbfilename' configuration directive.
    #
    # The Append Only File will also be created inside this directory.
    #
    # Note that you must specify a directory here, not a file name.
    dir "{{ redis_path_data }}"
    
    ################################# REPLICATION #################################
    
    # Master-Slave replication. Use slaveof to make a Redis instance a copy of
    # another Redis server. A few things to understand ASAP about Redis replication.
    #
    # 1) Redis replication is asynchronous, but you can configure a master to
    #    stop accepting writes if it appears to be not connected with at least
    #    a given number of slaves.
    # 2) Redis slaves are able to perform a partial resynchronization with the
    #    master if the replication link is lost for a relatively small amount of
    #    time. You may want to configure the replication backlog size (see the next
    #    sections of this file) with a sensible value depending on your needs.
    # 3) Replication is automatic and does not need user intervention. After a
    #    network partition slaves automatically try to reconnect to masters
    #    and resynchronize with them.
    #
    # slaveof <masterip> <masterport>
    
    # If the master is password protected (using the "requirepass" configuration
    # directive below) it is possible to tell the slave to authenticate before
    # starting the replication synchronization process, otherwise the master will
    # refuse the slave request.
    #
    # masterauth <master-password>
    
    # When a slave loses its connection with the master, or when the replication
    # is still in progress, the slave can act in two different ways:
    #
    # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
    #    still reply to client requests, possibly with out of date data, or the
    #    data set may just be empty if this is the first synchronization.
    #
    # 2) if slave-serve-stale-data is set to 'no' the slave will reply with
    #    an error "SYNC with master in progress" to all the kind of commands
    #    but to INFO and SLAVEOF.
    #
    slave-serve-stale-data yes
    
    # You can configure a slave instance to accept writes or not. Writing against
    # a slave instance may be useful to store some ephemeral data (because data
    # written on a slave will be easily deleted after resync with the master) but
    # may also cause problems if clients are writing to it because of a
    # misconfiguration.
    #
    # Since Redis 2.6 by default slaves are read-only.
    #
    # Note: read only slaves are not designed to be exposed to untrusted clients
    # on the internet. It's just a protection layer against misuse of the instance.
    # Still a read only slave exports by default all the administrative commands
    # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
    # security of read only slaves using 'rename-command' to shadow all the
    # administrative / dangerous commands.
    slave-read-only yes
    
    # Replication SYNC strategy: disk or socket.
    #
    # -------------------------------------------------------
    # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
    # -------------------------------------------------------
    #
    # New slaves and reconnecting slaves that are not able to continue the replication
    # process just receiving differences, need to do what is called a "full
    # synchronization". An RDB file is transmitted from the master to the slaves.
    # The transmission can happen in two different ways:
    #
    # 1) Disk-backed: The Redis master creates a new process that writes the RDB
    #                 file on disk. Later the file is transferred by the parent
    #                 process to the slaves incrementally.
    # 2) Diskless: The Redis master creates a new process that directly writes the
    #              RDB file to slave sockets, without touching the disk at all.
    #
    # With disk-backed replication, while the RDB file is generated, more slaves
    # can be queued and served with the RDB file as soon as the current child producing
    # the RDB file finishes its work. With diskless replication instead once
    # the transfer starts, new slaves arriving will be queued and a new transfer
    # will start when the current one terminates.
    #
    # When diskless replication is used, the master waits a configurable amount of
    # time (in seconds) before starting the transfer in the hope that multiple slaves
    # will arrive and the transfer can be parallelized.
    #
    # With slow disks and fast (large bandwidth) networks, diskless replication
    # works better.
    repl-diskless-sync no
    
    # When diskless replication is enabled, it is possible to configure the delay
    # the server waits in order to spawn the child that transfers the RDB via socket
    # to the slaves.
    #
    # This is important since once the transfer starts, it is not possible to serve
    # new slaves arriving, that will be queued for the next RDB transfer, so the server
    # waits a delay in order to let more slaves arrive.
    #
    # The delay is specified in seconds, and by default is 5 seconds. To disable
    # it entirely just set it to 0 seconds and the transfer will start ASAP.
    repl-diskless-sync-delay 5
    
    # Slaves send PINGs to server in a predefined interval. It's possible to change
    # this interval with the repl_ping_slave_period option. The default value is 10
    # seconds.
    #
    # repl-ping-slave-period 10
    
    # The following option sets the replication timeout for:
    #
    # 1) Bulk transfer I/O during SYNC, from the point of view of slave.
    # 2) Master timeout from the point of view of slaves (data, pings).
    # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
    #
    # It is important to make sure that this value is greater than the value
    # specified for repl-ping-slave-period otherwise a timeout will be detected
    # every time there is low traffic between the master and the slave.
    #
    # repl-timeout 60
    
    # Disable TCP_NODELAY on the slave socket after SYNC?
    #
    # If you select "yes" Redis will use a smaller number of TCP packets and
    # less bandwidth to send data to slaves. But this can add a delay for
    # the data to appear on the slave side, up to 40 milliseconds with
    # Linux kernels using a default configuration.
    #
    # If you select "no" the delay for data to appear on the slave side will
    # be reduced but more bandwidth will be used for replication.
    #
    # By default we optimize for low latency, but in very high traffic conditions
    # or when the master and slaves are many hops away, turning this to "yes" may
    # be a good idea.
    repl-disable-tcp-nodelay no
    
    # Set the replication backlog size. The backlog is a buffer that accumulates
    # slave data when slaves are disconnected for some time, so that when a slave
    # wants to reconnect again, often a full resync is not needed, but a partial
    # resync is enough, just passing the portion of data the slave missed while
    # disconnected.
    #
    # The bigger the replication backlog, the longer the time the slave can be
    # disconnected and later be able to perform a partial resynchronization.
    #
    # The backlog is only allocated once there is at least a slave connected.
    #
    # repl-backlog-size 1mb
    
    # After a master has no longer connected slaves for some time, the backlog
    # will be freed. The following option configures the amount of seconds that
    # need to elapse, starting from the time the last slave disconnected, for
    # the backlog buffer to be freed.
    #
    # A value of 0 means to never release the backlog.
    #
    # repl-backlog-ttl 3600
    
    # The slave priority is an integer number published by Redis in the INFO output.
    # It is used by Redis Sentinel in order to select a slave to promote into a
    # master if the master is no longer working correctly.
    #
    # A slave with a low priority number is considered better for promotion, so
    # for instance if there are three slaves with priority 10, 100, 25 Sentinel will
    # pick the one with priority 10, that is the lowest.
    #
    # However a special priority of 0 marks the slave as not able to perform the
    # role of master, so a slave with priority of 0 will never be selected by
    # Redis Sentinel for promotion.
    #
    # By default the priority is 100.
    slave-priority 100
    
    # It is possible for a master to stop accepting writes if there are less than
    # N slaves connected, having a lag less or equal than M seconds.
    #
    # The N slaves need to be in "online" state.
    #
    # The lag in seconds, that must be <= the specified value, is calculated from
    # the last ping received from the slave, that is usually sent every second.
    #
    # This option does not GUARANTEE that N replicas will accept the write, but
    # will limit the window of exposure for lost writes in case not enough slaves
    # are available, to the specified number of seconds.
    #
    # For example to require at least 3 slaves with a lag <= 10 seconds use:
    #
    # min-slaves-to-write 3
    # min-slaves-max-lag 10
    #
    # Setting one or the other to 0 disables the feature.
    #
    # By default min-slaves-to-write is set to 0 (feature disabled) and
    # min-slaves-max-lag is set to 10.
    
    # A Redis master is able to list the address and port of the attached
    # slaves in different ways. For example the "INFO replication" section
    # offers this information, which is used, among other tools, by
    # Redis Sentinel in order to discover slave instances.
    # Another place where this info is available is in the output of the
    # "ROLE" command of a masteer.
    #
    # The IP address and port normally reported by a slave are obtained
    # in the following way:
    #
    #   IP: The address is auto detected by checking the peer address
    #   of the socket used by the slave to connect with the master.
    #
    #   Port: The port is communicated by the slave during the replication
    #   handshake, and is normally the port that the slave is using to
    #   listen for connections.
    #
    # However when port forwarding or Network Address Translation (NAT) is
    # used, the slave may be actually reachable via different IP and port
    # pairs. The following two options can be used by a slave in order to
    # report to its master a specific set of IP and port, so that both INFO
    # and ROLE will report those values.
    #
    # There is no need to use both the options if you need to override just
    # the port or the IP address.
    #
    # slave-announce-ip 5.5.5.5
    # slave-announce-port 1234
    
    ################################## SECURITY ###################################
    
    # Require clients to issue AUTH <PASSWORD> before processing any other
    # commands.  This might be useful in environments in which you do not trust
    # others with access to the host running redis-server.
    #
    # This should stay commented out for backward compatibility and because most
    # people do not need auth (e.g. they run their own servers).
    #
    # Warning: since Redis is pretty fast an outside user can try up to
    # 150k passwords per second against a good box. This means that you should
    # use a very strong password otherwise it will be very easy to break.
    #
    requirepass {{ redis_password }}
    
    # Command renaming.
    #
    # It is possible to change the name of dangerous commands in a shared
    # environment. For instance the CONFIG command may be renamed into something
    # hard to guess so that it will still be available for internal-use tools
    # but not available for general clients.
    #
    # Example:
    #
    # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
    #
    # It is also possible to completely kill a command by renaming it into
    # an empty string:
    #
    # rename-command CONFIG ""
    #
    # Please note that changing the name of commands that are logged into the
    # AOF file or transmitted to slaves may cause problems.
    
    ################################### LIMITS ####################################
    
    # Set the max number of connected clients at the same time. By default
    # this limit is set to 10000 clients, however if the Redis server is not
    # able to configure the process file limit to allow for the specified limit
    # the max number of allowed clients is set to the current file limit
    # minus 32 (as Redis reserves a few file descriptors for internal uses).
    #
    # Once the limit is reached Redis will close all the new connections sending
    # an error 'max number of clients reached'.
    #
    # maxclients 10000
    
    # Don't use more memory than the specified amount of bytes.
    # When the memory limit is reached Redis will try to remove keys
    # according to the eviction policy selected (see maxmemory-policy).
    #
    # If Redis can't remove keys according to the policy, or if the policy is
    # set to 'noeviction', Redis will start to reply with errors to commands
    # that would use more memory, like SET, LPUSH, and so on, and will continue
    # to reply to read-only commands like GET.
    #
    # This option is usually useful when using Redis as an LRU cache, or to set
    # a hard memory limit for an instance (using the 'noeviction' policy).
    #
    # WARNING: If you have slaves attached to an instance with maxmemory on,
    # the size of the output buffers needed to feed the slaves are subtracted
    # from the used memory count, so that network problems / resyncs will
    # not trigger a loop where keys are evicted, and in turn the output
    # buffer of slaves is full with DELs of keys evicted triggering the deletion
    # of more keys, and so forth until the database is completely emptied.
    #
    # In short... if you have slaves attached it is suggested that you set a lower
    # limit for maxmemory so that there is some free RAM on the system for slave
    # output buffers (but this is not needed if the policy is 'noeviction').
    #
    # maxmemory <bytes>
    
    # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
    # is reached. You can select among five behaviors:
    #
    # volatile-lru -> remove the key with an expire set using an LRU algorithm
    # allkeys-lru -> remove any key according to the LRU algorithm
    # volatile-random -> remove a random key with an expire set
    # allkeys-random -> remove a random key, any key
    # volatile-ttl -> remove the key with the nearest expire time (minor TTL)
    # noeviction -> don't expire at all, just return an error on write operations
    #
    # Note: with any of the above policies, Redis will return an error on write
    #       operations, when there are no suitable keys for eviction.
    #
    #       At the date of writing these commands are: set setnx setex append
    #       incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
    #       sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
    #       zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
    #       getset mset msetnx exec sort
    #
    # The default is:
    #
    # maxmemory-policy noeviction
    
    # LRU and minimal TTL algorithms are not precise algorithms but approximated
    # algorithms (in order to save memory), so you can tune it for speed or
    # accuracy. By default Redis will check five keys and pick the one that was
    # used less recently, you can change the sample size using the following
    # configuration directive.
    #
    # The default of 5 produces good enough results. 10 Approximates very closely
    # true LRU but costs a bit more CPU. 3 is very fast but not very accurate.
    #
    # maxmemory-samples 5
    
    ############################## APPEND ONLY MODE ###############################
    
    # By default Redis asynchronously dumps the dataset on disk. This mode is
    # good enough in many applications, but an issue with the Redis process or
    # a power outage may result into a few minutes of writes lost (depending on
    # the configured save points).
    #
    # The Append Only File is an alternative persistence mode that provides
    # much better durability. For instance using the default data fsync policy
    # (see later in the config file) Redis can lose just one second of writes in a
    # dramatic event like a server power outage, or a single write if something
    # wrong with the Redis process itself happens, but the operating system is
    # still running correctly.
    #
    # AOF and RDB persistence can be enabled at the same time without problems.
    # If the AOF is enabled on startup Redis will load the AOF, that is the file
    # with the better durability guarantees.
    #
    # Please check http://redis.io/topics/persistence for more information.
    
    appendonly no
    
    # The name of the append only file (default: "appendonly.aof")
    
    appendfilename "appendonly.aof"
    
    # The fsync() call tells the Operating System to actually write data on disk
    # instead of waiting for more data in the output buffer. Some OS will really flush
    # data on disk, some other OS will just try to do it ASAP.
    #
    # Redis supports three different modes:
    #
    # no: don't fsync, just let the OS flush the data when it wants. Faster.
    # always: fsync after every write to the append only log. Slow, Safest.
    # everysec: fsync only one time every second. Compromise.
    #
    # The default is "everysec", as that's usually the right compromise between
    # speed and data safety. It's up to you to understand if you can relax this to
    # "no" that will let the operating system flush the output buffer when
    # it wants, for better performances (but if you can live with the idea of
    # some data loss consider the default persistence mode that's snapshotting),
    # or on the contrary, use "always" that's very slow but a bit safer than
    # everysec.
    #
    # For more details please check the following article:
    # http://antirez.com/post/redis-persistence-demystified.html
    #
    # If unsure, use "everysec".
    
    # appendfsync always
    appendfsync everysec
    # appendfsync no
    
    # When the AOF fsync policy is set to always or everysec, and a background
    # saving process (a background save or AOF log background rewriting) is
    # performing a lot of I/O against the disk, in some Linux configurations
    # Redis may block too long on the fsync() call. Note that there is no fix for
    # this currently, as even performing fsync in a different thread will block
    # our synchronous write(2) call.
    #
    # In order to mitigate this problem it's possible to use the following option
    # that will prevent fsync() from being called in the main process while a
    # BGSAVE or BGREWRITEAOF is in progress.
    #
    # This means that while another child is saving, the durability of Redis is
    # the same as "appendfsync none". In practical terms, this means that it is
    # possible to lose up to 30 seconds of log in the worst scenario (with the
    # default Linux settings).
    #
    # If you have latency problems turn this to "yes". Otherwise leave it as
    # "no" that is the safest pick from the point of view of durability.
    
    no-appendfsync-on-rewrite no
    
    # Automatic rewrite of the append only file.
    # Redis is able to automatically rewrite the log file implicitly calling
    # BGREWRITEAOF when the AOF log size grows by the specified percentage.
    #
    # This is how it works: Redis remembers the size of the AOF file after the
    # latest rewrite (if no rewrite has happened since the restart, the size of
    # the AOF at startup is used).
    #
    # This base size is compared to the current size. If the current size is
    # bigger than the specified percentage, the rewrite is triggered. Also
    # you need to specify a minimal size for the AOF file to be rewritten, this
    # is useful to avoid rewriting the AOF file even if the percentage increase
    # is reached but it is still pretty small.
    #
    # Specify a percentage of zero in order to disable the automatic AOF
    # rewrite feature.
    
    auto-aof-rewrite-percentage 100
    auto-aof-rewrite-min-size 64mb
    
    # An AOF file may be found to be truncated at the end during the Redis
    # startup process, when the AOF data gets loaded back into memory.
    # This may happen when the system where Redis is running
    # crashes, especially when an ext4 filesystem is mounted without the
    # data=ordered option (however this can't happen when Redis itself
    # crashes or aborts but the operating system still works correctly).
    #
    # Redis can either exit with an error when this happens, or load as much
    # data as possible (the default now) and start if the AOF file is found
    # to be truncated at the end. The following option controls this behavior.
    #
    # If aof-load-truncated is set to yes, a truncated AOF file is loaded and
    # the Redis server starts emitting a log to inform the user of the event.
    # Otherwise if the option is set to no, the server aborts with an error
    # and refuses to start. When the option is set to no, the user must
    # fix the AOF file using the "redis-check-aof" utility before restarting
    # the server.
    #
    # Note that if the AOF file will be found to be corrupted in the middle
    # the server will still exit with an error. This option only applies when
    # Redis will try to read more data from the AOF file but not enough bytes
    # will be found.
    aof-load-truncated yes
    
    ################################ LUA SCRIPTING  ###############################
    
    # Max execution time of a Lua script in milliseconds.
    #
    # If the maximum execution time is reached Redis will log that a script is
    # still in execution after the maximum allowed time and will start to
    # reply to queries with an error.
    #
    # When a long running script exceeds the maximum execution time only the
    # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
    # used to stop a script that has not yet called any write commands. The second
    # is the only way to shut down the server in the case a write command was
    # already issued by the script but the user doesn't want to wait for the natural
    # termination of the script.
    #
    # Set it to 0 or a negative value for unlimited execution without warnings.
    lua-time-limit 5000
    
    ################################ REDIS CLUSTER  ###############################
    #
    # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    # WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however
    # in order to mark it as "mature" we need to wait for a non trivial percentage
    # of users to deploy it in production.
    # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    #
    # Normal Redis instances can't be part of a Redis Cluster; only nodes that are
    # started as cluster nodes can. In order to start a Redis instance as a
    # cluster node enable the cluster support uncommenting the following:
    #
    # cluster-enabled yes
    
    # Every cluster node has a cluster configuration file. This file is not
    # intended to be edited by hand. It is created and updated by Redis nodes.
    # Every Redis Cluster node requires a different cluster configuration file.
    # Make sure that instances running in the same system do not have
    # overlapping cluster configuration file names.
    #
    # cluster-config-file nodes-6379.conf
    
    # Cluster node timeout is the amount of milliseconds a node must be unreachable
    # for it to be considered in failure state.
    # Most other internal time limits are multiple of the node timeout.
    #
    # cluster-node-timeout 15000
    
    # A slave of a failing master will avoid to start a failover if its data
    # looks too old.
    #
    # There is no simple way for a slave to actually have an exact measure of
    # its "data age", so the following two checks are performed:
    #
    # 1) If there are multiple slaves able to failover, they exchange messages
    #    in order to try to give an advantage to the slave with the best
    #    replication offset (more data from the master processed).
    #    Slaves will try to get their rank by offset, and apply to the start
    #    of the failover a delay proportional to their rank.
    #
    # 2) Every single slave computes the time of the last interaction with
    #    its master. This can be the last ping or command received (if the master
    #    is still in the "connected" state), or the time that elapsed since the
    #    disconnection with the master (if the replication link is currently down).
    #    If the last interaction is too old, the slave will not try to failover
    #    at all.
    #
    # The point "2" can be tuned by user. Specifically a slave will not perform
    # the failover if, since the last interaction with the master, the time
    # elapsed is greater than:
    #
    #   (node-timeout * slave-validity-factor) + repl-ping-slave-period
    #
    # So for example if node-timeout is 30 seconds, and the slave-validity-factor
    # is 10, and assuming a default repl-ping-slave-period of 10 seconds, the
    # slave will not try to failover if it was not able to talk with the master
    # for longer than 310 seconds.
    #
    # A large slave-validity-factor may allow slaves with too old data to failover
    # a master, while a too small value may prevent the cluster from being able to
    # elect a slave at all.
    #
    # For maximum availability, it is possible to set the slave-validity-factor
    # to a value of 0, which means, that slaves will always try to failover the
    # master regardless of the last time they interacted with the master.
    # (However they'll always try to apply a delay proportional to their
    # offset rank).
    #
    # Zero is the only value able to guarantee that when all the partitions heal
    # the cluster will always be able to continue.
    #
    # cluster-slave-validity-factor 10
    
    # Cluster slaves are able to migrate to orphaned masters, that are masters
    # that are left without working slaves. This improves the cluster ability
    # to resist failures as otherwise an orphaned master can't be failed over
    # in case of failure if it has no working slaves.
    #
    # Slaves migrate to orphaned masters only if there are still at least a
    # given number of other working slaves for their old master. This number
    # is the "migration barrier". A migration barrier of 1 means that a slave
    # will migrate only if there is at least 1 other working slave for its master
    # and so forth. It usually reflects the number of slaves you want for every
    # master in your cluster.
    #
    # Default is 1 (slaves migrate only if their masters remain with at least
    # one slave). To disable migration just set it to a very large value.
    # A value of 0 can be set but is useful only for debugging and dangerous
    # in production.
    #
    # cluster-migration-barrier 1
    
    # By default Redis Cluster nodes stop accepting queries if they detect there
    # is at least one hash slot uncovered (no available node is serving it).
    # This way if the cluster is partially down (for example a range of hash slots
    # are no longer covered) all the cluster becomes, eventually, unavailable.
    # It automatically returns available as soon as all the slots are covered again.
    #
    # However sometimes you want the subset of the cluster which is working,
    # to continue to accept queries for the part of the key space that is still
    # covered. In order to do so, just set the cluster-require-full-coverage
    # option to no.
    #
    # cluster-require-full-coverage yes
    
    # In order to setup your cluster make sure to read the documentation
    # available at http://redis.io web site.
    
    ################################## SLOW LOG ###################################
    
    # The Redis Slow Log is a system to log queries that exceeded a specified
    # execution time. The execution time does not include the I/O operations
    # like talking with the client, sending the reply and so forth,
    # but just the time needed to actually execute the command (this is the only
    # stage of command execution where the thread is blocked and can not serve
    # other requests in the meantime).
    #
    # You can configure the slow log with two parameters: one tells Redis
    # what is the execution time, in microseconds, to exceed in order for the
    # command to get logged, and the other parameter is the length of the
    # slow log. When a new command is logged the oldest one is removed from the
    # queue of logged commands.
    
    # The following time is expressed in microseconds, so 1000000 is equivalent
    # to one second. Note that a negative number disables the slow log, while
    # a value of zero forces the logging of every command.
    slowlog-log-slower-than 10000
    
    # There is no limit to this length. Just be aware that it will consume memory.
    # You can reclaim memory used by the slow log with SLOWLOG RESET.
    slowlog-max-len 128
    
    ################################ LATENCY MONITOR ##############################
    
    # The Redis latency monitoring subsystem samples different operations
    # at runtime in order to collect data related to possible sources of
    # latency of a Redis instance.
    #
    # Via the LATENCY command this information is available to the user that can
    # print graphs and obtain reports.
    #
    # The system only logs operations that were performed in a time equal or
    # greater than the amount of milliseconds specified via the
    # latency-monitor-threshold configuration directive. When its value is set
    # to zero, the latency monitor is turned off.
    #
    # By default latency monitoring is disabled since it is mostly not needed
    # if you don't have latency issues, and collecting data has a performance
    # impact, that while very small, can be measured under big load. Latency
    # monitoring can easily be enabled at runtime using the command
    # "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
    latency-monitor-threshold 0
    
    ############################# EVENT NOTIFICATION ##############################
    
    # Redis can notify Pub/Sub clients about events happening in the key space.
    # This feature is documented at http://redis.io/topics/notifications
    #
    # For instance if keyspace events notification is enabled, and a client
    # performs a DEL operation on key "foo" stored in the Database 0, two
    # messages will be published via Pub/Sub:
    #
    # PUBLISH __keyspace@0__:foo del
    # PUBLISH __keyevent@0__:del foo
    #
    # It is possible to select the events that Redis will notify among a set
    # of classes. Every class is identified by a single character:
    #
    #  K     Keyspace events, published with __keyspace@<db>__ prefix.
    #  E     Keyevent events, published with __keyevent@<db>__ prefix.
    #  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
    #  $     String commands
    #  l     List commands
    #  s     Set commands
    #  h     Hash commands
    #  z     Sorted set commands
    #  x     Expired events (events generated every time a key expires)
    #  e     Evicted events (events generated when a key is evicted for maxmemory)
    #  A     Alias for g$lshzxe, so that the "AKE" string means all the events.
    #
    #  The "notify-keyspace-events" takes as argument a string that is composed
    #  of zero or multiple characters. The empty string means that notifications
    #  are disabled.
    #
    #  Example: to enable list and generic events, from the point of view of the
    #           event name, use:
    #
    #  notify-keyspace-events Elg
    #
    #  Example 2: to get the stream of the expired keys subscribing to channel
    #             name __keyevent@0__:expired use:
    #
    #  notify-keyspace-events Ex
    #
    #  By default all notifications are disabled because most users don't need
    #  this feature and the feature has some overhead. Note that if you don't
    #  specify at least one of K or E, no events will be delivered.
    notify-keyspace-events ""
    
    ############################### ADVANCED CONFIG ###############################
    
    # Hashes are encoded using a memory efficient data structure when they have a
    # small number of entries, and the biggest entry does not exceed a given
    # threshold. These thresholds can be configured using the following directives.
    hash-max-ziplist-entries 512
    hash-max-ziplist-value 64
    
    # Lists are also encoded in a special way to save a lot of space.
    # The number of entries allowed per internal list node can be specified
    # as a fixed maximum size or a maximum number of elements.
    # For a fixed maximum size, use -5 through -1, meaning:
    # -5: max size: 64 Kb  <-- not recommended for normal workloads
    # -4: max size: 32 Kb  <-- not recommended
    # -3: max size: 16 Kb  <-- probably not recommended
    # -2: max size: 8 Kb   <-- good
    # -1: max size: 4 Kb   <-- good
    # Positive numbers mean store up to _exactly_ that number of elements
    # per list node.
    # The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
    # but if your use case is unique, adjust the settings as necessary.
    list-max-ziplist-size -2
    
    # Lists may also be compressed.
    # Compress depth is the number of quicklist ziplist nodes from *each* side of
    # the list to *exclude* from compression.  The head and tail of the list
    # are always uncompressed for fast push/pop operations.  Settings are:
    # 0: disable all list compression
    # 1: depth 1 means "don't start compressing until after 1 node into the list,
    #    going from either the head or tail"
    #    So: [head]->node->node->...->node->[tail]
    #    [head], [tail] will always be uncompressed; inner nodes will compress.
    # 2: [head]->[next]->node->node->...->node->[prev]->[tail]
    #    2 here means: don't compress head or head->next or tail->prev or tail,
    #    but compress all nodes between them.
    # 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
    # etc.
    list-compress-depth 0
    
    # Sets have a special encoding in just one case: when a set is composed
    # of just strings that happen to be integers in radix 10 in the range
    # of 64 bit signed integers.
    # The following configuration setting sets the limit in the size of the
    # set in order to use this special memory saving encoding.
    set-max-intset-entries 512
    
    # Similarly to hashes and lists, sorted sets are also specially encoded in
    # order to save a lot of space. This encoding is only used when the length and
    # elements of a sorted set are below the following limits:
    zset-max-ziplist-entries 128
    zset-max-ziplist-value 64
    
    # HyperLogLog sparse representation bytes limit. The limit includes the
    # 16-byte header. When a HyperLogLog using the sparse representation crosses
    # this limit, it is converted into the dense representation.
    #
    # A value greater than 16000 is totally useless, since at that point the
    # dense representation is more memory efficient.
    #
    # The suggested value is ~ 3000 in order to have the benefits of
    # the space efficient encoding without slowing down too much PFADD,
    # which is O(N) with the sparse encoding. The value can be raised to
    # ~ 10000 when CPU is not a concern, but space is, and the data set is
    # composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
    hll-sparse-max-bytes 3000
    
    # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
    # order to help rehashing the main Redis hash table (the one mapping top-level
    # keys to values). The hash table implementation Redis uses (see dict.c)
    # performs a lazy rehashing: the more operations you run into a hash table
    # that is rehashing, the more rehashing "steps" are performed, so if the
    # server is idle the rehashing is never complete and some more memory is used
    # by the hash table.
    #
    # The default is to use this millisecond 10 times every second in order to
    # actively rehash the main dictionaries, freeing memory when possible.
    #
    # If unsure:
    # use "activerehashing no" if you have hard latency requirements and it is
    # not a good thing in your environment that Redis can reply from time to time
    # to queries with 2 milliseconds delay.
    #
    # use "activerehashing yes" if you don't have such hard requirements but
    # want to free memory asap when possible.
    activerehashing yes
    
    # The client output buffer limits can be used to force disconnection of clients
    # that are not reading data from the server fast enough for some reason (a
    # common reason is that a Pub/Sub client can't consume messages as fast as the
    # publisher can produce them).
    #
    # The limit can be set differently for the three different classes of clients:
    #
    # normal -> normal clients including MONITOR clients
    # slave  -> slave clients
    # pubsub -> clients subscribed to at least one pubsub channel or pattern
    #
    # The syntax of every client-output-buffer-limit directive is the following:
    #
    # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
    #
    # A client is immediately disconnected once the hard limit is reached, or if
    # the soft limit is reached and remains reached for the specified number of
    # seconds (continuously).
    # So for instance if the hard limit is 32 megabytes and the soft limit is
    # 16 megabytes / 10 seconds, the client will get disconnected immediately
    # if the size of the output buffers reach 32 megabytes, but will also get
    # disconnected if the client reaches 16 megabytes and continuously overcomes
    # the limit for 10 seconds.
    #
    # By default normal clients are not limited because they don't receive data
    # without asking (in a push way), but just after a request, so only
    # asynchronous clients may create a scenario where data is requested faster
    # than it can be read.
    #
    # Instead there is a default limit for pubsub and slave clients, since
    # subscribers and slaves receive data in a push fashion.
    #
    # Both the hard or the soft limit can be disabled by setting them to zero.
    client-output-buffer-limit normal 0 0 0
    client-output-buffer-limit slave 256mb 64mb 60
    client-output-buffer-limit pubsub 32mb 8mb 60
    
    # Redis calls an internal function to perform many background tasks, like
    # closing connections of clients in timeout, purging expired keys that are
    # never requested, and so forth.
    #
    # Not all tasks are performed with the same frequency, but Redis checks for
    # tasks to perform according to the specified "hz" value.
    #
    # By default "hz" is set to 10. Raising the value will use more CPU when
    # Redis is idle, but at the same time will make Redis more responsive when
    # there are many keys expiring at the same time, and timeouts may be
    # handled with more precision.
    #
    # The range is between 1 and 500, however a value over 100 is usually not
    # a good idea. Most users should use the default of 10 and raise this up to
    # 100 only in environments where very low latency is required.
    hz 10
    
    # When a child rewrites the AOF file, if the following option is enabled
    # the file will be fsync-ed every 32 MB of data generated. This is useful
    # in order to commit the file to the disk more incrementally and avoid
    # big latency spikes.
    aof-rewrite-incremental-fsync yes
    templates/redis.conf.j2
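
    The tasks above install files and render the config but do not start the service. A start-and-verify step for the master could be appended along these lines, again assuming pre-built binaries in the unpacked tree:

    - name: Start redis master
      shell: "{{ taishi_dir }}/redis/redis-6.2.1/src/redis-server {{ taishi_dir }}/redis/redis-6.2.1/redis.conf --daemonize yes"
      become_user: "{{ taishi_user }}"
      tags: redis-master

    - name: Confirm the node reports role:master
      shell: "{{ taishi_dir }}/redis/redis-6.2.1/src/redis-cli -p {{ redis_port }} -a {{ redis_password }} info replication"
      register: repl_info
      failed_when: "'role:master' not in repl_info.stdout"
      tags: redis-master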

    Redis slave node installation

    - name: Create redis dir
      file: path="{{ taishi_dir }}/redis" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: redis-slave
    
    - name: Copy redis-6.2.1.tar.gz to {{ groups["redis-slave"][0] }}
      copy: src=../../common/packages/redis/redis-6.2.1.tar.gz dest=/tmp
      tags: redis-slave
    
    - name: Install redis-slave for {{ groups["redis-slave"][0] }}
      unarchive: src="/tmp/redis-6.2.1.tar.gz" dest="{{ taishi_dir }}/redis"  copy=no mode=0755
      tags: redis-slave
    
    - name: "create redis logs file"
      file: path="{{ taishi_dir }}/logs/redis.log" state=touch owner={{ taishi_user }} group={{ taishi_user }} 
      tags: redis-slave
    
    - name: "Copy the redis.conf"
      template: src=redis.conf.j2  dest="{{ taishi_dir }}/redis/redis-6.2.1/redis.conf" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: redis-slave
    
    - name: "chown redis dir to {{ taishi_user }}"
      file: path="{{ taishi_dir }}/redis" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: redis-slave
    
    - name: Create redis data dir
      file: path="{{ redis_path_data }}" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: redis-slave
    tasks/main.yml
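    The slave tasks render the same redis.conf.j2, but nothing shown here points the slave at the master; presumably the slave's template carries the replication directive. If it does not, the wiring can be added with one more task, sketched here on the assumption that the master is the first host of the redis-master group:

    - name: Point the slave at the master (replicaof + masterauth)
      lineinfile: path="{{ taishi_dir }}/redis/redis-6.2.1/redis.conf" line="{{ item }}"
      with_items:
        - "replicaof {{ groups['redis-master'][0] }} {{ redis_port }}"
        - "masterauth {{ redis_password }}"
      tags: redis-slave

    The slave's configuration template follows.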
    # Redis configuration file example.
    #
    # Note that in order to read the configuration file, Redis must be
    # started with the file path as first argument:
    #
    # ./redis-server /path/to/redis.conf
    
    # Note on units: when memory size is needed, it is possible to specify
    # it in the usual form of 1k 5GB 4M and so forth:
    #
    # 1k => 1000 bytes
    # 1kb => 1024 bytes
    # 1m => 1000000 bytes
    # 1mb => 1024*1024 bytes
    # 1g => 1000000000 bytes
    # 1gb => 1024*1024*1024 bytes
    #
    # units are case insensitive so 1GB 1Gb 1gB are all the same.
    
    ################################## INCLUDES ###################################
    
    # Include one or more other config files here.  This is useful if you
    # have a standard template that goes to all Redis servers but also need
    # to customize a few per-server settings.  Include files can include
    # other files, so use this wisely.
    #
    # Note that option "include" won't be rewritten by command "CONFIG REWRITE"
    # from admin or Redis Sentinel. Since Redis always uses the last processed
    # line as value of a configuration directive, you'd better put includes
    # at the beginning of this file to avoid overwriting config change at runtime.
    #
    # If instead you are interested in using includes to override configuration
    # options, it is better to use include as the last line.
    #
    # include /path/to/local.conf
    # include /path/to/other.conf
    
    ################################## MODULES #####################################
    
    # Load modules at startup. If the server is not able to load modules
    # it will abort. It is possible to use multiple loadmodule directives.
    #
    # loadmodule /path/to/my_module.so
    # loadmodule /path/to/other_module.so
    
    ################################## NETWORK #####################################
    
    # By default, if no "bind" configuration directive is specified, Redis listens
    # for connections from all available network interfaces on the host machine.
    # It is possible to listen to just one or multiple selected interfaces using
    # the "bind" configuration directive, followed by one or more IP addresses.
    # Each address can be prefixed by "-", which means that redis will not fail to
    # start if the address is not available. Being not available only refers to
    # addresses that do not correspond to any network interface. Addresses that
    # are already in use will always fail, and unsupported protocols will always be
    # silently skipped.
    #
    # Examples:
    #
    # bind 192.168.1.100 10.0.0.1     # listens on two specific IPv4 addresses
    # bind 127.0.0.1 ::1              # listens on loopback IPv4 and IPv6
    # bind * -::*                     # like the default, all available interfaces
    #
    # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
    # internet, binding to all the interfaces is dangerous and will expose the
    # instance to everybody on the internet. So by default we uncomment the
    # following bind directive, that will force Redis to listen only on the
    # IPv4 and IPv6 (if available) loopback interface addresses (this means Redis
    # will only be able to accept client connections from the same host that it is
    # running on).
    #
    # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
    # JUST COMMENT OUT THE FOLLOWING LINE.
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #bind 127.0.0.1 -::1
    
    # Protected mode is a layer of security protection, in order to avoid that
    # Redis instances left open on the internet are accessed and exploited.
    #
    # When protected mode is on and if:
    #
    # 1) The server is not binding explicitly to a set of addresses using the
    #    "bind" directive.
    # 2) No password is configured.
    #
    # The server only accepts connections from clients connecting from the
    # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
    # sockets.
    #
    # By default protected mode is enabled. You should disable it only if
    # you are sure you want clients from other hosts to connect to Redis
    # even if no authentication is configured, nor a specific set of interfaces
    # are explicitly listed using the "bind" directive.
    protected-mode no
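
# NOTE (deployment-specific, not part of the stock file): protected mode is
# disabled here so that clients on other hosts can reach this instance; access
# control then relies on the "requirepass" directive set later in this file.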
    
    # Accept connections on the specified port, default is 6379 (IANA #815344).
    # If port 0 is specified Redis will not listen on a TCP socket.
    port 6379
    
    # TCP listen() backlog.
    #
    # In high requests-per-second environments you need a high backlog in order
    # to avoid slow clients connection issues. Note that the Linux kernel
    # will silently truncate it to the value of /proc/sys/net/core/somaxconn so
    # make sure to raise both the value of somaxconn and tcp_max_syn_backlog
    # in order to get the desired effect.
    tcp-backlog 511
    
    # Unix socket.
    #
    # Specify the path for the Unix socket that will be used to listen for
    # incoming connections. There is no default, so Redis will not listen
    # on a unix socket when not specified.
    #
    # unixsocket /run/redis.sock
    # unixsocketperm 700
    
    # Close the connection after a client is idle for N seconds (0 to disable)
    timeout 0
    
    # TCP keepalive.
    #
    # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
    # of communication. This is useful for two reasons:
    #
    # 1) Detect dead peers.
    # 2) Force network equipment in the middle to consider the connection to be
    #    alive.
    #
    # On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that double this time is needed to actually close the connection.
    # On other kernels the period depends on the kernel configuration.
    #
    # A reasonable value for this option is 300 seconds, which is the new
    # Redis default starting with Redis 3.2.1.
    tcp-keepalive 300
    
    ################################# TLS/SSL #####################################
    
    # By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration
    # directive can be used to define TLS-listening ports. To enable TLS on the
    # default port, use:
    #
    # port 0
    # tls-port 6379
    
    # Configure a X.509 certificate and private key to use for authenticating the
    # server to connected clients, masters or cluster peers.  These files should be
    # PEM formatted.
    #
    # tls-cert-file redis.crt
    # tls-key-file redis.key
    
    # Normally Redis uses the same certificate for both server functions (accepting
    # connections) and client functions (replicating from a master, establishing
    # cluster bus connections, etc.).
    #
    # Sometimes certificates are issued with attributes that designate them as
    # client-only or server-only certificates. In that case it may be desired to use
    # different certificates for incoming (server) and outgoing (client)
    # connections. To do that, use the following directives:
    #
    # tls-client-cert-file client.crt
    # tls-client-key-file client.key
    
    # Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange:
    #
    # tls-dh-params-file redis.dh
    
    # Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL
    # clients and peers.  Redis requires an explicit configuration of at least one
    # of these, and will not implicitly use the system wide configuration.
    #
    # tls-ca-cert-file ca.crt
    # tls-ca-cert-dir /etc/ssl/certs
    
    # By default, clients (including replica servers) on a TLS port are required
    # to authenticate using valid client side certificates.
    #
    # If "no" is specified, client certificates are not required and not accepted.
    # If "optional" is specified, client certificates are accepted and must be
    # valid if provided, but are not required.
    #
    # tls-auth-clients no
    # tls-auth-clients optional
    
    # By default, a Redis replica does not attempt to establish a TLS connection
    # with its master.
    #
    # Use the following directive to enable TLS on replication links.
    #
    # tls-replication yes
    
    # By default, the Redis Cluster bus uses a plain TCP connection. To enable
    # TLS for the bus protocol, use the following directive:
    #
    # tls-cluster yes
    
    # By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended
    # that older formally deprecated versions are kept disabled to reduce the attack surface.
    # You can explicitly specify TLS versions to support.
    # Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2",
    # "TLSv1.3" (OpenSSL >= 1.1.1) or any combination.
    # To enable only TLSv1.2 and TLSv1.3, use:
    #
    # tls-protocols "TLSv1.2 TLSv1.3"
    
    # Configure allowed ciphers.  See the ciphers(1ssl) manpage for more information
    # about the syntax of this string.
    #
    # Note: this configuration applies only to <= TLSv1.2.
    #
    # tls-ciphers DEFAULT:!MEDIUM
    
    # Configure allowed TLSv1.3 ciphersuites.  See the ciphers(1ssl) manpage for more
    # information about the syntax of this string, and specifically for TLSv1.3
    # ciphersuites.
    #
    # tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256
    
    # When choosing a cipher, use the server's preference instead of the client
    # preference. By default, the server follows the client's preference.
    #
    # tls-prefer-server-ciphers yes
    
    # By default, TLS session caching is enabled to allow faster and less expensive
    # reconnections by clients that support it. Use the following directive to disable
    # caching.
    #
    # tls-session-caching no
    
    # Change the default number of TLS sessions cached. A zero value sets the cache
    # to unlimited size. The default size is 20480.
    #
    # tls-session-cache-size 5000
    
    # Change the default timeout of cached TLS sessions. The default timeout is 300
    # seconds.
    #
    # tls-session-cache-timeout 60
    
    ################################# GENERAL #####################################
    
    # By default Redis does not run as a daemon. Use 'yes' if you need it.
    # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
    # When Redis is supervised by upstart or systemd, this parameter has no impact.
    daemonize yes
    
    # If you run Redis from upstart or systemd, Redis can interact with your
    # supervision tree. Options:
    #   supervised no      - no supervision interaction
    #   supervised upstart - signal upstart by putting Redis into SIGSTOP mode
    #                        requires "expect stop" in your upstart job config
    #   supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
    #                        on startup, and updating Redis status on a regular
    #                        basis.
    #   supervised auto    - detect upstart or systemd method based on
    #                        UPSTART_JOB or NOTIFY_SOCKET environment variables
    # Note: these supervision methods only signal "process is ready."
    #       They do not enable continuous pings back to your supervisor.
    #
    # The default is "no". To run under upstart/systemd, you can simply uncomment
    # the line below:
    #
    # supervised auto
    
    # If a pid file is specified, Redis writes it where specified at startup
    # and removes it at exit.
    #
    # When the server runs non daemonized, no pid file is created if none is
    # specified in the configuration. When the server is daemonized, the pid file
    # is used even if not specified, defaulting to "/var/run/redis.pid".
    #
    # Creating a pid file is best effort: if Redis is not able to create it
    # nothing bad happens, the server will start and run normally.
    #
    # Note that on modern Linux systems "/run/redis.pid" is more conforming
    # and should be used instead.
    pidfile "/var/run/redis_6379.pid"
    
    # Specify the server verbosity level.
    # This can be one of:
    # debug (a lot of information, useful for development/testing)
# verbose (lots of rarely useful info, but not a mess like the debug level)
    # notice (moderately verbose, what you want in production probably)
    # warning (only very important / critical messages are logged)
    loglevel notice
    
# Specify the log file name. The empty string can also be used to force
# Redis to log to standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
    logfile "{{ taishi_dir }}/logs/redis.log"
    
    # To enable logging to the system logger, just set 'syslog-enabled' to yes,
    # and optionally update the other syslog parameters to suit your needs.
    # syslog-enabled no
    
    # Specify the syslog identity.
    # syslog-ident redis
    
    # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
    # syslog-facility local0
    
    # To disable the built in crash log, which will possibly produce cleaner core
    # dumps when they are needed, uncomment the following:
    #
    # crash-log-enabled no
    
    # To disable the fast memory check that's run as part of the crash log, which
    # will possibly let redis terminate sooner, uncomment the following:
    #
    # crash-memcheck-enabled no
    
    # Set the number of databases. The default database is DB 0, you can select
    # a different one on a per-connection basis using SELECT <dbid> where
    # dbid is a number between 0 and 'databases'-1
    databases 16
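
# For example (illustrative, not part of the stock file), a client can work
# against DB 3 either with "redis-cli -n 3" or by issuing "SELECT 3" after
# connecting.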
    
    # By default Redis shows an ASCII art logo only when started to log to the
    # standard output and if the standard output is a TTY and syslog logging is
    # disabled. Basically this means that normally a logo is displayed only in
    # interactive sessions.
    #
# However it is possible to force the pre-4.0 behavior and always show an
# ASCII art logo in startup logs by setting the following option to yes.
    always-show-logo no
    
    # By default, Redis modifies the process title (as seen in 'top' and 'ps') to
    # provide some runtime information. It is possible to disable this and leave
    # the process name as executed by setting the following to no.
    set-proc-title yes
    
    # When changing the process title, Redis uses the following template to construct
    # the modified title.
    #
    # Template variables are specified in curly brackets. The following variables are
    # supported:
    #
    # {title}           Name of process as executed if parent, or type of child process.
    # {listen-addr}     Bind address or '*' followed by TCP or TLS port listening on, or
    #                   Unix socket if only that's available.
    # {server-mode}     Special mode, i.e. "[sentinel]" or "[cluster]".
    # {port}            TCP port listening on, or 0.
    # {tls-port}        TLS port listening on, or 0.
    # {unixsocket}      Unix domain socket listening on, or "".
    # {config-file}     Name of configuration file used.
    #
    proc-title-template "{title} {listen-addr} {server-mode}"
    
    ################################ SNAPSHOTTING  ################################
    
    # Save the DB to disk.
    #
    # save <seconds> <changes>
    #
    # Redis will save the DB if both the given number of seconds and the given
    # number of write operations against the DB occurred.
    #
    # Snapshotting can be completely disabled with a single empty string argument
    # as in following example:
    #
    # save ""
    #
    # Unless specified otherwise, by default Redis will save the DB:
    #   * After 3600 seconds (an hour) if at least 1 key changed
    #   * After 300 seconds (5 minutes) if at least 100 keys changed
    #   * After 60 seconds if at least 10000 keys changed
    #
    # You can set these explicitly by uncommenting the three following lines.
    #
    # save 3600 1
    # save 300 100
    # save 60 10000
    
    # By default Redis will stop accepting writes if RDB snapshots are enabled
    # (at least one save point) and the latest background save failed.
    # This will make the user aware (in a hard way) that data is not persisting
    # on disk properly, otherwise chances are that no one will notice and some
    # disaster will happen.
    #
# If the background saving process starts working again, Redis will
# automatically allow writes again.
    #
    # However if you have setup your proper monitoring of the Redis server
    # and persistence, you may want to disable this feature so that Redis will
    # continue to work as usual even if there are problems with disk,
    # permissions, and so forth.
    stop-writes-on-bgsave-error yes
    
# Compress string objects using LZF when dumping .rdb databases?
    # By default compression is enabled as it's almost always a win.
    # If you want to save some CPU in the saving child set it to 'no' but
    # the dataset will likely be bigger if you have compressible values or keys.
    rdbcompression yes
    
    # Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
    # This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can
# disable it for maximum performance.
    #
    # RDB files created with checksum disabled have a checksum of zero that will
    # tell the loading code to skip the check.
    rdbchecksum yes
    
# Enables or disables full sanitization checks for ziplist, listpack, etc. when
# loading an RDB or RESTORE payload. This reduces the chances of an assertion or
# crash later on while processing commands.
# Options:
#   no         - Never perform full sanitization
#   yes        - Always perform full sanitization
#   clients    - Perform full sanitization only for user connections.
    #                Excludes: RDB files, RESTORE commands received from the master
    #                connection, and client connections which have the
    #                skip-sanitize-payload ACL flag.
    # The default should be 'clients' but since it currently affects cluster
    # resharding via MIGRATE, it is temporarily set to 'no' by default.
    #
    # sanitize-dump-payload no
    
    # The filename where to dump the DB
    dbfilename "dump.rdb"
    
    # Remove RDB files used by replication in instances without persistence
    # enabled. By default this option is disabled, however there are environments
    # where for regulations or other security concerns, RDB files persisted on
    # disk by masters in order to feed replicas, or stored on disk by replicas
    # in order to load them for the initial synchronization, should be deleted
# ASAP. Note that this option ONLY WORKS in instances that have both AOF
# and RDB persistence disabled, otherwise it is completely ignored.
    #
    # An alternative (and sometimes better) way to obtain the same effect is
    # to use diskless replication on both master and replicas instances. However
    # in the case of replicas, diskless is not always an option.
    rdb-del-sync-files no
    
    # The working directory.
    #
    # The DB will be written inside this directory, with the filename specified
    # above using the 'dbfilename' configuration directive.
    #
    # The Append Only File will also be created inside this directory.
    #
    # Note that you must specify a directory here, not a file name.
    dir "{{ redis_path_data }}"
    
    ################################# REPLICATION #################################
    
    # Master-Replica replication. Use replicaof to make a Redis instance a copy of
    # another Redis server. A few things to understand ASAP about Redis replication.
    #
    #   +------------------+      +---------------+
    #   |      Master      | ---> |    Replica    |
    #   | (receive writes) |      |  (exact copy) |
    #   +------------------+      +---------------+
    #
    # 1) Redis replication is asynchronous, but you can configure a master to
    #    stop accepting writes if it appears to be not connected with at least
    #    a given number of replicas.
    # 2) Redis replicas are able to perform a partial resynchronization with the
    #    master if the replication link is lost for a relatively small amount of
    #    time. You may want to configure the replication backlog size (see the next
    #    sections of this file) with a sensible value depending on your needs.
    # 3) Replication is automatic and does not need user intervention. After a
    #    network partition replicas automatically try to reconnect to masters
    #    and resynchronize with them.
    #
    #replicaof 192.168.30.76 6379
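#
# The directive above is left commented out in this template; a sketch of
# baking the master address in via the inventory instead (assuming the same
# "redis-master" group used by the master role) could be:
#
# replicaof {{ groups["redis-master"][0] }} 6379
#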
    # If the master is password protected (using the "requirepass" configuration
    # directive below) it is possible to tell the replica to authenticate before
    # starting the replication synchronization process, otherwise the master will
    # refuse the replica request.
    #
    masterauth "{{ redis_password }}"
    #
    # However this is not enough if you are using Redis ACLs (for Redis version
    # 6 or greater), and the default user is not capable of running the PSYNC
    # command and/or other commands needed for replication. In this case it's
    # better to configure a special user to use with replication, and specify the
    # masteruser configuration as such:
    #
    # masteruser <username>
    #
    # When masteruser is specified, the replica will authenticate against its
    # master using the new AUTH form: AUTH <username> <password>.
    
    # When a replica loses its connection with the master, or when the replication
    # is still in progress, the replica can act in two different ways:
    #
    # 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
    #    still reply to client requests, possibly with out of date data, or the
    #    data set may just be empty if this is the first synchronization.
    #
    # 2) If replica-serve-stale-data is set to 'no' the replica will reply with
    #    an error "SYNC with master in progress" to all commands except:
    #    INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE,
    #    UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST,
    #    HOST and LATENCY.
    #
    replica-serve-stale-data yes
    
    # You can configure a replica instance to accept writes or not. Writing against
    # a replica instance may be useful to store some ephemeral data (because data
    # written on a replica will be easily deleted after resync with the master) but
    # may also cause problems if clients are writing to it because of a
    # misconfiguration.
    #
    # Since Redis 2.6 by default replicas are read-only.
    #
    # Note: read only replicas are not designed to be exposed to untrusted clients
    # on the internet. It's just a protection layer against misuse of the instance.
    # Still a read only replica exports by default all the administrative commands
    # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
    # security of read only replicas using 'rename-command' to shadow all the
    # administrative / dangerous commands.
    replica-read-only yes
    
    # Replication SYNC strategy: disk or socket.
    #
    # New replicas and reconnecting replicas that are not able to continue the
    # replication process just receiving differences, need to do what is called a
    # "full synchronization". An RDB file is transmitted from the master to the
    # replicas.
    #
    # The transmission can happen in two different ways:
    #
    # 1) Disk-backed: The Redis master creates a new process that writes the RDB
    #                 file on disk. Later the file is transferred by the parent
    #                 process to the replicas incrementally.
    # 2) Diskless: The Redis master creates a new process that directly writes the
    #              RDB file to replica sockets, without touching the disk at all.
    #
    # With disk-backed replication, while the RDB file is generated, more replicas
    # can be queued and served with the RDB file as soon as the current child
    # producing the RDB file finishes its work. With diskless replication instead
    # once the transfer starts, new replicas arriving will be queued and a new
    # transfer will start when the current one terminates.
    #
    # When diskless replication is used, the master waits a configurable amount of
    # time (in seconds) before starting the transfer in the hope that multiple
    # replicas will arrive and the transfer can be parallelized.
    #
    # With slow disks and fast (large bandwidth) networks, diskless replication
    # works better.
    repl-diskless-sync no
    
    # When diskless replication is enabled, it is possible to configure the delay
    # the server waits in order to spawn the child that transfers the RDB via socket
    # to the replicas.
    #
    # This is important since once the transfer starts, it is not possible to serve
    # new replicas arriving, that will be queued for the next RDB transfer, so the
    # server waits a delay in order to let more replicas arrive.
    #
    # The delay is specified in seconds, and by default is 5 seconds. To disable
    # it entirely just set it to 0 seconds and the transfer will start ASAP.
    repl-diskless-sync-delay 5
    
    # -----------------------------------------------------------------------------
    # WARNING: RDB diskless load is experimental. Since in this setup the replica
    # does not immediately store an RDB on disk, it may cause data loss during
    # failovers. RDB diskless load + Redis modules not handling I/O reads may also
    # cause Redis to abort in case of I/O errors during the initial synchronization
    # stage with the master. Use only if you know what you are doing.
    # -----------------------------------------------------------------------------
    #
    # Replica can load the RDB it reads from the replication link directly from the
    # socket, or store the RDB to a file and read that file after it was completely
    # received from the master.
    #
# In many cases the disk is slower than the network, and storing and loading
# the RDB file may increase replication time (and even increase the master's
# Copy on Write memory and replica buffers).
    # However, parsing the RDB file directly from the socket may mean that we have
    # to flush the contents of the current database before the full rdb was
    # received. For this reason we have the following options:
    #
    # "disabled"    - Don't use diskless load (store the rdb file to the disk first)
    # "on-empty-db" - Use diskless load only when it is completely safe.
    # "swapdb"      - Keep a copy of the current db contents in RAM while parsing
    #                 the data directly from the socket. note that this requires
    #                 sufficient memory, if you don't have it, you risk an OOM kill.
    repl-diskless-load disabled
    
# Replicas send PINGs to the server at a predefined interval. It's possible to
# change this interval with the repl_ping_replica_period option. The default
    # value is 10 seconds.
    #
    # repl-ping-replica-period 10
    
    # The following option sets the replication timeout for:
    #
    # 1) Bulk transfer I/O during SYNC, from the point of view of replica.
    # 2) Master timeout from the point of view of replicas (data, pings).
    # 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).
    #
    # It is important to make sure that this value is greater than the value
    # specified for repl-ping-replica-period otherwise a timeout will be detected
    # every time there is low traffic between the master and the replica. The default
    # value is 60 seconds.
    #
    # repl-timeout 60
    
    # Disable TCP_NODELAY on the replica socket after SYNC?
    #
    # If you select "yes" Redis will use a smaller number of TCP packets and
    # less bandwidth to send data to replicas. But this can add a delay for
    # the data to appear on the replica side, up to 40 milliseconds with
    # Linux kernels using a default configuration.
    #
    # If you select "no" the delay for data to appear on the replica side will
    # be reduced but more bandwidth will be used for replication.
    #
    # By default we optimize for low latency, but in very high traffic conditions
    # or when the master and replicas are many hops away, turning this to "yes" may
    # be a good idea.
    repl-disable-tcp-nodelay no
    
    # Set the replication backlog size. The backlog is a buffer that accumulates
    # replica data when replicas are disconnected for some time, so that when a
    # replica wants to reconnect again, often a full resync is not needed, but a
    # partial resync is enough, just passing the portion of data the replica
    # missed while disconnected.
    #
    # The bigger the replication backlog, the longer the replica can endure the
    # disconnect and later be able to perform a partial resynchronization.
    #
    # The backlog is only allocated if there is at least one replica connected.
    #
    # repl-backlog-size 1mb
    
    # After a master has no connected replicas for some time, the backlog will be
    # freed. The following option configures the amount of seconds that need to
    # elapse, starting from the time the last replica disconnected, for the backlog
    # buffer to be freed.
    #
    # Note that replicas never free the backlog for timeout, since they may be
    # promoted to masters later, and should be able to correctly "partially
    # resynchronize" with other replicas: hence they should always accumulate backlog.
    #
    # A value of 0 means to never release the backlog.
    #
    # repl-backlog-ttl 3600
    
    # The replica priority is an integer number published by Redis in the INFO
    # output. It is used by Redis Sentinel in order to select a replica to promote
    # into a master if the master is no longer working correctly.
    #
    # A replica with a low priority number is considered better for promotion, so
    # for instance if there are three replicas with priority 10, 100, 25 Sentinel
    # will pick the one with priority 10, that is the lowest.
    #
    # However a special priority of 0 marks the replica as not able to perform the
    # role of master, so a replica with priority of 0 will never be selected by
    # Redis Sentinel for promotion.
    #
    # By default the priority is 100.
    replica-priority 100
    
    # It is possible for a master to stop accepting writes if there are less than
    # N replicas connected, having a lag less or equal than M seconds.
    #
    # The N replicas need to be in "online" state.
    #
    # The lag in seconds, that must be <= the specified value, is calculated from
    # the last ping received from the replica, that is usually sent every second.
    #
    # This option does not GUARANTEE that N replicas will accept the write, but
    # will limit the window of exposure for lost writes in case not enough replicas
    # are available, to the specified number of seconds.
    #
    # For example to require at least 3 replicas with a lag <= 10 seconds use:
    #
    # min-replicas-to-write 3
    # min-replicas-max-lag 10
    #
    # Setting one or the other to 0 disables the feature.
    #
    # By default min-replicas-to-write is set to 0 (feature disabled) and
    # min-replicas-max-lag is set to 10.
    
    # A Redis master is able to list the address and port of the attached
    # replicas in different ways. For example the "INFO replication" section
    # offers this information, which is used, among other tools, by
    # Redis Sentinel in order to discover replica instances.
    # Another place where this info is available is in the output of the
    # "ROLE" command of a master.
    #
    # The listed IP address and port normally reported by a replica is
    # obtained in the following way:
    #
    #   IP: The address is auto detected by checking the peer address
    #   of the socket used by the replica to connect with the master.
    #
    #   Port: The port is communicated by the replica during the replication
    #   handshake, and is normally the port that the replica is using to
    #   listen for connections.
    #
    # However when port forwarding or Network Address Translation (NAT) is
    # used, the replica may actually be reachable via different IP and port
    # pairs. The following two options can be used by a replica in order to
    # report to its master a specific set of IP and port, so that both INFO
    # and ROLE will report those values.
    #
    # There is no need to use both the options if you need to override just
    # the port or the IP address.
    #
    # replica-announce-ip 5.5.5.5
    # replica-announce-port 1234
    
    ############################### KEYS TRACKING #################################
    
    # Redis implements server assisted support for client side caching of values.
    # This is implemented using an invalidation table that remembers, using
# a radix tree indexed by key name, what clients have which keys. In turn
    # this is used in order to send invalidation messages to clients. Please
    # check this page to understand more about the feature:
    #
    #   https://redis.io/topics/client-side-caching
    #
    # When tracking is enabled for a client, all the read only queries are assumed
    # to be cached: this will force Redis to store information in the invalidation
    # table. When keys are modified, such information is flushed away, and
    # invalidation messages are sent to the clients. However if the workload is
    # heavily dominated by reads, Redis could use more and more memory in order
    # to track the keys fetched by many clients.
    #
    # For this reason it is possible to configure a maximum fill value for the
    # invalidation table. By default it is set to 1M of keys, and once this limit
    # is reached, Redis will start to evict keys in the invalidation table
    # even if they were not modified, just to reclaim memory: this will in turn
    # force the clients to invalidate the cached values. Basically the table
    # maximum size is a trade off between the memory you want to spend server
    # side to track information about who cached what, and the ability of clients
    # to retain cached objects in memory.
    #
    # If you set the value to 0, it means there are no limits, and Redis will
    # retain as many keys as needed in the invalidation table.
    # In the "stats" INFO section, you can find information about the number of
    # keys in the invalidation table at every given moment.
    #
# Note: when key tracking is used in broadcasting mode, no memory is used
# on the server side so this setting is useless.
    #
    # tracking-table-max-keys 1000000
    
    ################################## SECURITY ###################################
    
    # Warning: since Redis is pretty fast, an outside user can try up to
    # 1 million passwords per second against a modern box. This means that you
    # should use very strong passwords, otherwise they will be very easy to break.
    # Note that because the password is really a shared secret between the client
    # and the server, and should not be memorized by any human, the password
    # can be easily a long string from /dev/urandom or whatever, so by using a
    # long and unguessable password no brute force attack will be possible.
    
    # Redis ACL users are defined in the following format:
    #
    #   user <username> ... acl rules ...
    #
    # For example:
    #
    #   user worker +@list +@connection ~jobs:* on >ffa9203c493aa99
    #
    # The special username "default" is used for new connections. If this user
    # has the "nopass" rule, then new connections will be immediately authenticated
    # as the "default" user without the need of any password provided via the
    # AUTH command. Otherwise if the "default" user is not flagged with "nopass"
    # the connections will start in not authenticated state, and will require
    # AUTH (or the HELLO command AUTH option) in order to be authenticated and
    # start to work.
    #
    # The ACL rules that describe what a user can do are the following:
    #
    #  on           Enable the user: it is possible to authenticate as this user.
    #  off          Disable the user: it's no longer possible to authenticate
    #               with this user, however the already authenticated connections
    #               will still work.
    #  skip-sanitize-payload    RESTORE dump-payload sanitation is skipped.
    #  sanitize-payload         RESTORE dump-payload is sanitized (default).
    #  +<command>   Allow the execution of that command
    #  -<command>   Disallow the execution of that command
#  +@<category> Allow the execution of all the commands in such category,
#               with valid categories being @admin, @set, @sortedset, ...
#               and so forth; see the full list in the server.c file where
#               the Redis command table is described and defined.
#               The special category @all means all the commands, both those
#               currently present in the server and those that will be loaded
#               in the future via modules.
    #  +<command>|subcommand    Allow a specific subcommand of an otherwise
    #                           disabled command. Note that this form is not
    #                           allowed as negative like -DEBUG|SEGFAULT, but
    #                           only additive starting with "+".
    #  allcommands  Alias for +@all. Note that it implies the ability to execute
    #               all the future commands loaded via the modules system.
    #  nocommands   Alias for -@all.
    #  ~<pattern>   Add a pattern of keys that can be mentioned as part of
    #               commands. For instance ~* allows all the keys. The pattern
    #               is a glob-style pattern like the one of KEYS.
    #               It is possible to specify multiple patterns.
    #  allkeys      Alias for ~*
    #  resetkeys    Flush the list of allowed keys patterns.
    #  &<pattern>   Add a glob-style pattern of Pub/Sub channels that can be
    #               accessed by the user. It is possible to specify multiple channel
    #               patterns.
    #  allchannels  Alias for &*
    #  resetchannels            Flush the list of allowed channel patterns.
#  ><password>  Add this password to the list of valid passwords for the user.
#               For example >mypass will add "mypass" to the list.
    #               This directive clears the "nopass" flag (see later).
    #  <<password>  Remove this password from the list of valid passwords.
    #  nopass       All the set passwords of the user are removed, and the user
    #               is flagged as requiring no password: it means that every
    #               password will work against this user. If this directive is
    #               used for the default user, every new connection will be
    #               immediately authenticated with the default user without
    #               any explicit AUTH command required. Note that the "resetpass"
    #               directive will clear this condition.
    #  resetpass    Flush the list of allowed passwords. Moreover removes the
    #               "nopass" status. After "resetpass" the user has no associated
    #               passwords and there is no way to authenticate without adding
    #               some password (or setting it as "nopass" later).
    #  reset        Performs the following actions: resetpass, resetkeys, off,
    #               -@all. The user returns to the same state it has immediately
    #               after its creation.
    #
    # ACL rules can be specified in any order: for instance you can start with
    # passwords, then flags, or key patterns. However note that the additive
    # and subtractive rules will CHANGE MEANING depending on the ordering.
    # For instance see the following example:
    #
    #   user alice on +@all -DEBUG ~* >somepassword
    #
    # This will allow "alice" to use all the commands with the exception of the
    # DEBUG command, since +@all added all the commands to the set of the commands
    # alice can use, and later DEBUG was removed. However if we invert the order
    # of two ACL rules the result will be different:
    #
    #   user alice on -DEBUG +@all ~* >somepassword
    #
# Now DEBUG was removed when alice had no commands yet in the set of allowed
# commands, later all the commands are added, so the user will be able to
# execute everything.
    #
    # Basically ACL rules are processed left-to-right.
    #
    # For more information about ACL configuration please refer to
    # the Redis web site at https://redis.io/topics/acl
    
    # ACL LOG
    #
    # The ACL Log tracks failed commands and authentication events associated
    # with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
    # by ACLs. The ACL Log is stored in memory. You can reclaim memory with
    # ACL LOG RESET. Define the maximum entry length of the ACL Log below.
    acllog-max-len 128
    
    # Using an external ACL file
    #
    # Instead of configuring users here in this file, it is possible to use
    # a stand-alone file just listing users. The two methods cannot be mixed:
    # if you configure users here and at the same time you activate the external
    # ACL file, the server will refuse to start.
    #
    # The format of the external ACL user file is exactly the same as the
    # format that is used inside redis.conf to describe users.
    #
    # aclfile /etc/redis/users.acl
    
    # IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
    # layer on top of the new ACL system. The option effect will be just setting
    # the password for the default user. Clients will still authenticate using
# AUTH <password> as usual, or more explicitly with AUTH default <password>
    # if they follow the new protocol: both will work.
    #
# requirepass is not compatible with the aclfile option and the ACL LOAD
# command; these will cause requirepass to be ignored.
    #
    requirepass "{{ redis_password }}"
    
    # New users are initialized with restrictive permissions by default, via the
    # equivalent of this ACL rule 'off resetkeys -@all'. Starting with Redis 6.2, it
    # is possible to manage access to Pub/Sub channels with ACL rules as well. The
# default Pub/Sub channels permission for new users is controlled by the
    # acl-pubsub-default configuration directive, which accepts one of these values:
    #
    # allchannels: grants access to all Pub/Sub channels
    # resetchannels: revokes access to all Pub/Sub channels
    #
    # To ensure backward compatibility while upgrading Redis 6.0, acl-pubsub-default
    # defaults to the 'allchannels' permission.
    #
    # Future compatibility note: it is very likely that in a future version of Redis
    # the directive's default of 'allchannels' will be changed to 'resetchannels' in
    # order to provide better out-of-the-box Pub/Sub security. Therefore, it is
    # recommended that you explicitly define Pub/Sub permissions for all users
# rather than rely on implicit default values. Once you've set explicit
# Pub/Sub permissions for all existing users, you should uncomment the
# following line.
    #
    # acl-pubsub-default resetchannels
    
    # Command renaming (DEPRECATED).
    #
    # ------------------------------------------------------------------------
    # WARNING: avoid using this option if possible. Instead use ACLs to remove
    # commands from the default user, and put them only in some admin user you
    # create for administrative purposes.
    # ------------------------------------------------------------------------
    #
    # It is possible to change the name of dangerous commands in a shared
    # environment. For instance the CONFIG command may be renamed into something
    # hard to guess so that it will still be available for internal-use tools
    # but not available for general clients.
    #
    # Example:
    #
    # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
    #
    # It is also possible to completely kill a command by renaming it into
    # an empty string:
    #
    # rename-command CONFIG ""
    #
    # Please note that changing the name of commands that are logged into the
    # AOF file or transmitted to replicas may cause problems.
    
    ################################### CLIENTS ####################################
    
    # Set the max number of connected clients at the same time. By default
    # this limit is set to 10000 clients, however if the Redis server is not
    # able to configure the process file limit to allow for the specified limit
    # the max number of allowed clients is set to the current file limit
    # minus 32 (as Redis reserves a few file descriptors for internal uses).
    #
    # Once the limit is reached Redis will close all the new connections sending
    # an error 'max number of clients reached'.
    #
    # IMPORTANT: When Redis Cluster is used, the max number of connections is also
    # shared with the cluster bus: every node in the cluster will use two
    # connections, one incoming and another outgoing. It is important to size the
    # limit accordingly in case of very large clusters.
    #
    # maxclients 10000
    
    ############################## MEMORY MANAGEMENT ################################
    
    # Set a memory usage limit to the specified amount of bytes.
    # When the memory limit is reached Redis will try to remove keys
    # according to the eviction policy selected (see maxmemory-policy).
    #
    # If Redis can't remove keys according to the policy, or if the policy is
    # set to 'noeviction', Redis will start to reply with errors to commands
    # that would use more memory, like SET, LPUSH, and so on, and will continue
    # to reply to read-only commands like GET.
    #
    # This option is usually useful when using Redis as an LRU or LFU cache, or to
    # set a hard memory limit for an instance (using the 'noeviction' policy).
    #
    # WARNING: If you have replicas attached to an instance with maxmemory on,
    # the size of the output buffers needed to feed the replicas are subtracted
    # from the used memory count, so that network problems / resyncs will
    # not trigger a loop where keys are evicted, and in turn the output
    # buffer of replicas is full with DELs of keys evicted triggering the deletion
    # of more keys, and so forth until the database is completely emptied.
    #
    # In short... if you have replicas attached it is suggested that you set a lower
    # limit for maxmemory so that there is some free RAM on the system for replica
    # output buffers (but this is not needed if the policy is 'noeviction').
    #
    maxmemory 4gb
    
    # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
    # is reached. You can select one from the following behaviors:
    #
    # volatile-lru -> Evict using approximated LRU, only keys with an expire set.
    # allkeys-lru -> Evict any key using approximated LRU.
    # volatile-lfu -> Evict using approximated LFU, only keys with an expire set.
    # allkeys-lfu -> Evict any key using approximated LFU.
    # volatile-random -> Remove a random key having an expire set.
    # allkeys-random -> Remove a random key, any key.
    # volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
    # noeviction -> Don't evict anything, just return an error on write operations.
    #
    # LRU means Least Recently Used
    # LFU means Least Frequently Used
    #
    # Both LRU, LFU and volatile-ttl are implemented using approximated
    # randomized algorithms.
    #
    # Note: with any of the above policies, when there are no suitable keys for
    # eviction, Redis will return an error on write operations that require
    # more memory. These are usually commands that create new keys, add data or
    # modify existing keys. A few examples are: SET, INCR, HSET, LPUSH, SUNIONSTORE,
    # SORT (due to the STORE argument), and EXEC (if the transaction includes any
    # command that requires memory).
    #
    # The default is:
    #
    # maxmemory-policy noeviction
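#
# Since this template sets "maxmemory 4gb" above while keeping the default
# noeviction policy, writes will start failing with errors once the limit is
# reached. If this instance were used as a pure cache, an eviction policy
# could be enabled instead (a deployment choice, not part of the stock file):
#
# maxmemory-policy allkeys-lru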
    
    # LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
    # algorithms (in order to save memory), so you can tune it for speed or
    # accuracy. By default Redis will check five keys and pick the one that was
    # used least recently, you can change the sample size using the following
    # configuration directive.
    #
# The default of 5 produces good enough results. 10 approximates true LRU
# very closely but costs more CPU. 3 is faster but not very accurate.
    #
    # maxmemory-samples 5
    
# Eviction processing is designed to function well with the default setting.
# If there is an unusually large amount of write traffic, this value may need to
# be increased. Decreasing this value may reduce latency at the risk of
# eviction processing effectiveness.
    #   0 = minimum latency, 10 = default, 100 = process without regard to latency
    #
    # maxmemory-eviction-tenacity 10
    
    # Starting from Redis 5, by default a replica will ignore its maxmemory setting
    # (unless it is promoted to master after a failover or manually). It means
    # that the eviction of keys will be just handled by the master, sending the
    # DEL commands to the replica as keys evict in the master side.
    #
    # This behavior ensures that masters and replicas stay consistent, and is usually
    # what you want, however if your replica is writable, or you want the replica
    # to have a different memory setting, and you are sure all the writes performed
    # to the replica are idempotent, then you may change this default (but be sure
    # to understand what you are doing).
    #
# Note that since the replica by default does not evict, it may end up using
# more memory than the amount set via maxmemory (there are certain buffers that may
    # be larger on the replica, or data structures may sometimes take more memory
    # and so forth). So make sure you monitor your replicas and make sure they
    # have enough memory to never hit a real out-of-memory condition before the
    # master hits the configured maxmemory setting.
    #
    # replica-ignore-maxmemory yes
    
# Redis reclaims expired keys in two ways: upon access when those keys are
# found to be expired, and also in background, in what is called the
# "active" expire cycle. The key space is slowly and incrementally scanned
    # looking for expired keys to reclaim, so that it is possible to free memory
    # of keys that are expired and will never be accessed again in a short time.
    #
# The default effort of the expire cycle will try to avoid having more than
# ten percent of expired keys still in memory, and will try to avoid consuming
# more than 25% of total memory and adding latency to the system. However
    # it is possible to increase the expire "effort" that is normally set to
    # "1", to a greater value, up to the value "10". At its maximum value the
    # system will use more CPU, longer cycles (and technically may introduce
    # more latency), and will tolerate less already expired keys still present
    # in the system. It's a tradeoff between memory, CPU and latency.
    #
    # active-expire-effort 1
    
    ############################# LAZY FREEING ####################################
    
    # Redis has two primitives to delete keys. One is called DEL and is a blocking
    # deletion of the object. It means that the server stops processing new commands
    # in order to reclaim all the memory associated with an object in a synchronous
    # way. If the key deleted is associated with a small object, the time needed
    # in order to execute the DEL command is very small and comparable to most other
    # O(1) or O(log_N) commands in Redis. However if the key is associated with an
    # aggregated value containing millions of elements, the server can block for
    # a long time (even seconds) in order to complete the operation.
    #
    # For the above reasons Redis also offers non blocking deletion primitives
    # such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
    # FLUSHDB commands, in order to reclaim memory in background. Those commands
    # are executed in constant time. Another thread will incrementally free the
    # object in the background as fast as possible.
    #
    # DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
    # It's up to the design of the application to understand when it is a good
    # idea to use one or the other. However the Redis server sometimes has to
    # delete keys or flush the whole database as a side effect of other operations.
    # Specifically Redis deletes objects independently of a user call in the
    # following scenarios:
    #
    # 1) On eviction, because of the maxmemory and maxmemory policy configurations,
    #    in order to make room for new data, without going over the specified
    #    memory limit.
    # 2) Because of expire: when a key with an associated time to live (see the
    #    EXPIRE command) must be deleted from memory.
    # 3) Because of a side effect of a command that stores data on a key that may
    #    already exist. For example the RENAME command may delete the old key
    #    content when it is replaced with another one. Similarly SUNIONSTORE
    #    or SORT with STORE option may delete existing keys. The SET command
    #    itself removes any old content of the specified key in order to replace
    #    it with the specified string.
    # 4) During replication, when a replica performs a full resynchronization with
    #    its master, the content of the whole database is removed in order to
    #    load the RDB file just transferred.
    #
    # In all the above cases the default is to delete objects in a blocking way,
    # like if DEL was called. However you can configure each case specifically
    # in order to instead release memory in a non-blocking way like if UNLINK
    # was called, using the following configuration directives.
    
    lazyfree-lazy-eviction no
    lazyfree-lazy-expire no
    lazyfree-lazy-server-del no
    replica-lazy-flush no
    
# It is also possible, for cases where replacing the user code's DEL calls
# with UNLINK calls is not easy, to modify the default behavior of the DEL
# command to act exactly like UNLINK, using the following configuration
# directive:
    
    lazyfree-lazy-user-del no
    
    # FLUSHDB, FLUSHALL, and SCRIPT FLUSH support both asynchronous and synchronous
    # deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the
    # commands. When neither flag is passed, this directive will be used to determine
    # if the data should be deleted asynchronously.
    
    lazyfree-lazy-user-flush no
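
# For example (illustrative, not part of the stock file): with the settings
# above left at "no", a client can still free a big key without blocking the
# server by issuing "UNLINK mybigkey", or empty a database in the background
# with "FLUSHDB ASYNC".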
    
    ################################ THREADED I/O #################################
    
    # Redis is mostly single threaded, however there are certain threaded
    # operations such as UNLINK, slow I/O accesses and other things that are
    # performed on side threads.
    #
    # Now it is also possible to handle Redis clients socket reads and writes
# in different I/O threads. Since writing in particular is slow, normally
# Redis users use pipelining in order to speed up Redis performance per
# core, and spawn multiple instances in order to scale more. Using I/O
# threads it is possible to easily speed up Redis twofold without resorting
# to pipelining nor sharding of the instance.
    #
    # By default threading is disabled, we suggest enabling it only in machines
    # that have at least 4 or more cores, leaving at least one spare core.
    # Using more than 8 threads is unlikely to help much. We also recommend using
    # threaded I/O only if you actually have performance problems, with Redis
    # instances being able to use a quite big percentage of CPU time, otherwise
    # there is no point in using this feature.
    #
# So for instance, if you have a four-core box, try to use 2 or 3 I/O
# threads; if you have 8 cores, try to use 6 threads. In order to
    # enable I/O threads use the following configuration directive:
    #
    # io-threads 4
    #
    # Setting io-threads to 1 will just use the main thread as usual.
    # When I/O threads are enabled, we only use threads for writes, that is
    # to thread the write(2) syscall and transfer the client buffers to the
    # socket. However it is also possible to enable threading of reads and
    # protocol parsing using the following configuration directive, by setting
    # it to yes:
    #
    # io-threads-do-reads no
    #
    # Usually threading reads doesn't help much.
    #
# NOTE 1: This configuration directive cannot be changed at runtime via
# CONFIG SET. Also, this feature currently does not work when SSL is
# enabled.
    #
    # NOTE 2: If you want to test the Redis speedup using redis-benchmark, make
    # sure you also run the benchmark itself in threaded mode, using the
    # --threads option to match the number of Redis threads, otherwise you'll not
    # be able to notice the improvements.
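#
# For example (illustrative, not part of the stock file), with "io-threads 4"
# enabled, a matching benchmark run could be:
#
#   redis-benchmark --threads 4 -t set,get -n 1000000 -a <password>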
    
    ############################ KERNEL OOM CONTROL ##############################
    
    # On Linux, it is possible to hint the kernel OOM killer on what processes
    # should be killed first when out of memory.
    #
    # Enabling this feature makes Redis actively control the oom_score_adj value
    # for all its processes, depending on their role. The default scores will
    # attempt to have background child processes killed before all others, and
    # replicas killed before masters.
    #
# Redis supports the following options:
    #
    # no:       Don't make changes to oom-score-adj (default).
    # yes:      Alias to "relative" see below.
    # absolute: Values in oom-score-adj-values are written as is to the kernel.
    # relative: Values are used relative to the initial value of oom_score_adj when
    #           the server starts and are then clamped to a range of -1000 to 1000.
    #           Because typically the initial value is 0, they will often match the
    #           absolute values.
    oom-score-adj no
    
    # When oom-score-adj is used, this directive controls the specific values used
    # for master, replica and background child processes. Values range -2000 to
    # 2000 (higher means more likely to be killed).
    #
    # Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities)
    # can freely increase their value, but not decrease it below its initial
    # settings. This means that setting oom-score-adj to "relative" and setting the
    # oom-score-adj-values to positive values will always succeed.
    oom-score-adj-values 0 200 800
    
    #################### KERNEL transparent hugepage CONTROL ######################
    
    # Usually the kernel Transparent Huge Pages control is set to "madvise"
    # or "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which
    # case this config has no effect. On systems in which it is set to "always",
    # redis will attempt to disable it specifically for the redis process in order
    # to avoid latency problems specifically with fork(2) and CoW.
    # If for some reason you prefer to keep it enabled, you can set this config to
    # "no" and the kernel global to "always".
    
    disable-thp yes
    
    ############################## APPEND ONLY MODE ###############################
    
    # By default Redis asynchronously dumps the dataset on disk. This mode is
    # good enough in many applications, but an issue with the Redis process or
    # a power outage may result into a few minutes of writes lost (depending on
    # the configured save points).
    #
    # The Append Only File is an alternative persistence mode that provides
    # much better durability. For instance using the default data fsync policy
    # (see later in the config file) Redis can lose just one second of writes in a
    # dramatic event like a server power outage, or a single write if something
    # wrong with the Redis process itself happens, but the operating system is
    # still running correctly.
    #
    # AOF and RDB persistence can be enabled at the same time without problems.
    # If the AOF is enabled on startup Redis will load the AOF, that is the file
    # with the better durability guarantees.
    #
    # Please check http://redis.io/topics/persistence for more information.
    
    appendonly no
    
    # The name of the append only file (default: "appendonly.aof")
    
    appendfilename "appendonly.aof"
    
    # The fsync() call tells the Operating System to actually write data on disk
    # instead of waiting for more data in the output buffer. Some OSes will really
    # flush data to disk; some other OSes will just try to do it ASAP.
    #
    # Redis supports three different modes:
    #
    # no: don't fsync, just let the OS flush the data when it wants. Faster.
    # always: fsync after every write to the append only log. Slow, Safest.
    # everysec: fsync only one time every second. Compromise.
    #
    # The default is "everysec", as that's usually the right compromise between
    # speed and data safety. It's up to you to understand if you can relax this to
    # "no" that will let the operating system flush the output buffer when
    # it wants, for better performances (but if you can live with the idea of
    # some data loss consider the default persistence mode that's snapshotting),
    # or on the contrary, use "always" that's very slow but a bit safer than
    # everysec.
    #
    # More details please check the following article:
    # http://antirez.com/post/redis-persistence-demystified.html
    #
    # If unsure, use "everysec".
    
    # appendfsync always
    appendfsync everysec
    # appendfsync no
    
    # When the AOF fsync policy is set to always or everysec, and a background
    # saving process (a background save or AOF log background rewriting) is
    # performing a lot of I/O against the disk, in some Linux configurations
    # Redis may block too long on the fsync() call. Note that there is no fix for
    # this currently, as even performing fsync in a different thread will block
    # our synchronous write(2) call.
    #
    # In order to mitigate this problem it's possible to use the following option
    # that will prevent fsync() from being called in the main process while a
    # BGSAVE or BGREWRITEAOF is in progress.
    #
    # This means that while another child is saving, the durability of Redis is
    # the same as "appendfsync none". In practical terms, this means that it is
    # possible to lose up to 30 seconds of log in the worst scenario (with the
    # default Linux settings).
    #
    # If you have latency problems turn this to "yes". Otherwise leave it as
    # "no" that is the safest pick from the point of view of durability.
    
    no-appendfsync-on-rewrite no
    
    # Automatic rewrite of the append only file.
    # Redis is able to automatically rewrite the log file implicitly calling
    # BGREWRITEAOF when the AOF log size grows by the specified percentage.
    #
    # This is how it works: Redis remembers the size of the AOF file after the
    # latest rewrite (if no rewrite has happened since the restart, the size of
    # the AOF at startup is used).
    #
    # This base size is compared to the current size. If the current size is
    # bigger than the specified percentage, the rewrite is triggered. Also
    # you need to specify a minimal size for the AOF file to be rewritten, this
    # is useful to avoid rewriting the AOF file even if the percentage increase
    # is reached but it is still pretty small.
    #
    # Specify a percentage of zero in order to disable the automatic AOF
    # rewrite feature.
    
    auto-aof-rewrite-percentage 100
    auto-aof-rewrite-min-size 64mb
    
    # An AOF file may be found to be truncated at the end during the Redis
    # startup process, when the AOF data gets loaded back into memory.
    # This may happen when the system where Redis is running
    # crashes, especially when an ext4 filesystem is mounted without the
    # data=ordered option (however this can't happen when Redis itself
    # crashes or aborts but the operating system still works correctly).
    #
    # Redis can either exit with an error when this happens, or load as much
    # data as possible (the default now) and start if the AOF file is found
    # to be truncated at the end. The following option controls this behavior.
    #
    # If aof-load-truncated is set to yes, a truncated AOF file is loaded and
    # the Redis server starts emitting a log to inform the user of the event.
    # Otherwise if the option is set to no, the server aborts with an error
    # and refuses to start. When the option is set to no, the user is required
    # to fix the AOF file using the "redis-check-aof" utility before restarting
    # the server.
    #
    # Note that if the AOF file is found to be corrupted in the middle,
    # the server will still exit with an error. This option only applies when
    # Redis tries to read more data from the AOF file but not enough bytes
    # are found.
    aof-load-truncated yes
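    #
    # Example (hypothetical file name, assuming the default appendfilename and
    # working directory): with aof-load-truncated set to no, a truncated AOF
    # must be repaired manually before restarting the server:
    #
    #   redis-check-aof --fix appendonly.aof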
    
    # When rewriting the AOF file, Redis is able to use an RDB preamble in the
    # AOF file for faster rewrites and recoveries. When this option is turned
    # on the rewritten AOF file is composed of two different stanzas:
    #
    #   [RDB file][AOF tail]
    #
    # When loading, Redis recognizes that the AOF file starts with the "REDIS"
    # string and loads the prefixed RDB file, then continues loading the AOF
    # tail.
    aof-use-rdb-preamble yes
    
    ################################ LUA SCRIPTING  ###############################
    
    # Max execution time of a Lua script in milliseconds.
    #
    # If the maximum execution time is reached Redis will log that a script is
    # still in execution after the maximum allowed time and will start to
    # reply to queries with an error.
    #
    # When a long running script exceeds the maximum execution time only the
    # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
    # used to stop a script that did not yet call any write commands. The second
    # is the only way to shut down the server in the case a write command was
    # already issued by the script but the user doesn't want to wait for the natural
    # termination of the script.
    #
    # Set it to 0 or a negative value for unlimited execution without warnings.
    lua-time-limit 5000
    
    ################################ REDIS CLUSTER  ###############################
    
    # Normal Redis instances can't be part of a Redis Cluster; only nodes that are
    # started as cluster nodes can. In order to start a Redis instance as a
    # cluster node enable the cluster support uncommenting the following:
    #
    # cluster-enabled yes
    
    # Every cluster node has a cluster configuration file. This file is not
    # intended to be edited by hand. It is created and updated by Redis nodes.
    # Every Redis Cluster node requires a different cluster configuration file.
    # Make sure that instances running in the same system do not have
    # overlapping cluster configuration file names.
    #
    # cluster-config-file nodes-6379.conf
    
    # Cluster node timeout is the amount of milliseconds a node must be unreachable
    # for it to be considered in failure state.
    # Most other internal time limits are a multiple of the node timeout.
    #
    # cluster-node-timeout 15000
    
    # A replica of a failing master will avoid starting a failover if its data
    # looks too old.
    #
    # There is no simple way for a replica to actually have an exact measure of
    # its "data age", so the following two checks are performed:
    #
    # 1) If there are multiple replicas able to failover, they exchange messages
    #    in order to try to give an advantage to the replica with the best
    #    replication offset (more data from the master processed).
    #    Replicas will try to get their rank by offset, and apply to the start
    #    of the failover a delay proportional to their rank.
    #
    # 2) Every single replica computes the time of the last interaction with
    #    its master. This can be the last ping or command received (if the master
    #    is still in the "connected" state), or the time that elapsed since the
    #    disconnection with the master (if the replication link is currently down).
    #    If the last interaction is too old, the replica will not try to failover
    #    at all.
    #
    # The point "2" can be tuned by user. Specifically a replica will not perform
    # the failover if, since the last interaction with the master, the time
    # elapsed is greater than:
    #
    #   (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period
    #
    # So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor
    # is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
    # replica will not try to failover if it was not able to talk with the master
    # for longer than 310 seconds.
    #
    # A large cluster-replica-validity-factor may allow replicas with too old data to failover
    # a master, while a too small value may prevent the cluster from being able to
    # elect a replica at all.
    #
    # For maximum availability, it is possible to set the cluster-replica-validity-factor
    # to a value of 0, which means that replicas will always try to failover the
    # master regardless of the last time they interacted with the master.
    # (However they'll always try to apply a delay proportional to their
    # offset rank).
    #
    # Zero is the only value able to guarantee that when all the partitions heal
    # the cluster will always be able to continue.
    #
    # cluster-replica-validity-factor 10
    
    # Cluster replicas are able to migrate to orphaned masters, that are masters
    # that are left without working replicas. This improves the cluster's ability
    # to resist failures, as otherwise an orphaned master can't be failed over
    # in case of failure if it has no working replicas.
    #
    # Replicas migrate to orphaned masters only if there are still at least a
    # given number of other working replicas for their old master. This number
    # is the "migration barrier". A migration barrier of 1 means that a replica
    # will migrate only if there is at least 1 other working replica for its master
    # and so forth. It usually reflects the number of replicas you want for every
    # master in your cluster.
    #
    # Default is 1 (replicas migrate only if their masters remain with at least
    # one replica). To disable migration just set it to a very large value.
    # A value of 0 can be set but is useful only for debugging and dangerous
    # in production.
    #
    # cluster-migration-barrier 1
    
    # By default Redis Cluster nodes stop accepting queries if they detect there
    # is at least one hash slot uncovered (no available node is serving it).
    # This way if the cluster is partially down (for example a range of hash slots
    # are no longer covered) all the cluster becomes, eventually, unavailable.
    # It automatically returns available as soon as all the slots are covered again.
    #
    # However sometimes you want the subset of the cluster which is working,
    # to continue to accept queries for the part of the key space that is still
    # covered. In order to do so, just set the cluster-require-full-coverage
    # option to no.
    #
    # cluster-require-full-coverage yes
    
    # This option, when set to yes, prevents replicas from trying to failover their
    # master during master failures. However the replica can still perform a
    # manual failover, if forced to do so.
    #
    # This is useful in different scenarios, especially in the case of multiple
    # data center operations, where we want one side to never be promoted if not
    # in the case of a total DC failure.
    #
    # cluster-replica-no-failover no
    
    # This option, when set to yes, allows nodes to serve read traffic while the
    # cluster is in a down state, as long as it believes it owns the slots.
    #
    # This is useful for two cases.  The first case is for when an application
    # doesn't require consistency of data during node failures or network partitions.
    # One example of this is a cache, where as long as the node has the data it
    # should be able to serve it.
    #
    # The second use case is for configurations that don't meet the recommended
    # three shards but want to enable cluster mode and scale later. A
    # master outage in a 1 or 2 shard configuration causes a read/write outage for the
    # entire cluster without this option set; with it set, there is only a write outage.
    # Without a quorum of masters, slot ownership will not change automatically.
    #
    # cluster-allow-reads-when-down no
    
    # In order to setup your cluster make sure to read the documentation
    # available at http://redis.io web site.
    
    ########################## CLUSTER DOCKER/NAT support  ########################
    
    # In certain deployments, Redis Cluster nodes address discovery fails, because
    # addresses are NAT-ted or because ports are forwarded (the typical case is
    # Docker and other containers).
    #
    # In order to make Redis Cluster work in such environments, a static
    # configuration where each node knows its public address is needed. The
    # following two options are used for this scope, and are:
    #
    # * cluster-announce-ip
    # * cluster-announce-port
    # * cluster-announce-bus-port
    #
    # Each instructs the node about its address, client port, and cluster message
    # bus port. The information is then published in the header of the bus packets
    # so that other nodes will be able to correctly map the address of the node
    # publishing the information.
    #
    # If the above options are not used, the normal Redis Cluster auto-detection
    # will be used instead.
    #
    # Note that when remapped, the bus port may not be at the fixed offset of
    # clients port + 10000, so you can specify any port and bus-port depending
    # on how they get remapped. If the bus-port is not set, a fixed offset of
    # 10000 will be used as usual.
    #
    # Example:
    #
    # cluster-announce-ip 10.1.1.5
    # cluster-announce-port 6379
    # cluster-announce-bus-port 6380
    
    ################################## SLOW LOG ###################################
    
    # The Redis Slow Log is a system to log queries that exceeded a specified
    # execution time. The execution time does not include the I/O operations
    # like talking with the client, sending the reply and so forth,
    # but just the time needed to actually execute the command (this is the only
    # stage of command execution where the thread is blocked and can not serve
    # other requests in the meantime).
    #
    # You can configure the slow log with two parameters: one tells Redis
    # what is the execution time, in microseconds, to exceed in order for the
    # command to get logged, and the other parameter is the length of the
    # slow log. When a new command is logged the oldest one is removed from the
    # queue of logged commands.
    
    # The following time is expressed in microseconds, so 1000000 is equivalent
    # to one second. Note that a negative number disables the slow log, while
    # a value of zero forces the logging of every command.
    slowlog-log-slower-than 10000
    
    # There is no limit to this length. Just be aware that it will consume memory.
    # You can reclaim memory used by the slow log with SLOWLOG RESET.
    slowlog-max-len 128
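    #
    # Example (runtime inspection via redis-cli, assuming a local instance):
    #
    #   redis-cli SLOWLOG GET 10     # show the ten most recent slow entries
    #   redis-cli SLOWLOG RESET      # discard the entries and reclaim their memory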
    
    ################################ LATENCY MONITOR ##############################
    
    # The Redis latency monitoring subsystem samples different operations
    # at runtime in order to collect data related to possible sources of
    # latency of a Redis instance.
    #
    # Via the LATENCY command this information is available to the user that can
    # print graphs and obtain reports.
    #
    # The system only logs operations that were performed in a time equal or
    # greater than the amount of milliseconds specified via the
    # latency-monitor-threshold configuration directive. When its value is set
    # to zero, the latency monitor is turned off.
    #
    # By default latency monitoring is disabled since it is mostly not needed
    # if you don't have latency issues, and collecting data has a performance
    # impact, that while very small, can be measured under big load. Latency
    # monitoring can easily be enabled at runtime using the command
    # "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
    latency-monitor-threshold 0
    
    ############################# EVENT NOTIFICATION ##############################
    
    # Redis can notify Pub/Sub clients about events happening in the key space.
    # This feature is documented at http://redis.io/topics/notifications
    #
    # For instance if keyspace events notification is enabled, and a client
    # performs a DEL operation on key "foo" stored in the Database 0, two
    # messages will be published via Pub/Sub:
    #
    # PUBLISH __keyspace@0__:foo del
    # PUBLISH __keyevent@0__:del foo
    #
    # It is possible to select the events that Redis will notify among a set
    # of classes. Every class is identified by a single character:
    #
    #  K     Keyspace events, published with __keyspace@<db>__ prefix.
    #  E     Keyevent events, published with __keyevent@<db>__ prefix.
    #  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
    #  $     String commands
    #  l     List commands
    #  s     Set commands
    #  h     Hash commands
    #  z     Sorted set commands
    #  x     Expired events (events generated every time a key expires)
    #  e     Evicted events (events generated when a key is evicted for maxmemory)
    #  t     Stream commands
    #  m     Key-miss events (Note: It is not included in the 'A' class)
    #  A     Alias for g$lshzxet, so that the "AKE" string means all the events
    #        (Except key-miss events which are excluded from 'A' due to their
    #         unique nature).
    #
    #  The "notify-keyspace-events" takes as argument a string that is composed
    #  of zero or multiple characters. The empty string means that notifications
    #  are disabled.
    #
    #  Example: to enable list and generic events, from the point of view of the
    #           event name, use:
    #
    #  notify-keyspace-events Elg
    #
    #  Example 2: to get the stream of the expired keys subscribing to channel
    #             name __keyevent@0__:expired use:
    #
    #  notify-keyspace-events Ex
    #
    #  By default all notifications are disabled because most users don't need
    #  this feature and the feature has some overhead. Note that if you don't
    #  specify at least one of K or E, no events will be delivered.
    notify-keyspace-events ""
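    #
    #  Example (hypothetical, assuming "notify-keyspace-events Ex" is set):
    #  expired-key events on database 0 can then be observed with:
    #
    #    redis-cli psubscribe '__keyevent@0__:expired'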
    
    ############################### GOPHER SERVER #################################
    
    # Redis contains an implementation of the Gopher protocol, as specified in
    # the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt).
    #
    # The Gopher protocol was very popular in the late '90s. It is an alternative
    # to the web, and the implementation both server and client side is so simple
    # that the Redis server has just 100 lines of code in order to implement this
    # support.
    #
    # What do you do with Gopher nowadays? Well Gopher never *really* died, and
    # lately there is a movement to resurrect Gopher's more hierarchical content,
    # composed of just plain text documents. Some want a simpler
    # internet, others believe that the mainstream internet became too much
    # controlled, and it's cool to create an alternative space for people that
    # want a bit of fresh air.
    #
    # Anyway, for the 10th birthday of Redis, we gave it the Gopher protocol
    # as a gift.
    #
    # --- HOW IT WORKS? ---
    #
    # The Redis Gopher support uses the inline protocol of Redis, and specifically
    # two kinds of inline requests that were anyway illegal: an empty request
    # or any request that starts with "/" (there are no Redis commands starting
    # with such a slash). Normal RESP2/RESP3 requests are completely out of the
    # path of the Gopher protocol implementation and are served as usual as well.
    #
    # If you open a connection to Redis when Gopher is enabled and send it
    # a string like "/foo", if there is a key named "/foo" it is served via the
    # Gopher protocol.
    #
    # In order to create a real Gopher "hole" (the name of a Gopher site in Gopher
    # parlance), you likely need a script like the following:
    #
    #   https://github.com/antirez/gopher2redis
    #
    # --- SECURITY WARNING ---
    #
    # If you plan to put Redis on the internet in a publicly accessible address
    # to serve Gopher pages, MAKE SURE TO SET A PASSWORD for the instance.
    # Once a password is set:
    #
    #   1. The Gopher server (when enabled, not by default) will still serve
    #      content via Gopher.
    #   2. However other commands cannot be called before the client
    #      authenticates.
    #
    # So use the 'requirepass' option to protect your instance.
    #
    # Note that Gopher is not currently supported when 'io-threads-do-reads'
    # is enabled.
    #
    # To enable Gopher support, uncomment the following line and set the option
    # from no (the default) to yes.
    #
    # gopher-enabled no
    
    ############################### ADVANCED CONFIG ###############################
    
    # Hashes are encoded using a memory efficient data structure when they have a
    # small number of entries, and the biggest entry does not exceed a given
    # threshold. These thresholds can be configured using the following directives.
    hash-max-ziplist-entries 512
    hash-max-ziplist-value 64
    
    # Lists are also encoded in a special way to save a lot of space.
    # The number of entries allowed per internal list node can be specified
    # as a fixed maximum size or a maximum number of elements.
    # For a fixed maximum size, use -5 through -1, meaning:
    # -5: max size: 64 Kb  <-- not recommended for normal workloads
    # -4: max size: 32 Kb  <-- not recommended
    # -3: max size: 16 Kb  <-- probably not recommended
    # -2: max size: 8 Kb   <-- good
    # -1: max size: 4 Kb   <-- good
    # Positive numbers mean store up to _exactly_ that number of elements
    # per list node.
    # The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
    # but if your use case is unique, adjust the settings as necessary.
    list-max-ziplist-size -2
    
    # Lists may also be compressed.
    # Compress depth is the number of quicklist ziplist nodes from *each* side of
    # the list to *exclude* from compression.  The head and tail of the list
    # are always uncompressed for fast push/pop operations.  Settings are:
    # 0: disable all list compression
    # 1: depth 1 means "don't start compressing until after 1 node into the list,
    #    going from either the head or tail"
    #    So: [head]->node->node->...->node->[tail]
    #    [head], [tail] will always be uncompressed; inner nodes will compress.
    # 2: [head]->[next]->node->node->...->node->[prev]->[tail]
    #    2 here means: don't compress head or head->next or tail->prev or tail,
    #    but compress all nodes between them.
    # 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
    # etc.
    list-compress-depth 0
    
    # Sets have a special encoding in just one case: when a set is composed
    # of just strings that happen to be integers in radix 10 in the range
    # of 64 bit signed integers.
    # The following configuration setting sets the limit in the size of the
    # set in order to use this special memory saving encoding.
    set-max-intset-entries 512
    
    # Similarly to hashes and lists, sorted sets are also specially encoded in
    # order to save a lot of space. This encoding is only used when the length and
    # elements of a sorted set are below the following limits:
    zset-max-ziplist-entries 128
    zset-max-ziplist-value 64
    
    # HyperLogLog sparse representation bytes limit. The limit includes the
    # 16 bytes header. When a HyperLogLog using the sparse representation crosses
    # this limit, it is converted into the dense representation.
    #
    # A value greater than 16000 is totally useless, since at that point the
    # dense representation is more memory efficient.
    #
    # The suggested value is ~ 3000 in order to have the benefits of
    # the space efficient encoding without slowing down too much PFADD,
    # which is O(N) with the sparse encoding. The value can be raised to
    # ~ 10000 when CPU is not a concern, but space is, and the data set is
    # composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
    hll-sparse-max-bytes 3000
    
    # Streams macro node max size / items. The stream data structure is a radix
    # tree of big nodes that encode multiple items inside. Using this configuration
    # it is possible to configure how big a single node can be in bytes, and the
    # maximum number of items it may contain before switching to a new node when
    # appending new stream entries. If any of the following settings are set to
    # zero, the limit is ignored, so for instance it is possible to set just a
    # max entries limit by setting max-bytes to 0 and max-entries to the desired
    # value.
    stream-node-max-bytes 4kb
    stream-node-max-entries 100
    
    # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
    # order to help rehashing the main Redis hash table (the one mapping top-level
    # keys to values). The hash table implementation Redis uses (see dict.c)
    # performs a lazy rehashing: the more operations you run against a hash table
    # that is rehashing, the more rehashing "steps" are performed, so if the
    # server is idle the rehashing is never complete and some more memory is used
    # by the hash table.
    #
    # The default is to use this millisecond 10 times every second in order to
    # actively rehash the main dictionaries, freeing memory when possible.
    #
    # If unsure:
    # use "activerehashing no" if you have hard latency requirements and it is
    # not a good thing in your environment that Redis can reply from time to time
    # to queries with 2 milliseconds delay.
    #
    # use "activerehashing yes" if you don't have such hard requirements but
    # want to free memory asap when possible.
    activerehashing yes
    
    # The client output buffer limits can be used to force disconnection of clients
    # that are not reading data from the server fast enough for some reason (a
    # common reason is that a Pub/Sub client can't consume messages as fast as the
    # publisher can produce them).
    #
    # The limit can be set differently for the three different classes of clients:
    #
    # normal -> normal clients including MONITOR clients
    # replica  -> replica clients
    # pubsub -> clients subscribed to at least one pubsub channel or pattern
    #
    # The syntax of every client-output-buffer-limit directive is the following:
    #
    # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
    #
    # A client is immediately disconnected once the hard limit is reached, or if
    # the soft limit is reached and remains reached for the specified number of
    # seconds (continuously).
    # So for instance if the hard limit is 32 megabytes and the soft limit is
    # 16 megabytes / 10 seconds, the client will get disconnected immediately
    # if the size of the output buffers reach 32 megabytes, but will also get
    # disconnected if the client reaches 16 megabytes and continuously overcomes
    # the limit for 10 seconds.
    #
    # By default normal clients are not limited because they don't receive data
    # without asking (in a push way), but just after a request, so only
    # asynchronous clients may create a scenario where data is requested faster
    # than it can be read.
    #
    # Instead there is a default limit for pubsub and replica clients, since
    # subscribers and replicas receive data in a push fashion.
    #
    # Both the hard and the soft limit can be disabled by setting them to zero.
    client-output-buffer-limit normal 0 0 0
    client-output-buffer-limit replica 256mb 64mb 60
    client-output-buffer-limit pubsub 32mb 8mb 60
    
    # Client query buffers accumulate new commands. They are limited to a fixed
    # amount by default in order to avoid that a protocol desynchronization (for
    # instance due to a bug in the client) will lead to unbounded memory usage in
    # the query buffer. However you can configure it here if you have very special
    # needs, such as huge multi/exec requests or alike.
    #
    # client-query-buffer-limit 1gb
    
    # In the Redis protocol, bulk requests, that are, elements representing single
    # strings, are normally limited to 512 mb. However you can change this limit
    # here, but it must be 1mb or greater.
    #
    # proto-max-bulk-len 512mb
    
    # Redis calls an internal function to perform many background tasks, like
    # closing connections of clients in timeout, purging expired keys that are
    # never requested, and so forth.
    #
    # Not all tasks are performed with the same frequency, but Redis checks for
    # tasks to perform according to the specified "hz" value.
    #
    # By default "hz" is set to 10. Raising the value will use more CPU when
    # Redis is idle, but at the same time will make Redis more responsive when
    # there are many keys expiring at the same time, and timeouts may be
    # handled with more precision.
    #
    # The range is between 1 and 500, however a value over 100 is usually not
    # a good idea. Most users should use the default of 10 and raise this up to
    # 100 only in environments where very low latency is required.
    hz 10
    
    # Normally it is useful to have an HZ value which is proportional to the
    # number of clients connected. This is useful, for instance, to avoid
    # processing too many clients for each background task invocation, which
    # could cause latency spikes.
    #
    # Since the default HZ value by default is conservatively set to 10, Redis
    # offers, and enables by default, the ability to use an adaptive HZ value
    # which will temporarily rise when there are many connected clients.
    #
    # When dynamic HZ is enabled, the actual configured HZ will be used
    # as a baseline, but multiples of the configured HZ value will be actually
    # used as needed once more clients are connected. In this way an idle
    # instance will use very little CPU time while a busy instance will be
    # more responsive.
    dynamic-hz yes
    
    # When a child rewrites the AOF file, if the following option is enabled
    # the file will be fsync-ed every 32 MB of data generated. This is useful
    # in order to commit the file to the disk more incrementally and avoid
    # big latency spikes.
    aof-rewrite-incremental-fsync yes
    
    # When redis saves an RDB file, if the following option is enabled
    # the file will be fsync-ed every 32 MB of data generated. This is useful
    # in order to commit the file to the disk more incrementally and avoid
    # big latency spikes.
    rdb-save-incremental-fsync yes
    
    # Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
    # idea to start with the default settings and only change them after investigating
    # how to improve the performances and how the keys LFU change over time, which
    # is possible to inspect via the OBJECT FREQ command.
    #
    # There are two tunable parameters in the Redis LFU implementation: the
    # counter logarithm factor and the counter decay time. It is important to
    # understand what the two parameters mean before changing them.
    #
    # The LFU counter is just 8 bits per key, its maximum value is 255, so Redis
    # uses a probabilistic increment with logarithmic behavior. Given the value
    # of the old counter, when a key is accessed, the counter is incremented in
    # this way:
    #
    # 1. A random number R between 0 and 1 is extracted.
    # 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
    # 3. The counter is incremented only if R < P.
    #
    # The default lfu-log-factor is 10. This is a table of how the frequency
    # counter changes with a different number of accesses with different
    # logarithmic factors:
    #
    # +--------+------------+------------+------------+------------+------------+
    # | factor | 100 hits   | 1000 hits  | 100K hits  | 1M hits    | 10M hits   |
    # +--------+------------+------------+------------+------------+------------+
    # | 0      | 104        | 255        | 255        | 255        | 255        |
    # +--------+------------+------------+------------+------------+------------+
    # | 1      | 18         | 49         | 255        | 255        | 255        |
    # +--------+------------+------------+------------+------------+------------+
    # | 10     | 10         | 18         | 142        | 255        | 255        |
    # +--------+------------+------------+------------+------------+------------+
    # | 100    | 8          | 11         | 49         | 143        | 255        |
    # +--------+------------+------------+------------+------------+------------+
    #
    # NOTE: The above table was obtained by running the following commands:
    #
    #   redis-benchmark -n 1000000 incr foo
    #   redis-cli object freq foo
    #
    # NOTE 2: The counter initial value is 5 in order to give new objects a chance
    # to accumulate hits.
    #
    # The counter decay time is the time, in minutes, that must elapse in order
    # for the key counter to be divided by two (or decremented if it has a value
    # less than or equal to 10).
    #
    # The default value for the lfu-decay-time is 1. A special value of 0 means to
    # decay the counter every time it happens to be scanned.
    #
    # lfu-log-factor 10
    # lfu-decay-time 1
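    #
    # Example (runtime sketch, assuming an LFU eviction policy is in use;
    # "somekey" is a hypothetical key): the counter described above can be
    # inspected per key:
    #
    #   redis-cli config set maxmemory-policy allkeys-lfu
    #   redis-cli object freq somekey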
    
    ########################### ACTIVE DEFRAGMENTATION #######################
    #
    # What is active defragmentation?
    # -------------------------------
    #
    # Active (online) defragmentation allows a Redis server to compact the
    # spaces left between small allocations and deallocations of data in memory,
    # thus allowing memory to be reclaimed.
    #
    # Fragmentation is a natural process that happens with every allocator (but
    # less so with Jemalloc, fortunately) and certain workloads. Normally a server
    # restart is needed in order to lower the fragmentation, or at least to flush
    # away all the data and create it again. However thanks to this feature
    # implemented by Oran Agra for Redis 4.0 this process can happen at runtime
    # in a "hot" way, while the server is running.
    #
    # Basically when the fragmentation is over a certain level (see the
    # configuration options below) Redis will start to create new copies of the
    # values in contiguous memory regions by exploiting certain specific Jemalloc
    # features (in order to understand if an allocation is causing fragmentation
    # and to allocate it in a better place), and at the same time, will release the
    # old copies of the data. This process, repeated incrementally for all the
    # keys, will cause the fragmentation to drop back to normal values.
    #
    # Important things to understand:
    #
    # 1. This feature is disabled by default, and only works if you compiled Redis
    #    to use the copy of Jemalloc we ship with the source code of Redis.
    #    This is the default with Linux builds.
    #
    # 2. You never need to enable this feature if you don't have fragmentation
    #    issues.
    #
    # 3. Once you experience fragmentation, you can enable this feature when
    #    needed with the command "CONFIG SET activedefrag yes".
    #
    # The configuration parameters are able to fine tune the behavior of the
    # defragmentation process. If you are not sure about what they mean it is
    # a good idea to leave the defaults untouched.
    
    # Enable active defragmentation
    # activedefrag no
    
    # Minimum amount of fragmentation waste to start active defrag
    # active-defrag-ignore-bytes 100mb
    
    # Minimum percentage of fragmentation to start active defrag
    # active-defrag-threshold-lower 10
    
    # Maximum percentage of fragmentation at which we use maximum effort
    # active-defrag-threshold-upper 100
    
    # Minimal effort for defrag in CPU percentage, to be used when the lower
    # threshold is reached
    # active-defrag-cycle-min 1
    
    # Maximal effort for defrag in CPU percentage, to be used when the upper
    # threshold is reached
    # active-defrag-cycle-max 25
    
    # Maximum number of set/hash/zset/list fields that will be processed from
    # the main dictionary scan
    # active-defrag-max-scan-fields 1000
    
    # Jemalloc background thread for purging will be enabled by default
    jemalloc-bg-thread yes
    
    # It is possible to pin different threads and processes of Redis to specific
    # CPUs in your system, in order to maximize the performance of the server.
    # This is useful both in order to pin different Redis threads in different
    # CPUs, but also in order to make sure that multiple Redis instances running
    # in the same host will be pinned to different CPUs.
    #
    # Normally you can do this using the "taskset" command, however it is also
    # possible to do this via Redis configuration directly, both in Linux and FreeBSD.
    #
    # You can pin the server/IO threads, bio threads, aof rewrite child process, and
    # the bgsave child process. The syntax to specify the cpu list is the same as
    # the taskset command:
    #
    # Set redis server/io threads to cpu affinity 0,2,4,6:
    # server_cpulist 0-7:2
    #
    # Set bio threads to cpu affinity 1,3:
    # bio_cpulist 1,3
    #
    # Set aof rewrite child process to cpu affinity 8,9,10,11:
    # aof_rewrite_cpulist 8-11
    #
    # Set bgsave child process to cpu affinity 1,10,11
    # bgsave_cpulist 1,10-11
    
    # In some cases redis will emit warnings and even refuse to start if it detects
    # that the system is in a bad state. It is possible to suppress these warnings
    # with the following config, which takes a space-delimited list of warnings
    # to suppress.
    #
    # ignore-warnings ARM64-COW-BUG
    # Generated by CONFIG REWRITE
    maxclients 4064
    save 3600 1
    save 300 100
    save 60 10000
    user default on nopass sanitize-payload ~* &* +@all
    replicaof {{ groups["redis-master"][0] }} 6379
    templates/redis.conf.j2
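    A quick way to confirm that the templated replicaof directive took effect is to compare the replication role on both nodes. A minimal check, assuming the default port 6379 and redis-cli on the PATH (<master-ip> and <replica-ip> are placeholders):

    redis-cli -h <master-ip> -p 6379 info replication | grep ^role                              # expect role:master
    redis-cli -h <replica-ip> -p 6379 info replication | grep -E 'role|master_link_status'      # expect role:slave, master_link_status:up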

    Build a ZooKeeper cluster with ansible

    - name: Create zookeeper dir
      file: path="{{ taishi_dir }}/zookeeper" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: zookeeper
    
    - name: Copy zookeeper.tar to {{ groups["zookeeper"] }}
      copy: src=apache-zookeeper-3.6.1-bin.tar.gz dest=/tmp
      tags: zookeeper
    
    - name: Install zookeeper for  {{ groups["zookeeper"] }}
      unarchive: src="/tmp/apache-zookeeper-3.6.1-bin.tar.gz" dest="{{ taishi_dir }}/zookeeper"  copy=no mode=0755
      tags: zookeeper
    
    - name: "create zk_logDir"
      file: path="{{ zk_logDir }}" state=directory owner={{ taishi_user }} group={{ taishi_user }}  recurse=yes
      tags: zookeeper
    
    - name: "create zk_dataDir"
      file: path="{{ zk_dataDir }}" state=directory owner={{ taishi_user }} group={{ taishi_user }}  recurse=yes
      tags: zookeeper
    
    - name: "copy the zookeeper.conf"
      template: src=zoo.cfg.j2  dest="{{ taishi_dir }}/zookeeper/apache-zookeeper-3.6.1-bin/conf/zoo.cfg" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: zookeeper
    
    - name: "copy zookeeper myid.sh"
      template: src=myid.sh.j2  dest="{{ taishi_dir }}/zookeeper/myid.sh" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: zookeeper
    
    - name: sh myid.sh
      shell: "{{ taishi_dir }}/zookeeper/myid.sh"
      tags: zookeeper
    tasks/main.yml
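    These tasks can be run on their own via their tag; a minimal invocation, assuming the same hosts inventory and site.yml used for the other roles in this document:

    ansible-playbook -i hosts --tags zookeeper site.yml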
    # The number of milliseconds of each tick
    tickTime=2000
    # The number of ticks that the initial
    # synchronization phase can take
    initLimit=10
    # The number of ticks that can pass between
    # sending a request and getting an acknowledgement
    syncLimit=5
    # the directory where the snapshot is stored.
    # do not use /tmp for storage; /tmp here is just
    # an example. (This default is overridden by the templated dataDir below.)
    # dataDir=/tmp/zookeeper
    # the port at which the clients will connect
    clientPort=2181
    # the maximum number of client connections.
    # increase this if you need to handle more clients
    maxClientCnxns=0
    #
    # Be sure to read the maintenance section of the
    # administrator guide before turning on autopurge.
    #
    # http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
    #
    # The number of snapshots to retain in dataDir
    #autopurge.snapRetainCount=3
    # Purge task interval in hours
    # Set to "0" to disable auto purge feature
    #autopurge.purgeInterval=1
    
    ## Metrics Providers
    #
    # https://prometheus.io Metrics Exporter
    #metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider
    #metricsProvider.httpPort=7000
    #metricsProvider.exportJvmInfo=true
    
    dataDir={{ zk_dataDir }}
    dataLogDir={{ zk_logDir }}
    
    {% for host in groups['zookeeper'] %} 
    server.{{ host.split('.')| last }}={{ host }}:2888:3888
    {% endfor %}
    templates/zoo.cfg.j2
    #!/bin/bash
    
    myid={{ ansible_default_ipv4.address.split('.')| last }}
    echo $myid > {{ zk_dataDir }}/myid
    templates/myid.sh.j2
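    After the playbook finishes, each node's myid and quorum role can be checked; a sketch, assuming the paths used in the tasks above:

    cat {{ zk_dataDir }}/myid      # should print the last octet of the node's IP
    {{ taishi_dir }}/zookeeper/apache-zookeeper-3.6.1-bin/bin/zkServer.sh status   # Mode: leader or follower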


 Install a Kafka cluster with ansible

      1. Check the current Kafka version: go into the kafka/libs directory; the file names under libs contain the Kafka version.


     2. Kafka reports an error on startup.


     3. Modify the Kafka configuration file.

        In the Kafka configuration (server.properties), increasing the ZooKeeper connection timeout is enough to fix it. Add the following setting to server.properties: zookeeper.connection.timeout.ms=6000000


    4. The ansible code files.

    - name: Create kafka dir
      file: path="{{ taishi_dir }}/kafka" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: kafka
    
    - name: Copy kafka.tar to {{ groups["kafka"] }}
      copy: src=kafka-2.12-2.5.0.tar.gz dest=/tmp
      tags: kafka
    
    - name: Install kafka for {{ groups["kafka"] }}
      unarchive: src="/tmp/kafka-2.12-2.5.0.tar.gz" dest="{{ taishi_dir }}/kafka"  copy=no mode=0755
      tags: kafka
    
    - name: "create kafka_logdir"
      file: path="{{ kafka_logdir }}" state=directory owner={{ taishi_user }} group={{ taishi_user }}  recurse=yes
      tags: kafka
    
    - name: "copy the server.properties"
      template: src=server.properties.j2  dest="{{ taishi_dir }}/kafka/config/server.properties" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: kafka
    tasks/main.yml
    broker.id={{ ansible_default_ipv4.address.split('.')| last }}
    listeners=PLAINTEXT://{{ ansible_default_ipv4.address }}:9092
    num.network.threads=3
    num.io.threads=8
    socket.send.buffer.bytes=102400
    socket.receive.buffer.bytes=102400
    socket.request.max.bytes=104857600
    log.dirs={{ kafka_logdir }}
    num.partitions=1
    num.recovery.threads.per.data.dir=1
    log.retention.hours=96
    log.retention.bytes=-1
    log.segment.bytes=1073741824
    log.retention.check.interval.ms=300000
    zookeeper.connect={{ groups['zookeeper'] |  join(':2181,')  }}:2181
    zookeeper.connection.timeout.ms=6000000
    delete.topic.enable=true
    auto.create.topics.enable=true
    default.replication.factor=3
    templates/server.properties.j2
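    Once the brokers are up, a simple smoke test is to create and describe a replicated topic; a sketch, assuming the bin directory matches the layout the tasks above rely on and <broker-ip> is any broker:

    cd {{ taishi_dir }}/kafka/bin
    ./kafka-topics.sh --create --bootstrap-server <broker-ip>:9092 --replication-factor 3 --partitions 3 --topic smoke-test
    ./kafka-topics.sh --describe --bootstrap-server <broker-ip>:9092 --topic smoke-test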

    Install ansible offline

       Download the offline ansible installation package and run install.sh to complete the installation of ansible itself.


 Install Docker offline with ansible

     1. To output error logs from shell task execution, run the playbook with -vvv:  ansible-playbook -i hosts --tags monitor site.yml -vvv


    2. Directory structure


    - name: Create monitor dir
      file: path="{{ taishi_dir }}/monitor/setos" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: monitor
    
    - name: Copy setos to {{ groups["monitor"] | to_json }}
      copy: src=setos/ dest={{ taishi_dir }}/monitor/setos/ owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: monitor
    
    - name: "Copy the set_os.sh"
      template: src=set_os.sh  dest="{{ taishi_dir }}/monitor/setos/set_os.sh" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: monitor
    
    - name: shell setos to {{ groups["monitor"] | to_json }}
      shell: "{{ taishi_dir }}/monitor/setos/set_os.sh"
      ignore_errors: True
      tags: monitor
    
    - name: Copy docker to {{ groups["monitor"] | to_json }}
      copy: src=docker/ dest={{ taishi_dir }}/monitor/docker/ owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: monitor
    
    
    - name: unarchive docker.zip for {{ groups["monitor"] | to_json }}
      unarchive: src="{{ taishi_dir }}/monitor/docker/docker-v19.03.x.zip" dest="{{ monitor_dir }}"  copy=no mode=0755
      tags: monitor
    
    - name: install docker for {{ groups["monitor"] | to_json }}
      shell: "{{monitor_dir}}/docker-v19.03.x/install.sh {{monitor_dir}}/docker-v19.03.x/docker-19.03.15.tgz"
      ignore_errors: True
      tags: monitor
    
    - name: unarchive docker.compose.zip for {{ groups["monitor"] | to_json }}
      unarchive: src="{{ taishi_dir }}/monitor/docker/docker-compose-v1.26.2.zip" dest="{{ monitor_dir }}"  copy=no mode=0755
      tags: monitor
    
    - name: "Copy the install.sh"
      template: src=install.sh.j2  dest="{{ monitor_dir }}/docker-compose-v1.26.2/install.sh" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: monitor
    
    - name: install docker-compose for {{ groups["monitor"] | to_json }}
      shell: "{{ monitor_dir }}/docker-compose-v1.26.2/install.sh"
      ignore_errors: True
      tags: monitor
    
    - name: Copy promethues to {{ groups["monitor"] | to_json }}
      copy: src=promethues/ dest={{ taishi_dir }}/monitor/promethues/ owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: monitor
    
    - name: unarchive promethues-data.zip for {{ groups["monitor"] | to_json }}
      unarchive: src="{{ taishi_dir }}/monitor/promethues/promethues-data.zip" dest="{{ monitor_dir }}"  copy=no mode=0755
      tags: monitor
    
    - name: "Copy the install-promethues.sh"
      template: src=install-prometheus.sh.j2  dest="{{ monitor_dir }}/promethues/install-prometheus.sh" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: monitor
    
    - name: "Copy the getsupervisor.py"
      template: src=getsupervisorstatus.py  dest="{{ monitor_dir }}/promethues/getsupervisorstatus.py" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: monitor
    
    - name: "Copy the pshgateway.sh"
      template: src=Pushgateway.sh  dest="{{ monitor_dir }}/promethues/Pushgateway.sh" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: monitor
    
    - name: install prometheus for {{ groups["monitor"] | to_json }}
      shell: "{{ monitor_dir }}/promethues/install-prometheus.sh"
      ignore_errors: True
      tags: monitor
    tasks/main.yml
    #!/usr/bin/bash
    
    
    cp -rf {{monitor_dir}}/docker-compose-v1.26.2/docker-compose-Linux-x86_64-v1.26.2  /usr/local/bin/docker-compose
    chmod +x /usr/local/bin/docker-compose
    
    cp -rf {{monitor_dir}}/docker-compose-v1.26.2/docker-compose-Linux-x86_64-v1.26.2  /usr/bin/docker-compose
    chmod +x /usr/bin/docker-compose
    templates/docker-compose-install.j2
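    A quick post-install check that both binaries were installed and are usable; a minimal sketch:

    docker info | grep 'Server Version'
    docker-compose version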
    #!/bin/bash
    
    usage () {
        echo "USAGE: $0 [--local-ip-address xxx.xxx.xxx.xxx]"
        echo "  [-l|--local-ip-address ip_addr] local ip address."
        echo "  [-h|--help] Usage message"
    }
    
    
    set_hosts_info () {
        sed -i 's/127.0.0.1/'{{ groups["monitor"][0] }}'/g' {{monitor_dir}}/promethues/server/prometheus.yml
        rm -fr /usr/bin/node_exporter
        ln -s {{monitor_dir}}/promethues/node_exporter-1.0.1.linux-amd64/node_exporter /usr/bin/node_exporter
        chmod +x /usr/bin/node_exporter
    
        rm -fr /usr/bin/elasticsearch_exporter
        ln -s {{monitor_dir}}/promethues/elasticsearch_exporter-1.1.0.linux-amd64/elasticsearch_exporter /usr/bin/elasticsearch_exporter
        chmod +x /usr/bin/elasticsearch_exporter
    }
    
    set_hosts_info
    
    
    docker load -i {{taishi_dir}}/prometheus/prometheus*.tar.gz
    
    docker rm -f prometheus-v2.25.2
    docker run -d \
    --name=prometheus-v2.25.2 \
    -p 9090:9090 \
    -v {{monitor_dir}}/promethues/server/prometheus.yml:/etc/prometheus/prometheus.yml \
    -v {{monitor_dir}}/promethues/server/rules.yml:/etc/prometheus/rules.yml \
    prom/prometheus:v2.25.2 \
    --config.file=/etc/prometheus/prometheus.yml \
    --web.enable-lifecycle \
    --storage.tsdb.retention=10d \
    --web.enable-admin-api
    
    
    docker rm -f pushgateway-v1.4.0
    docker run -d \
    --name pushgateway-v1.4.0 \
    -p 9091:9091 \
    prom/pushgateway:v1.4.0
    
    docker rm -f grafana-v7.4.5
    docker run -d \
    --user $(id -u) \
    --name grafana-v7.4.5 \
    -p 3000:3000 \
    -v {{monitor_dir}}/promethues/grafana/conf/defaults.ini:/usr/share/grafana/conf/defaults.ini \
    -v {{monitor_dir}}/promethues/grafana/public/views/index.html:/usr/share/grafana/public/views/index.html \
    -v {{monitor_dir}}/promethues/grafana/data:/var/lib/grafana \
    grafana/grafana:7.4.5
    
    docker rm -f alertmanager-v0.21.0
    docker run -d \
    --name alertmanager-v0.21.0 \
    -p 9093:9093 \
    -v {{monitor_dir}}/promethues/alertmanager/alertmanager.yml:/etc/alertmanager/alertmanager.yml \
    prom/alertmanager:v0.21.0
    
    ps -ef|grep node_exporter|grep -v grep|awk  '{print "kill -9 " $2}' |sh
    sleep 2s
    nohup node_exporter > /dev/null 2>&1 &
    
    echo "---------------------setting-----------------------"
    sleep 60s
    
    update_prometheus_datasources()
    {
      cat <<EOF
    {
        "id": 1,
        "name": "Prometheus",
        "type": "prometheus",
        "access": "proxy",
        "url": "http://{{ groups['monitor'][0] }}:9090",
        "isDefault":true
    }
    EOF
    }
    
    curl -k -X PUT \
        -H "Authorization: Bearer eyJrIjoibkFicXhkdndJeUI4eENvM3JNZ3JvcFNZWTBuVmEwZkIiLCJuIjoiY3JlYXRlIiwiaWQiOjF9" \
        -H "Content-Type: application/json" \
        -d "$(update_prometheus_datasources)" http://{{ groups["monitor"][0] }}:3000/api/datasources/1
    
    
    
    push_supervisor_status_cron () {
        chmod +x  {{monitor_dir}}/promethues/getsupervisorstatus.py
        sed -i 's/127.0.0.1/'{{ groups["monitor"][0]  }}'/g' {{monitor_dir}}/promethues/getsupervisorstatus.py
    
    
        chmod +x {{monitor_dir}}/promethues/Pushgateway.sh
    
        python {{monitor_dir}}/promethues/getsupervisorstatus.py > /dev/null 2>&1
        sed -i '/getsupervisorstatus.py/d' /var/spool/cron/root
        rm -fr crontab_info
        crontab -l > crontab_info
        echo "* * * * * python {{monitor_dir}}/promethues/getsupervisorstatus.py > /dev/null 2>&1">>crontab_info
        crontab crontab_info
    }
    
    push_supervisor_status_cron
    
    
    update_prometheus_alert_notifications()
    {
      cat <<EOF
    {
        "id": 1,
        "uid": "",
        "name": "pushAlert",
        "type": "webhook",
        "isDefault": false,
        "sendReminder": false,
        "disableResolveMessage": false,
        "frequency": "",
        "settings": {
            "autoResolve": true,
            "httpMethod": "POST",
            "password": "",
            "severity": "critical",
            "uploadImage": false,
            "url": "http://{{ groups['monitor'][0] }}:8083/monitor/pushAlert",
            "username": ""
        },
        "secureFields": {}
    }
    EOF
    }
    
    sleep 30s
    curl -k -X PUT \
        -H "Authorization: Bearer eyJrIjoibkFicXhkdndJeUI4eENvM3JNZ3JvcFNZWTBuVmEwZkIiLCJuIjoiY3JlYXRlIiwiaWQiOjF9" \
        -H "Content-Type: application/json" \
        -d "$(update_prometheus_alert_notifications)" http://{{ groups['monitor'][0] }}:3000/api/alert-notifications/1
    
    echo "---------------------promethues install is success-----------------------"
    templates/Prometheus-docker-install.j2
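    Once the script has run, the stack can be sanity-checked from the monitor host; a sketch, assuming the ports published by the docker run commands above (<monitor-ip> is the first host in the monitor group):

    docker ps | grep -E 'prometheus|pushgateway|grafana|alertmanager'
    curl -s http://<monitor-ip>:9090/-/healthy                                 # Prometheus
    curl -s http://<monitor-ip>:9093/-/healthy                                 # Alertmanager
    curl -s -o /dev/null -w '%{http_code}\n' http://<monitor-ip>:3000/login    # Grafana, expect 200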

 Integrate nginx and FastDFS with ansible

     1. Install FastDFS first.

     2. When installing nginx, the fastdfs-nginx-module must be compiled in.


     3. Things to note when compiling nginx:

          1.  ./configure --prefix=/app/taishi/nginx --with-http_stub_status_module --with-http_ssl_module --with-file-aio --with-http_realip_module --add-module=/home/admin/fastdfs-nginx-module-1.20/src/

           "configure --prefix=/" specifies, at compile time, where the program will be installed. If a path is specified, the binaries produced by make must live under that directory, otherwise the program will fail to start.

           If no prefix is specified, executables are installed to /usr/local/bin by default, libraries to /usr/local/lib, configuration files to /usr/local/etc, and other resource files to /usr/local/share.

     2. make && make install (a quick check that the module was compiled in follows these steps)
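    After make install, it can be confirmed that the module was actually compiled in; a minimal check, assuming the prefix used in the configure line above:

    /app/taishi/nginx/sbin/nginx -V 2>&1 | grep -o 'fastdfs-nginx-module'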

    - name: Create nginx dir
      file: path="{{ taishi_dir }}/nginx" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: nginx
    
    - name: Create ssl dir
      file: path="{{ taishi_dir }}/nginx/ssl" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: nginx
    
    - name: Create fdfs dir
      file: path="{{ taishi_dir }}/nginx/fdfs" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: nginx
    
    - name: Create etc_fdfs dir
      file: path="/etc/fdfs" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: nginx
    
    - name: Create fdfs_data dir
      file: path="{{ taishi_dir }}/nginx/fdfs/data" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: nginx
    
    
    - name: Create fdfs_log dir
      file: path="{{ taishi_dir }}/logs/fastdfs" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: nginx
    
    - name: Create fdfs store dir
      file: path="{{ taishi_dir }}/fastdfs" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: nginx
    
    - name: copy the nginx.tar to {{ groups["nginx"] | to_json }}
      copy: src=nginx.tar.gz dest=/tmp
      tags: nginx
    
    - name: Install nginx for {{ groups["nginx"] | to_json }}
      unarchive: src="/tmp/nginx.tar.gz" dest="{{ taishi_dir }}/nginx"  copy=no mode=0755
      tags: nginx
    
    - name: copy the html.tar to {{ groups["nginx"] | to_json }}
      copy: src=html.tar.gz dest=/tmp
      tags: nginx
    
    - name: Install webpage for {{ groups["nginx"]  | to_json }}
      unarchive: src="/tmp/html.tar.gz" dest="{{ taishi_dir }}/nginx/html"  copy=no mode=0755
      tags: nginx
    
    
    - name: copy the ssl.tar to {{ groups["nginx"] | to_json }}
      copy: src=ssl.tar.gz dest=/tmp
      tags: nginx
    
    - name: Install ssl for {{ groups["nginx"] | to_json }}
      unarchive: src="/tmp/ssl.tar.gz" dest="{{ taishi_dir }}/nginx/ssl"  copy=no mode=0755
      tags: nginx
    
    - name: copy the fdfs.tar to {{ groups["nginx"] | to_json }}
      copy: src=fdfs.tar.gz dest=/tmp
      tags: nginx
    
    - name: Install fdfs for {{ groups["nginx"] | to_json }}
      unarchive: src="/tmp/fdfs.tar.gz" dest="{{ taishi_dir }}/nginx/fdfs"  copy=no mode=0755
      tags: nginx
    
    - name: Create mockbuild user for {{ groups["nginx"] | to_json }}
      shell: "useradd -s /sbin/nologin mockbuild"
      ignore_errors: True
      tags: fdfs
    
    - name: rpm install fdfs rpmpackages for {{ groups["nginx"] | to_json }}
      shell: "cd {{ taishi_dir }}/nginx/fdfs && rpm -ivh ./fdfs_rpm/*.rpm"
      ignore_errors: True
      tags: fdfs
    
    - name: make fdfslib  for {{ groups["nginx"] | to_json }}
      shell: "cd {{ taishi_dir }}/nginx/fdfs/libfastcommon-1.0.38 && ./make.sh && ./make.sh install"
      ignore_errors: True
      tags: fdfs
    
    - name: make fdfs  for {{ groups["nginx"] | to_json }}
      shell: "cd {{ taishi_dir }}/nginx/fdfs/fastdfs-5.11 && ./make.sh && ./make.sh install"
      ignore_errors: True
      tags: fdfs
    
    - name: Copy client.conf to {{ groups["nginx"] | to_json }}
      template: src=client.conf.j2  dest="/etc/fdfs/client.conf" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: nginx
    
    - name: Copy http.conf to {{ groups["nginx"] | to_json }}
      template: src=http.conf.j2  dest="/etc/fdfs/http.conf" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: nginx
    
    - name: Copy mime.types to {{ groups["nginx"] | to_json }}
      template: src=mime.types.j2  dest="/etc/fdfs/mime.types" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: nginx
    
    - name: Copy mod_fastdfs.conf to {{ groups["nginx"] | to_json }}
      template: src=mod_fastdfs.conf.j2  dest="/etc/fdfs/mod_fastdfs.conf" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: nginx
    
    - name: Copy storage.conf to {{ groups["nginx"] | to_json }}
      template: src=storage.conf.j2 dest="/etc/fdfs/storage.conf" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: nginx
    
    - name: Copy tracker.conf to {{ groups["nginx"] | to_json }}
      template: src=tracker.conf.j2  dest="/etc/fdfs/tracker.conf" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: nginx
    
    - name: make ln  for {{ groups["nginx"] | to_json }}
      shell: "ln -s /usr/lib64/libfastcommon.so /usr/local/lib/libfastcommon.so && ln -s /usr/lib64/libfastcommon.so /usr/lib/libfastcommon.so"
      ignore_errors: True
      tags: fdfs
    
    - name: chown fdfs client libs for {{ groups["nginx"] | to_json }}
      shell: "chown -R {{taishi_user}}:{{taishi_user}} /usr/lib/libfdfsclient.so && chown -R {{taishi_user}}:{{taishi_user}} /usr/lib64/libfdfsclient.so"
      ignore_errors: True
      tags: fdfs
    
    - name: start fdfs  for {{ groups["nginx"] | to_json }}
      shell: "/usr/bin/fdfs_storaged /etc/fdfs/storage.conf start &&  /usr/bin/fdfs_trackerd /etc/fdfs/tracker.conf start"
      ignore_errors: True
      tags: fdfs
    
    - name: copy the fdfscheck to {{ groups["nginx"] | to_json }}
      copy: src=fdfs_check.py dest={{ taishi_dir }}/nginx/fdfs_check.py owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: nginx
    
    - name: Create the fdfscheck crontab
      cron:
        name: "fdfscheck"
        minute: "2"
        hour: "*"
        job: "python {{ taishi_dir }}/nginx/fdfs_check.py > /dev/null 2>&1"
      tags: nginx
    
    - name: Copy nginx-config to {{ groups["nginx"] | to_json }}
      template: src=nginx.conf.j2  dest="{{ taishi_dir }}/nginx/conf/nginx.conf" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: nginx
    
    - name: Copy fdfsnginx.ini to {{ groups["nginx"] | to_json }}
      template: src=fdfs_nginx.ini.j2  dest="{{ taishi_dir }}/etc/supervisord/fdfs_nginx.ini" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: nginx
    
    - name: copy the nginx-1.15.2.tar to {{ groups["nginx"] | to_json }}
      copy: src=nginx-1.15.2.tar.gz dest=/tmp
      tags: nginx
    
    - name: unarchive nginx-1.15.2 for {{ groups["nginx"] | to_json }}
      unarchive: src="/tmp/nginx-1.15.2.tar.gz" dest="/tmp" owner={{ taishi_user }} group={{ taishi_user }}  copy=no mode=0755
      tags: nginx
    
    - name: copy the fastfds-nginx-module.tar.gz to {{ groups["nginx"] | to_json }}
      copy: src=fastfds-nginx-module.tar.gz dest=/tmp
      tags: nginx
    
    - name: Create fastfds-nginx-module dir
      file: path="/tmp/fastfds-nginx-module" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: nginx
    
    - name: unarchive fastfds-nginx-module.tar for {{ groups["nginx"] | to_json }}
      unarchive: src="/tmp/fastfds-nginx-module.tar.gz" dest="/tmp/fastfds-nginx-module" owner={{ taishi_user }} group={{ taishi_user }}  copy=no mode=0755
      tags: nginx
    
    - name: configure nginx-fastdfs for {{ groups["nginx"] | to_json }}
      shell: "cd /tmp/nginx-1.15.2 && ./configure --prefix={{taishi_dir}}/nginx --with-http_stub_status_module --with-http_ssl_module --with-file-aio --with-http_realip_module --add-module=/tmp/fastfds-nginx-module/src/"
      ignore_errors: True
      tags: nginx
    
    - name: make nginx-fastdfs for {{ groups["nginx"] | to_json }}
      shell: "cd /tmp/nginx-1.15.2 && make && make install"
      ignore_errors: True
      tags: nginx
    
    - name: copy nginx for {{ groups["nginx"] | to_json }}
      shell: "cp -r /tmp/nginx-1.15.2/objs/nginx {{taishi_dir}}/nginx/sbin/nginx"
      ignore_errors: True
      tags: nginx
    
    - name: "chown nginx dir to {{ taishi_user }}"
      file: path="{{ taishi_dir }}/nginx" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: nginx
    
    - name: "supervisor start nginx"
      shell: "supervisorctl update"
      ignore_errors: True
      tags: nginx
    tasks/main.yml
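
    Since every task above carries a tag, the role can be run selectively. A hedged invocation sketch — the playbook name site.yml and the inventory file hosts are assumptions, not taken from the original:

    # run only the nginx install tasks
    ansible-playbook -i hosts site.yml --tags nginx
    # run only the FastDFS build/start tasks
    ansible-playbook -i hosts site.yml --tags fdfs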

      3. Compile example

         (screenshots of the configure and make output omitted)

        4. Installation complete

        5. The /usr/local/nginx directory on the build machine can be packaged and copied to the other hosts, so nginx does not have to be compiled on each of them. The path on the target hosts must also be under /usr/local, because the prefix is compiled into the binary. (A sketch of this copy step follows this list.)

        6. nginx starts successfully on the other hosts
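
    A minimal sketch of the package-and-copy step; the target host web2 and the root user are placeholders, not values from the original:

    tar -czf /tmp/nginx-built.tar.gz -C /usr/local nginx
    scp /tmp/nginx-built.tar.gz root@web2:/tmp/
    ssh root@web2 'tar -xzf /tmp/nginx-built.tar.gz -C /usr/local && /usr/local/nginx/sbin/nginx'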

    Deploying MySQL with ansible and initializing its data

             While importing the data, the following error appeared:

             ERROR 1064 (42000): You have an error in your SQL syntax; ... near 'databasesst test1' at line 1 — this turned out to be a syntax problem in the initialization script.

    -------------------------------
    -- set worker address
    -------------------------------
    UPDATE sa_log_worker SET ip = '{{ groups["Tlog"][0] | to_json }}' where name = 'work1';
    
    
    -------------------------------
    -- set ES output address
    -------------------------------
    UPDATE sa_log_output SET ioname = 'ES7_{{groups["elastic-master"][0] | to_json }}', param = '{"password":"{{es_password}}","sslCertFile":"{{taishi_dir}}/app/Tlog/certs/ca.crt","documentType":"_doc","hosts":"https://instance:9200/","username":"elastic"}' where iotype = 'Elasticsearch7';
    
    
    -------------------------------
    -- set Kafka input address
    -------------------------------
    UPDATE sa_log_output SET ioname = 'KAFKA11_{{groups["app-master"][0] | to_json }}', param = '{"bootstrapServers":"{{groups["app-master"][0]}}:9092","zkAddress":"{{groups["app-master"][0]}}:2181"}' where iotype = 'Kafka11';
    
    -------------------------------
    -- set scheduled-task endpoint URLs
    -------------------------------
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] | to_json}}:8770/schedule/comp/situation' where id = 16;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] | to_json}}:8770/assetSchedule/comp/situation' where id = 20;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] | to_json}}:8083/timingDepartScore' where id = 21;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] | to_json}}:8770/schedule/security/incident' where id = 22;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] | to_json }}:8083/timingBusinessScore' where id = 25;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] | to_json}}:8083/timingAssetScore' where id = 26;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] | to_json}}:8770/schedule/comp/attackMap' where id = 28;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] | to_json}}:8770/schedule/home/situation' where id = 29;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] | to_json}}:8083/saAssetReport/executeWeekly' where id = 33;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] | to_json}}:8083/saAssetReport/secWeekly' where id = 34;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] | to_json}}:8083/saAssetReport/secMonthly' where id = 35;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] | to_json}}:8083/saAssetReport/secDayly' where id = 36;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] | to_json}}:8770/schedule/security/result' where id = 39;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] | to_json}}:8770/schedule/incidentInfo/situation' where id = 40;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] | to_json}}:8083/asset/assetExtract' where id = 41;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] | to_json}}:8083/monitor/excuteFlinkMonitor' where id = 53;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] | to_json}}:8082/sysdict/sysStaticData' where id = 46;
    UPDATE sys_config SET vals = 'http://{{groups["app-master"][0] | to_json}}:3000/d/rYdddlPWk-evan/sadan-ji-ban-jian-kong?orgId=1&refresh=1m&kiosk=tv' where flag = 'sysInfo' and feature = 'url';
    
    
    -------------------------------
    -- set gateway permissions
    -------------------------------
    UPDATE sys_config SET vals = 'https://{{groups["app-master"][0] | to_json}}:1688' where id = 'h89573859ol6yhn8uhbdccc564rf8ui9';
    
    -------------------------------
    -- set parse-rule loopback-test id to 0
    -------------------------------
    UPDATE sa_log_input SET id = 0 where is_test = 1;
    
    
    -------------------------------
    -- set default parse-rule id to 0
    -------------------------------
    UPDATE sa_log_filter SET id=0 WHERE rule_id='defult_rule';
    the incorrect SQL data script

          1. Every SQL statement must end with a semicolon (;).

          2. The script must not contain the dashed separator lines shown above: MySQL only treats "--" as a comment when it is followed by whitespace, so a line of "-------" is parsed as SQL and raises ERROR 1064.

          3. Single-line SQL comments use "#".

          4. Multi-line SQL comments use /* */.
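
    A minimal sketch of the accepted comment styles, reusing statements from the script above (the IP is a placeholder):

    # single-line comment: set worker address
    UPDATE sa_log_worker SET ip = '10.0.0.1' WHERE name = 'work1';

    /* multi-line comment:
       set the default parse-rule id to 0 */
    UPDATE sa_log_filter SET id = 0 WHERE rule_id = 'defult_rule';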

    - name: Create mysql base dir
      file: path="{{ taishi_dir }}/mysql" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: mysql
    
    - name: remove mysql old_data
      file: path="{{ mysql_datadir }}" state=absent owner={{ taishi_user }} group={{ taishi_user }} 
      tags: mysql
    
    - name: Create mysql new data dir
      file: path="{{ mysql_datadir }}" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: mysql
    
    - name: Copy mysql.tar.gz to {{ groups["mysql"][0] | to_json }}
      copy: src="mysql-5.7.31-linux-glibc2.12-x86_64.tar.gz" dest=/tmp
      tags: mysql
    
    - name: Install mysql for {{ groups["mysql"][0] | to_json }}
      unarchive: src="/tmp/mysql-5.7.31-linux-glibc2.12-x86_64.tar.gz" dest="{{ taishi_dir }}/mysql"  copy=no mode=0755
      tags: mysql
    
    - name: "copy the mysql.ini"
      template: src=mysql.ini.j2  dest="{{ taishi_dir }}/etc/supervisord/mysql.ini" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: mysql
    
    - name: "chown mysql dir to {{ taishi_user }}"
      file: path="{{ taishi_dir }}/mysql" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: mysql
    
    - name: Create etc dir
      file: path="{{ taishi_dir }}/etc" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: mysqlcnf
    
    - name: "copy the my.cnf"
      template: src=my.cnf.j2  dest="{{ taishi_dir }}/etc/my.cnf" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: mysqlcnf
    
    
    - name: "chown etc dir to {{ taishi_user }}"
      file: path="{{ taishi_dir }}/etc" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: mysqlcnf
    
    - name: "copy the db.sql"
      copy: src=db.sql  dest="{{ taishi_dir }}/mysql/db.sql" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: mysqlinitdb
    
    - name: "copy the data.sql.j2"
      template: src=data.sql.j2  dest="{{ taishi_dir }}/mysql/data.sql" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: mysqlinitdb
    
    - name: "copy the initdb.sh"
      template: src=initdb.sh.j2  dest="{{ taishi_dir }}/mysql/initdb.sh" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: mysqlinitdb
    
    - name: "sh initdb.sh"
      shell: "{{ taishi_dir }}/mysql/initdb.sh"
      ignore_errors: True
      tags: mysqlinitdb
    
    - name: "supervisor start mysql"
      shell: "supervisorctl update && supervisorctl start mysql"
      ignore_errors: True
      tags: mysql
    
    - name: "copy the initdata.sh"
      template: src=initdata.sh.j2  dest="{{ taishi_dir }}/mysql/initdata.sh" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: mysqlinitdb
    
    - name: "sh initdata.sh"
      shell: "{{ taishi_dir }}/mysql/initdata.sh"
      ignore_errors: True
      tags: mysqlinitdb
    
    - name: "supervisor stop mysql"
      shell: "supervisorctl stop mysql"
      tags: uninstall,mysql-uninstall
    
    - name: "delete mysql directory"
      file: path="{{ taishi_dir }}/mysql" state=absent
      tags: uninstall,mysql-uninstall
    
    - name: "delete mysql basedir"
      file: path="{{ mysql_basedir }}" state=absent
      tags: uninstall,mysql-uninstall
    
    - name: "delete mysql datadir"
      file: path="{{ mysql_datadir }}" state=absent
      tags: uninstall,mysql-uninstall
    tasks/main.yml
    #!/bin/bash
    
    {{ taishi_dir }}/mysql/mysql-5.7.31-linux-glibc2.12-x86_64/bin/mysqld --defaults-file={{ taishi_dir }}/etc/my.cnf --initialize
    nohup {{ taishi_dir }}/mysql/mysql-5.7.31-linux-glibc2.12-x86_64/bin/mysqld --defaults-file={{ taishi_dir }}/etc/my.cnf > /dev/null 2>&1 &
    
    # Wait up to 25 seconds (5 checks, 5s apart) for mysql to start
    echo "Wait for mysql to start..."
    
    count=0
    for((i=1;i<=5;i++));
    do 
       count=`netstat -antp | grep {{ MYSQL_PORT }} | grep mysqld | wc -l`
       sleep 5
       if  [ "$count" -gt 0 ];then
        break
       fi
    done
    if  [ "$count" -gt 0 ];then
       echo "Mysql started successfully."
    else
       echo "Mysql failed to start within 25 seconds."
    fi
    sleep 3
    
    tmpPasswd=`grep "temporary password" {{ mysql_datadir }}/error.log | grep -ioE "root@localhost:.*" | cut -d ' ' -f 2`
    newPasswd={{ MYSQL_CONNECT_PASSWORD }}
    
    
    
    {{ taishi_dir }}/mysql/mysql-5.7.31-linux-glibc2.12-x86_64/bin/mysql -S {{ mysql_datadir }}/mysql.sock -u root -p"$tmpPasswd" --connect-expired-password -e "ALTER USER root@localhost IDENTIFIED BY '$newPasswd'"
    
    {{ taishi_dir }}/mysql/mysql-5.7.31-linux-glibc2.12-x86_64/bin/mysql -S {{ mysql_datadir }}/mysql.sock -u root -p"$newPasswd" -e "CREATE USER {{MYSQL_CONNECT_USERNAME}}@'%' IDENTIFIED BY '{{MYSQL_CONNECT_PASSWORD}}'"
    {{ taishi_dir }}/mysql/mysql-5.7.31-linux-glibc2.12-x86_64/bin/mysql -S {{ mysql_datadir }}/mysql.sock -u root -p"$newPasswd" -e "GRANT ALL PRIVILEGES ON *.* TO '{{MYSQL_CONNECT_USERNAME}}'@'%'"
    
    {{ taishi_dir }}/mysql/mysql-5.7.31-linux-glibc2.12-x86_64/bin/mysql -S {{ mysql_datadir }}/mysql.sock -u root -p"$newPasswd" -e "CREATE USER {{MYSQL_CONNECT_USERNAME}}@'localhost' IDENTIFIED BY '{{MYSQL_CONNECT_PASSWORD}}'"
    {{ taishi_dir }}/mysql/mysql-5.7.31-linux-glibc2.12-x86_64/bin/mysql -S {{ mysql_datadir }}/mysql.sock -u root -p"$newPasswd" -e "GRANT ALL PRIVILEGES ON *.* TO '{{MYSQL_CONNECT_USERNAME}}'@'localhost'"
    {{ taishi_dir }}/mysql/mysql-5.7.31-linux-glibc2.12-x86_64/bin/mysql -S {{ mysql_datadir }}/mysql.sock -u root -p"$newPasswd" -e "FLUSH PRIVILEGES"
    
    # Initialize the database schema
    {{ taishi_dir }}/mysql/mysql-5.7.31-linux-glibc2.12-x86_64/bin/mysql -S {{ mysql_datadir }}/mysql.sock -u root -p"$newPasswd"  < {{ taishi_dir }}/mysql/db.sql
    
    # Initialize the table data for the target environment
    # {{ taishi_dir }}/mysql/mysql-5.7.31-linux-glibc2.12-x86_64/bin/mysql -S {{ mysql_datadir }}/mysql.sock -u root -p"$newPasswd"  < {{ taishi_dir }}/mysql/data.sql
    
    # Shut down the database after initialization
    {{ taishi_dir }}/mysql/mysql-5.7.31-linux-glibc2.12-x86_64/bin/mysqladmin -S {{ mysql_datadir }}/mysql.sock -u root -p"$newPasswd" shutdown >/dev/null 2>&1
    templates/initdb.sh.j2
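    To spot-check what initdb.sh produced, connect over the same socket with the new root password; a minimal sketch reusing the paths and variables from the script:

    {{ taishi_dir }}/mysql/mysql-5.7.31-linux-glibc2.12-x86_64/bin/mysql -S {{ mysql_datadir }}/mysql.sock -u root -p"{{ MYSQL_CONNECT_PASSWORD }}" -e "SHOW DATABASES;"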
    #!/bin/bash
    
    {{ taishi_dir }}/mysql/mysql-5.7.31-linux-glibc2.12-x86_64/bin/mysql -P{{ MYSQL_PORT }} -h{{ groups["mysql"][0] }} -u{{MYSQL_CONNECT_USERNAME}} -p{{MYSQL_CONNECT_PASSWORD}} siem < {{ taishi_dir }}/mysql/data.sql
    templates/initdata.sh.j2
    /*
    Set worker address
    */
    UPDATE sa_log_worker SET ip = '{{ groups["Tlog"][0]  }}' where name = 'work1';
    
    
    /*
    Set ES output address
    */
    UPDATE sa_log_output SET ioname = 'ES7_{{groups["elastic-master"][0] }}', param = '{"password":"{{es_password}}","sslCertFile":"{{taishi_dir}}/app/Tlog/certs/ca.crt","documentType":"_doc","hosts":"https://instance:9200/","username":"elastic"}' where iotype = 'Elasticsearch7';
    
    
    
    /*
    Set Kafka input address
    */
    UPDATE sa_log_output SET ioname = 'KAFKA11_{{groups["app-master"][0] }}', param = '{"bootstrapServers":"{{groups["app-master"][0]}}:9092","zkAddress":"{{groups["app-master"][0]}}:2181"}' where iotype = 'Kafka11';
    
    
    /*
    Set scheduled-task endpoint URLs
    */
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] }}:8770/schedule/comp/situation' where id = 16;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] }}:8770/assetSchedule/comp/situation' where id = 20;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] }}:8083/timingDepartScore' where id = 21;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] }}:8770/schedule/security/incident' where id = 22;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] }}:8083/timingBusinessScore' where id = 25;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] }}:8083/timingAssetScore' where id = 26;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] }}:8770/schedule/comp/attackMap' where id = 28;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] }}:8770/schedule/home/situation' where id = 29;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] }}:8083/saAssetReport/executeWeekly' where id = 33;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] }}:8083/saAssetReport/secWeekly' where id = 34;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] }}:8083/saAssetReport/secMonthly' where id = 35;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] }}:8083/saAssetReport/secDayly' where id = 36;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] }}:8770/schedule/security/result' where id = 39;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] }}:8770/schedule/incidentInfo/situation' where id = 40;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] }}:8083/asset/assetExtract' where id = 41;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] }}:8083/monitor/excuteFlinkMonitor' where id = 53;
    UPDATE sys_quartz_task_informations SET url = 'http://{{groups["app-master"][0] }}:8082/sysdict/sysStaticData' where id = 46;
    UPDATE sys_config SET vals = 'http://{{groups["app-master"][0] }}:3000/d/rYdddlPWk-evan/sadan-ji-ban-jian-kong?orgId=1&refresh=1m&kiosk=tv' where flag = 'sysInfo' and feature = 'url';
    
    
    /*
    Set gateway permissions
    */
    UPDATE sys_config SET vals = 'https://{{groups["app-master"][0] }}:1688' where id = 'h89573859ol6yhn8uhbdccc564rf8ui9';
    
    /*
    Set parse-rule loopback-test id to 0
    */
    UPDATE sa_log_input SET id = 0 where is_test = 1;
    
    
    /*
    Set default parse-rule id to 0
    */
    UPDATE sa_log_filter SET id=0 WHERE rule_id='defult_rule';
    templates/data.sql.j2

    After MySQL is installed, the table schema is created automatically and the table data is initialized according to the target installation environment.

       Manually modifying data in the MySQL tables

       nginx startup failure

          The IP written into the nginx config file carried double quotes (most likely because the to_json filter quotes its output), which made nginx fail to start.

         After fixing the config file and restarting, the nginx page opens normally. (An illustration of the quoting difference follows below.)
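
    A minimal illustration of the quoting difference inside a template; the upstream fragment and the address are examples only, not from the original:

    # nginx.conf.j2 - illustrative fragment
    # with to_json the quotes survive into the rendered file and nginx rejects the directive:
    #   server {{ groups["nginx"][0] | to_json }}:8080;   ->   server "10.0.0.5":8080;
    # with the bare variable the value renders clean:
    #   server {{ groups["nginx"][0] }}:8080;             ->   server 10.0.0.5:8080;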

    Installing prometheus monitoring with ansible

    - name: Create src monitor dir
      file: path="{{ taishi_dir }}/monitor/setos" state=directory owner={{ taishi_user }} group={{ taishi_user }} recurse=yes
      tags: monitor
    
    - name: Copy setos to {{ groups["monitor"] | to_json }}
      copy: src=setos/ dest={{ taishi_dir }}/monitor/setos/ owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: monitor
    
    - name: "Copy the set_os.sh"
      template: src=set_os.sh  dest="{{ taishi_dir }}/monitor/setos/set_os.sh" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: monitor
    
    - name: shell setos to {{ groups["monitor"] }}
      shell: "{{ taishi_dir }}/monitor/setos/set_os.sh"
      ignore_errors: True
      tags: monitor
    
    - name: Copy docker to {{ groups["monitor"] }}
      copy: src=docker/ dest={{ taishi_dir }}/monitor/docker/ owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: monitor
    
    - name: unarchive docker.zip for {{ groups["monitor"] }}
      unarchive: src="{{ taishi_dir }}/monitor/docker/docker-v19.03.x.zip" dest="{{ monitor_dir }}"  copy=no mode=0755
      tags: monitor
    
    - name: install docker for {{ groups["monitor"]  }}
      shell: "{{monitor_dir}}/docker-v19.03.x/install.sh {{monitor_dir}}/docker-v19.03.x/docker-19.03.15.tgz"
      ignore_errors: True
      tags: monitor
    
    - name: unarchive docker.compose.zip for {{ groups["monitor"] }} 
      unarchive: src="{{ taishi_dir }}/monitor/docker/docker-compose-v1.26.2.zip" dest="{{ monitor_dir }}"  copy=no mode=0755
      tags: monitor
    
    - name: "Copy the install.sh"
      template: src=install.sh.j2  dest="{{ monitor_dir }}/docker-compose-v1.26.2/install.sh" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: monitor
    
    - name: install docker-compose for {{ groups["monitor"] }}
      shell: "{{ monitor_dir }}/docker-compose-v1.26.2/install.sh"
      ignore_errors: True
      tags: monitor
    
    - name: Copy promethues to {{ groups["monitor"] }}
      copy: src=promethues/ dest={{ taishi_dir }}/monitor/promethues/ owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: monitor
    
    - name: unarchive promethues-data.zip for {{ groups["monitor"] }}
      unarchive: src="{{ taishi_dir }}/monitor/promethues/promethues-data.zip" dest="{{ monitor_dir }}"  copy=no mode=0755
      tags: monitor
    
    - name: "Copy the prometheus.yml"
      template: src=prometheus.yml.j2  dest="{{ monitor_dir }}/promethues/server/prometheus.yml" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: monitor-test
    
    - name: "Copy the rules.yml"
      template: src=rules.yml.j2  dest="{{ monitor_dir }}/promethues/server/rules.yml" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: monitor
    
    - name: "Copy the install-prometheus.sh"
      template: src=install-prometheus.sh.j2  dest="{{ monitor_dir }}/promethues/install-prometheus.sh" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: monitor
    
    - name: "Copy the getsupervisor.py"
      template: src=getsupervisorstatus.py  dest="{{ monitor_dir }}/promethues/getsupervisorstatus.py" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: monitor
    
    - name: "Copy the pushgateway.sh"
      template: src=Pushgateway.sh  dest="{{ monitor_dir }}/promethues/Pushgateway.sh" owner={{ taishi_user }} group={{ taishi_user }} mode=0755
      tags: monitor
    
    - name: install prometheus for {{ groups["monitor"] }}
      shell: "{{ monitor_dir }}/promethues/install-prometheus.sh"
      ignore_errors: True
      tags: monitor
    tasks/main.yml
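
    Once the playbook has run, the stack can be spot-checked on the monitor host; a hedged sketch using standard docker and Prometheus endpoints:

    docker ps                                  # the prometheus, pushgateway, grafana and alertmanager containers should be Up
    curl -s http://127.0.0.1:9090/-/healthy    # Prometheus liveness endpoint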
    global:
      scrape_interval:     15s # default scrape interval: pull metrics from targets every 15 seconds
      external_labels:
        monitor: 'codelab-monitor'
    rule_files:
      - /etc/prometheus/rules.yml
    # scrape target configuration
    scrape_configs:
      # every time series scraped under this job automatically gets the label {job_name:"prometheus"}
      - job_name: 'prometheus'
        scrape_interval: 5s # overrides the global scrape interval, from 15s down to 5s
        static_configs:
          - targets: ["{{ groups['monitor'][0] }}:9090"] #docker prometheus web
          - targets: ["{{ groups['monitor'][0] }}:8080","{{ groups['monitor'][0] }}:8081","{{ groups['monitor'][0] }}:8082"] #3个微服务
            labels:
              group: 'client-golang'
    {% set hosts = [] %}
    {% for host in groups["all"] %}
    {# hosts.append() returns None; bind it with set so nothing leaks into the rendered file #}
    {% set _ = hosts.append(host|string + ":9100") %}
    {% endfor %}
          - targets: {{ hosts | to_json }}
            labels:
              group: 'client-node-exporter'
    
          - targets: ["{{ groups['elastic-master'][0] }}:9114"]
            labels:
              group: 'elastic-exporter'
    
          - targets: ["{{ groups['flink-master'][0] }}:9213","{{ groups['flink-master'][0] }}:9214"]
            labels:
              group: 'flink-exporter'
    
          - targets: ["{{ groups['mysql'][0] }}:9215"]
            labels:
              group: 'mysql-exporter'
    
          - targets: ["{{ groups['kafka'][0] }}:9216"]
            labels:
              group: 'kafka-exporter'
    
          - targets: ["{{ groups['redis-master'][0] }}:9217"]
            labels:
              group: 'redis-exporter'  
    
          - targets: ["{{ groups['monitor'][0] }}:9091"]  #docker prometheus pushgateway
            labels:
              group: 'pushgateway'
    alerting:
      alertmanagers:
        - static_configs:
            - targets: ["{{ groups['monitor'][0] }}:9093"]
    templates/prometheus.yml.j2
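    For a three-host inventory, the loop above renders the node-exporter target block like this (the addresses are placeholders):

      - targets: ["10.0.0.1:9100", "10.0.0.2:9100", "10.0.0.3:9100"]
        labels:
          group: 'client-node-exporter'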
    #!/bin/bash
    
    usage () {
        echo "USAGE: $0 [--local-ip-address xxx.xxx.xxx.xxx]"
        echo "  [-l|--local-ip-address ip_addr] local ip address."
        echo "  [-h|--help] Usage message"
    }
    
    
    set_hosts_info () {
        sed -i 's/127.0.0.1/'{{ groups["monitor"][0] }}'/g' {{monitor_dir}}/promethues/server/prometheus.yml
        # rm -fr /usr/bin/node_exporter
        # ln -s {{monitor_dir}}/promethues/node_exporter-1.0.1.linux-amd64/node_exporter /usr/bin/node_exporter
        # chmod +x /usr/bin/node_exporter
    
        # rm -fr /usr/bin/elasticsearch_exporter
        # ln -s {{monitor_dir}}/promethues/elasticsearch_exporter-1.1.0.linux-amd64/elasticsearch_exporter /usr/bin/elasticsearch_exporter
        # chmod +x /usr/bin/elasticsearch_exporter
    }
    
    set_hosts_info
    
    
    docker load -i {{taishi_dir}}/prometheus/prometheus*.tar.gz
    
    docker rm -f prometheus-v2.25.2
    docker run -d \
    --name=prometheus-v2.25.2 \
    -p 9090:9090 \
    -v {{monitor_dir}}/promethues/server/prometheus.yml:/etc/prometheus/prometheus.yml \
    -v {{monitor_dir}}/promethues/server/rules.yml:/etc/prometheus/rules.yml \
    prom/prometheus:v2.25.2 \
    --config.file=/etc/prometheus/prometheus.yml \
    --web.enable-lifecycle \
    --storage.tsdb.retention=10d \
    --web.enable-admin-api
    
    
    docker rm -f pushgateway-v1.4.0
    docker run -d \
    --name pushgateway-v1.4.0 \
    -p 9091:9091 \
    prom/pushgateway:v1.4.0
    
    docker rm -f grafana-v7.4.5
    docker run -d \
    --user $(id -u) \
    --name grafana-v7.4.5 \
    -p 3000:3000 \
    -v {{monitor_dir}}/promethues/grafana/conf/defaults.ini:/usr/share/grafana/conf/defaults.ini \
    -v {{monitor_dir}}/promethues/grafana/public/views/index.html:/usr/share/grafana/public/views/index.html \
    -v {{monitor_dir}}/promethues/grafana/data:/var/lib/grafana \
    grafana/grafana:7.4.5
    
    docker rm -f alertmanager-v0.21.0
    docker run -d \
    --name alertmanager-v0.21.0 \
    -p 9093:9093 \
    -v {{monitor_dir}}/promethues/alertmanager/alertmanager.yml:/etc/alertmanager/alertmanager.yml \
    prom/alertmanager:v0.21.0
    
    # ps -ef|grep node_exporter|grep -v grep|awk  '{print "kill -9 " $2}' |sh
    # sleep 2s
    # nohup node_exporter > /dev/null 2>&1 &
    
    echo "---------------------setting-----------------------"
    sleep 60s
    
    update_prometheus_datasources()
    {
      cat <<EOF
    {
        "id": 1,
        "name": "Prometheus",
        "type": "prometheus",
        "access": "proxy",
        "url": "http://{{ groups['monitor'][0] }}:9090",
        "isDefault":true
    }
    EOF
    }
    
    curl -k -X PUT \
        -H "Authorization: Bearer eyJrIjoibkFicXhkdndJeUI4eENvM3JNZ3JvcFNZWTBuVmEwZkIiLCJuIjoiY3JlYXRlIiwiaWQiOjF9" \
        -H "Content-Type: application/json" \
        -d "$(update_prometheus_datasources)" http://{{ groups["monitor"][0] }}:3000/api/datasources/1
    
    
    
    push_supervisor_status_cron () {
        chmod +x  {{monitor_dir}}/promethues/getsupervisorstatus.py
        sed -i 's/127.0.0.1/'{{ groups["monitor"][0]  }}'/g' {{monitor_dir}}/promethues/getsupervisorstatus.py
    
    
        chmod +x {{monitor_dir}}/promethues/Pushgateway.sh
    
        python {{monitor_dir}}/promethues/getsupervisorstatus.py > /dev/null 2>&1
        sed -i '/getsupervisorstatus.py/d' /var/spool/cron/root
        rm -fr crontab_info
        crontab -l > crontab_info
        echo "* * * * * python {{monitor_dir}}/promethues/getsupervisorstatus.py > /dev/null 2>&1">>crontab_info
        crontab crontab_info
    }
    
    push_supervisor_status_cron
    
    
    update_prometheus_alert_notifications()
    {
      cat <<EOF
    {
        "id": 1,
        "uid": "",
        "name": "pushAlert",
        "type": "webhook",
        "isDefault": false,
        "sendReminder": false,
        "disableResolveMessage": false,
        "frequency": "",
        "settings": {
            "autoResolve": true,
            "httpMethod": "POST",
            "password": "",
            "severity": "critical",
            "uploadImage": false,
            "url": "http://{{ groups['monitor'][0] }}:8083/monitor/pushAlert",
            "username": ""
        },
        "secureFields": {}
    }
    EOF
    }
    
    sleep 30s
    curl -k -X PUT \
        -H "Authorization: Bearer eyJrIjoibkFicXhkdndJeUI4eENvM3JNZ3JvcFNZWTBuVmEwZkIiLCJuIjoiY3JlYXRlIiwiaWQiOjF9" \
        -H "Content-Type: application/json" \
        -d "$(update_prometheus_alert_notifications)" http://{{ groups['monitor'][0] }}:3000/api/alert-notifications/1
    
    echo "---------------------prometheus install is successful-----------------------"
    install_prometheus.sh
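
    Because the Prometheus container is started with --web.enable-lifecycle, later edits to prometheus.yml or rules.yml can be applied without restarting it; a minimal sketch:

    curl -X POST http://{{ groups['monitor'][0] }}:9090/-/reload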

      

  prometheus installed successfully

    Installing the NFS service

         

      Mount the directory manually (a sketch follows below)
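
    A minimal sketch of a manual NFS mount; the server address and both paths are placeholders, not values from the original:

    # on the client: mount the exported directory, then verify
    mount -t nfs 10.0.0.10:/data/share /mnt/share
    df -h /mnt/share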
