Due to the lack of proper documentation on this matter (it is not obvious how to write a log4j2.properties file based on the XML examples in the Log4j2 documentation), I thought it would be useful to share my log4j2.properties with multiple conditions on policies and strategies.
Only the server logging part is covered in detail; the rest is the default configuration with the "old" and "deprecated" parts removed.
Please let me know if any of the commented parts are unclear. Feel free to share your own configuration or additional information!
#This is an example of a log4j2.properties for an elasticsearch 7 cluster (7.6.1) with json formatted logs only.
status = error
# log action execution errors for easier debugging
logger.action.name = org.elasticsearch.action
logger.action.level = debug
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
######## Server JSON ############################
appender.rolling.type = RollingFile
appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json
appender.rolling.layout.type = ESJsonLayout
appender.rolling.layout.type_name = server
#File Pattern will determine the time unit for the "appender.rolling.policies.time.interval". Indeed, its time unit will be the smallest time unit described in the file pattern. In this example, the smallest unit of "yyyy-MM-dd" is days, we could also configure a file pattern with units like "yyyy-MM-dd-HH-mm-ss" if we want to set periodic rolls every x seconds (very useful to test a configuration ;-)).
appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz
#Policies allows to define the rolling(compressing logs) thresholds
appender.rolling.policies.type = Policies
#TimeBasedTriggeringPolicy allows to roll a log every x time. In this example, given the filepattern smallest time unit is day and given we set time.interval to 1, a log will be compressed every 1 day.
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
#SizeBasedTriggeringPolicy allows to roll logs everytime log size is above 128MB. Here it is used as a security, if in one day, a lot of logs are written we dont want to wait 1 day before compressing logs to save space.
appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.rolling.policies.size.size = 128MB
#Strategies determines when to delete these rolled(compressed) logs
appender.rolling.strategy.type = DefaultRolloverStrategy
#Here we state there is no maximum number of retained compressed logs
appender.rolling.strategy.fileIndex = nomax
#We dont want to keep logs or move them to another device, so we just delete them
appender.rolling.strategy.action.type = Delete
appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
#We set a compressed logs deleting strategy based on filenames
appender.rolling.strategy.action.condition.type = IfFileName
#Feel free to set the regex which will suit you the best
appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*.json.gz
#Based on log4j doc, ifAny allow to set logical OR between the next deleting compressed logs strategy conditions
appender.rolling.strategy.action.condition.ifAny.type = IfAny
#Remember this condition is set based on filename so IfLastModified will check only the compressed logs name to determine if its modification date is older than the threshold we set. Unlike time.interval we have to set a time unit.
appender.rolling.strategy.action.condition.ifAny.ifLastModified.type = IfLastModified
appender.rolling.strategy.action.condition.ifAny.ifLastModified.age = 30D
#This condition states that if the summed size of compressed logs is above a stated value, the lowest incremented log file will be deleted
appender.rolling.strategy.action.condition.ifAny.ifAccumulatedFileSize.type = IfAccumulatedFileSize
appender.rolling.strategy.action.condition.ifAny.ifAccumulatedFileSize.exceeds = 7GB
#this entry set the various log levels (trace for example is very useful to test logs configuration, it allows to log A LOT)
rootLogger.level = info
rootLogger.appenderRef.console.ref = console
rootLogger.appenderRef.rolling.ref = rolling
#Hereafter, this is the default configuration.
######## Search slowlog JSON ####################
appender.index_search_slowlog_rolling.type = RollingFile
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.json
appender.index_search_slowlog_rolling.layout.type = ESJsonLayout
appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog
appender.index_search_slowlog_rolling.layout.esmessagefields=message,took,took_millis,total_hits,types,stats,search_type,total_shards,source,id
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%i.json.gz
appender.index_search_slowlog_rolling.policies.type = Policies
appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.index_search_slowlog_rolling.policies.size.size = 1GB
appender.index_search_slowlog_rolling.strategy.type = DefaultRolloverStrategy
appender.index_search_slowlog_rolling.strategy.max = 4
logger.index_search_slowlog_rolling.name = index.search.slowlog
logger.index_search_slowlog_rolling.level = trace
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
logger.index_search_slowlog_rolling.additivity = false
######## Indexing slowlog JSON ##################
appender.index_indexing_slowlog_rolling.type = RollingFile
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.json
appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout
appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog
appender.index_indexing_slowlog_rolling.layout.esmessagefields=message,took,took_millis,doc_type,id,routing,source
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%i.json.gz
appender.index_indexing_slowlog_rolling.policies.type = Policies
appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.index_indexing_slowlog_rolling.policies.size.size = 1GB
appender.index_indexing_slowlog_rolling.strategy.type = DefaultRolloverStrategy
appender.index_indexing_slowlog_rolling.strategy.max = 4
logger.index_indexing_slowlog.name = index.indexing.slowlog.index
logger.index_indexing_slowlog.level = trace
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
logger.index_indexing_slowlog.additivity = false
#################################################