Bash syslog_history --> rsyslog --> Elastic

I want to do the following: enable the native Bash history logging to syslog (shopt -s syslog_history), have rsyslog relay the logs to a central server over TLS, and then have that central server relay them on to ELK, also over TLS.
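
For reference, enabling it for all users would look roughly like this; note that the syslog_history shopt only exists if the Bash build was compiled with the SYSLOG_HISTORY and SYSLOG_SHOPT options, so it is silently skipped on builds without it:

# /etc/profile.d/syslog-history.sh (example path)
# Log interactive commands to syslog; ignore the error on Bash builds without the option
shopt -s syslog_history 2>/dev/null || true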

I want to modify the example below to use an rsyslog relay, but with TLS over TCP so the Bash history stays encrypted in transit.

A draft rsyslog config for the client:

# certificate files - just CA for a client
$DefaultNetstreamDriverCAFile /path/to/contrib/gnutls/ca.pem

# Set up the action
$DefaultNetstreamDriver gtls # Use gtls netstream driver
$ActionSendStreamDriverMode 1 # Require TLS for the connection
$ActionSendStreamDriverAuthMode anon # Server is NOT authenticated

# Forward the Bash history messages to the central rsyslog server
if ($programname == '-bash' or $programname == 'bash') and $msg contains 'HISTORY:' then {
    # @@ = forward over TCP (a single @ would be UDP, which cannot carry TLS);
    # json-template must also be defined on the client for this to work
    @@$rsyslog_server:514;json-template
    stop
}
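
The same forward can also be written as a RainerScript action, which keeps the TLS settings local to the action instead of relying on the $ActionSend* defaults. A sketch, assuming the central server is reachable as central.example.net (any other name or the $rsyslog_server placeholder works just as well with anon auth):

action(type="omfwd"
       target="central.example.net"   # or whatever $rsyslog_server points at
       port="514"
       protocol="tcp"
       StreamDriver="gtls"
       StreamDriverMode="1"           # require TLS for the connection
       StreamDriverAuthMode="anon"    # server is not authenticated, matching the config above
       template="json-template")      # must be defined on the client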

A draft for the rsyslog config for the central server, which forwards it to Elastic:

# Load the Elastic module
module(load="omelasticsearch")

# Make gtls driver the default
$DefaultNetstreamDriver gtls

# Certificate files
$DefaultNetstreamDriverCAFile /rsyslog/protected/ca.pem
$DefaultNetstreamDriverCertFile /rsyslog/protected/machine-cert.pem
$DefaultNetstreamDriverKeyFile /rsyslog/protected/machine-key.pem

$ActionSendStreamDriverAuthMode x509/name
# PermittedPeer must match the name in the certificate of the machine this relay forwards TO
# (the ELK host), not the name of the relay itself
$ActionSendStreamDriverPermittedPeer central.example.net
$ActionSendStreamDriverMode 1 # Run driver in TLS-only mode

# First example template
template(name="json-template"
  type="list") {
    constant(value="{")
      constant(value="\"@timestamp\":\"")     property(name="timereported" dateFormat="rfc3339")
      constant(value="\",\"@version\":\"1")
      constant(value="\",\"message\":\"")     property(name="msg" format="json")
      constant(value="\",\"sysloghost\":\"")  property(name="hostname")
      constant(value="\",\"severity\":\"")    property(name="syslogseverity-text")
      constant(value="\",\"facility\":\"")    property(name="syslogfacility-text")
      constant(value="\",\"programname\":\"") property(name="programname")
      constant(value="\",\"procid\":\"")      property(name="procid")
    constant(value="\"}\n")
}

# Second example template
template(name="plain-syslog"
  type="list") {
    constant(value="{")
    constant(value="\"@timestamp\":\"")         property(name="timereported" dateFormat="rfc3339")
    constant(value="\",\"host\":\"")            property(name="hostname")
    constant(value="\",\"severity-num\":")      property(name="syslogseverity")
    constant(value=",\"facility-num\":")        property(name="syslogfacility")
    constant(value=",\"severity\":\"")          property(name="syslogseverity-text")
    constant(value="\",\"facility\":\"")        property(name="syslogfacility-text")
    constant(value="\",\"syslogtag\":\"")       property(name="syslogtag" format="json")
    constant(value="\",\"message\":\"")         property(name="msg" format="json")
    constant(value="\"}")
  }

if ($programname == '-bash' or $programname == 'bash') and $msg contains 'HISTORY:' then {
    action(type="omelasticsearch"
      server="{{ elastic_search_ip }}"
      serverport="9200"
      template="plain-syslog"
      searchIndex="logstash-index"    # with dynSearchIndex="on" this must name a template that builds the index name
      dynSearchIndex="on"
      searchType="events"             # note: document types are deprecated/removed in recent Elasticsearch versions
      bulkmode="on"                   # use the Bulk API
      queue.type="linkedlist"         # in-memory queue so the queue.* settings below actually apply
      queue.dequeuebatchsize="5000"   # ES bulk size
      queue.size="100000"             # capacity of the action queue
      queue.workerthreads="5"         # 5 workers for the action
      action.resumeretrycount="-1"    # retry indefinitely if ES is unreachable
      errorfile="/var/log/omelasticsearch.log")

    # @@ = forward over TCP (a single @ would be UDP and bypass TLS)
    @@$IP_ELK:10514;json-template
    stop
}
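
One part the central-server draft above does not show yet is the listener that actually receives the TLS traffic from the clients. A minimal sketch, assuming imtcp on port 514 and the same anonymous auth mode as on the clients (the certificate/key files set above are what the clients encrypt against):

# Accept TLS-encrypted syslog from the clients
module(load="imtcp"
       StreamDriver.Name="gtls"
       StreamDriver.Mode="1"          # TLS-only
       StreamDriver.AuthMode="anon")  # clients are not authenticated, matching the client draft
input(type="imtcp" port="514")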

But to send data directly to port 9200 I will need authentication. How would that work? Or is it better to set up Logstash to receive the logs and forward them to port 9200 locally on the server, assuming Logstash can receive encrypted rsyslog messages at all? I did create a token, but I'm unsure how to use it in such a setup. With Fleet it works quite well and is straightforward.
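
From what I can tell, omelasticsearch only supports basic authentication (uid/pwd) plus HTTPS, so the token probably cannot be used directly. Something like this is what I have in mind; the user, password and CA path are placeholders:

action(type="omelasticsearch"
  server="{{ elastic_search_ip }}"
  serverport="9200"
  usehttps="on"                                   # talk to Elasticsearch over HTTPS
  tls.cacert="/rsyslog/protected/es-http-ca.pem"  # CA that signed the ES HTTP certificate (placeholder path)
  uid="rsyslog_writer"                            # placeholder user with write access to the target index
  pwd="changeme"
  template="plain-syslog"
  searchIndex="logstash-index"
  dynSearchIndex="on"
  bulkmode="on"
  errorfile="/var/log/omelasticsearch.log")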

Or is there a better way to set this up? There are many topics about this on this forum, but they rarely get a reply or show a fully working solution.

I suppose it's better to use the Fleet agent to set up an integration with UDP and then enable the syslog formatting. Then no Logstash is needed, and the Fleet agent traffic is already encrypted.

Yes, this works great: a UDP socket on the central log server, and forward the traffic to the Fleet agent from there. Works like a charm.
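
For anyone finding this later, the forwarding rule on the central server ends up looking roughly like this (9001 is just an example; use whatever UDP port the Fleet integration is configured to listen on):

# Hand the Bash history lines to the local Elastic Agent UDP input
if ($programname == '-bash' or $programname == 'bash') and $msg contains 'HISTORY:' then {
    action(type="omfwd" target="127.0.0.1" port="9001" protocol="udp")
    stop
}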
