Creating a Gauge Visualization

I have a field that contains one of two values, pass or fail. I'd like to set up a gauge with the following math:

Total # of occurrences where the field = pass / Total # of occurrences of the field

This should leave me with a value between 0.0 and 1.0, which could then be expressed as a decimal. The upper bound of the gauge would be the total count of the field, with the amount of fill on the gauge being the count that matches pass. If that makes sense....

Can you give a few example documents from Elasticsearch so we can see what the structure and field values look like?

You mean something like this?? It's the email.spf_evaluation field. It's going to be a string value of pass or fail.

{
  "_index": "dmarcxml-2018.03.62",
  "_type": "doc",
  "_id": "N-nn6mEBsGXTOKCBQqtC",
  "_version": 1,
  "_score": null,
  "_source": {
    "email.dmarc_action": [
      "none"
    ],
    "authresult.spf_result": [
      "pass"
    ],
    "report.start": [
      "1518220800"
    ],
    "report.org_contact": [
      "dmarc_support@corp.mail.ru"
    ],
    "policy.dmarc.subdomain_action": [
      "none"
    ],
    "policy.percentage": [
      "100"
    ],
    "@timestamp": "2018-03-03T08:07:59.247Z",
    "report.additional_contact": [
      "http://help.mail.ru/mail-help"
    ],
    "email.dkim_evaluation": [
      "fail"
    ],
    "email.count": [
      "1"
    ],
    "policy.dkim_mode": [
      "r"
    ],
    "report.org": [
      "Mail.Ru"
    ],
    "email.source_ip": [
      "192.168.1.1"
    ],
    "policy.domain": [
      "example.com"
    ],
    "policy.spf_mode": [
      "r"
    ],
    "authresult.spf_scope": [
      "mfrom"
    ],
    "path": "C:/DMARC/mail.ru!example.com!1518220800!1518307200.xml",
    "email.header_from": [
      "example.com"
    ],
    "report.id": [
      "37256247916566362691518220800"
    ],
    "authresult.spf_domain": [
      "example.com"
    ],
    "email.spf_evaluation": [
      "pass"
    ],
    "geoip": {
      "latitude": 37.4249,
      "dma_code": 807,
      "city_name": "Sunnyvale",
      "location": {
        "lat": 37.4249,
        "lon": -122.0074,
        "coordinates": "37.4249, -122.0074"
      },
      "country_name": "United States",
      "country_code3": "US",
      "region_code": "CA",
      "longitude": -122.0074,
      "region_name": "California",
      "continent_code": "NA",
      "ip": "192.168.1.1",
      "timezone": "America/Los_Angeles",
      "postal_code": "94089",
      "country_code2": "US"
    },
    "message": "  <record>\r\n    <row>\r\n      <source_ip>192.168.1.1</source_ip>\r\n      <count>1</count>\r\n      <policy_evaluated>\r\n        <disposition>none</disposition>\r\n        <dkim>fail</dkim>\r\n        <spf>pass</spf>\r\n      </policy_evaluated>\r\n    </row>\r\n    <identifiers>\r\n      <header_from>example.com</header_from>\r\n    </identifiers>\r\n    <auth_results>\r\n      <spf>\r\n        <domain>example.com</domain>\r\n        <scope>mfrom</scope>\r\n        <result>pass</result>\r\n      </spf>\r\n    </auth_results>\r\n    <report_metadata>\r\n      <org_name>Mail.Ru</org_name>\r\n      <email>dmarc_support@corp.mail.ru</email>\r\n      <extra_contact_info>http://help.mail.ru/mail-help</extra_contact_info>\r\n      <report_id>37256247916566362691518220800</report_id>\r\n      <date_range>\r\n        <begin>1518220800</begin>\r\n        <end>1518307200</end>\r\n      </date_range>\r\n    </report_metadata>\r\n    <policy_published>\r\n      <domain>example.com</domain>\r\n      <adkim>r</adkim>\r\n      <aspf>r</aspf>\r\n      <p>none</p>\r\n      <sp>none</sp>\r\n      <pct>100</pct>\r\n    </policy_published>\r\n  </record>\r",
    "policy.dmarc.domain_action": [
      "none"
    ],
    "report.end": [
      "1518307200"
    ],
    "tags": [
      "multiline"
    ]
  },
  "fields": {
    "report.end": [
      "2018-02-11T00:00:00.000Z"
    ],
    "report.start": [
      "2018-02-10T00:00:00.000Z"
    ],
    "@timestamp": [
      "2018-03-03T08:07:59.247Z"
    ]
  },
  "sort": [
    1520064479247
  ]
}

Well, I'm really not sure here, but sometimes I get a little lucky just by exploring the screens and guessing.

Using TSVB, you can start with a Count aggregation, which gives the number of documents in each time bucket. Then add another metric to the same series and choose Filter Ratio. The numerator and denominator are both filters, so your numerator will be email.spf_evaluation:pass and the denominator will be *.
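
If you want to sanity-check that ratio outside of TSVB, a query like the sketch below should return the same two counts that the Filter Ratio divides. The dmarcxml-* index pattern and the 30-day time range are assumptions here, so adjust them to match your setup:

GET dmarcxml-*/_search
{
  "size": 0,
  "query": {
    "range": { "@timestamp": { "gte": "now-30d" } }
  },
  "aggs": {
    "denominator_all_docs": {
      "filter": { "query_string": { "query": "*" } }
    },
    "numerator_spf_pass": {
      "filter": { "query_string": { "query": "email.spf_evaluation:pass" } }
    }
  }
}

Dividing numerator_spf_pass.doc_count by denominator_all_docs.doc_count gives the 0.0 to 1.0 value you described; TSVB's Filter Ratio just does that division for each time bucket. Depending on your mapping, you may need to query the email.spf_evaluation.keyword sub-field instead.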

In general, the way I use TSVB is to start with the Time Series chart and make sure my series are creating a shape of data over time that looks right. Then I switch to Gauge to see the "last" ratio bucket as a single metric.

