How does the "jaeger client" data access "APM" via "OpenTelemetry"?

APM Server: 7.13.2, Elasticsearch: 7.13.2, Kibana: 7.13.2

Questions:
1. The error rate module on the Overview screen shows "No data to display", and "Error rate" on the Transactions screen is N/A. How do I set this value?
2. The Service Map looks normal, but "Dependencies" on the Overview screen shows "No items found".
3. How do I set the span type? All the modules related to it show "No data to display". (A guess at the missing error/span-type tagging is sketched after the server-one code below.)

Steps:
1. Run APM Server, Elasticsearch, and Kibana:

docker network create -d bridge my-jaeger-net
docker run --name elasticsearch --network=my-jaeger-net -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -d elasticsearch:7.13.2
docker run --name kibana --network=my-jaeger-net -p 5601:5601 -d kibana:7.13.2
docker run -d -p 8200:8200 --name=apm-server --network=my-jaeger-net --user=apm-server elastic/apm-server:7.13.2 --strict.perms=false -e -E 'output.elasticsearch.hosts=["elasticsearch:9200"]'
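
(If the three containers are healthy, http://localhost:9200 and http://localhost:5601 should respond, and apm-server should answer on http://localhost:8200 with a small JSON banner showing its version.)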
2. Run the OpenTelemetry Collector (0.27.0):

docker run --name collector --network my-jaeger-net \
    -v $(pwd)/config.yaml:/config.yaml \
    -d otel/opentelemetry-collector-contrib:0.27.0 --config=/config.yaml

config.yaml:

receivers:
  jaeger:
    protocols:
      grpc:
        endpoint: 0.0.0.0:14250
exporters:
  logging/detail:
    loglevel: debug
  alibabacloud_logservice/canary:
    # ******* (config redacted)
  elastic:
    apm_server_url: http://apm-server:8200

service:
  pipelines:
    traces:
      receivers: [ jaeger ] 
      exporters: [ alibabacloud_logservice/canary, elastic ]
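
With this pipeline, the collector listens for Jaeger gRPC traffic on 14250 and fans every trace out to both the Log Service exporter and the elastic exporter (which, as the reply below notes, is deprecated in favor of OTLP).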

3. Run jaeger-agent:

    docker run -p 6831:6831/udp --network my-jaeger-net  -d jaegertracing/jaeger-agent:1.22.0 --reporter.grpc.host-port=collector:14250
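
(6831/udp is the compact-thrift port that jaeger-client-go reports to by default; the agent then forwards span batches over gRPC to collector:14250, matching the jaeger receiver configured above.)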
4. Use uber/jaeger-client-go to send data to jaeger-agent; the agent forwards it to the OpenTelemetry Collector.

server-one:
package main
import (
	"fmt"
	"io"
	"log"
	"net/http"

	"github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
	"github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-client-go/config"
)

// service implements http.Handler.
type service struct {
}
func (s *service) ServeHTTP(writer http.ResponseWriter, request *http.Request) {
	fmt.Println("request******")
	tracer := opentracing.GlobalTracer()
	// A missing or invalid parent context is not fatal; we just start a new trace.
	spanCtx, err := tracer.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(request.Header))
	if err != nil {
		log.Println(err)
	}

	span := tracer.StartSpan("service_one_do", ext.RPCServerOption(spanCtx))
	defer span.Finish()

	writer.Write([]byte("12331"))
}

func main() {
	tracer, closer := initJaeger("service_t2")
	defer closer.Close()

	opentracing.SetGlobalTracer(tracer)
	fmt.Println(http.ListenAndServe(":8083", &service{}))
}

func initJaeger(service string) (opentracing.Tracer, io.Closer) {
	cfg := &config.Configuration{
		Sampler: &config.SamplerConfig{
			Type:  "const",
			Param: 1,
		},
		Reporter: &config.ReporterConfig{
			LogSpans:           true,
			LocalAgentHostPort: "127.0.0.1:6831",
		},
		ServiceName: service,
	}

	tracer, closer, err := cfg.NewTracer(config.Logger(jaeger.StdLogger))
	if err != nil {
		panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
	}

	return tracer, closer
}
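
Regarding questions 1 and 3: the spans above are never marked as failed and carry no component tag, which may be why "Error rate" and the span-type modules stay empty. Below is a minimal sketch of what the tagging could look like, reusing server-one's imports; the handler name is hypothetical, and the assumption that APM maps the Jaeger "error" and "component" tags onto event.outcome and the span type is mine, not verified against the docs:

// failingHandler is a hypothetical variant of ServeHTTP that records a
// failure on its span. Assumption: APM derives event.outcome (and thus
// the error rate) from the Jaeger "error" tag and http.status_code.
func (s *service) failingHandler(writer http.ResponseWriter, request *http.Request) {
	tracer := opentracing.GlobalTracer()
	spanCtx, _ := tracer.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(request.Header))

	// RPCServerOption tolerates a nil parent and also sets span.kind=server,
	// the tag APM uses to tell transactions and spans apart.
	span := tracer.StartSpan("service_one_fail", ext.RPCServerOption(spanCtx))
	defer span.Finish()

	err := fmt.Errorf("backend unavailable")
	ext.Error.Set(span, true)                                            // Jaeger "error" tag
	ext.HTTPStatusCode.Set(span, uint16(http.StatusInternalServerError)) // 500
	ext.Component.Set(span, "net/http")                                  // assumed source of the span "type"
	span.LogKV("event", "error", "message", err.Error())

	http.Error(writer, err.Error(), http.StatusInternalServerError)
}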

server-two:

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"net/http"

	"github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
	"github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-client-go/config"
)

func main() {
	tracer, closer := initJaeger("service_t1")

	defer closer.Close()

	opentracing.SetGlobalTracer(tracer)

	do("1")
	// do("2")
}
func do(i string) {
	var url = "http://127.0.0.1:8083"
	tracer := opentracing.GlobalTracer()
	request, err := http.NewRequest(http.MethodGet, url, bytes.NewBuffer([]byte("")))
	if err != nil {
		log.Fatal(err)
	}
	// Root span for this call.
	span := tracer.StartSpan("service_two_do" + i)
	defer span.Finish()

	span.SetTag("traces", "1")
	// Child span modelling the outgoing HTTP request.
	span = span.Tracer().StartSpan("service_t2", opentracing.ChildOf(span.Context()))
	defer span.Finish()
	ext.SpanKindRPCClient.Set(span)

	ext.HTTPUrl.Set(span, url)
	ext.HTTPMethod.Set(span, http.MethodGet)

	span.SetTag("traces", "1")
	// Propagate the trace context to server-one via the request headers.
	err = tracer.Inject(span.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(request.Header))
	if err != nil {
		log.Fatal(err)
	}
	resp, err := http.DefaultClient.Do(request)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	ext.HTTPStatusCode.Set(span, uint16(resp.StatusCode))

	all, err := io.ReadAll(resp.Body)
	fmt.Println(string(all), err)
}

func initJaeger(service string) (opentracing.Tracer, io.Closer) {
	cfg := &config.Configuration{
		Sampler: &config.SamplerConfig{
			Type:  "const",
			Param: 1,
		},
		Reporter: &config.ReporterConfig{
			LogSpans:           true,
			LocalAgentHostPort: "127.0.0.1:6831",
		},
		ServiceName: service,
	}

	tracer, closer, err := cfg.NewTracer(config.Logger(jaeger.StdLogger))
	if err != nil {
		panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
	}

	return tracer, closer
}
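
To reproduce a trace end to end: start server-one first (note the swapped naming: it registers itself as service_t2 and listens on :8083), then run server-two (which registers as service_t1) once. The reporter sends finished spans to jaeger-agent over UDP 6831, and closer.Close() flushes anything still buffered on exit, so keep the defer in place.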

Hello @tttoad.

The OpenTelemetry Collector exporter for Elastic has been deprecated and replaced by native support for the OpenTelemetry Protocol (OTLP) in Elastic Observability.

Please see our documentation: OpenTelemetry integration | APM Overview [7.13] | Elastic.

Can you please ensure that you use Elastic v7.13 and evolve the configuration of your OpenTelemetry Collector to look like:

receivers:
  jaeger:
    protocols:
      grpc:
        endpoint: 0.0.0.0:14250
exporters:
  logging/detail:
    loglevel: debug
  alibabacloud_logservice/canary:
    # ******* (config redacted)
  otlp/elastic:
    endpoint: "localhost:8200"
    insecure: true
    headers:
      Authorization: "Bearer my_secret_token"

processors:
  memory_limiter:
    check_interval: 1s
    limit_mib: 2000
  batch:

service:
  pipelines:
    traces:
      receivers: [ jaeger ] 
      processors: [ memory_limiter, batch ]
      exporters: [ alibabacloud_logservice/canary, otlp/elastic ]

Note that I've added the recommended batch and memory_limiter processors.
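
(batch groups spans into fewer, larger export requests, and memory_limiter makes the collector start refusing data before it runs out of memory, so both are worth keeping.)

For what it's worth, with native OTLP support an application can also bypass jaeger-client and jaeger-agent entirely and export straight to APM Server. Here is a rough sketch using the OpenTelemetry Go SDK; the package paths and option names are from memory and vary between SDK versions, so treat it as an illustration rather than a drop-in:

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	"go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
)

func main() {
	ctx := context.Background()

	// OTLP/gRPC exporter pointed straight at APM Server.
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("apm-server:8200"),
		otlptracegrpc.WithInsecure(),
		// Only needed if APM Server has a secret token configured.
		otlptracegrpc.WithHeaders(map[string]string{
			"Authorization": "Bearer my_secret_token",
		}),
	)
	if err != nil {
		log.Fatal(err)
	}

	tp := sdktrace.NewTracerProvider(
		sdktrace.WithBatcher(exp),
		sdktrace.WithResource(resource.NewWithAttributes(
			semconv.SchemaURL,
			semconv.ServiceNameKey.String("service_t1"),
		)),
	)
	defer tp.Shutdown(ctx) // flushes any buffered spans
	otel.SetTracerProvider(tp)

	// Emit one test span to verify the pipeline.
	_, span := tp.Tracer("demo").Start(ctx, "otlp_smoke_test")
	span.End()
}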


I want to correlate the "metrics" and "trace" data. The metrics are scraped by Prometheus; I now use the OpenTelemetry Collector's receivers.prometheus_simple to collect them and send them to APM, but once there they cannot be correlated with the traces, because the service.name of the two is different.
ES, APM, Kibana version: 7.13.3
OpenTelemetry version: 7.13.3
config:

receivers:
  jaeger:
    protocols:
      grpc:
        endpoint: 0.0.0.0:14250
  prometheus_simple:
    collection_interval: 10s
    endpoint: "main2:9102"

processors:
  memory_limiter:
    check_interval: 1s
    limit_mib: 2000
  batch:
    
exporters:
  logging/detail:
    loglevel: debug

  otlp/elastic:
    endpoint: apm-server:8200
    insecure: true
    headers:
      Authorization: "Bearer my_secret_token"

service:
  pipelines:
    traces:
      receivers: [ jaeger ] # the receiver is configured as jaeger
      exporters: [ otlp/elastic ] # the exporter is configured as otlp/elastic
      processors: [ memory_limiter, batch ]
    metrics:
      receivers: [ prometheus_simple ]
      exporters: [ otlp/elastic ]

main2:9102/metrics response:

# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 5.87e-05
go_gc_duration_seconds{quantile="0.25"} 0.0001382
go_gc_duration_seconds{quantile="0.5"} 0.0003104
go_gc_duration_seconds{quantile="0.75"} 0.000651
go_gc_duration_seconds{quantile="1"} 0.0023786
go_gc_duration_seconds_sum 0.0035369
go_gc_duration_seconds_count 5
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 260
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
go_info{version="go1.16.6"} 1
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes 5.843192e+06
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
# TYPE go_memstats_alloc_bytes_total counter
go_memstats_alloc_bytes_total 1.7327112e+07
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
# TYPE go_memstats_buck_hash_sys_bytes gauge
go_memstats_buck_hash_sys_bytes 1.449204e+06
# HELP go_memstats_frees_total Total number of frees.
# TYPE go_memstats_frees_total counter
go_memstats_frees_total 26693
# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
# TYPE go_memstats_gc_cpu_fraction gauge
go_memstats_gc_cpu_fraction 0.01836957988419095
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
# TYPE go_memstats_gc_sys_bytes gauge
go_memstats_gc_sys_bytes 5.213408e+06
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
# TYPE go_memstats_heap_alloc_bytes gauge
go_memstats_heap_alloc_bytes 5.843192e+06
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
# TYPE go_memstats_heap_idle_bytes gauge
go_memstats_heap_idle_bytes 5.8195968e+07
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
# TYPE go_memstats_heap_inuse_bytes gauge
go_memstats_heap_inuse_bytes 7.53664e+06
# HELP go_memstats_heap_objects Number of allocated objects.
# TYPE go_memstats_heap_objects gauge
go_memstats_heap_objects 24186
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
# TYPE go_memstats_heap_released_bytes gauge
go_memstats_heap_released_bytes 5.709824e+07
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
# TYPE go_memstats_heap_sys_bytes gauge
go_memstats_heap_sys_bytes 6.5732608e+07
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
go_memstats_last_gc_time_seconds 1.6263290546848054e+09
# HELP go_memstats_lookups_total Total number of pointer lookups.
# TYPE go_memstats_lookups_total counter
go_memstats_lookups_total 0
# HELP go_memstats_mallocs_total Total number of mallocs.
# TYPE go_memstats_mallocs_total counter
go_memstats_mallocs_total 50879
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
# TYPE go_memstats_mcache_inuse_bytes gauge
go_memstats_mcache_inuse_bytes 7200
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
# TYPE go_memstats_mcache_sys_bytes gauge
go_memstats_mcache_sys_bytes 16384
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
# TYPE go_memstats_mspan_inuse_bytes gauge
go_memstats_mspan_inuse_bytes 124440
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
# TYPE go_memstats_mspan_sys_bytes gauge
go_memstats_mspan_sys_bytes 131072
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
# TYPE go_memstats_next_gc_bytes gauge
go_memstats_next_gc_bytes 7.063104e+06
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
# TYPE go_memstats_other_sys_bytes gauge
go_memstats_other_sys_bytes 1.399348e+06
# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
# TYPE go_memstats_stack_inuse_bytes gauge
go_memstats_stack_inuse_bytes 1.376256e+06
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
# TYPE go_memstats_stack_sys_bytes gauge
go_memstats_stack_sys_bytes 1.376256e+06
# HELP go_memstats_sys_bytes Number of bytes obtained from system.
# TYPE go_memstats_sys_bytes gauge
go_memstats_sys_bytes 7.531828e+07
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
go_threads 12
# HELP gorm_dbstats_idle The number of idle connections.
# TYPE gorm_dbstats_idle gauge
gorm_dbstats_idle{db="jianjiu",micro_name="lb.example.test11"} 0
# HELP gorm_dbstats_in_use The number of connections currently in use.
# TYPE gorm_dbstats_in_use gauge
gorm_dbstats_in_use{db="jianjiu",micro_name="lb.example.test11"} 0
# HELP gorm_dbstats_max_idle_closed The total number of connections closed due to SetMaxIdleConns.
# TYPE gorm_dbstats_max_idle_closed gauge
gorm_dbstats_max_idle_closed{db="jianjiu",micro_name="lb.example.test11"} 0
# HELP gorm_dbstats_max_lifetime_closed The total number of connections closed due to SetConnMaxLifetime.
# TYPE gorm_dbstats_max_lifetime_closed gauge
gorm_dbstats_max_lifetime_closed{db="jianjiu",micro_name="lb.example.test11"} 0
# HELP gorm_dbstats_max_open_connections Maximum number of open connections to the database.
# TYPE gorm_dbstats_max_open_connections gauge
gorm_dbstats_max_open_connections{db="jianjiu",micro_name="lb.example.test11"} 0
# HELP gorm_dbstats_open_connections The number of established connections both in use and idle.
# TYPE gorm_dbstats_open_connections gauge
gorm_dbstats_open_connections{db="jianjiu",micro_name="lb.example.test11"} 0
# HELP gorm_dbstats_wait_count The total number of connections waited for.
# TYPE gorm_dbstats_wait_count gauge
gorm_dbstats_wait_count{db="jianjiu",micro_name="lb.example.test11"} 0
# HELP gorm_dbstats_wait_duration The total time blocked waiting for a new connection.
# TYPE gorm_dbstats_wait_duration gauge
gorm_dbstats_wait_duration{db="jianjiu",micro_name="lb.example.test11"} 0
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 1.86
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
process_max_fds 1.048576e+06
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
process_open_fds 22
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
process_resident_memory_bytes 4.3569152e+07
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
process_start_time_seconds 1.62632905214e+09
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 7.70568192e+08
# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
# TYPE process_virtual_memory_max_bytes gauge
process_virtual_memory_max_bytes 1.8446744073709552e+19
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight{micro_name="lb.example.test11"} 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200",micro_name="lb.example.test11"} 3
promhttp_metric_handler_requests_total{code="500",micro_name="lb.example.test11"} 0
promhttp_metric_handler_requests_total{code="503",micro_name="lb.example.test11"} 0

The result is as follows (so far I have only scraped the metrics of "lb.example.test11"):

{
  "_index": "apm-7.13.3-metric-000001",
  "_type": "_doc",
  "_id": "xMzcqHoBcStLGm-E-rvj",
  "_version": 1,
  "_score": null,
  "fields": {
    "host.hostname": [
      "main2"
    ],
    "process_virtual_memory_bytes": [
      770830340
    ],
    "go_memstats_mspan_sys_bytes": [
      180224
    ],
    "go_memstats_last_gc_time_seconds": [
      1626330620
    ],
    "go_memstats_stack_sys_bytes": [
      1474560
    ],
    "service.language.name": [
      "unknown"
    ],
    "labels.instance": [
      "main2:9102"
    ],
    "scrape_samples_post_metric_relabeling": [
      92
    ],
    "labels.scheme": [
      "http"
    ],
    "processor.event": [
      "metric"
    ],
    "agent.name": [
      "otlp"
    ],
    "process_start_time_seconds": [
      1626330240
    ],
    "host.name": [
      "main2"
    ],
    "up": [
      1
    ],
    "go_memstats_heap_alloc_bytes": [
      9424888
    ],
    "go_memstats_mspan_inuse_bytes": [
      155448
    ],
    "go_memstats_lookups_total": [
      0
    ],
    "go_memstats_frees_total": [
      134718
    ],
    "go_memstats_alloc_bytes": [
      9424888
    ],
    "processor.name": [
      "metric"
    ],
    "go_memstats_sys_bytes": [
      75580424
    ],
    "go_memstats_buck_hash_sys_bytes": [
      1455916
    ],
    "go_memstats_mallocs_total": [
      174129
    ],
    "ecs.version": [
      "1.8.0"
    ],
    "observer.type": [
      "apm-server"
    ],
    "observer.version": [
      "7.13.3"
    ],
    "go_memstats_gc_cpu_fraction": [
      0.000038187492
    ],
    "agent.version": [
      "unknown"
    ],
    "scrape_duration_seconds": [
      0.0027382
    ],
    "process_resident_memory_bytes": [
      28372992
    ],
    "process_cpu_seconds_total": [
      70.78
    ],
    "service.node.name": [
      "main2"
    ],
    "go_memstats_heap_idle_bytes": [
      54247424
    ],
    "go_memstats_heap_sys_bytes": [
      65634304
    ],
    "go_memstats_gc_sys_bytes": [
      5462296
    ],
    "process_open_fds": [
      22
    ],
    "scrape_series_added": [
      92
    ],
    "go_memstats_stack_inuse_bytes": [
      1474560
    ],
    "process_max_fds": [
      1048576
    ],
    "go_memstats_heap_objects": [
      39411
    ],
    "go_memstats_mcache_inuse_bytes": [
      7200
    ],
    "go_memstats_other_sys_bytes": [
      1356740
    ],
    "go_threads": [
      12
    ],
    "go_memstats_heap_inuse_bytes": [
      11386880
    ],
    "go_memstats_mcache_sys_bytes": [
      16384
    ],
    "go_memstats_alloc_bytes_total": [
      37136680
    ],
    "labels.port": [
      "9102"
    ],
    "go_goroutines": [
      265
    ],
    "service.name": [
      "prometheus_simple_main2_9102"
    ],
    "labels.job": [
      "prometheus_simple/main2:9102"
    ],
    "go_memstats_heap_released_bytes": [
      49135616
    ],
    "observer.version_major": [
      7
    ],
    "process_virtual_memory_max_bytes": [
      18446744000000000000
    ],
    "observer.hostname": [
      "797c4982dd79"
    ],
    "scrape_samples_scraped": [
      92
    ],
    "metricset.name": [
      "app"
    ],
    "event.ingested": [
      "2021-07-15T06:31:32.316Z"
    ],
    "@timestamp": [
      "2021-07-15T06:31:31.343Z"
    ],
    "go_memstats_next_gc_bytes": [
      16465936
    ]
  },
  "sort": [
    1626330691343
  ]
}
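
Note: the service.name above, prometheus_simple_main2_9102, is generated by the collector from the scrape endpoint, while my traces carry the names passed to initJaeger (service_t1 / service_t2). I assume this mismatch is why the two signals are not linked in APM, and that the fix is to force a matching service.name onto the metrics (for example with the collector's resource processor), but I have not found a working configuration yet.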
