No information in the "Metrics" module of the "APM" screen

APM Server: 7.13.3, Elasticsearch: 7.13.3, Kibana: 7.13.3


1. Run APM Server, Elasticsearch, and Kibana:

docker network create -d bridge my-jaeger-net
docker run --name elasticsearch --network=my-jaeger-net -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -d elasticsearch:7.13.3
docker run --name kibana --network=my-jaeger-net -p 5601:5601 -d kibana:7.13.3
docker run -d -p 8200:8200 --name=apm-server --network=my-jaeger-net --user=apm-server elastic/apm-server:7.13.3 --strict.perms=false -e -E 'output.elasticsearch.hosts=["elasticsearch:9200"]'
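
(Side note: the collector config below sends an `Authorization: Bearer my_secret_token` header. apm-server only checks that header if a matching token is configured on its side; a minimal sketch, assuming the standard `apm-server.secret_token` setting, is one more flag on the apm-server command above, or the equivalent in apm-server.yml:)

    # appended to the docker run command above
    -E apm-server.secret_token=my_secret_token

    # or equivalently in apm-server.yml
    apm-server:
      secret_token: my_secret_token
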
2. Run the OpenTelemetry Collector (otel/opentelemetry-collector-contrib 0.29.0):
	docker run --name collector --network my-jaeger-net -p 4317:4317 \
	    -v $(pwd)/config.yaml:/config.yaml \
	    -d otel/opentelemetry-collector-contrib:0.29.0 --config=/config.yaml

config.yaml:

receivers:
  jaeger:
    protocols:
      grpc:
        endpoint: 0.0.0.0:14250
  otlp:
    protocols:
      grpc:
        endpoint: "0.0.0.0:4317"

processors:
  memory_limiter:
    check_interval: 1s
    limit_mib: 2000
  batch:

exporters:
  logging/detail:
    loglevel: debug
  otlp/elastic:
    endpoint: apm-server:8200
    insecure: true
    headers:
      Authorization: "Bearer my_secret_token"

service:
  pipelines:
    traces:
      receivers: [ jaeger ] 
      exporters: [ otlp/elastic ] 
      processors: [ memory_limiter, batch ]
    metrics:
      receivers: [ otlp ]
      exporters: [ otlp/elastic ]
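
To check that the collector is actually receiving OTLP metrics from the Go services (independently of apm-server), the logging/detail exporter already defined above can be wired into the metrics pipeline temporarily, e.g.:

service:
  pipelines:
    metrics:
      receivers: [ otlp ]
      exporters: [ logging/detail, otlp/elastic ]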

3. Run jaeger-agent:

    docker run -p 6831:6831/udp --network my-jaeger-net  -d jaegertracing/jaeger-agent:1.22.0 --reporter.grpc.host-port=collector:14250
4. Run the server (service_t2):
package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"net/http"
	"time"

	"github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
	"github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-client-go/config"
	"go.opentelemetry.io/contrib/instrumentation/host"
	"go.opentelemetry.io/contrib/instrumentation/runtime"
	"go.opentelemetry.io/otel/exporters/otlp"
	"go.opentelemetry.io/otel/exporters/otlp/otlpgrpc"
	"go.opentelemetry.io/otel/metric/global"
	controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
	processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
	"go.opentelemetry.io/otel/sdk/metric/selector/simple"
	"go.opentelemetry.io/otel/sdk/resource"
	"go.opentelemetry.io/otel/semconv"
	"google.golang.org/grpc"
)

// service
type service struct {
}

func (s *service) ServeHTTP(writer http.ResponseWriter, request *http.Request) {
	fmt.Println("request******")
	tracer := opentracing.GlobalTracer()
	spanCtx, err := tracer.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(request.Header))
	if err != nil {
		log.Fatal(err)
	}

	span := tracer.StartSpan("service_one_do", ext.RPCServerOption(spanCtx))
	ext.SpanKindRPCServer.Set(span)

	defer func() {
		span.Finish()
	}()
	writer.WriteHeader(200)
	ext.HTTPStatusCode.Set(span, uint16(200))
	writer.Write([]byte("12331"))
}

func main() {
	serviceName := "service_t2"
	tracer, closer := initJaeger(serviceName)
	defer closer.Close()

	initProvider(serviceName) // returned shutdown func is unused here; the server runs until killed
	opentracing.SetGlobalTracer(tracer)
	fmt.Println(http.ListenAndServe(":8083", &service{}))
}

func initJaeger(service string) (opentracing.Tracer, io.Closer) {
	cfg := &config.Configuration{
		Sampler: &config.SamplerConfig{
			Type:  "const",
			Param: 1,
		},
		Reporter: &config.ReporterConfig{
			LogSpans:           true,
			LocalAgentHostPort: "127.0.0.1:6831",
		},
		ServiceName: service,
	}

	tracer, closer, err := cfg.NewTracer(config.Logger(jaeger.StdLogger))
	if err != nil {
		panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
	}

	return tracer, closer
}

func initProvider(serviceName string) func() {
	ctx, cf := context.WithTimeout(context.Background(), time.Second*3)
	defer cf()
	otelAgentAddr := "0.0.0.0:4317"

	exporter, err := otlp.NewExporter(ctx, otlpgrpc.NewDriver(
		otlpgrpc.WithInsecure(),
		otlpgrpc.WithEndpoint(otelAgentAddr),
		otlpgrpc.WithDialOption(grpc.WithBlock()), // useful for testing
	))
	if err != nil {
		log.Fatal(err)
	}
	res := resource.NewWithAttributes(
		semconv.ServiceNameKey.String(serviceName),
	)
	detect, err := resource.TelemetrySDK{}.Detect(ctx)
	if err != nil {
		log.Fatal(err)
	}
	res = resource.Merge(res, detect)
	cont := controller.New(
		processor.New(
			simple.NewWithExactDistribution(),
			exporter,
		),
		controller.WithCollectPeriod(7*time.Second),
		controller.WithExporter(exporter),
		controller.WithResource(res),
	)

	provider := cont.MeterProvider()
	global.SetMeterProvider(provider)

	handleErr(cont.Start(context.Background()), "failed to start cont")

	handleErr(host.Start(host.WithMeterProvider(provider)), "failed to start host")

	handleErr(runtime.Start(runtime.WithMeterProvider(provider), runtime.WithMinimumReadMemStatsInterval(time.Second)), "failed to start runtime")

	return func() {
		handleErr(cont.Stop(context.Background()), "failed to stop metrics controller") // pushes any last exports to the receiver
		handleErr(exporter.Shutdown(context.Background()), "failed to stop exporter") // the 3s ctx above is already cancelled by the time this runs
	}
}

func handleErr(err error, message string) {
	if err != nil {
		log.Fatalf("%s: %v", message, err)
	}
}
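
As a side note, the provider set up in initProvider only exports what the host and runtime instrumentation packages collect. If you want to push an extra metric just to exercise the pipeline, here is a minimal sketch against the same pre-1.0 otel-go metric API used above (instrument and attribute names are made up for illustration; such custom metrics still will not show in the APM Metrics tab, they only help confirm data reaches Elasticsearch):

// extra imports needed:
//   "go.opentelemetry.io/otel/attribute"
//   "go.opentelemetry.io/otel/metric"

// recordTestMetric adds one data point to a counter via the global
// MeterProvider that initProvider registered; call it after initProvider.
func recordTestMetric(ctx context.Context) {
	meter := global.Meter("demo")
	counter := metric.Must(meter).NewInt64Counter("request.count")
	counter.Add(ctx, 1, attribute.String("endpoint", "/"))
}
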

server-two, the client that calls it (service_t1):

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"log"
	"net/http"
	"time"

	"github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
	"github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-client-go/config"
	"go.opentelemetry.io/contrib/instrumentation/host"
	"go.opentelemetry.io/contrib/instrumentation/runtime"
	"go.opentelemetry.io/otel/exporters/otlp"
	"go.opentelemetry.io/otel/exporters/otlp/otlpgrpc"
	"go.opentelemetry.io/otel/metric/global"
	controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
	processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
	"go.opentelemetry.io/otel/sdk/metric/selector/simple"
	"go.opentelemetry.io/otel/sdk/resource"
	"go.opentelemetry.io/otel/semconv"
	"google.golang.org/grpc"
)

func main() {
	serviceName := "service_t1"
	tracer, closer := initJaeger(serviceName)
	defer closer.Close()

	shutdown := initProvider(serviceName)
	// flush the final metric collection before the process exits
	defer shutdown()

	opentracing.SetGlobalTracer(tracer)

	for i := 0; i < 20; i++ {
		do("1")
	}
	// do("2")
}
func do(i string) {
	var url = "http://127.0.0.1:8083"
	tracer := opentracing.GlobalTracer()
	request, err := http.NewRequest(http.MethodGet, url, bytes.NewBuffer([]byte("")))
	if err != nil {
		log.Fatal(err)
	}
	span := tracer.StartSpan("service_two_do" + i)
	defer span.Finish()

	span = span.Tracer().StartSpan("service_t2", opentracing.ChildOf(span.Context()))
	defer span.Finish()
	ext.SpanKindRPCClient.Set(span)

	ext.HTTPUrl.Set(span, url)
	// ext.HTTPMethod.Set(span, http.MethodGet)
	err = tracer.Inject(span.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(request.Header))
	if err != nil {
		log.Fatal(err)
	}

	do, err := http.DefaultClient.Do(request)
	if err != nil {
		log.Fatal(err)
	}
	ext.SpanKindRPCServer.Set(span)
	fmt.Println(do.StatusCode)
	ext.HTTPStatusCode.Set(span, uint16(do.StatusCode))

	all, err := io.ReadAll(do.Body)
	fmt.Println(string(all), err)
}

func initJaeger(service string) (opentracing.Tracer, io.Closer) {
	cfg := &config.Configuration{
		Sampler: &config.SamplerConfig{
			Type:  "const",
			Param: 1,
		},
		Reporter: &config.ReporterConfig{
			LogSpans:           true,
			LocalAgentHostPort: "127.0.0.1:6831",
		},
		ServiceName: service,
	}

	tracer, closer, err := cfg.NewTracer(config.Logger(jaeger.StdLogger))
	if err != nil {
		panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
	}

	return tracer, closer
}

func initProvider(serviceName string) func() {
	ctx, cf := context.WithTimeout(context.Background(), time.Second*3)
	defer cf()
	otelAgentAddr := "0.0.0.0:4317"

	exporter, err := otlp.NewExporter(ctx, otlpgrpc.NewDriver(
		otlpgrpc.WithInsecure(),
		otlpgrpc.WithEndpoint(otelAgentAddr),
		otlpgrpc.WithDialOption(grpc.WithBlock()), // useful for testing
	))
	if err != nil {
		log.Fatal(err)
	}
	res := resource.NewWithAttributes(
		semconv.ServiceNameKey.String(serviceName),
	)
	detect, err := resource.TelemetrySDK{}.Detect(ctx)
	if err != nil {
		log.Fatal(err)
	}
	res = resource.Merge(res, detect)
	cont := controller.New(
		processor.New(
			simple.NewWithExactDistribution(),
			exporter,
		),
		controller.WithCollectPeriod(7*time.Second),
		controller.WithExporter(exporter),
		controller.WithResource(res),
	)

	provider := cont.MeterProvider()
	global.SetMeterProvider(provider)

	handleErr(cont.Start(context.Background()), "failed to start cont")

	handleErr(host.Start(host.WithMeterProvider(provider)), "failed to start host")

	handleErr(runtime.Start(runtime.WithMeterProvider(provider), runtime.WithMinimumReadMemStatsInterval(time.Second)), "failed to start runtime")

	return func() {
		handleErr(cont.Stop(context.Background()), "failed to stop metrics controller") // pushes any last exports to the receiver
		handleErr(exporter.Shutdown(context.Background()), "failed to stop exporter") // the 3s ctx above is already cancelled by the time this runs
	}
}

func handleErr(err error, message string) {
	if err != nil {
		log.Fatalf("%s: %v", message, err)
	}
}

5. Screenshots




I got the same results. Wondering how it is supposed to work.

The "Metrics" panel in the APM app currently only shows a limited set of metrics that Elastic APM agents send. OpenTelemetry SDKs use different metric names, and we do not currently handle those. I think it makes sense to do that, so I've opened Translate OpenTelemetry system metrics · Issue #5796 · elastic/apm-server · GitHub to track the enhancement.

In the meantime, these metric documents can be visualised using Lens | Kibana Guide [7.13] | Elastic or TSVB | Kibana Guide [7.13] | Elastic.
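
To see whether the OpenTelemetry metrics are landing in Elasticsearch at all (before worrying about the UI), a quick check from Kibana Dev Tools; the apm-* index pattern assumes the default 7.x setup:

GET apm-*/_search
{
  "size": 1,
  "sort": [ { "@timestamp": "desc" } ],
  "query": { "term": { "processor.event": "metric" } }
}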

If I change the names of the metrics reported by OpenTelemetry, can I make the data show up in the "Metrics" module of the "APM" screen?

Maybe. I haven't looked at all of the details yet so I don't know if it's feasible, but that is how it could possibly work. It depends on whether all of the same information is available.
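
If you want to experiment with that without touching the SDKs, the renaming could be done in the collector with the contrib metricstransform processor. A rough sketch; the target name is an assumption about what Elastic APM agents report, and a plain rename is probably not sufficient on its own because the OTel metric carries a state attribute and different semantics:

processors:
  metricstransform:
    transforms:
      - include: system.memory.usage            # produced by the host instrumentation
        match_type: strict
        action: update
        new_name: system.memory.actual.free     # assumed Elastic APM metric name

service:
  pipelines:
    metrics:
      receivers: [ otlp ]
      processors: [ memory_limiter, metricstransform, batch ]
      exporters: [ otlp/elastic ]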

If you would like to try this, you should compare these:
