I'm using the golang otelgin middleware to create a span for every request that goes through my Go server, but when I try to create a child span to do more detailed tracing it doesn't work: I can only see the span created by the middleware. My code is below. main.go:
package main
import (
	"context"
	"fmt"
	"log"
	"net/http"
	"net/url"
	"os"
	"strconv"
	"time"

	"github.com/gin-gonic/gin"
	"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"
	"go.opentelemetry.io/contrib/propagators/aws/xray"
	sampler "go.opentelemetry.io/contrib/samplers/aws/xray"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	"go.opentelemetry.io/otel/sdk/trace"
	oteltrace "go.opentelemetry.io/otel/trace"
	"google.golang.org/grpc"
)
// svcName names both the tracer and the otelgin middleware service.
// Read once at startup from OTEL_SERVICE_NAME; empty if the variable is unset.
var svcName = os.Getenv("OTEL_SERVICE_NAME")
// main wires up tracing, registers the otelgin middleware, and serves a
// single health-check route on :8080 that also emits a demo child span.
func main() {
	// Initialize the tracer provider before the server starts so every
	// request is traced. start_xray returns its error instead of exiting,
	// so the fatal handling lives in exactly one place.
	if err := start_xray(); err != nil {
		log.Fatalf("Failed to start XRay: %v", err)
	}

	r := gin.Default()
	// otelgin creates one SERVER span per request and stores it in the
	// request's context, so spans started from ctx.Request.Context() nest
	// under it.
	r.Use(otelgin.Middleware(svcName))

	r.GET("/", func(ctx *gin.Context) {
		tracer := otel.Tracer(svcName)
		// Start the child from the request context so it parents to the
		// middleware span. Use SpanKindInternal: the middleware already
		// emits the SERVER span for this request, and a second SERVER span
		// in the same trace confuses backends such as X-Ray.
		_, span := tracer.Start(ctx.Request.Context(), "sample-trace",
			oteltrace.WithSpanKind(oteltrace.SpanKindInternal),
			oteltrace.WithAttributes(attribute.String("http.method", ctx.Request.Method)),
		)
		defer span.End()

		time.Sleep(1 * time.Second) // simulate work so the child span has visible duration

		ctx.JSON(http.StatusOK, gin.H{
			"healthy": true,
		})
		span.SetAttributes(attribute.String("http.status", strconv.Itoa(http.StatusOK)))
	})

	if err := r.Run(":8080"); err != nil {
		log.Fatal(err)
	}
}
// start_xray configures the global OpenTelemetry tracer provider with an
// OTLP/gRPC exporter, an X-Ray ID generator and propagator, and the AWS
// X-Ray remote sampler. Endpoints come from OTEL_EXPORTER_OTLP_ENDPOINT and
// XRAY_ENDPOINT, with localhost defaults. All failures are returned (wrapped)
// to the caller rather than exiting the process here.
func start_xray() error {
	ctx := context.Background()

	exporterEndpoint := os.Getenv("OTEL_EXPORTER_OTLP_ENDPOINT")
	if exporterEndpoint == "" {
		exporterEndpoint = "localhost:4317"
	}

	log.Println("Creating new OTLP trace exporter...")
	// grpc.WithBlock makes the dial synchronous so a bad endpoint fails here
	// at startup instead of silently dropping spans later. (WithBlock is
	// deprecated in newer grpc-go; acceptable for a one-time startup dial.)
	traceExporter, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithInsecure(),
		otlptracegrpc.WithEndpoint(exporterEndpoint),
		otlptracegrpc.WithDialOption(grpc.WithBlock()),
	)
	if err != nil {
		return fmt.Errorf("creating OTLP trace exporter: %w", err)
	}

	idg := xray.NewIDGenerator()

	samplerEndpoint := os.Getenv("XRAY_ENDPOINT")
	if samplerEndpoint == "" {
		samplerEndpoint = "http://localhost:2000"
	}
	// The parse error was previously discarded (overwritten by the next
	// assignment); a malformed XRAY_ENDPOINT must be reported.
	endpointUrl, err := url.Parse(samplerEndpoint)
	if err != nil {
		return fmt.Errorf("parsing sampler endpoint %q: %w", samplerEndpoint, err)
	}

	// NOTE(review): the remote X-Ray sampler can discard traces when no
	// matching sampling rule applies; while debugging missing spans, swap in
	// trace.AlwaysSample() to rule the sampler out.
	res, err := sampler.NewRemoteSampler(ctx, svcName, "ecs",
		sampler.WithEndpoint(*endpointUrl),
		sampler.WithSamplingRulesPollingInterval(10*time.Second),
	)
	if err != nil {
		return fmt.Errorf("creating X-Ray remote sampler: %w", err)
	}

	// Attach the remote sampler, batcher, and X-Ray ID generator, then
	// install the provider and propagator globally so otel.Tracer() and the
	// otelgin middleware pick them up.
	tp := trace.NewTracerProvider(
		trace.WithSampler(res),
		trace.WithBatcher(traceExporter),
		trace.WithIDGenerator(idg),
	)
	otel.SetTracerProvider(tp)
	otel.SetTextMapPropagator(xray.Propagator{})
	return nil
}
docker-compose.yml
services:
  # The instrumented Go server; sends OTLP traces to the collector sidecar.
  app:
    build:
      context: ./
    depends_on:
      - otel
    environment:
      - LISTEN_ADDRESS=0.0.0.0:8080
      - AWS_ACCESS_KEY_ID=<key>
      - AWS_SECRET_ACCESS_KEY=<secret>
      - OTEL_RESOURCE_ATTRIBUTES=service.name=sample-service
      - OTEL_SERVICE_NAME=sample-service
      # gRPC exporter endpoint: host:port of the collector service below.
      - OTEL_EXPORTER_OTLP_ENDPOINT=otel:4317
      # X-Ray remote-sampler endpoint (collector's sampling port).
      - XRAY_ENDPOINT=http://otel:2000
      # NOTE(review): OTEL_JAVAAGENT_DEBUG is a Java-agent variable; it has
      # no effect on a Go application — confirm and remove if unneeded.
      - OTEL_JAVAAGENT_DEBUG=true
      - OTEL_METRICS_EXPORTER=otlp
    volumes:
      - /tmp/awscreds:/tmp/awscreds
    ports:
      - '8080:8080'
  # AWS Distro for OpenTelemetry collector, using its built-in ECS config.
  otel:
    image: amazon/aws-otel-collector:latest
    command: --config /etc/ecs/ecs-default-config.yaml
    environment:
      - AWS_ACCESS_KEY_ID=<key>
      - AWS_SECRET_ACCESS_KEY=<secret>
      - AWS_REGION=us-east-1
    volumes:
      # NOTE(review): ./ is mounted at /config but the command reads the
      # built-in /etc/ecs config — verify this mount is still needed.
      - .:/config
      - /tmp/awscreds:/tmp/awscreds
    ports:
      - '4317:4317' # OTLP gRPC
      - '2000:2000' # X-Ray sampling/daemon port
Dockerfile
##
## STEP 1 - BUILD
## Static (CGO-disabled) linux/amd64 binary so it runs on plain alpine.
##
FROM golang:1.21 AS build
WORKDIR /app
# Copy module files first so dependency downloads are layer-cached.
COPY go.mod go.sum ./
RUN go mod download
COPY ./ ./
# NOTE(review): builds ./cmd/server, but the post shows main.go — confirm
# the package path matches the repository layout.
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o /bootstrap ./cmd/server
##
## STEP 2 - DEPLOY
## Minimal runtime image containing only the binary (plus curl for probes).
##
FROM alpine:latest
WORKDIR /
COPY --from=build /bootstrap /bootstrap
RUN apk update && apk add curl
EXPOSE 8080
ENTRYPOINT ["/bootstrap"]
The issue was that the AWS ADOT remote sampler was discarding my traces. I fixed it by using the default always-sample sampler, although this is not the recommended option because it can generate high traffic to your server/collector. I will post the new configuration here shortly.