Collect Annotations and Labels

You can collect pod annotations and labels using the Kubernetes Attributes Processor. For example, suppose the pod my-otel-demo-adservice-86f8b6d9c6-l4hvh has the following labels and annotations:

$ kubectl describe pod my-otel-demo-adservice-86f8b6d9c6-l4hvh

Name:             my-otel-demo-adservice-86f8b6d9c6-l4hvh
Namespace:        default
Priority:         0
Service Account:  my-otel-demo
Node:             gke-cluster-5-default-pool-d1b464b0-4v86/10.128.0.15
Start Time:       Wed, 29 Jan 2025 03:08:33 +0000
Labels:           app.kubernetes.io/component=adservice
                  app.kubernetes.io/instance=my-otel-demo
                  app.kubernetes.io/name=my-otel-demo-adservice
                  opentelemetry.io/name=my-otel-demo-adservice
                  pod-template-hash=86f8b6d9c6
Annotations:      example.com/maintainer: teamA
...
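
With the processor configuration below in place, these labels and annotations are attached to matching telemetry as resource attributes. For this pod, the added attributes would look roughly as follows (an illustrative sketch; the exact set depends on the tag_name values you configure):

app.kubernetes.io/component: adservice
app.kubernetes.io/instance: my-otel-demo
app.kubernetes.io/name: my-otel-demo-adservice
opentelemetry.io/name: my-otel-demo-adservice
pod-template-hash: 86f8b6d9c6
example.com/maintainer: teamA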

Note

To view all available Agent Chart versions, run: helm search repo observe --versions | grep observe/agent. If you’re currently using Agent Chart versions 0.38 through 0.40, please upgrade to version 0.41 or later.

For Agent Chart version 0.41 or later, follow these steps:

  1. Create a file named k8sattributes-values.yaml with the following contents:

agent:
  config:
    # 1) Define an anchor for the repeated configuration (must appear before you reference it).
    k8sattributes_custom_base: &k8sattributes_custom_base
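      # passthrough: false makes the processor query the Kubernetes API for pod metadata rather than only forwarding the pod IP.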
      passthrough: false
      extract:
        labels:
          - tag_name: app.kubernetes.io/component
            key: app.kubernetes.io/component
            from: pod
          - tag_name: app.kubernetes.io/instance
            key: app.kubernetes.io/instance
            from: pod
          - tag_name: app.kubernetes.io/name
            key: app.kubernetes.io/name
            from: pod
          - tag_name: opentelemetry.io/name
            key: opentelemetry.io/name
            from: pod
          - tag_name: pod-template-hash
            key: pod-template-hash
            from: pod
        annotations:
          - tag_name: example.com/maintainer
            key: example.com/maintainer
            from: pod
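      # Associate telemetry with a pod by IP first, then by UID, then by the
      # incoming connection's address; the first rule that matches wins.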
      pod_association:
        - sources:
            - from: resource_attribute
              name: k8s.pod.ip
        - sources:
            - from: resource_attribute
              name: k8s.pod.uid
        - sources:
            - from: connection

    nodeLogsMetrics:
      processors:
        # 2) Reference the anchor
        k8sattributes/custom: *k8sattributes_custom_base
        #
        # (Any other processors remain the same)
        #
      service:
        pipelines:
          logs:
            processors: [memory_limiter, k8sattributes, batch, resourcedetection/cloud, resource/observe_common, attributes/debug_source_pod_logs, k8sattributes/custom]
          metrics/hostmetrics:
            processors: [memory_limiter, k8sattributes, batch, resourcedetection/cloud, resource/observe_common, attributes/debug_source_hostmetrics, k8sattributes/custom]
          metrics/kubeletstats:
            processors: [memory_limiter, k8sattributes, batch, resourcedetection/cloud, resource/observe_common, attributes/debug_source_kubeletstats_metrics, k8sattributes/custom]

    clusterEvents:
      processors:
        k8sattributes/custom: *k8sattributes_custom_base
      service:
        pipelines:
          logs/cluster:
            processors: [memory_limiter, batch, resource/observe_common, filter/cluster, transform/cluster, k8sattributes/custom]
          logs/objects:
            processors: [memory_limiter, batch, resource/observe_common, transform/unify, observek8sattributes, transform/object, k8sattributes/custom]

    clusterMetrics:
      processors:
        k8sattributes/custom: *k8sattributes_custom_base
      service:
        pipelines:
          metrics/pod_metrics:
            processors: [memory_limiter, k8sattributes, batch, resource/observe_common, attributes/debug_source_pod_metrics, k8sattributes/custom]
          metrics:
            processors: [memory_limiter, k8sattributes, batch, resource/observe_common, attributes/debug_source_cluster_metrics, k8sattributes/custom]

    monitor:
      processors:
        k8sattributes/custom: *k8sattributes_custom_base
      service:
        pipelines:
          metrics:
            processors: [memory_limiter, k8sattributes, batch, resource/observe_common, attributes/debug_source_agent_monitor, k8sattributes/custom]

    forwarder:
      processors:
        k8sattributes/custom: *k8sattributes_custom_base
      service:
        pipelines:
          traces/observe-forward:
            processors: [memory_limiter, k8sattributes, batch, resourcedetection/cloud, resource/observe_common, attributes/debug_source_app_traces, k8sattributes/custom]
          logs/observe-forward:
            processors: [memory_limiter, k8sattributes, batch, resourcedetection/cloud, resource/observe_common, attributes/debug_source_app_logs, k8sattributes/custom]
          metrics/observe-forward:
            processors: [memory_limiter, k8sattributes, batch, resourcedetection/cloud, resource/observe_common, attributes/debug_source_app_metrics, k8sattributes/custom]

  2. Redeploy the Observe Agent with the updated config.

helm upgrade --reuse-values observe-agent observe/agent -n observe --values k8sattributes-values.yaml

  3. Restart the pods.

kubectl rollout restart deployment -n observe
kubectl rollout restart daemonset -n observe
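
  4. (Optional) Verify the change. A quick sanity check is to confirm the override was stored with the release and that the agent pods restarted cleanly (the release and namespace names below match the helm upgrade command above):

helm get values observe-agent -n observe | grep -A 5 k8sattributes_custom_base
kubectl get pods -n observe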

If you are running an older Agent Chart version, use the following configuration instead (the helm upgrade command below pins chart version 0.37.0):

  1. Create a file named k8sattributes-values.yaml with the following contents:

agent:
  config:
    # 1) Define an anchor for the repeated configuration (must appear before you reference it).
    k8sattributes_custom_base: &k8sattributes_custom_base
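      # passthrough: false makes the processor query the Kubernetes API for pod metadata rather than only forwarding the pod IP.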
      passthrough: false
      extract:
        labels:
          - tag_name: app.kubernetes.io/component
            key: app.kubernetes.io/component
            from: pod
          - tag_name: app.kubernetes.io/instance
            key: app.kubernetes.io/instance
            from: pod
          - tag_name: app.kubernetes.io/name
            key: app.kubernetes.io/name
            from: pod
          - tag_name: opentelemetry.io/name
            key: opentelemetry.io/name
            from: pod
          - tag_name: pod-template-hash
            key: pod-template-hash
            from: pod
        annotations:
          - tag_name: example.com/maintainer
            key: example.com/maintainer
            from: pod
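      # Associate telemetry with a pod by IP first, then by UID, then by the
      # incoming connection's address; the first rule that matches wins.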
      pod_association:
        - sources:
            - from: resource_attribute
              name: k8s.pod.ip
        - sources:
            - from: resource_attribute
              name: k8s.pod.uid
        - sources:
            - from: connection

    nodeLogsMetrics:
      processors:
        # 2) Reference the anchor
        k8sattributes/custom: *k8sattributes_custom_base
        #
        # (Any other processors remain the same)
        #
      service:
        pipelines:
          logs:
            processors: [memory_limiter, k8sattributes, batch, resourcedetection/cloud, resource/observe_common, attributes/debug_source_pod_logs, k8sattributes/custom]
          metrics/hostmetrics:
            processors: [memory_limiter, k8sattributes, batch, resourcedetection/cloud, resource/observe_common, attributes/debug_source_hostmetrics, k8sattributes/custom]
          metrics/kubeletstats:
            processors: [memory_limiter, k8sattributes, batch, resourcedetection/cloud, resource/observe_common, attributes/debug_source_kubletstats_metrics, k8sattributes/custom]
          # # Uncomment these if you are using observe-forward pipelines:
          # # observe-forward
          # traces/observe-forward:
          #   processors: [memory_limiter, k8sattributes, batch, resourcedetection/cloud, resource/observe_common, attributes/debug_source_app_traces, k8sattributes/custom]
          # logs/observe-forward:
          #   processors: [memory_limiter, k8sattributes, batch, resourcedetection/cloud, resource/observe_common, attributes/debug_source_app_logs, k8sattributes/custom]
          # metrics/observe-forward:
          #   processors: [memory_limiter, k8sattributes, batch, resourcedetection/cloud, resource/observe_common, attributes/debug_source_app_metrics, k8sattributes/custom]

    clusterEvents:
      processors:
        k8sattributes/custom: *k8sattributes_custom_base
      service:
        pipelines:
          logs/cluster:
            processors: [memory_limiter, batch, resource/observe_common, filter/cluster, transform/cluster, k8sattributes/custom]
          logs/objects:
            processors: [memory_limiter, batch, resource/observe_common, transform/unify, observek8sattributes, transform/object, k8sattributes/custom]

    clusterMetrics:
      processors:
        k8sattributes/custom: *k8sattributes_custom_base
      service:
        pipelines:
          metrics/pod_metrics:
            processors: [memory_limiter, k8sattributes, batch, resource/observe_common, attributes/debug_source_pod_metrics, k8sattributes/custom]
          metrics:
            processors: [memory_limiter, k8sattributes, batch, resource/observe_common, attributes/debug_source_cluster_metrics, k8sattributes/custom]

    monitor:
      processors:
        k8sattributes/custom: *k8sattributes_custom_base
      service:
        pipelines:
          metrics:
            processors: [memory_limiter, k8sattributes, batch, resource/observe_common, attributes/debug_source_agent_monitor, k8sattributes/custom]

  2. Redeploy the Observe Agent with the updated config.

helm upgrade --reuse-values observe-agent observe/agent -n observe --values k8sattributes-values.yaml --version 0.37.0

  3. Restart the pods.

kubectl rollout restart deployment -n observe
kubectl rollout restart daemonset -n observe
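
As with the newer chart versions, you can verify the stored overrides after the upgrade with helm get values observe-agent -n observe.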