[opentelemetry-kube-stack] Fix values.schema.json for accepting targetAllocator.image type (#1251)

* fix collectors.defaultCRConfig.targetAllocator.image type

* fix serviceMonitorSelector

* remove id from values.schema.json, same as collector

* move defaultCRConfig up from collectors

* generate examples

* bump chart version

* create example for targetAllocator (see the values.yaml sketch below)

* add enabled to TA

---------

Co-authored-by: Jacob Aronoff <jaronoff97@users.noreply.github.com>
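
As a quick illustration of the example added in this commit, here is a hedged values.yaml sketch for enabling the target allocator on a collector. The collectors.daemon path and the field values are assumptions taken from the rendered example output further down; the chart's values.schema.json remains the authoritative definition of the accepted keys:

collectors:
  daemon:
    targetAllocator:
      # "enabled" is the flag added to the TA block in this change (key name assumed)
      enabled: true
      # values.schema.json now accepts "image" as a plain string
      image: ghcr.io/open-telemetry/opentelemetry-operator/target-allocator:main
      prometheusCR:
        enabled: true
        scrapeInterval: 30s
        serviceMonitorSelector:
          matchExpressions:
          - key: kubernetes.io/app-name
            operator: In
            values:
            - my-app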
sergeybataev and jaronoff97 authored Jul 10, 2024
1 parent d523295 commit 783a47c
Showing 20 changed files with 1,248 additions and 37 deletions.
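
For context on the title's schema fix, the change presumably declares targetAllocator.image as a plain string rather than an object. The fragment below is a minimal JSON Schema sketch of that idea, with the surrounding property layout assumed rather than copied from the chart:

{
  "targetAllocator": {
    "type": "object",
    "properties": {
      "enabled": { "type": "boolean" },
      "image": { "type": "string" }
    }
  }
}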
2 changes: 1 addition & 1 deletion charts/opentelemetry-kube-stack/Chart.yaml
@@ -1,6 +1,6 @@
 apiVersion: v2
 name: opentelemetry-kube-stack
-version: 0.0.7
+version: 0.0.8
 description: |
   OpenTelemetry Quickstart chart for Kubernetes.
   Installs an operator and collector for an easy way to get started with Kubernetes observability.

@@ -5,7 +5,7 @@ kind: OpAMPBridge
 metadata:
   name: example
   labels:
-    helm.sh/chart: opentelemetry-kube-stack-0.0.7
+    helm.sh/chart: opentelemetry-kube-stack-0.0.8
     app.kubernetes.io/version: "0.103.0"
     app.kubernetes.io/managed-by: Helm
   annotations:

@@ -6,7 +6,7 @@ metadata:
   name: example-cluster-stats
   namespace: default
   labels:
-    helm.sh/chart: opentelemetry-kube-stack-0.0.7
+    helm.sh/chart: opentelemetry-kube-stack-0.0.8
     app.kubernetes.io/version: "0.103.0"
     app.kubernetes.io/managed-by: Helm
     opentelemetry.io/opamp-reporting: "true"
@@ -192,7 +192,7 @@ metadata:
   name: example-daemon
   namespace: default
   labels:
-    helm.sh/chart: opentelemetry-kube-stack-0.0.7
+    helm.sh/chart: opentelemetry-kube-stack-0.0.8
     app.kubernetes.io/version: "0.103.0"
     app.kubernetes.io/managed-by: Helm
     opentelemetry.io/opamp-reporting: "true"

@@ -5,7 +5,7 @@ kind: Instrumentation
 metadata:
   name: example
   labels:
-    helm.sh/chart: opentelemetry-kube-stack-0.0.7
+    helm.sh/chart: opentelemetry-kube-stack-0.0.8
     app.kubernetes.io/version: "0.103.0"
     app.kubernetes.io/managed-by: Helm
   annotations:

@@ -0,0 +1,124 @@
---
# Source: opentelemetry-kube-stack/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: example-collector
rules:
- apiGroups: [""]
  resources:
  - namespaces
  - nodes
  - nodes/proxy
  - nodes/metrics
  - nodes/stats
  - services
  - endpoints
  - pods
  - events
  - secrets
  verbs: ["get", "list", "watch"]
- apiGroups: ["monitoring.coreos.com"]
  resources:
  - servicemonitors
  - podmonitors
  verbs: ["get", "list", "watch"]
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs: ["get", "list", "watch"]
- apiGroups:
  - apps
  resources:
  - daemonsets
  - deployments
  - replicasets
  - statefulsets
  verbs: ["get", "list", "watch"]
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs: ["get", "list", "watch"]
- apiGroups: ["discovery.k8s.io"]
  resources:
  - endpointslices
  verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics", "/metrics/cadvisor"]
  verbs: ["get"]

- apiGroups:
  - ""
  resources:
  - events
  - namespaces
  - namespaces/status
  - nodes
  - nodes/spec
  - pods
  - pods/status
  - replicationcontrollers
  - replicationcontrollers/status
  - resourcequotas
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - apps
  resources:
  - daemonsets
  - deployments
  - replicasets
  - statefulsets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - daemonsets
  - deployments
  - replicasets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - batch
  resources:
  - jobs
  - cronjobs
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - autoscaling
  resources:
  - horizontalpodautoscalers
  verbs:
  - get
  - list
  - watch
- apiGroups: ["events.k8s.io"]
  resources: ["events"]
  verbs: ["watch", "list"]
---
# Source: opentelemetry-kube-stack/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: example-daemon
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: example-collector
subjects:
- kind: ServiceAccount
  # quirk of the Operator
  name: "example-daemon-collector"
  namespace: default

@@ -0,0 +1,146 @@
---
# Source: opentelemetry-kube-stack/templates/collector.yaml
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
  name: example-daemon
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-kube-stack-0.0.8
    app.kubernetes.io/version: "0.103.0"
    app.kubernetes.io/managed-by: Helm
  annotations:
    "helm.sh/hook": post-install,post-upgrade
    "helm.sh/hook-delete-policy": hook-failed
spec:
  managementState: managed
  mode: daemonset
  config:
    exporters:
      debug: {}
      otlp:
        endpoint: ingest.example.com:443
        headers:
          access-token: ${ACCESS_TOKEN}
    processors:
      batch:
        send_batch_max_size: 1500
        send_batch_size: 1000
        timeout: 1s
      resourcedetection/env:
        detectors:
        - env
        override: false
        timeout: 2s
    receivers:
      otlp:
        protocols:
          grpc:
            endpoint: 0.0.0.0:4317
          http:
            endpoint: 0.0.0.0:4318
      prometheus:
        config:
          scrape_configs:
          - job_name: otel-collector
            scrape_interval: 30s
            static_configs:
            - targets:
              - 0.0.0.0:8888
        target_allocator:
          collector_id: ${POD_NAME}
          endpoint: http://otelcol-targetallocator
          interval: 30s
    service:
      pipelines:
        logs:
          exporters:
          - debug
          - otlp
          processors:
          - resourcedetection/env
          - batch
          receivers:
          - otlp
        metrics:
          exporters:
          - debug
          - otlp
          processors:
          - resourcedetection/env
          - batch
          receivers:
          - prometheus
        traces:
          exporters:
          - debug
          - otlp
          processors:
          - resourcedetection/env
          - batch
          receivers:
          - otlp
  image: "otel/opentelemetry-collector-k8s:0.103.1"
  imagePullPolicy: IfNotPresent
  upgradeStrategy: automatic
  hostNetwork: false
  shareProcessNamespace: false
  terminationGracePeriodSeconds: 30
  resources:
    limits:
      cpu: 100m
      memory: 250Mi
    requests:
      cpu: 100m
      memory: 128Mi
  securityContext:
    {}
  targetAllocator:
    enabled: true
    image: ghcr.io/open-telemetry/opentelemetry-operator/target-allocator:main
    prometheusCR:
      enabled: true
      podMonitorSelector:
        matchLabels:
          app: my-app
      scrapeInterval: 30s
      serviceMonitorSelector:
        matchExpressions:
        - key: kubernetes.io/app-name
          operator: In
          values:
          - my-app
  volumeMounts:
  env:
  - name: OTEL_K8S_NODE_NAME
    valueFrom:
      fieldRef:
        fieldPath: spec.nodeName
  - name: OTEL_K8S_NODE_IP
    valueFrom:
      fieldRef:
        fieldPath: status.hostIP
  - name: OTEL_K8S_NAMESPACE
    valueFrom:
      fieldRef:
        apiVersion: v1
        fieldPath: metadata.namespace
  - name: OTEL_K8S_POD_NAME
    valueFrom:
      fieldRef:
        apiVersion: v1
        fieldPath: metadata.name
  - name: OTEL_K8S_POD_IP
    valueFrom:
      fieldRef:
        apiVersion: v1
        fieldPath: status.podIP
  - name: OTEL_RESOURCE_ATTRIBUTES
    value: "k8s.cluster.name=demo"

  - name: ACCESS_TOKEN
    valueFrom:
      secretKeyRef:
        key: access_token
        name: otel-collector-secret
  volumes: