---
#
# addon jaeger tracing configuration
#
# Master switch for the tracing addon; false means nothing is deployed.
enabled: false

# Which tracing backend to deploy. NOTE(review): this file carries settings
# for both "jaeger" and "zipkin" below — valid provider values are defined
# by the consuming chart templates; verify there.
provider: jaeger
# Standard Kubernetes scheduling controls for the tracing pod(s).
nodeSelector: {}
tolerations: []

# Specify the pod anti-affinity that allows you to constrain which nodes
# your pod is eligible to be scheduled based on labels on pods that are
# already running on the node rather than based on labels on nodes.
# There are currently two types of anti-affinity:
#    "requiredDuringSchedulingIgnoredDuringExecution"
#    "preferredDuringSchedulingIgnoredDuringExecution"
# which denote "hard" vs. "soft" requirements, you can define your values
# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector"
# correspondingly.
# For example:
# podAntiAffinityLabelSelector:
# - key: security
#   operator: In
#   values: S1,S2
#   topologyKey: "kubernetes.io/hostname"
# This pod anti-affinity rule says that the pod requires not to be scheduled
# onto a node if that node is already running a pod with label having key
# "security" and value "S1".
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []

# Settings used when the Jaeger all-in-one image is the tracing backend.
jaeger:
  hub: docker.io/jaegertracing
  image: all-in-one
  # Quoted: an unquoted 1.14 is parsed as a YAML float, which silently
  # mangles tags like "1.10" (-> 1.1). Image tags must stay strings.
  tag: "1.14"
  podAnnotations: {}
  memory:
    # Cap on traces kept when spanStorageType is "memory" — presumably maps
    # to Jaeger's MEMORY_MAX_TRACES setting; verify against the template.
    max_traces: 50000
  # spanStorageType value can be "memory" and "badger" for all-in-one image
  spanStorageType: badger
  # NOTE(review): persist/storageClassName/accessMode look like
  # PersistentVolumeClaim settings for badger storage — confirm in the
  # consuming deployment template.
  persist: false
  storageClassName: ""
  accessMode: ReadWriteMany


# Settings used when Zipkin is the tracing backend.
zipkin:
  hub: docker.io/openzipkin
  image: zipkin
  # Quoted for consistency/safety: version-like tags should always be
  # strings so YAML implicit typing can never reinterpret them.
  tag: "2.14.2"
  podAnnotations: {}
  # NOTE(review): presumably the probe initial delay in seconds — exact
  # semantics are defined in the deployment template; verify there.
  probeStartupDelay: 200
  queryPort: 9411
  resources:
    limits:
      cpu: 300m
      memory: 900Mi
    requests:
      cpu: 150m
      memory: 900Mi
  # NOTE(review): looks like the JVM heap size in MiB; should stay below
  # the memory limit above — confirm against the container args template.
  javaOptsHeap: 700
  # From: https://github.com/openzipkin/zipkin/blob/master/zipkin-server/src/main/resources/zipkin-server-shared.yml#L51
  # Maximum number of spans to keep in memory.  When exceeded, oldest traces (and their spans) will be purged.
  # A safe estimate is 1K of memory per span (each span with 2 annotations + 1 binary annotation), plus
  # 100 MB for a safety buffer.  You'll need to verify in your own environment.
  maxSpans: 500000
  node:
    cpus: 2

# Kubernetes Service exposing the tracing query endpoint.
service:
  annotations: {}
  # Name of the service port.
  name: http
  # ClusterIP keeps the tracing UI internal to the cluster; change to
  # NodePort/LoadBalancer (or use the ingress below) for external access.
  type: ClusterIP
  externalPort: 80

# Optional Ingress in front of the tracing service.
ingress:
  enabled: false
  # Used to create an Ingress record.
  # To add hosts, replace the empty list below, e.g.:
  #   hosts:
  #     - tracing.local
  # (Explicit [] / {} rather than a bare key: a bare "hosts:" parses as
  # null, which is ambiguous for template consumers and flagged by yamllint.)
  hosts: []
  # Example annotations:
  #   kubernetes.io/ingress.class: nginx
  #   kubernetes.io/tls-acme: "true"
  annotations: {}
  # Secrets must be manually created in the namespace, e.g.:
  #   tls:
  #     - secretName: tracing-tls
  #       hosts:
  #         - tracing.local
  tls: []