Targets


logging/fluent-bit/0 (13/13 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.200.2.108:2020/api/v1/metrics/prometheus
up endpoint="metrics" instance="10.200.2.108:2020" job="fluent-bit-metrics" namespace="logging" pod="fluent-bit-9f8sd" service="fluent-bit-metrics" 3.594s ago 2.201ms
http://10.200.2.194:2020/api/v1/metrics/prometheus
up endpoint="metrics" instance="10.200.2.194:2020" job="fluent-bit-metrics" namespace="logging" pod="fluent-bit-r8vck" service="fluent-bit-metrics" 12.497s ago 2.184ms
http://10.200.2.202:2020/api/v1/metrics/prometheus
up endpoint="metrics" instance="10.200.2.202:2020" job="fluent-bit-metrics" namespace="logging" pod="fluent-bit-ww7jj" service="fluent-bit-metrics" 24.818s ago 2.152ms
http://10.200.2.216:2020/api/v1/metrics/prometheus
up endpoint="metrics" instance="10.200.2.216:2020" job="fluent-bit-metrics" namespace="logging" pod="fluent-bit-p99zc" service="fluent-bit-metrics" 17.114s ago 2.324ms
http://10.200.2.229:2020/api/v1/metrics/prometheus
up endpoint="metrics" instance="10.200.2.229:2020" job="fluent-bit-metrics" namespace="logging" pod="fluent-bit-pqwm9" service="fluent-bit-metrics" 16.468s ago 2.208ms
http://10.200.2.236:2020/api/v1/metrics/prometheus
up endpoint="metrics" instance="10.200.2.236:2020" job="fluent-bit-metrics" namespace="logging" pod="fluent-bit-v4pfw" service="fluent-bit-metrics" 3.659s ago 2.138ms
http://10.200.3.125:2020/api/v1/metrics/prometheus
up endpoint="metrics" instance="10.200.3.125:2020" job="fluent-bit-metrics" namespace="logging" pod="fluent-bit-kxmkk" service="fluent-bit-metrics" 27.494s ago 654.4us
http://10.200.3.160:2020/api/v1/metrics/prometheus
up endpoint="metrics" instance="10.200.3.160:2020" job="fluent-bit-metrics" namespace="logging" pod="fluent-bit-chpkn" service="fluent-bit-metrics" 1.814s ago 709.2us
http://10.200.3.181:2020/api/v1/metrics/prometheus
up endpoint="metrics" instance="10.200.3.181:2020" job="fluent-bit-metrics" namespace="logging" pod="fluent-bit-7xkr2" service="fluent-bit-metrics" 16.503s ago 720.8us
http://10.200.3.198:2020/api/v1/metrics/prometheus
up endpoint="metrics" instance="10.200.3.198:2020" job="fluent-bit-metrics" namespace="logging" pod="fluent-bit-7zwwq" service="fluent-bit-metrics" 3.418s ago 750.2us
http://10.200.3.203:2020/api/v1/metrics/prometheus
up endpoint="metrics" instance="10.200.3.203:2020" job="fluent-bit-metrics" namespace="logging" pod="fluent-bit-xjwt8" service="fluent-bit-metrics" 11.032s ago 726us
http://10.200.3.220:2020/api/v1/metrics/prometheus
up endpoint="metrics" instance="10.200.3.220:2020" job="fluent-bit-metrics" namespace="logging" pod="fluent-bit-tctff" service="fluent-bit-metrics" 16.642s ago 831.8us
http://10.200.3.245:2020/api/v1/metrics/prometheus
up endpoint="metrics" instance="10.200.3.245:2020" job="fluent-bit-metrics" namespace="logging" pod="fluent-bit-kmv9j" service="fluent-bit-metrics" 11.2s ago 698.9us

monitoring/bandwidth/0 (4/4 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.200.2.186:8080/metrics/prometheus
up endpoint="bandwidth" instance="10.200.2.186:8080" job="bandwidth" namespace="prod-bandwidth" pod="bandwidth-8dd557488-tptxz" service="bandwidth" 8.568s ago 2.783ms
http://10.200.2.205:8080/metrics/prometheus
up endpoint="bandwidth" instance="10.200.2.205:8080" job="bandwidth" namespace="prod-bandwidth" pod="bandwidth-8dd557488-ttdjn" service="bandwidth" 6.691s ago 2.689ms
http://10.200.3.170:8080/metrics/prometheus
up endpoint="bandwidth" instance="10.200.3.170:8080" job="bandwidth" namespace="prod-bandwidth" pod="bandwidth-8dd557488-p4bcx" service="bandwidth" 18.929s ago 2.136ms
http://10.200.3.247:8080/metrics/prometheus
up endpoint="bandwidth" instance="10.200.3.247:8080" job="bandwidth" namespace="prod-bandwidth" pod="bandwidth-8dd557488-f7s6c" service="bandwidth" 10.105s ago 2.219ms

monitoring/bandwidthprocessor/0 (4/5 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.200.2.243:8080/bandwidthprocessor/actuator/prometheus
up endpoint="bandwidthprocessor" instance="10.200.2.243:8080" job="bandwidthprocessor" namespace="prod-bandwidth" pod="bandwidthprocessor-66856f8679-55lnl" service="bandwidthprocessor" 4.107s ago 39.37ms
http://10.200.2.37:8080/bandwidthprocessor/actuator/prometheus
up endpoint="bandwidthprocessor" instance="10.200.2.37:8080" job="bandwidthprocessor" namespace="prod-bandwidth" pod="bandwidthprocessor-66856f8679-sswxl" service="bandwidthprocessor" 4.915s ago 35.86ms
http://10.200.3.124:8080/bandwidthprocessor/actuator/prometheus
up endpoint="bandwidthprocessor" instance="10.200.3.124:8080" job="bandwidthprocessor" namespace="prod-bandwidth" pod="bandwidthprocessor-66856f8679-vnjjh" service="bandwidthprocessor" 26.539s ago 42.35ms
http://10.200.3.158:8080/bandwidthprocessor/actuator/prometheus
down endpoint="bandwidthprocessor" instance="10.200.3.158:8080" job="bandwidthprocessor" namespace="prod-bandwidth" pod="bandwidthprocessor-66856f8679-xt6hn" service="bandwidthprocessor" 26.256s ago 10s Get "http://10.200.3.158:8080/bandwidthprocessor/actuator/prometheus": context deadline exceeded
http://10.200.3.18:8080/bandwidthprocessor/actuator/prometheus
up endpoint="bandwidthprocessor" instance="10.200.3.18:8080" job="bandwidthprocessor" namespace="prod-bandwidth" pod="bandwidthprocessor-66856f8679-v4hrk" service="bandwidthprocessor" 19.574s ago 34.51ms
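
The single down target in this pool (10.200.3.158) failed with "context deadline exceeded" after a 10s scrape, i.e. it hit the scrape timeout rather than refusing the connection. Requesting the endpoint directly with a much longer timeout shows whether the actuator is merely slow to render its metrics or not answering at all; the sketch below assumes the pod IP from the table is still current and reachable from wherever the script runs.

    # Probe the timed-out bandwidthprocessor endpoint with a generous timeout
    # to distinguish "slow to render metrics" from "not responding at all".
    import time
    import requests

    URL = "http://10.200.3.158:8080/bandwidthprocessor/actuator/prometheus"

    start = time.monotonic()
    try:
        resp = requests.get(URL, timeout=60)  # well beyond the 10s scrape timeout
        elapsed = time.monotonic() - start
        print(f"HTTP {resp.status_code} in {elapsed:.1f}s, {len(resp.content)} bytes")
    except requests.RequestException as exc:
        print(f"no response after {time.monotonic() - start:.1f}s: {exc}")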

monitoring/burrow/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.200.3.213:8237/metrics
up endpoint="metrics" instance="10.200.3.213:8237" job="burrow" namespace="monitoring" pod="burrow-96d64f6d7-qn5c2" service="burrow" 7.54s ago 16.2ms

monitoring/emailadminapplication/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.200.3.182:8080/emailadmin/actuator/prometheus
up endpoint="emailadminapplication" instance="10.200.3.182:8080" job="emailadminapplication" namespace="prod-message-system" pod="emailadminapplication-748bdc578b-lhb7z" service="emailadminapplication" 5.035s ago 7.569ms

monitoring/emailcampaignsystem/0 (0/0 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error

monitoring/emailmessagesystemprocessor/0 (2/2 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.200.2.137:8080/emailmessagesystemprocessor/actuator/prometheus
up endpoint="emailmessagesystemprocessor" instance="10.200.2.137:8080" job="emailmessagesystemprocessor" namespace="prod-message-system" pod="emailmessagesystemprocessor-68f944579d-v7kbv" service="emailmessagesystemprocessor" 15.379s ago 3.17ms
http://10.200.3.201:8080/emailmessagesystemprocessor/actuator/prometheus
up endpoint="emailmessagesystemprocessor" instance="10.200.3.201:8080" job="emailmessagesystemprocessor" namespace="prod-message-system" pod="emailmessagesystemprocessor-68f944579d-hc549" service="emailmessagesystemprocessor" 6.756s ago 2.332ms

monitoring/emailmessagesystemproducer/0 (2/2 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.200.2.253:8080/email/actuator/prometheus
up endpoint="emailmessagesystemproducer" instance="10.200.2.253:8080" job="emailmessagesystemproducer" namespace="prod-message-system" pod="emailmessagesystemproducer-b88bc96c8-r4twr" service="emailmessagesystemproducer" 25.094s ago 4.981ms
http://10.200.3.148:8080/email/actuator/prometheus
up endpoint="emailmessagesystemproducer" instance="10.200.3.148:8080" job="emailmessagesystemproducer" namespace="prod-message-system" pod="emailmessagesystemproducer-b88bc96c8-7qwbk" service="emailmessagesystemproducer" 6.45s ago 3.862ms

monitoring/emailscheduleprocessor/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.200.2.168:8080/emailscheduleprocessor/actuator/prometheus
up endpoint="emailscheduleprocessor" instance="10.200.2.168:8080" job="emailscheduleprocessor" namespace="prod-message-system" pod="emailscheduleprocessor-84c6b85788-9tprb" service="emailscheduleprocessor" 23.026s ago 4.053ms

monitoring/ingress-eks-ingress-nginx-controller/0 (3/4 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.200.2.103:10254/metrics
down endpoint="metrics" instance="10.200.2.103:10254" job="ingress-eks-ingress-nginx-controller-metrics" namespace="nginx-ingress" pod="ingress-eks-ingress-nginx-controller-7f7d8696b6-54lgn" service="ingress-eks-ingress-nginx-controller-metrics" 15.583s ago 1.226ms Get "http://10.200.2.103:10254/metrics": dial tcp 10.200.2.103:10254: connect: connection refused
http://10.200.2.77:10254/metrics
up endpoint="metrics" instance="10.200.2.77:10254" job="ingress-eks-ingress-nginx-controller-metrics" namespace="nginx-ingress" pod="ingress-eks-ingress-nginx-controller-7f7d8696b6-ww29v" service="ingress-eks-ingress-nginx-controller-metrics" 4.064s ago 116.6ms
http://10.200.3.41:10254/metrics
up endpoint="metrics" instance="10.200.3.41:10254" job="ingress-eks-ingress-nginx-controller-metrics" namespace="nginx-ingress" pod="ingress-eks-ingress-nginx-controller-7f7d8696b6-v2zq9" service="ingress-eks-ingress-nginx-controller-metrics" 17.131s ago 91.47ms
http://10.200.3.53:10254/metrics
up endpoint="metrics" instance="10.200.3.53:10254" job="ingress-eks-ingress-nginx-controller-metrics" namespace="nginx-ingress" pod="ingress-eks-ingress-nginx-controller-7f7d8696b6-rhm4d" service="ingress-eks-ingress-nginx-controller-metrics" 6.317s ago 90.51ms
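
One of the four controller replicas (10.200.2.103) refuses connections on the metrics port 10254 while its peers answer in roughly 90-120ms, which points at that single pod rather than at the scrape configuration. A raw TCP connect against the failing replica and a healthy one, as sketched below with IPs copied from the table (adjust if the pods have been rescheduled), confirms whether anything is listening on that port:

    # Compare TCP reachability of the metrics port on the failing and a healthy
    # ingress-nginx controller pod (IPs taken from the target list above).
    import socket

    for ip in ("10.200.2.103", "10.200.2.77"):
        try:
            with socket.create_connection((ip, 10254), timeout=3):
                print(f"{ip}:10254 accepts connections")
        except OSError as exc:
            print(f"{ip}:10254 unreachable: {exc}")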

monitoring/multitrackingroutes/0 (0/0 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error

monitoring/prometheus-pii-prod-promet-apiserver/0 (2/2 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
https://10.200.0.171:443/metrics
up endpoint="https" instance="10.200.0.171:443" job="apiserver" namespace="default" service="kubernetes" 14.745s ago 124.5ms
https://10.200.1.224:443/metrics
up endpoint="https" instance="10.200.1.224:443" job="apiserver" namespace="default" service="kubernetes" 777ms ago 121.2ms

monitoring/prometheus-pii-prod-promet-coredns/0 (2/2 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.200.2.23:9153/metrics
up endpoint="http-metrics" instance="10.200.2.23:9153" job="coredns" namespace="kube-system" pod="coredns-556765db45-7dgxm" service="prometheus-pii-prod-promet-coredns" 27.3s ago 3.491ms
http://10.200.3.59:9153/metrics
up endpoint="http-metrics" instance="10.200.3.59:9153" job="coredns" namespace="kube-system" pod="coredns-556765db45-nn76g" service="prometheus-pii-prod-promet-coredns" 2.488s ago 2.497ms

monitoring/prometheus-pii-prod-promet-kube-controller-manager/0 (0/0 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error

monitoring/prometheus-pii-prod-promet-kube-etcd/0 (0/0 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error

monitoring/prometheus-pii-prod-promet-kube-proxy/0 (0/13 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.200.2.138:10249/metrics
down endpoint="http-metrics" instance="10.200.2.138:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-hkzpc" service="prometheus-pii-prod-promet-kube-proxy" 16.788s ago 1.318ms Get "http://10.200.2.138:10249/metrics": dial tcp 10.200.2.138:10249: connect: connection refused
http://10.200.2.142:10249/metrics
down endpoint="http-metrics" instance="10.200.2.142:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-r45r5" service="prometheus-pii-prod-promet-kube-proxy" 14.857s ago 1.071ms Get "http://10.200.2.142:10249/metrics": dial tcp 10.200.2.142:10249: connect: connection refused
http://10.200.2.158:10249/metrics
down endpoint="http-metrics" instance="10.200.2.158:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-pv99w" service="prometheus-pii-prod-promet-kube-proxy" 29.853s ago 1.156ms Get "http://10.200.2.158:10249/metrics": dial tcp 10.200.2.158:10249: connect: connection refused
http://10.200.2.173:10249/metrics
down endpoint="http-metrics" instance="10.200.2.173:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-2l5bj" service="prometheus-pii-prod-promet-kube-proxy" 19.133s ago 1.122ms Get "http://10.200.2.173:10249/metrics": dial tcp 10.200.2.173:10249: connect: connection refused
http://10.200.2.47:10249/metrics
down endpoint="http-metrics" instance="10.200.2.47:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-6lsb6" service="prometheus-pii-prod-promet-kube-proxy" 25.649s ago 1.181ms Get "http://10.200.2.47:10249/metrics": dial tcp 10.200.2.47:10249: connect: connection refused
http://10.200.2.91:10249/metrics
down endpoint="http-metrics" instance="10.200.2.91:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-hbpvt" service="prometheus-pii-prod-promet-kube-proxy" 6.62s ago 1.127ms Get "http://10.200.2.91:10249/metrics": dial tcp 10.200.2.91:10249: connect: connection refused
http://10.200.3.153:10249/metrics
down endpoint="http-metrics" instance="10.200.3.153:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-46dgw" service="prometheus-pii-prod-promet-kube-proxy" 14.877s ago 417.2us Get "http://10.200.3.153:10249/metrics": dial tcp 10.200.3.153:10249: connect: connection refused
http://10.200.3.164:10249/metrics
down endpoint="http-metrics" instance="10.200.3.164:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-4hqsn" service="prometheus-pii-prod-promet-kube-proxy" 1.935s ago 398.1us Get "http://10.200.3.164:10249/metrics": dial tcp 10.200.3.164:10249: connect: connection refused
http://10.200.3.171:10249/metrics
down endpoint="http-metrics" instance="10.200.3.171:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-28jt5" service="prometheus-pii-prod-promet-kube-proxy" 4.318s ago 405.5us Get "http://10.200.3.171:10249/metrics": dial tcp 10.200.3.171:10249: connect: connection refused
http://10.200.3.191:10249/metrics
down endpoint="http-metrics" instance="10.200.3.191:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-9pbtb" service="prometheus-pii-prod-promet-kube-proxy" 8.982s ago 311.2us Get "http://10.200.3.191:10249/metrics": dial tcp 10.200.3.191:10249: connect: connection refused
http://10.200.3.44:10249/metrics
down endpoint="http-metrics" instance="10.200.3.44:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-xtdmz" service="prometheus-pii-prod-promet-kube-proxy" 23.569s ago 394.8us Get "http://10.200.3.44:10249/metrics": dial tcp 10.200.3.44:10249: connect: connection refused
http://10.200.3.76:10249/metrics
down endpoint="http-metrics" instance="10.200.3.76:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-gkl4b" service="prometheus-pii-prod-promet-kube-proxy" 26.809s ago 461.8us Get "http://10.200.3.76:10249/metrics": dial tcp 10.200.3.76:10249: connect: connection refused
http://10.200.3.87:10249/metrics
down endpoint="http-metrics" instance="10.200.3.87:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-dg4lj" service="prometheus-pii-prod-promet-kube-proxy" 28.981s ago 412.3us Get "http://10.200.3.87:10249/metrics": dial tcp 10.200.3.87:10249: connect: connection refused
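
All 13 kube-proxy targets are down with "connection refused" on port 10249. A refusal that is uniform across every node usually means the metrics listener is simply not exposed on the node IP (kube-proxy's metrics endpoint defaults to binding 127.0.0.1:10249) rather than a per-node fault. The sketch below only sweeps the node IPs from this table to confirm the port is closed everywhere; changing the bind address is done in the kube-proxy configuration and depends on how the cluster is managed.

    # Check whether anything is listening on the kube-proxy metrics port
    # on each node IP listed in the target table above.
    import socket

    NODE_IPS = [
        "10.200.2.138", "10.200.2.142", "10.200.2.158", "10.200.2.173",
        "10.200.2.47", "10.200.2.91", "10.200.3.153", "10.200.3.164",
        "10.200.3.171", "10.200.3.191", "10.200.3.44", "10.200.3.76",
        "10.200.3.87",
    ]

    for ip in NODE_IPS:
        try:
            with socket.create_connection((ip, 10249), timeout=2):
                status = "open"
        except OSError:
            status = "closed"
        print(f"{ip}:10249 {status}")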

monitoring/prometheus-pii-prod-promet-kube-scheduler/0 (0/0 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error

monitoring/prometheus-pii-prod-promet-kube-state-metrics/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.200.2.10:8080/metrics
up endpoint="http" instance="10.200.2.10:8080" job="kube-state-metrics" namespace="monitoring" pod="prometheus-pii-prod-kube-state-metrics-5bbdc47847-9pqr2" service="prometheus-pii-prod-kube-state-metrics" 20.81s ago 70.01ms

monitoring/prometheus-pii-prod-promet-kubelet/0 (13/13 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
https://10.200.2.138:10250/metrics
up endpoint="https-metrics" instance="10.200.2.138:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="ip-10-200-2-138.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 10.767s ago 12.9ms
https://10.200.2.142:10250/metrics
up endpoint="https-metrics" instance="10.200.2.142:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="ip-10-200-2-142.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 16.916s ago 11.68ms
https://10.200.2.158:10250/metrics
up endpoint="https-metrics" instance="10.200.2.158:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="ip-10-200-2-158.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 24.862s ago 13.5ms
https://10.200.2.173:10250/metrics
up endpoint="https-metrics" instance="10.200.2.173:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="ip-10-200-2-173.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 22.775s ago 8.307ms
https://10.200.2.47:10250/metrics
up endpoint="https-metrics" instance="10.200.2.47:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="ip-10-200-2-47.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 25.463s ago 20.37ms
https://10.200.2.91:10250/metrics
up endpoint="https-metrics" instance="10.200.2.91:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="ip-10-200-2-91.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 29.057s ago 14.63ms
https://10.200.3.153:10250/metrics
up endpoint="https-metrics" instance="10.200.3.153:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="ip-10-200-3-153.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 9.308s ago 9.477ms
https://10.200.3.164:10250/metrics
up endpoint="https-metrics" instance="10.200.3.164:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="ip-10-200-3-164.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 16.241s ago 60.91ms
https://10.200.3.171:10250/metrics
up endpoint="https-metrics" instance="10.200.3.171:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="ip-10-200-3-171.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 206ms ago 12.25ms
https://10.200.3.191:10250/metrics
up endpoint="https-metrics" instance="10.200.3.191:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="ip-10-200-3-191.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 29.749s ago 13.73ms
https://10.200.3.44:10250/metrics
up endpoint="https-metrics" instance="10.200.3.44:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="ip-10-200-3-44.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 23.056s ago 12.78ms
https://10.200.3.76:10250/metrics
up endpoint="https-metrics" instance="10.200.3.76:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="ip-10-200-3-76.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 2.796s ago 12.36ms
https://10.200.3.87:10250/metrics
up endpoint="https-metrics" instance="10.200.3.87:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="ip-10-200-3-87.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 6.342s ago 14ms

monitoring/prometheus-pii-prod-promet-kubelet/1 (13/13 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
https://10.200.2.138:10250/metrics/cadvisor
up endpoint="https-metrics" instance="10.200.2.138:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="ip-10-200-2-138.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 14.398s ago 61.84ms
https://10.200.2.142:10250/metrics/cadvisor
up endpoint="https-metrics" instance="10.200.2.142:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="ip-10-200-2-142.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 11.288s ago 85.58ms
https://10.200.2.158:10250/metrics/cadvisor
up endpoint="https-metrics" instance="10.200.2.158:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="ip-10-200-2-158.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 4.089s ago 51.59ms
https://10.200.2.173:10250/metrics/cadvisor
up endpoint="https-metrics" instance="10.200.2.173:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="ip-10-200-2-173.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 7.757s ago 49.1ms
https://10.200.2.47:10250/metrics/cadvisor
up endpoint="https-metrics" instance="10.200.2.47:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="ip-10-200-2-47.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 28.167s ago 98.44ms
https://10.200.2.91:10250/metrics/cadvisor
up endpoint="https-metrics" instance="10.200.2.91:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="ip-10-200-2-91.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 6.574s ago 94.23ms
https://10.200.3.153:10250/metrics/cadvisor
up endpoint="https-metrics" instance="10.200.3.153:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="ip-10-200-3-153.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 4.234s ago 63.95ms
https://10.200.3.164:10250/metrics/cadvisor
up endpoint="https-metrics" instance="10.200.3.164:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="ip-10-200-3-164.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 2.619s ago 86.52ms
https://10.200.3.171:10250/metrics/cadvisor
up endpoint="https-metrics" instance="10.200.3.171:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="ip-10-200-3-171.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 3.534s ago 31.59ms
https://10.200.3.191:10250/metrics/cadvisor
up endpoint="https-metrics" instance="10.200.3.191:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="ip-10-200-3-191.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 28.461s ago 70.95ms
https://10.200.3.44:10250/metrics/cadvisor
up endpoint="https-metrics" instance="10.200.3.44:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="ip-10-200-3-44.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 28.449s ago 43.29ms
https://10.200.3.76:10250/metrics/cadvisor
up endpoint="https-metrics" instance="10.200.3.76:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="ip-10-200-3-76.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 15.251s ago 58.86ms
https://10.200.3.87:10250/metrics/cadvisor
up endpoint="https-metrics" instance="10.200.3.87:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="ip-10-200-3-87.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 27.121s ago 99.03ms

monitoring/prometheus-pii-prod-promet-kubelet/2 (13/13 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
https://10.200.2.138:10250/metrics/probes
up endpoint="https-metrics" instance="10.200.2.138:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="ip-10-200-2-138.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 29.142s ago 1.873ms
https://10.200.2.142:10250/metrics/probes
up endpoint="https-metrics" instance="10.200.2.142:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="ip-10-200-2-142.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 15.209s ago 1.716ms
https://10.200.2.158:10250/metrics/probes
up endpoint="https-metrics" instance="10.200.2.158:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="ip-10-200-2-158.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 6.566s ago 1.735ms
https://10.200.2.173:10250/metrics/probes
up endpoint="https-metrics" instance="10.200.2.173:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="ip-10-200-2-173.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 2.188s ago 1.761ms
https://10.200.2.47:10250/metrics/probes
up endpoint="https-metrics" instance="10.200.2.47:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="ip-10-200-2-47.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 3.721s ago 1.741ms
https://10.200.2.91:10250/metrics/probes
up endpoint="https-metrics" instance="10.200.2.91:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="ip-10-200-2-91.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 14.603s ago 1.808ms
https://10.200.3.153:10250/metrics/probes
up endpoint="https-metrics" instance="10.200.3.153:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="ip-10-200-3-153.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 13.272s ago 1.083ms
https://10.200.3.164:10250/metrics/probes
up endpoint="https-metrics" instance="10.200.3.164:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="ip-10-200-3-164.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 7.28s ago 1.008ms
https://10.200.3.171:10250/metrics/probes
up endpoint="https-metrics" instance="10.200.3.171:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="ip-10-200-3-171.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 23.212s ago 9.092ms
https://10.200.3.191:10250/metrics/probes
up endpoint="https-metrics" instance="10.200.3.191:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="ip-10-200-3-191.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 4.72s ago 1.151ms
https://10.200.3.44:10250/metrics/probes
up endpoint="https-metrics" instance="10.200.3.44:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="ip-10-200-3-44.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 25.006s ago 997.9us
https://10.200.3.76:10250/metrics/probes
up endpoint="https-metrics" instance="10.200.3.76:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="ip-10-200-3-76.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 11.619s ago 999.2us
https://10.200.3.87:10250/metrics/probes
up endpoint="https-metrics" instance="10.200.3.87:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="ip-10-200-3-87.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 6.709s ago 1.094ms

monitoring/prometheus-pii-prod-promet-kubelet/3 (13/13 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
https://10.200.2.138:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="10.200.2.138:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="ip-10-200-2-138.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 23.879s ago 2.127ms
https://10.200.2.142:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="10.200.2.142:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="ip-10-200-2-142.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 3.334s ago 7.196ms
https://10.200.2.158:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="10.200.2.158:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="ip-10-200-2-158.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 24.485s ago 2.15ms
https://10.200.2.173:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="10.200.2.173:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="ip-10-200-2-173.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 3.758s ago 2.095ms
https://10.200.2.47:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="10.200.2.47:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="ip-10-200-2-47.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 6.862s ago 8.627ms
https://10.200.2.91:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="10.200.2.91:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="ip-10-200-2-91.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 26.28s ago 2.326ms
https://10.200.3.153:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="10.200.3.153:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="ip-10-200-3-153.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 24.01s ago 6.366ms
https://10.200.3.164:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="10.200.3.164:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="ip-10-200-3-164.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 9.097s ago 7.732ms
https://10.200.3.171:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="10.200.3.171:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="ip-10-200-3-171.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 25.336s ago 1.254ms
https://10.200.3.191:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="10.200.3.191:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="ip-10-200-3-191.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 20.37s ago 11ms
https://10.200.3.44:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="10.200.3.44:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="ip-10-200-3-44.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 27.029s ago 1.217ms
https://10.200.3.76:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="10.200.3.76:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="ip-10-200-3-76.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 10.041s ago 8.179ms
https://10.200.3.87:10250/metrics/resource/v1alpha1
up endpoint="https-metrics" instance="10.200.3.87:10250" job="kubelet" metrics_path="/metrics/resource/v1alpha1" namespace="kube-system" node="ip-10-200-3-87.us-west-1.compute.internal" service="prometheus-pii-prod-promet-kubelet" 11.804s ago 1.715ms

monitoring/prometheus-pii-prod-promet-node-exporter/0 (13/13 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.200.2.138:9100/metrics
up endpoint="metrics" instance="10.200.2.138:9100" job="node-exporter" namespace="monitoring" pod="prometheus-pii-prod-prometheus-node-exporter-28h44" service="prometheus-pii-prod-prometheus-node-exporter" 14.487s ago 12.13ms
http://10.200.2.142:9100/metrics
up endpoint="metrics" instance="10.200.2.142:9100" job="node-exporter" namespace="monitoring" pod="prometheus-pii-prod-prometheus-node-exporter-65nbc" service="prometheus-pii-prod-prometheus-node-exporter" 4.318s ago 15.06ms
http://10.200.2.158:9100/metrics
up endpoint="metrics" instance="10.200.2.158:9100" job="node-exporter" namespace="monitoring" pod="prometheus-pii-prod-prometheus-node-exporter-nfw6p" service="prometheus-pii-prod-prometheus-node-exporter" 7.65s ago 17.51ms
http://10.200.2.173:9100/metrics
up endpoint="metrics" instance="10.200.2.173:9100" job="node-exporter" namespace="monitoring" pod="prometheus-pii-prod-prometheus-node-exporter-vczrl" service="prometheus-pii-prod-prometheus-node-exporter" 6.911s ago 15.26ms
http://10.200.2.47:9100/metrics
up endpoint="metrics" instance="10.200.2.47:9100" job="node-exporter" namespace="monitoring" pod="prometheus-pii-prod-prometheus-node-exporter-klxs6" service="prometheus-pii-prod-prometheus-node-exporter" 12.456s ago 16.78ms
http://10.200.2.91:9100/metrics
up endpoint="metrics" instance="10.200.2.91:9100" job="node-exporter" namespace="monitoring" pod="prometheus-pii-prod-prometheus-node-exporter-bsxx6" service="prometheus-pii-prod-prometheus-node-exporter" 14.793s ago 16.39ms
http://10.200.3.153:9100/metrics
up endpoint="metrics" instance="10.200.3.153:9100" job="node-exporter" namespace="monitoring" pod="prometheus-pii-prod-prometheus-node-exporter-trl8q" service="prometheus-pii-prod-prometheus-node-exporter" 14.348s ago 12.61ms
http://10.200.3.164:9100/metrics
up endpoint="metrics" instance="10.200.3.164:9100" job="node-exporter" namespace="monitoring" pod="prometheus-pii-prod-prometheus-node-exporter-c5pf4" service="prometheus-pii-prod-prometheus-node-exporter" 7.514s ago 17.12ms
http://10.200.3.171:9100/metrics
up endpoint="metrics" instance="10.200.3.171:9100" job="node-exporter" namespace="monitoring" pod="prometheus-pii-prod-prometheus-node-exporter-z6zkk" service="prometheus-pii-prod-prometheus-node-exporter" 6.105s ago 10.59ms
http://10.200.3.191:9100/metrics
up endpoint="metrics" instance="10.200.3.191:9100" job="node-exporter" namespace="monitoring" pod="prometheus-pii-prod-prometheus-node-exporter-7rnpq" service="prometheus-pii-prod-prometheus-node-exporter" 23.437s ago 12.64ms
http://10.200.3.44:9100/metrics
up endpoint="metrics" instance="10.200.3.44:9100" job="node-exporter" namespace="monitoring" pod="prometheus-pii-prod-prometheus-node-exporter-44sqf" service="prometheus-pii-prod-prometheus-node-exporter" 6.2s ago 14.51ms
http://10.200.3.76:9100/metrics
up endpoint="metrics" instance="10.200.3.76:9100" job="node-exporter" namespace="monitoring" pod="prometheus-pii-prod-prometheus-node-exporter-f4sjv" service="prometheus-pii-prod-prometheus-node-exporter" 11.706s ago 14.14ms
http://10.200.3.87:9100/metrics
up endpoint="metrics" instance="10.200.3.87:9100" job="node-exporter" namespace="monitoring" pod="prometheus-pii-prod-prometheus-node-exporter-8k4tj" service="prometheus-pii-prod-prometheus-node-exporter" 18.413s ago 14.39ms

monitoring/prometheus-pii-prod-promet-operator/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.200.2.22:8080/metrics
up endpoint="http" instance="10.200.2.22:8080" job="prometheus-pii-prod-promet-operator" namespace="monitoring" pod="prometheus-pii-prod-promet-operator-5d8b889d6-c7pz4" service="prometheus-pii-prod-promet-operator" 17.986s ago 2.58ms

monitoring/prometheus-pii-prod-promet-prometheus/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.200.3.75:9090/metrics
up endpoint="web" instance="10.200.3.75:9090" job="prometheus-pii-prod-promet-prometheus" namespace="monitoring" pod="prometheus-prometheus-pii-prod-promet-prometheus-0" service="prometheus-pii-prod-promet-prometheus" 1.089s ago 9.054ms

monitoring/redis-servicemonitor/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.200.2.130:9121/metrics
up endpoint="redis-exporter" instance="10.200.2.130:9121" job="redis-exporter-prometheus-redis-exporter" namespace="monitoring" pod="redis-exporter-prometheus-redis-exporter-8787b455b-n8gb7" service="redis-exporter-prometheus-redis-exporter" 8.706s ago 13.1ms

monitoring/tracking/0 (2/2 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.200.2.211:8080/metrics/prometheus
up endpoint="tracking" instance="10.200.2.211:8080" job="tracking" namespace="prod-tracking" pod="tracking-766458df88-f959j" service="tracking" 20.721s ago 2.625ms
http://10.200.3.131:8080/metrics/prometheus
up endpoint="tracking" instance="10.200.3.131:8080" job="tracking" namespace="prod-tracking" pod="tracking-766458df88-w7gvz" service="tracking" 14.232s ago 1.663ms

prod-kafka-brokers-metrics (4/4 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://b-1.indiaapps-msk-prod-new.uo1zoy.c3.kafka.us-west-1.amazonaws.com:11001/metrics
up instance="broker-1" job="prod_kafka_jmx" 924ms ago 514.7ms
http://b-2.indiaapps-msk-prod-new.uo1zoy.c3.kafka.us-west-1.amazonaws.com:11001/metrics
up instance="broker-2" job="prod_kafka_jmx" 6.364s ago 679.2ms
http://b-1.indiaapps-msk-prod-new.uo1zoy.c3.kafka.us-west-1.amazonaws.com:11002/metrics
up instance="broker-1" job="prod_kafka_node" 13.676s ago 3.528ms
http://b-2.indiaapps-msk-prod-new.uo1zoy.c3.kafka.us-west-1.amazonaws.com:11002/metrics
up instance="broker-2" job="prod_kafka_node" 14.352s ago 2.956ms

prometheus (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://0.0.0.0:9090/metrics
up instance="0.0.0.0:9090" job="prometheus" 22.519s ago 9.637ms
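
Everything shown on this page is also available programmatically from the Prometheus HTTP API under /api/v1/targets, which is convenient for alert triage or for diffing target health over time. A minimal sketch, assuming the server is reachable at http://localhost:9090 (it scrapes itself at 0.0.0.0:9090 above); adjust the URL for wherever it is actually exposed:

    # List every down target with its scrape pool and last error via the
    # Prometheus HTTP API (same data as the Targets page above).
    import requests

    PROM_URL = "http://localhost:9090"  # adjust to wherever Prometheus is reachable

    data = requests.get(f"{PROM_URL}/api/v1/targets", timeout=10).json()
    for target in data["data"]["activeTargets"]:
        if target["health"] != "up":
            print(target["scrapePool"], target["scrapeUrl"], target.get("lastError", ""))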