resolv.conf solved!
This commit is contained in:
parent fe33aee2fe
commit 4ba0cadf07
36 .drone.yml
@@ -1,19 +1,19 @@
---
# kind: pipeline
# type: exec
# name: default

# platform:
#   os: linux
#   arch: amd64


# clone:
# # removed since nothing is tested beforehand any more; we pull straight into the directory
#   disable: true

# steps:
# - name: pull into the folduh
#   commands:
#   - cd /etc/ansible
#   - git pull origin some-kind-of-lobster
2 .gitignore (vendored)
@@ -1,2 +1,2 @@
.vaulto
asdf
87 ansible.cfg
@@ -1,44 +1,45 @@
#### export ANSIBLE_CONFIG=./ansible.cfg

[defaults]
gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp/facts_cache
# two hours timeout
fact_caching_timeout = 7200


interpreter_python = auto_silent
ansible_python_interpreter = auto_silent
# Use the YAML callback plugin.
stdout_callback = yaml
# Use the stdout_callback when running ad-hoc commands.
bin_ansible_callbacks = True

host_key_checking = false

#vault_password_file = /etc/ansible/.vaulto
-vault_password_file = /tmp/.vaulto
+#vault_password_file = /tmp/.vaulto
+vault_password_file = /usr/share/.vaulto

# callback_plugins = /etc/ansible/plugins/callback
# callback_whitelist = telegram
# callbacks_enabled = telegram

strategy_plugins = mitogen-0.3.9/ansible_mitogen/plugins/strategy
strategy = mitogen_linear

#### TODO for some reason this doesn't get applied
roles_path = roles:internal_roles
# # [callback_telegram]
# # tg_token = 6472915685:AAHPvgrQoqG7DxtfbnHWPe3Lfild-CGJ1j8
# # tg_chat_id = -4023350326

# adding a user
# useradd -m hogweed1 -s /usr/bin/bash
# passwd hogweed1
# sudo adduser hogweed1 sudo

[ssh_connection]
# Enable pipelining; requires disabling requiretty in sudoers
pipelining = True
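With the vault password file moved to /usr/share/.vaulto, a quick way to confirm ansible-vault picks it up from this config (a sketch; secrets.yml is a hypothetical stand-in for whichever of the repo's files is vaulted):

$ export ANSIBLE_CONFIG=./ansible.cfg
$ printf 'the-vault-passphrase\n' > /usr/share/.vaulto   # assumed location, per vault_password_file above
$ chmod 600 /usr/share/.vaulto
$ ansible-vault view secrets.yml   # should decrypt without --vault-password-file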
@@ -1,10 +1,10 @@
$ANSIBLE_VAULT;1.1;AES256
31363137313338616231343430646133386434313864323835633839353631313262313365396662
6430623831346630336138613735653333386565346461300a326639383234383063646366343039
32396436373561376431383338643464313131336135333864336530636164616661616261363930
3662306533383232660a386464396437653835356564333032393063386532346463376332626536
35373439633936396539383163396632313462626336363164353038643664633734326136356135
36656235616231363234323632393833323035313739363565393932326535643834633464303361
63643531643430336164336261653539353236346533653030336634383031663535383264383365
32653235386436303133623233653235356131643633643937373630333166373063633731353661
36393539333435366439313364633735326339646264626262633063633664626461
@@ -1,18 +1,18 @@
---
all: # keys must be unique, i.e. only one 'hosts' per group
  hosts:
    semyon-0x01.guaranteedstruggle.host:
    semyon-0x02.guaranteedstruggle.host:
    semyon-0x03.guaranteedstruggle.host:
    semyon-0x04.guaranteedstruggle.host:
    semyon-0x05.guaranteedstruggle.host:

    samehost-zero.guaranteedstruggle.host:

puppets: # keys must be unique, i.e. only one 'hosts' per group
  hosts:
    semyon-0x01.guaranteedstruggle.host:
    semyon-0x02.guaranteedstruggle.host:
    semyon-0x03.guaranteedstruggle.host:
    semyon-0x04.guaranteedstruggle.host:
    semyon-0x05.guaranteedstruggle.host:
@@ -1,5 +1,5 @@
---
ansible_ssh_user: root
ansible_ssh_pass: admin
ansible_sudo_pass: admin
ansible_ssh_private_key_file: '/home/hogweed1/id25519.key'
5 environments/just-created/group_vars/lxc/ssh-creds.yml (Normal file)
@@ -0,0 +1,5 @@
---
ansible_ssh_user: root
ansible_ssh_pass: admin
ansible_sudo_pass: admin
ansible_ssh_private_key_file: '/home/hogweed1/id25519.key'
@@ -1,5 +1,5 @@
---
ansible_ssh_user: hogweed1
ansible_ssh_pass: coloredhorses
ansible_sudo_pass: coloredhorses
-ansible_ssh_private_key_file: '/home/hogweed1/id25519.key'
+#ansible_ssh_private_key_file: '/home/hogweed1/id25519.key'
@@ -0,0 +1,5 @@
---
ansible_ssh_user: hogweed1
ansible_ssh_pass: coloredhorses
ansible_sudo_pass: coloredhorses
#ansible_ssh_private_key_file: '/home/hogweed1/id25519.key'
@@ -1,11 +1,24 @@
---
-# all: # keys must be unique, i.e. only one 'hosts' per group
-#   hosts:
-#     #nexus.guaranteedstruggle.host:
-#     #printing-slut.guaranteedstruggle.host:
-#     harbor.guaranteedstruggle.host:
+all: # keys must be unique, i.e. only one 'hosts' per group
+  hosts:
+    #k3s-rancher.guaranteedstruggle.host:
+    # #nexus.guaranteedstruggle.host:
+    # #printing-slut.guaranteedstruggle.host:
+    # harbor.guaranteedstruggle.host:

+    #192.168.0.26
+    #192.168.0.32:
lxc: # keys must be unique, i.e. only one 'hosts' per group
  hosts:
    ### but it's a vm wtf
-    harbor.guaranteedstruggle.host:
+    #harbor.guaranteedstruggle.host:
+
+    #etcd.guaranteedstruggle.host:
+    #prometheus.guaranteedstruggle.host:
+    # 192.168.0.240
+    #192.168.0.251
+    #192.168.0.40
+    #192.168.0.88
+    #192.168.0.52
+    #192.168.0.113
+    #recording-slut.guaranteedstruggle.host:
@@ -1,2 +1,2 @@
# disable the mitogen warning - https://github.com/mitogen-hq/mitogen/issues/740#issuecomment-731513058
ansible_python_interpreter: /usr/bin/python3
@@ -1,4 +1,4 @@
---
ansible_ssh_user: hogweed1
ansible_ssh_pass: coloredhorses
ansible_sudo_pass: coloredhorses
@@ -1,55 +1,71 @@
---
physical_machines:
  hosts:
    cyberbully.guaranteedstruggle.host:
+#
    gpu-slut.guaranteedstruggle.host:
  children:
    proxmoxes:

proxmoxes: # keys must be unique, i.e. only one 'hosts' per group
  hosts:
    king-albert.guaranteedstruggle.host:
  children:
    semyons:

semyons: # keys must be unique, i.e. only one 'hosts' per group
  hosts:
    semyon-0x01.guaranteedstruggle.host:
    semyon-0x02.guaranteedstruggle.host:
    semyon-0x03.guaranteedstruggle.host:
    semyon-0x04.guaranteedstruggle.host:
    semyon-0x05.guaranteedstruggle.host:
vms:
+  hosts:
+    #recording-slut.guaranteedstruggle.host:
+    #192.168.0.26
  children:
    printer:
    kubernetes:
    docker:


docker:
  hosts:
    swarm-node1.guaranteedstruggle.host:
    swarm-node2.guaranteedstruggle.host:
    swarm-node3.guaranteedstruggle.host:

    harbor.guaranteedstruggle.host:

kubernetes:
  hosts:
    rke2-master1.guaranteedstruggle.host:
    rke2-master2.guaranteedstruggle.host:
    rke2-master3.guaranteedstruggle.host:
    rke2-worker1.guaranteedstruggle.host:
    rke2-worker2.guaranteedstruggle.host:
    rke2-worker3.guaranteedstruggle.host:
    rke2-worker4.guaranteedstruggle.host:
    rke2-worker5.guaranteedstruggle.host:

    k3s-rancher.guaranteedstruggle.host:
    k3s-awx.guaranteedstruggle.host:

printer:
  hosts:
    printing-slut.guaranteedstruggle.host:

#### TODO
-# lxc:
+lxc:
+  hosts:
+    ### but it's a vm wtf
+    #harbor.guaranteedstruggle.host:
+    #etcd.guaranteedstruggle.host:
+    prometheus.guaranteedstruggle.host:
+    recording-slut.guaranteedstruggle.host:
+
+    pg.just-for-me.internal:
+    grafana.just-for-me.internal:
+    price-loader.just-for-me.internal:
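With the lxc group now live in the main inventory, the group tree can be sanity-checked before any play runs (a sketch; the -i path is a guess at where this hosts file lives in the repo):

$ ansible-inventory -i hosts.yml --graph   # hosts.yml stands in for this inventory file
# prints the @all tree with @physical_machines, @vms, @lxc and their members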
21 files/alertmanager/alertmanager.service (Normal file)
@@ -0,0 +1,21 @@
[Unit]
Description=Prometheus alertmanager
Wants=network-online.target
After=network-online.target

[Service]
User=prometheus
Group=prometheus
EnvironmentFile=-/etc/sysconfig/alertmanager
ExecStart=/usr/sbin/alertmanager \
    --config.file=/etc/alertmanager/alertmanager.yaml \
    --storage.path=/base/alertmanager \
    --web.config.file=/etc/prometheus/web-config.yaml

ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
Restart=always

[Install]
WantedBy=multi-user.target
50 files/alertmanager/alertmanager.yaml (Normal file)
@@ -0,0 +1,50 @@
global:
  resolve_timeout: 5m

route:
  group_by: [ 'alertname', 'job' ]
  group_wait: 30s
  group_interval: 5m
  repeat_interval: 1h
  receiver: what-went-wrong

#  routes:

receivers:
  # /dev/null receiver
  - name: 'blackhole'

  # the config
  - name: 'what-went-wrong'
    telegram_configs:
      - send_resolved: true
        bot_token: '6472915685:AAHPvgrQoqG7DxtfbnHWPe3Lfild-CGJ1j8'
        chat_id: -4023350326
        message: '{{ template "teletempl" . }}'
        api_url: https://api.telegram.org
        parse_mode: HTML
#  - name: 'vdk2ch'
#    telegram_configs:
#      - send_resolved: true
#        bot_token: '5724991559:AAEuLvpLsgP6LHRGMSyFtQLlR5qPQUO4b_w'
#        chat_id: -1001355646177
#        message: '{{ template "teletempl" . }}'
#        api_url: https://api.telegram.org
#        parse_mode: HTML

# A list of inhibition rules.
#inhibit_rules:

templates:
  - '/etc/alertmanager/templates/my.tmpl'

# A list of time intervals for muting/activating routes.
# time_intervals:
#   - name: business_hours
#     time_intervals:
#       - weekdays: ['monday:friday']
#         times:
#           # Starts at 10:00 Asia/Vladivostok
#           - start_time: '00:00'
#           # Ends at 19:00 Asia/Vladivostok
#             end_time: '09:00'
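amtool, shipped with the Alertmanager release tarball, can validate this file before it is deployed (a sketch, run from the repo root; it may also complain if the template path under /etc/alertmanager does not exist on the machine running the check):

$ amtool check-config files/alertmanager/alertmanager.yaml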
33 files/alertmanager/simple_telegram.tmpl (Normal file)
@@ -0,0 +1,33 @@
{{ define "teletempl" }}
<b>{{ .CommonLabels.alertname }} : </b>
{{- if eq .Status "firing" -}}
<b>{{ .Status | toUpper}} 🔥</b>
{{- end -}}
{{- if eq .Status "resolved" -}}
<b>{{ .Status | toUpper}} ✅</b>
{{- end -}}
{{ $alerts_count := len .Alerts }}
{{ if eq $alerts_count 1 -}} {{/* Single alert block */}}
{{ .CommonAnnotations.summary }}

Host: {{ .CommonLabels.host }}
Instance: {{ .CommonLabels.instance }}
Job: <b>{{ .CommonLabels.job }}</b>

Details:
{{ .CommonAnnotations.description }}

Alert started: [ {{ .CommonAnnotations.alert_started_vl_time }} ]

{{- else -}} {{/* Grouped alert block */}}
{{ .CommonAnnotations.summary }}

Job: <b>{{ .CommonLabels.job }}</b>


Instances:
{{- range .Alerts }}
{{ .Labels.instance }} [ {{ .Annotations.alert_started_vl_time }} ]
{{- end }}
{{ end }}
{{ end }}
188 files/prometheus/alerts.yaml (Normal file)
@@ -0,0 +1,188 @@
---
# more examples can be picked up from here
# https://awesome-prometheus-alerts.grep.to/rules.html


groups:


  - name: standard

    rules:
      - alert: _plchldr
        expr: up == -999
        for: 999m
        labels:
          severity: info
        annotations: &anno
          alert_started_vl_time: "{{ with $b := printf `ALERTS_FOR_STATE{job=\"%s\",instance=\"%s\"} + 36000` $labels.job $labels.instance | query }}{{if $b}}{{ with $a := $b | first | value | humanizeTimestamp }}{{- slice $a 0 19 -}}{{end}}{{end}}{{end}}"


      - alert: jackbot failed
        expr: node_systemd_unit_state{ name="jack_bot.service", state="active" } != 1
        for: 1m
        labels:
          severity: warning
        annotations:
          <<: *anno
          summary: "PIPISA IS DOWN!"
          description: "Pipisa on {{ $labels.instance }} is not working!"

      - alert: jackbot failed
        expr: node_systemd_unit_state{ name="jack_bot.service", state="active" } != 1
        for: 5m
        labels:
          severity: critical
        annotations:
          <<: *anno
          summary: "PIPISA IS DOWN!"
          description: "Pipisa on {{ $labels.instance }} is not working!"


      ### served its purpose, the miners are gone.
      # - alert: MINER JACK KURWA!!
      #   expr: node_load15 > 2
      #   for: 20m
      #   labels:
      #     severity: critical
      #   annotations:
      #     <<: *anno
      #     summary: "It's THAT shit again!"
      #     description: "Kill the fucking miner processes!"



      - alert: Uptime
        expr: floor((time() - node_boot_time_seconds)) < 3600
        for: 5m
        labels:
          severity: warning
        annotations:
          <<: *anno
          summary: "Uptime less than 1 hour"
          description: "Uptime on {{ $labels.instance }} is less than 1 hour"

      - alert: LoadAverage
        expr: (node_load5{}) > ( instance:node_cpus:count{} )
        for: 5m
        labels:
          severity: warning
        annotations:
          <<: *anno
          summary: "High LoadAverage5"
          description: |
            {{ $labels.host }} [{{ printf `instance:node_cpus:count{host='%s', instance='%s'}` .Labels.host .Labels.instance | query | first | value }} CPU] LA: {{ printf `node_load1{host='%s', instance='%s'}` .Labels.host .Labels.instance | query | first | value }} {{ printf `node_load5{host='%s', instance='%s'}` .Labels.host .Labels.instance | query | first | value }} {{ printf `node_load15{host='%s', instance='%s'}` .Labels.host .Labels.instance | query | first | value }}

      - alert: LoadAverage
        expr: (node_load15{}) > ( instance:node_cpus:count{} )
        for: 5m
        labels:
          severity: critical
        annotations:
          <<: *anno
          summary: "High LoadAverage15"
          description: |
            {{ $labels.host }} [{{ printf `instance:node_cpus:count{host='%s', instance='%s'}` .Labels.host .Labels.instance | query | first | value }} CPU] LA: {{ printf `node_load1{host='%s', instance='%s'}` .Labels.host .Labels.instance | query | first | value }} {{ printf `node_load5{host='%s', instance='%s'}` .Labels.host .Labels.instance | query | first | value }} {{ printf `node_load15{host='%s', instance='%s'}` .Labels.host .Labels.instance | query | first | value }}

      - alert: RAM
        expr: node_memory_MemAvailable_bytes{ } / node_memory_MemTotal_bytes * 100 < 10
        for: 10m
        labels:
          severity: warning
        annotations:
          <<: *anno
          summary: "Low available memory"
          description: "Free RAM: {{ printf `%.2f` $value }}% Available {{ printf `node_memory_MemAvailable_bytes{instance='%s'}` .Labels.instance | query | first | value | humanize1024 }} of {{ printf `node_memory_MemTotal_bytes{instance='%s'}` .Labels.instance | query | first | value | humanize1024 }}"

      - alert: RAM
        expr: node_memory_MemAvailable_bytes{ } / node_memory_MemTotal_bytes * 100 < 5
        for: 10m
        labels:
          severity: critical
        annotations:
          <<: *anno
          summary: "Low available memory"
          description: "Free RAM: {{ printf `%.2f` $value }}% Available {{ printf `node_memory_MemAvailable_bytes{instance='%s'}` .Labels.instance | query | first | value | humanize1024 }} of {{ printf `node_memory_MemTotal_bytes{instance='%s'}` .Labels.instance | query | first | value | humanize1024 }}"

      - alert: iNodes
        expr: (node_filesystem_files_free{fstype!~"rootfs|fuse.lxcfs|squashfs",mountpoint!~"/boot|boot/efi|/backup|/swap"} / node_filesystem_files) * 100 < 10
        for: 10m
        labels:
          severity: warning
        annotations:
          <<: *anno
          summary: "[WARN] Low available inodes"
          description: "Available i-nodes: {{ printf `%.2f` $value }}%\n"

      - alert: iNodes
        expr: (node_filesystem_files_free{fstype!~"rootfs|fuse.lxcfs|squashfs",mountpoint!~"/boot|boot/efi|/backup|/swap"} / node_filesystem_files) * 100 < 5
        for: 10m
        labels:
          severity: critical
        annotations:
          <<: *anno
          summary: "[CRIT] Host out of inodes"
          description: "Available i-nodes: {{ printf `%.2f` $value }}%\n"


      - alert: DiskUsage
        expr: ( node_filesystem_avail_bytes{mountpoint!~"/boot|boot/efi|/backup|/swap", fstype!~"rootfs|fuse.lxcfs|squashfs"}/ node_filesystem_size_bytes ) * 100 < 10
        for: 5m
        labels:
          severity: info
        annotations:
          <<: *anno
          summary: "Disk usage is more than 90%"
          description: |
            {{ $labels.device }} ({{ $labels.mountpoint }}): {{ printf `node_filesystem_avail_bytes{mountpoint='%s', device='%s', instance='%s'}` .Labels.mountpoint .Labels.device .Labels.instance | query | first | value | humanize1024 }} / {{ printf `node_filesystem_size_bytes{mountpoint='%s', device='%s', instance='%s'}` .Labels.mountpoint .Labels.device .Labels.instance | query | first | value | humanize1024 }}
            Free space: {{ printf `%.2f` $value }}%

      - alert: DiskUsagePredict
        expr: |
          (node_filesystem_avail_bytes{mountpoint!~"/boot|boot/efi|/backup", fstype!~"rootfs|fuse.lxcfs|squashfs"}/ node_filesystem_size_bytes) * 100 < 10
          and
          predict_linear(node_filesystem_avail_bytes{fstype!~"rootfs|fuse.lxcfs|squashfs"}[1h], 4 * 3600) < 0
        for: 5m
        labels:
          severity: critical
        annotations:
          <<: *anno
          summary: "Disk usage is more than 90% and will fill soon"
          description: "{{ $labels.mountpoint }} usage is more than 90% and will fill soon on {{ $labels.instance }}"

  - name: Prometheus
    rules:
      - alert: PrometheusAlertmanagerNotificationFailing
        expr: rate(alertmanager_notifications_failed_total[1m]) > 0
        for: 0m
        labels:
          severity: critical
        annotations:
          <<: *anno
          summary: Prometheus AlertManager notification failing (instance {{ $labels.instance }})
          description: "Alertmanager is failing sending notifications on {{ $labels.host }}"

      - alert: PrometheusConfigurationReloadFailure
        expr: prometheus_config_last_reload_successful != 1
        for: 0m
        labels:
          severity: warning
        annotations:
          <<: *anno
          summary: Prometheus configuration reload failure (instance {{ $labels.instance }})
          description: "Prometheus configuration reload error on {{ $labels.host }}"

      - alert: PrometheusConsulServiceDiscoveryError
        expr: increase(prometheus_sd_consul_rpc_failures_total[15m]) > 0
        for: 0m
        labels:
          severity: critical
        annotations:
          <<: *anno
          summary: Prometheus consul_sd many failures (instance {{ $labels.instance }})
          description: "Prometheus consul_sd many failures on {{ $labels.host }}"
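promtool from the Prometheus tarball validates rule and alert files, including the anchor-merged annotations and the PromQL in every expr; worth running before the playbook further down ships this file out (a sketch):

$ promtool check rules files/prometheus/alerts.yaml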
30 files/prometheus/prometheus.service (Normal file)
@@ -0,0 +1,30 @@
[Unit]
Description=Prometheus
Wants=network-online.target
After=network-online.target

[Service]
User=prometheus
Group=prometheus
Type=simple
Restart=always
OOMScoreAdjust=-1000
LimitNOFILE=16384
ExecStart=/usr/sbin/prometheus \
    --config.file /etc/prometheus/prometheus.yaml \
    --web.config.file=/etc/prometheus/web-config.yaml \
    --storage.tsdb.path /prometheus-data/ \
    --storage.tsdb.retention.time 180d \
    --storage.tsdb.max-block-duration=2h \
    --storage.tsdb.min-block-duration=2h \
    --web.enable-remote-write-receiver \
    --web.console.templates=/etc/prometheus/consoles \
    --web.console.libraries=/etc/prometheus/console_libraries \
    --web.enable-admin-api \
    --query.max-samples=50000000

ExecReload=/usr/bin/kill -s HUP $MAINPID
ExecStop=/usr/bin/kill -s QUIT $MAINPID

[Install]
WantedBy=multi-user.target
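Installed by hand, the unit would be wired up roughly like this (a sketch; the prometheus.yml playbook further down automates the copy and the daemon-reload):

$ sudo cp files/prometheus/prometheus.service /etc/systemd/system/
$ sudo systemctl daemon-reload
$ sudo systemctl enable --now prometheus
$ systemctl status prometheus --no-pager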
179 files/prometheus/prometheus.yaml (Normal file)
@@ -0,0 +1,179 @@
# my global config
global:
  scrape_interval: 10s # Set the scrape interval to every 10 seconds. Default is every 1 minute.
  evaluation_interval: 60s # Evaluate rules every 60 seconds. The default is every 1 minute.
  #external_labels:

  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - scheme: https
      static_configs:
        - targets: ['alertmanager.guaranteedstruggle.host']

# Writing data to remote long-term storage (VictoriaMetrics)
# remote_write:
#   - url:

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  - '/etc/prometheus/alerts.yaml'
  - '/etc/prometheus/service_alerts/*.yaml'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:


  #### TODO move exporter scraping into templates based on machine roles

  - job_name: 'node-exporters'
    scheme: http
    static_configs:
      - targets:
        - 'semyon-0x01:9100'
        - 'semyon-0x02:9100'
        - 'semyon-0x03:9100'
        - 'semyon-0x04:9100'
        - 'semyon-0x05:9100'

        - 'king-albert:9100'
        - 'gpu-slut:9100'
    # relabel_configs:
    #   - target_label: instance
    #     replacement: 'cyberbully:9100'
    #   - target_label: host
    #     replacement: cyberbully

  - job_name: 'node-exporters-vms'
    scheme: http
    static_configs:
      - targets:
        - 'printing-slut:9100'

        - 'swarm-node1:9100'
        - 'swarm-node2:9100'
        - 'swarm-node3:9100'

        - 'harbor:9100'

        - 'rke2-master1:9100'
        - 'rke2-master2:9100'
        - 'rke2-master3:9100'
        - 'rke2-worker1:9100'
        - 'rke2-worker2:9100'
        - 'rke2-worker3:9100'
        - 'rke2-worker4:9100'
        - 'rke2-worker5:9100'

        - 'k3s-rancher:9100'
        - 'k3s-awx:9100'

  # # - job_name: 'node-exporters-lxc'
  # #   scheme: http
  # #   static_configs:
  # #     - targets:


  - job_name: 'ipmi-exporters'
    scheme: http
    static_configs:
      - targets:
        #- 'cyberbully:9290'
        - 'king-albert:9290'
        # - 'semyon-0x01:9290'
        # - 'semyon-0x02:9290'
        # - 'semyon-0x03:9290'
        # - 'semyon-0x04:9290'
        # - 'semyon-0x05:9290'
        # - 'gpu-slut:9290'

  # pipisa exporter
  # # - job_name: 'vdk2ch-pipisa-exporter'
  # #   scheme: http
  # #   static_configs:
  # #     - targets:
  # #       - '192.168.0.55:9992'
  # #   relabel_configs:
  # #     - target_label: instance
  # #       replacement: 'cyberbully:9992'
  # #     - target_label: host
  # #       replacement: cyberbully

  # pipisa exporter
  # - job_name: 'vllm-exporter'
  #   scheme: http
  #   static_configs:
  #     - targets:
  #       - '192.168.0.4:8000'
  #   relabel_configs:
  #     - target_label: instance
  #       replacement: 'new-computer-home:8000'
  #     - target_label: host
  #       replacement: new-computer-home


#
  # # - job_name: 'nginx-vts-metrics'
  # #   scheme: http
  # #   metrics_path: /status/format/prometheus
  # #   static_configs:
  # #     - targets:
  # #       - '192.168.0.55:9042'
  # #   relabel_configs:
  # #     - target_label: instance
  # #       replacement: 'cyberbully:9042'
  #     - target_label: host
  #       replacement: cyberbully

  # windows exporter on top of the laptop over home wifi
  # # - job_name: 'i-programmed-my-home-computer'
  # #   scheme: http
  # #   static_configs:
  # #     - targets:
  # #       - '192.168.0.2:9182'
  # #       - '192.168.0.3:9182'
  # #   relabel_configs:
  # #     - source_labels: [__address__]
  # #       regex: "(192.168.0.2.+)"
  # #       target_label: instance
  # #       replacement: 'Desktop-O50pt4s:9182'
  # #     - source_labels: [__address__]
  # #       regex: "(192.168.0.2.+)"
  # #       target_label: host
  # #       replacement: Desktop-O50pt4s
  # #     - source_labels: [__address__]
  # #       regex: "(192.168.0.3.+)"
  # #       target_label: instance
  # #       replacement: 'Desktop-edov3u5:9182'
  # #     - source_labels: [__address__]
  # #       regex: "(192.168.0.3.+)"
  # #       target_label: host
  # #       replacement: Desktop-edov3u5

#
  # # - job_name: 'nvidia-gpu-metrics'
  # #   scheme: http
  # #   static_configs:
  # #     - targets:
  # #       - '192.168.0.2:9835'
  # #   relabel_configs:
  # #     - target_label: instance
  # #       replacement: 'Desktop-O50pt4s:9835'


  # # # personal twitter
  # # - job_name: 'pleroma'
  # #   metrics_path: /api/pleroma/app_metrics
  # #   scheme: https
  # #   static_configs:
  # #     - targets: ['social.vdk2ch.ru']

  # the hypervisor
  - job_name: 'proxmox'
    metrics_path: /pve
    static_configs:
      - targets:
        - 'king-albert.guaranteedstruggle.host:9221'
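The whole config, including the rule_files it references, can be validated in one go (a sketch; note that promtool follows rule_files, so /etc/prometheus/alerts.yaml must exist wherever the check runs, or the paths need substituting):

$ promtool check config files/prometheus/prometheus.yaml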
14 files/prometheus/rules.yaml (Normal file)
@@ -0,0 +1,14 @@
groups:
  - name: node-exporter-rules
    rules:

      # CPU count
      - record: instance:node_cpus:count
        expr: count(node_cpu_seconds_total{mode="idle"}) without (cpu,mode)

  # taken from here
  # https://stackoverflow.com/questions/52480567/count-alerts-fired-by-prometheus
  - name: alerts
    rules:
      - record: ALERTS_FOR_STATE:firing
        expr: ALERTS_FOR_STATE and ignoring(alertstate) ALERTS{alertstate="firing"}
3 files/prometheus/web-config.yaml (Normal file)
@@ -0,0 +1,3 @@
# tls_server_config:
#   cert_file: /etc/prometheus/ssl/ .crt
#   key_file: /etc/prometheus/ssl/ .key
6 playbooks/_common-setup.yml (Normal file)
@@ -0,0 +1,6 @@
#### TODO turn both into roles - packages and users
---
- import_playbook: packages.yml
- import_playbook: resolvconf.yml
- import_playbook: users.yml
- import_playbook: exporters.yml
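The whole baseline then runs as one command; a --check pass first is a cheap safety net (a sketch):

$ ansible-playbook playbooks/_common-setup.yml --check
$ ansible-playbook playbooks/_common-setup.yml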
@@ -1,21 +1,21 @@
---
- name: node exporter!
-  hosts: all
+  hosts: all:!lxc
  gather_facts: yes
  become: yes
  roles:
    #- role:
    #- prometheus.prometheus.ipmi_exporter
    - prometheus.prometheus.node_exporter
    #node_exporter_local_cache_path: "/tmp/node_exporter_cache"
- name: for hardware monitoring
-  hosts: physical_machines
+  hosts: king-albert.guaranteedstruggle.host
  gather_facts: yes
  become: yes
  roles:
    - role: prometheus.prometheus.ipmi_exporter
      ipmi_exporter_system_user: root
      ipmi_exporter_version: "1.9.0"
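The new all:!lxc pattern excludes the lxc group from the node_exporter play. Which hosts a pattern actually matches can be previewed without running anything (a sketch; quote the pattern so the shell keeps the !):

$ ansible 'all:!lxc' -m ping --list-hosts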
@@ -1,59 +1,63 @@
---
- name: packages
  hosts: all
  become: true
  become_method: sudo
  tasks:
    - name: Install the packages versions
      ansible.builtin.package:
        name:
          - htop
          #- iperf3
          - git
          - curl
          - net-tools
          - vim
          - sudo
+          - tree
+          - jq
+          - rsync
+
        #state: latest
        state: present

- name: check-stuff packages
  hosts: net-stuff
  become: true
  become_method: sudo
  tasks:
    - name: Install the packages versions
      ansible.builtin.package:
        name:
          - iperf3
        #state: latest
        state: present

- name: ceph packages
  hosts: semyons
  become: true
  become_method: sudo
  tasks:
    - name: Install the packages versions
      ansible.builtin.package:
        name:
          - ceph
        #state: latest
        state: present

- name: iptables
  hosts:
    - kubernetes
    - docker
  become: true
  become_method: sudo
  tasks:
    - name: Install the packages versions
      ansible.builtin.package:
        name:
          - iptables
        #state: latest
        state: present
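A dry run shows what the extended package list (tree, jq, rsync) would change before anything is installed (a sketch):

$ ansible-playbook playbooks/packages.yml --check --diff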
@@ -1,20 +1,20 @@
---
- name: pingu
  hosts: all
  # remote_user: root

  gather_facts: no
-  become: yes
+  become: no # yes
  tasks:
    - name: pingu!
      ansible.builtin.ping:

#    - name: Send notify to Telegram
#      community.general.telegram:
#        token: '6472915685:AAHPvgrQoqG7DxtfbnHWPe3Lfild-CGJ1j8'
#        api_args:
#          chat_id: -4023350326
#          parse_mode: "markdown"
#          text: "Your precious application has been deployed: https://example.com"
#          disable_web_page_preview: true
#          disable_notification: true
65 playbooks/resolvconf.yml (Normal file)
@@ -0,0 +1,65 @@
---
- name: make resolv.conf work fine
  hosts: all
  become: yes
  tasks:
    - name: Install the packages versions
      ansible.builtin.package:
        name:
          - systemd-resolved
        state: present
    - name: Make small file
      register: systemd_resolved_conf
      copy:
        dest: "/etc/systemd/resolved.conf"
        content: |
          # This file is part of systemd.
          #
          # systemd is free software; you can redistribute it and/or modify it under the
          # terms of the GNU Lesser General Public License as published by the Free
          # Software Foundation; either version 2.1 of the License, or (at your option)
          # any later version.
          #
          # Entries in this file show the compile time defaults. Local configuration
          # should be created by either modifying this file, or by creating "drop-ins" in
          # the resolved.conf.d/ subdirectory. The latter is generally recommended.
          # Defaults can be restored by simply deleting this file and all drop-ins.
          #
          # Use 'systemd-analyze cat-config systemd/resolved.conf' to display the full config.
          # See resolved.conf(5) for details.

          [Resolve]
          # Some examples of DNS servers which may be used for DNS= and FallbackDNS=:
          # Cloudflare: 1.1.1.1#cloudflare-dns.com 1.0.0.1#cloudflare-dns.com 2606:4700:4700::1111#cloudflare-dns.com 2606:4700:4700::1001#cloudflare-dns.com
          # Google: 8.8.8.8#dns.google 8.8.4.4#dns.google 2001:4860:4860::8888#dns.google 2001:4860:4860::8844#dns.google
          # Quad9: 9.9.9.9#dns.quad9.net 149.112.112.112#dns.quad9.net 2620:fe::fe#dns.quad9.net 2620:fe::9#dns.quad9.net
          DNS=192.168.0.88
          FallbackDNS=192.168.0.1
          Domains=guaranteedstruggle.host,just-for-me.internal
          #DNSSEC=no
          #DNSOverTLS=no
          #MulticastDNS=yes
          #LLMNR=yes
          #Cache=yes
          #CacheFromLocalhost=no
          DNSStubListener=yes
          #DNSStubListenerExtra=
          #ReadEtcHosts=yes
          #ResolveUnicastSingleLabel=no


    - name: Make fix for resolv-conf rewriting
      copy:
        dest: "/etc/dhcp/dhclient-enter-hooks.d/nodnsupdate"
        content: |
          #!/bin/sh
          make_resolv_conf(){
              :
          }
        mode: +x

    - name: restart service
      service:
        name: systemd-resolved
        state: restarted
      when: systemd_resolved_conf.changed
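The nodnsupdate hook overrides dhclient's make_resolv_conf() shell function with a no-op, so DHCP renewals stop clobbering /etc/resolv.conf and systemd-resolved stays in charge. After a run, the result can be checked like this (a sketch):

$ resolvectl status           # should list DNS=192.168.0.88 and the two search domains
$ resolvectl query pg.just-for-me.internal
$ ls -l /etc/resolv.conf      # typically a symlink into /run/systemd/resolve/ once resolved owns it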
@@ -1,19 +1,19 @@
---
- name: run it
  hosts: puppets
  gather_facts: no
  become: yes
  tasks:
    - name: run 'em
      community.general.puppet:
        summarize: true

- name: run it 2
  hosts: samehost-zero.guaranteedstruggle.host
  gather_facts: no
  become: yes
  tasks:
    - name: run this
      community.general.puppet:
        summarize: yes
        certname: samehost-zero.guaranteedstruggle.host
133 playbooks/software/prometheus.yml (Normal file)
@@ -0,0 +1,133 @@
---
- name: prom
  hosts:
    - prometheus.guaranteedstruggle.host
  vars:
    prom_version: '2.55.1'
  gather_facts: yes
  become: yes
  tasks:

    - name: Ensure group "prometheus" exists
      ansible.builtin.group:
        name: prometheus
        state: present
    - name: Add user "prometheus"
      ansible.builtin.user:
        name: prometheus
        groups: prometheus
        shell: /sbin/nologin
        create_home: no
        append: yes
        comment: "prometheus nologin User"
        state: present

    - name: Creates directory
      ansible.builtin.file:
        path: /etc/prometheus
        state: directory
        group: prometheus
        owner: prometheus
    - name: Creates directory
      ansible.builtin.file:
        path: /usr/share/prometheus
        state: directory
        group: prometheus
        owner: prometheus
    - name: Creates directory
      ansible.builtin.file:
        path: /prometheus-data
        state: directory
        group: prometheus
        owner: prometheus

    - name: Unarchive a file that needs to be downloaded (added in 2.0)
      ansible.builtin.unarchive:
        src: https://github.com/prometheus/prometheus/releases/download/v{{prom_version}}/prometheus-{{prom_version}}.linux-amd64.tar.gz
        dest: /usr/share/prometheus
        creates: /usr/share/prometheus/prometheus-{{prom_version}}.linux-amd64
        remote_src: yes


    - name: Create a symbolic link
      ansible.builtin.file:
        src: /usr/share/prometheus/prometheus-{{prom_version}}.linux-amd64/prometheus
        dest: /usr/sbin/prometheus
        owner: prometheus
        group: prometheus
        state: link
    - name: Create a symbolic link
      ansible.builtin.file:
        src: /usr/share/prometheus/prometheus-{{prom_version}}.linux-amd64/promtool
        dest: /usr/sbin/promtool
        owner: prometheus
        group: prometheus
        state: link

    - name: Copy prometheus.yaml
      register: prometheus_config_file
      copy:
        src: ../../files/prometheus/prometheus.yaml
        dest: /etc/prometheus/prometheus.yaml
      notify:
        - reload prometheus
    - name: Copy web-config
      register: web_config_file
      copy:
        src: ../../files/prometheus/web-config.yaml
        dest: /etc/prometheus/web-config.yaml
      notify:
        - reload prometheus
    - name: Copy rules.yaml
      register: rules_file
      copy:
        src: ../../files/prometheus/rules.yaml
        dest: /etc/prometheus/rules.yaml
      notify:
        - reload prometheus
    - name: Copy alerts.yaml
      register: alerts_file
      copy:
        src: ../../files/prometheus/alerts.yaml
        dest: /etc/prometheus/alerts.yaml
      notify:
        - reload prometheus


    - name: Copy prometheus.service
      register: prometheus_service_file
      copy:
        src: ../../files/prometheus/prometheus.service
        dest: /etc/systemd/system/prometheus.service


    - name: ensure service
      ansible.builtin.systemd_service:
        name: prometheus
        state: started
        enabled: true

#    - name: reload service
#      ansible.builtin.systemd_service:
#        name: prometheus
#        state: reloaded
#      when:
#        - rules_file.changed
#        - alerts_file.changed
#        - prometheus_service_file.changed
#        - web_config_file.changed

    - name: Just force systemd to reread configs
      ansible.builtin.systemd_service:
        daemon_reload: true
      when: prometheus_service_file.changed



  handlers:
    - name: reload prometheus
      ansible.builtin.systemd_service:
        name: prometheus
        state: reloaded

#### TODO how do we roll back a change that fails the promtool check?
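One answer to that TODO: validate before the file lands instead of rolling back after. ansible.builtin.copy accepts a validate argument that runs a command against the staged file (with %s substituted) and aborts the copy on failure, which would keep a broken alerts.yaml from ever reaching /etc/prometheus. The manual equivalent of the same idea (a sketch, not what the playbook currently does):

$ promtool check rules /etc/prometheus/alerts.yaml && sudo systemctl reload prometheus
# inline in the copy task it would read: validate: 'promtool check rules %s'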
@@ -1,28 +1,28 @@
---
- name: users
  hosts: all
  become: yes
  tasks:
    - name: make-me
      ansible.builtin.user:
        name: hogweed1
        shell: /bin/bash
        create_home: yes
        # python -c 'import crypt; print crypt.crypt("This is my Password", "$1$SomeSalt$")'
        password: $6$KHOI$0Dq28VBwgtNFvfbQQ.4s6koctN6e5ZWRRBhWp0lkKKiel8y2qhc89E0CY479b4EX5.CnfDhS8rlaOATk/rXLu0

    - name: Set authorized key taken from file
      ansible.posix.authorized_key:
        user: hogweed1
        state: present
        key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINHTnXy693g6ivEJ6D5VrTBnjEjIe/a00cU7/9Hb79Zf hogweed1@vdk2ch.ru"



    - name: Make users passwordless for sudo in group wheel
      lineinfile:
        path: /etc/sudoers
        state: present
        regexp: '^%hogweed1'
        line: '%hogweed1 ALL=(ALL) NOPASSWD: ALL'
        validate: 'visudo -cf %s'
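The crypt one-liner in the comment above is Python 2. Hedged modern equivalents for producing the SHA-512 style hash that password: expects (the crypt module works up to Python 3.12; openssl is interchangeable here):

$ python3 -c 'import crypt; print(crypt.crypt("This is my Password", crypt.mksalt(crypt.METHOD_SHA512)))'
$ openssl passwd -6 'This is my Password'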
@@ -1,210 +1,210 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    callback: telegram
    callback_type: notification
    requirements:
      - whitelist in configuration
      - telebot (pip install pyTelegramBotApi)
      - prettytable (pip install prettytable)
      - latest requests (pip install requests --upgrade)
    short_description: Sends play events to a telegram channel
    version_added: "2.1"
    description:
        - This is an ansible callback plugin that sends status updates to a telegram channel during playbook execution.
        - Before 2.4 only environment variables were available for configuring this plugin
    options:
      tg_token:
        required: True
        description: telegram bot token
        env:
          - name: TG_TOKEN
        ini:
          - section: callback_telegram
            key: tg_token
      tg_chat_id:
        required: True
        description: telegram chat id to post in.
        env:
          - name: TG_CHAT_ID
        ini:
          - section: callback_telegram
            key: tg_chat_id
      socks5_uri:
        description: socks5 proxy uri to bypass rkn's restrictions
        env:
          - name: SOCKS5_URI
        ini:
          - section: callback_telegram
            key: socks5_uri
'''

import os
from datetime import datetime

from ansible import context
from ansible.module_utils._text import to_text
from ansible.module_utils.urls import open_url
from ansible.plugins.callback import CallbackBase

try:
    import telebot
    from telebot import apihelper
    HAS_TELEBOT = True
except ImportError:
    HAS_TELEBOT = False

try:
    import prettytable
    HAS_PRETTYTABLE = True
except ImportError:
    HAS_PRETTYTABLE = False

class CallbackModule(CallbackBase):
    """This is an ansible callback plugin that sends status
    updates to a telegram channel during playbook execution.
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'telegram'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self, display=None):

        super(CallbackModule, self).__init__(display=display)

        if not HAS_TELEBOT:
            self.disabled = True
            self._display.warning('The `telebot` python module is not '
                                  'installed. Disabling the telegram callback '
                                  'plugin.')

        if not HAS_PRETTYTABLE:
            self.disabled = True
            self._display.warning('The `prettytable` python module is not '
                                  'installed. Disabling the telegram callback '
                                  'plugin.')

        self.playbook_name = None
        self.play = None
        self.now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    def set_options(self, task_keys=None, var_options=None, direct=None):

        super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)

        self.tg_token = self.get_option('tg_token')
        self.tg_chat_id = self.get_option('tg_chat_id')
        self.socks5_uri = self.get_option('socks5_uri')

        if self.tg_token is None:
            self.disabled = True
            self._display.warning('tg_token was not provided. The '
                                  'tg_token can be provided using '
                                  'the `TG_TOKEN` environment '
                                  'variable.')

        if self.tg_chat_id is None:
            self.disabled = True
            self._display.warning('tg_chat_id was not provided. The '
                                  'tg_chat_id can be provided using '
                                  'the `TG_CHAT_ID` environment '
                                  'variable.')

    def send_msg(self, msg):
        if self.socks5_uri is not None:
            apihelper.proxy = {'https': self.socks5_uri}
        # print(self.tg_token)
        bot = telebot.TeleBot(self.tg_token)
        # print(bot)
        # print(self.tg_chat_id)
        # print(msg)
        # bot.send_message(self.tg_chat_id, 'Hi! I\'m a Bot!')
        bot.send_message(self.tg_chat_id, msg, parse_mode='HTML')

    def v2_playbook_on_start(self, playbook):

        self.playbook_name = os.path.abspath(playbook._file_name)

    def v2_playbook_on_play_start(self, play):
        self.play = play

        title = [
            '<u><b>Ansible:</b></u> <b>STARTED</b> ⚙️'
        ]

        msg_items = [' '.join(title)]
        msg_items.append('\n time: ' + '<code>' + str(self.now) + '</code>')
        msg_items.append('playbook: ' + '<code>' + self.playbook_name + '</code>')
        msg_items.append(' hosts:')
        for host in play.hosts:
            msg_items.append('<code> - ' + host + '</code>')
        msg_items.append(' tags:')
        for tag in play.only_tags:
            msg_items.append('<code> - ' + tag + '</code>')
        msg = '\n'.join(msg_items)
        self.send_msg(msg=msg)

    def v2_runner_on_failed(self, result, ignore_errors=False):

        msg = []
        title = [
            '<u><b>Ansible:</b></u> <b>FAILED ❌</b>'
        ]
        msg_items = [' '.join(title)]
        msg_items.append('\n time: ' + '<code>' + str(self.now) + '</code>')
        msg_items.append('playbook: ' + '<code>' + self.playbook_name + '</code>')
        msg_items.append(' host: ' + '<code>' + result._host.get_name() + '</code>')
        msg_items.append(' stderr: ' + '<code>' + result._result['stderr'] + '</code>')

        msg = '\n'.join(msg_items)

        self.send_msg(msg=msg)

    def v2_playbook_on_stats(self, stats):
        """Display info about playbook statistics"""

        hosts = sorted(stats.processed.keys())

        t = prettytable.PrettyTable(['Host and state'] )
        #, 'Ok', 'Changed', 'Unreachable',
        # 'Failures', 'Rescued', 'Ignored'])

        failures = False
        unreachable = False

        for h in hosts:
            s = stats.summarize(h)

            if s['failures'] > 0:
                failures = True
            if s['unreachable'] > 0:
                unreachable = True

            print(s)
            t.add_row([h.replace('.guaranteedstruggle.host','')] )
            print([h.replace('.guaranteedstruggle.host','')])
            print(', '.join([ str(s[k]) for k in ['ok', 'changed', 'unreachable',
                                                  'failures', 'rescued', 'ignored']]))
            t.add_row( ["[" + ', '.join([ str(s[k]) for k in ['ok', 'changed', 'unreachable',
                                                              'failures', 'rescued', 'ignored']]) + "]"])

        msg = []
        title = '<u><b>Ansible:</b></u> <b>ENDED</b>'
        if failures or unreachable:
            msg_items = [
                title + ' ❌'
            ]
        else:
            msg_items = [
                title + ' ✅'
            ]
        msg_items.append('\n time: ' + '<code>' + str(self.now) + '</code>')
        msg_items.append('playbook: ' + '<code>' + self.playbook_name + '</code>')
        msg_items.append('<code>\n%s\n</code>' % t)
        msg_items.append('<code>' + 'ok,chg,unr,fail,res,ign' + '</code>')

        msg = '\n'.join(msg_items)

        self.send_msg(msg=msg)
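Per its DOCUMENTATION block, the plugin reads its settings from the commented-out [callback_telegram] section in ansible.cfg or from the environment. A sketch of enabling it per-run without touching the config (ping.yml is a hypothetical playbook name; ANSIBLE_CALLBACK_WHITELIST is the older spelling of ANSIBLE_CALLBACKS_ENABLED):

$ export ANSIBLE_CALLBACK_PLUGINS=/etc/ansible/plugins/callback
$ export ANSIBLE_CALLBACK_WHITELIST=telegram
$ export TG_TOKEN='123456:bot-token-here' TG_CHAT_ID='-4023350326'
$ ansible-playbook playbooks/ping.yml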
@@ -1,9 +1,9 @@
---
collections:
  - name: community.general
    version: 9.5.0
  - name: ansible.utils
    version: 4.1.0

  - name: prometheus.prometheus
-    version: 0.18.0
+    version: 0.22.0