First release

Ricardo Hernandez 2016-10-04 16:03:07 +01:00
commit 702998db40
46 changed files with 1238 additions and 0 deletions

5
.gitignore vendored Normal file

@@ -0,0 +1,5 @@
.vagrant
.tox
.cache
__pycache__
*~

5
AUTHORS Normal file

@@ -0,0 +1,5 @@
=======
Authors
=======
* Ricardo Hernandez (richerve)

7
CHANGELOG.rst Normal file

@@ -0,0 +1,7 @@
prometheus formula
==================

201610 (2016-10-04)
-------------------

- First release

9
FORMULA Normal file

@@ -0,0 +1,9 @@
name: prometheus
os: Ubuntu
os_family: Debian
version: 201610
release: 1
summary: Prometheus saltstack formula
description: Prometheus saltstack formula
top_level_dir: prometheus
recommended:

14
LICENSE Normal file

@@ -0,0 +1,14 @@
Copyright (c) 2016 Ricardo Hernandez

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

46
README.rst Normal file

@@ -0,0 +1,46 @@
============================
Prometheus saltstack formula
============================

.. note::

   See the full `Salt Formulas installation and usage instructions
   <http://docs.saltstack.com/en/latest/topics/development/conventions/formulas.html>`_.

Features
========

- Install and configure prometheus server, node_exporter and alertmanager

Compatibility
=============

- prometheus >= 1.0
- saltstack = 2016.3.3

Available states
================

.. contents::
   :local:

- ``prometheus.server``
- ``prometheus.exporters.node``
- ``prometheus.alertmanager``

Running
=======

::

   $ salt node state.apply prometheus
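
Individual states can be applied the same way, for example (the minion id
``node`` is just a placeholder for your target)::

   $ salt node state.apply prometheus.alertmanager
   $ salt node state.apply prometheus.exporters.node
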
Ideas and future development
============================

Template
========

This formula was created from a cookiecutter template.
See https://github.com/richerve/saltstack-formula-cookiecutter.

1
VERSION Normal file

@@ -0,0 +1 @@
201610

66
pillar.example Normal file

@@ -0,0 +1,66 @@
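# Example pillar data for this formula. It mirrors prometheus/defaults.yaml;
# copy only the keys you want to override into your own pillar tree.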
prometheus:
  server:
    version: 1.1.3.linux-amd64
    install_dir: /opt
    source: https://github.com/prometheus/prometheus/releases/download/v1.1.3/prometheus-1.1.3.linux-amd64.tar.gz
    source_hash: md5=839293afa9bfce47c476a0bdfd01a780
    args:
      config_file: /etc/prometheus/prometheus.yml
      storage:
        local_path: /var/lib/prometheus/metrics
    config:
      global:
        scrape_interval: 15s
        evaluation_interval: 15s
      scrape_configs:
        - job_name: 'prometheus'
          scrape_interval: 5s
          static_configs:
            - targets: ['localhost:9090']
        - job_name: 'node'
          scrape_interval: 5s
          static_configs:
            - targets: ['localhost:9100']
  alertmanager:
    version: 0.4.2.linux-amd64
    install_dir: /opt
    source: https://github.com/prometheus/alertmanager/releases/download/v0.4.2/alertmanager-0.4.2.linux-amd64.tar.gz
    source_hash: md5=715878f14ab091769c989ecdb2db9f15
    args:
      config_file: /etc/alertmanager/alertmanager.yml
      storage:
        path: /var/lib/alertmanager
    config:
      global:
        smtp_smarthost: "localhost:25"
        smtp_from: 'alertmanager@example.org'
        smtp_auth_username: 'alertmanager'
        smtp_auth_password: 'password'
      templates:
        - "/etc/alertmanager/template/*.tmpl"
      route:
        group_by: ['alertname', 'cluster', 'service']
        group_wait: 30s
        group_interval: 5m
        repeat_interval: 3h
        receiver: team-X-mails
        routes:
          - match_re:
              service: ^(foo1|foo2|baz)$
            receiver: team-X-mails
      inhibit_rules:
        - source_match:
            severity: 'critical'
          target_match:
            severity: 'warning'
          equal: ['alertname', 'cluster', 'service']
      receivers:
        - name: 'team-X-mails'
          email_configs:
            - to: 'team-X+alerts@example.org'
  exporters:
    node:
      version: 0.12.0.linux-amd64
      install_dir: /opt
      source: https://github.com/prometheus/node_exporter/releases/download/0.12.0/node_exporter-0.12.0.linux-amd64.tar.gz
      source_hash: md5=efe49b6fae4b1a5cb75b24a60a35e1fc

73
prometheus/alertmanager.sls Normal file

@@ -0,0 +1,73 @@
{% from "prometheus/map.jinja" import prometheus with context %}
{%- set version_path = prometheus.alertmanager.install_dir ~ "/alertmanager-" ~ prometheus.alertmanager.version %}
include:
- prometheus.user
alertmanager_tarball:
archive.extracted:
- name: {{ prometheus.alertmanager.install_dir }}
- source: {{ prometheus.alertmanager.source }}
- source_hash: {{ prometheus.alertmanager.source_hash }}
- archive_format: tar
- if_missing: {{ version_path }}
alertmanager_bin_link:
alternatives.install:
- name: alertmanager
- link: /usr/bin/alertmanager
- path: {{ version_path }}/alertmanager
- priority: 10
- require:
- archive: alertmanager_tarball
alertmanager_config:
file.managed:
- name: {{ prometheus.alertmanager.args.config_file }}
- source: salt://prometheus/files/config.jinja
- template: jinja
- user: prometheus
- group: prometheus
- makedirs: True
- defaults:
data: {{ prometheus.alertmanager.config }}
alertmanager_defaults:
file.managed:
- name: /etc/default/alertmanager
- source: salt://prometheus/files/default-alertmanager.jinja
- template: jinja
- defaults:
config_file: {{ prometheus.alertmanager.args.config_file }}
storage_path: {{ prometheus.alertmanager.args.storage.path }}
alertmanager_storage_path:
file.directory:
- name: {{ prometheus.alertmanager.args.storage.path }}
- user: prometheus
- group: prometheus
- makedirs: True
- watch:
- file: alertmanager_defaults
alertmanager_service_unit:
file.managed:
{%- if grains.get('init') == 'systemd' %}
- name: /etc/systemd/system/alertmanager.service
- source: salt://prometheus/files/alertmanager.systemd.jinja
{%- elif grains.get('init') == 'upstart' %}
- name: /etc/init/alertmanager.conf
- source: salt://prometheus/files/alertmanager.upstart.jinja
{%- endif %}
- watch:
- file: alertmanager_defaults
- require_in:
- file: alertmanager_service
alertmanager_service:
service.running:
- name: alertmanager
- enable: True
- reload: True
- watch:
- file: alertmanager_config

66
prometheus/defaults.yaml Normal file

@@ -0,0 +1,66 @@
prometheus:
  server:
    version: 1.1.3.linux-amd64
    install_dir: /opt
    source: https://github.com/prometheus/prometheus/releases/download/v1.1.3/prometheus-1.1.3.linux-amd64.tar.gz
    source_hash: md5=839293afa9bfce47c476a0bdfd01a780
    args:
      config_file: /etc/prometheus/prometheus.yml
      storage:
        local_path: /var/lib/prometheus/metrics
    config:
      global:
        scrape_interval: 15s
        evaluation_interval: 15s
      scrape_configs:
        - job_name: 'prometheus'
          scrape_interval: 5s
          static_configs:
            - targets: ['localhost:9090']
        - job_name: 'node'
          scrape_interval: 5s
          static_configs:
            - targets: ['localhost:9100']
  alertmanager:
    version: 0.4.2.linux-amd64
    install_dir: /opt
    source: https://github.com/prometheus/alertmanager/releases/download/v0.4.2/alertmanager-0.4.2.linux-amd64.tar.gz
    source_hash: md5=715878f14ab091769c989ecdb2db9f15
    args:
      config_file: /etc/alertmanager/alertmanager.yml
      storage:
        path: /var/lib/alertmanager
    config:
      global:
        smtp_smarthost: "localhost:25"
        smtp_from: 'alertmanager@example.org'
        smtp_auth_username: 'alertmanager'
        smtp_auth_password: 'password'
      templates:
        - "/etc/alertmanager/template/*.tmpl"
      route:
        group_by: ['alertname', 'cluster', 'service']
        group_wait: 30s
        group_interval: 5m
        repeat_interval: 3h
        receiver: team-X-mails
        routes:
          - match_re:
              service: ^(foo1|foo2|baz)$
            receiver: team-X-mails
      inhibit_rules:
        - source_match:
            severity: 'critical'
          target_match:
            severity: 'warning'
          equal: ['alertname', 'cluster', 'service']
      receivers:
        - name: 'team-X-mails'
          email_configs:
            - to: 'team-X+alerts@example.org'
  exporters:
    node:
      version: 0.12.0.linux-amd64
      install_dir: /opt
      source: https://github.com/prometheus/node_exporter/releases/download/0.12.0/node_exporter-0.12.0.linux-amd64.tar.gz
      source_hash: md5=efe49b6fae4b1a5cb75b24a60a35e1fc

48
prometheus/exporters/node.sls Normal file

@@ -0,0 +1,48 @@
{% from "prometheus/map.jinja" import prometheus with context %}
{%- set version_path = prometheus.exporters.node.install_dir ~ "/node_exporter-" ~ prometheus.exporters.node.version %}
include:
- prometheus.user
node_exporter_tarball:
archive.extracted:
- name: {{ prometheus.exporters.node.install_dir }}
- source: {{ prometheus.exporters.node.source }}
- source_hash: {{ prometheus.exporters.node.source_hash }}
- archive_format: tar
- if_missing: {{ version_path }}
node_exporter_bin_link:
alternatives.install:
- name: node_exporter
- link: /usr/bin/node_exporter
- path: {{ version_path }}/node_exporter
- priority: 10
- require:
- archive: node_exporter_tarball
node_exporter_defaults:
file.managed:
- name: /etc/default/node_exporter
- source: salt://prometheus/files/default-node_exporter.jinja
- template: jinja
node_exporter_service_unit:
file.managed:
{%- if grains.get('init') == 'systemd' %}
- name: /etc/systemd/system/node_exporter.service
- source: salt://prometheus/files/node_exporter.systemd.jinja
{%- elif grains.get('init') == 'upstart' %}
- name: /etc/init/node_exporter.conf
- source: salt://prometheus/files/node_exporter.upstart.jinja
{%- endif %}
- require_in:
- file: node_exporter_service
node_exporter_service:
service.running:
- name: node_exporter
- enable: True
- reload: True
- watch:
- file: node_exporter_defaults

18
prometheus/files/alertmanager.systemd.jinja Normal file

@@ -0,0 +1,18 @@
[Unit]
Description=The Alertmanager handles alerts sent by client applications such as the Prometheus server
Documentation=https://prometheus.io/docs/alerting/alertmanager/
Wants=basic.target
After=basic.target network.target

[Service]
User=prometheus
Group=prometheus
EnvironmentFile=/etc/default/alertmanager
ExecStart=/usr/bin/alertmanager $ARGS
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
Restart=always
RestartSec=42s

[Install]
WantedBy=multi-user.target

30
prometheus/files/alertmanager.upstart.jinja Normal file

@@ -0,0 +1,30 @@
# Prometheus Alert Manager (Upstart unit)
description "The Alertmanager handles alerts sent by client applications such as the Prometheus server."

start on runlevel [2345]
stop on runlevel [06]

env ALERTMANAGER=/usr/bin/alertmanager
env USER=prometheus
env GROUP=prometheus
env DEFAULTS=/etc/default/alertmanager
env RUNDIR=/var/run/alertmanager
env PID_FILE=/var/run/alertmanager/alertmanager.pid

pre-start script
  [ -e $DEFAULTS ] && . $DEFAULTS
  mkdir -p $RUNDIR || true
  chmod 0750 $RUNDIR || true
  chown $USER:$GROUP $RUNDIR || true
end script

script
  # read settings like GOMAXPROCS from "/etc/default/alertmanager", if available.
  [ -e $DEFAULTS ] && . $DEFAULTS
  export GOMAXPROCS=${GOMAXPROCS:-2}
  exec start-stop-daemon -c $USER -g $GROUP -p $PID_FILE -x $ALERTMANAGER -S -- $ARGS
end script

respawn
respawn limit 10 10
kill timeout 10

1
prometheus/files/config.jinja Normal file

@@ -0,0 +1 @@
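{#- Renders the dict passed in as "data" (the per-component "config" pillar
    key) as plain YAML; yaml(False) selects block style rather than the
    default flow style. -#}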
{{ data|yaml(False) }}

25
prometheus/files/default-alertmanager.jinja Normal file

@@ -0,0 +1,25 @@
# Set the command-line arguments to pass to the server.
ARGS="-config.file {{ config_file }} -storage.path {{ storage_path }}"
# The alert manager supports the following options:
# -config.file string
# Alertmanager configuration file name.
# (default "/etc/prometheus/alertmanager.yml")
# -log.level value
# Only log messages with the given severity or above.
# Valid levels: [debug, info, warn, error, fatal]. (default info)
# -storage.path string
# Base path for data storage.
# (default "/var/lib/prometheus/alertmanager/")
# -web.external-url string
# The URL under which Alertmanager is externally reachable (for example,
# if Alertmanager is served via a reverse proxy). Used for generating
# relative and absolute links back to Alertmanager itself. If the URL has
# a path portion, it will be used to prefix all HTTP endpoints served by
# Alertmanager. If omitted, relevant URL components will be derived
# automatically.
# -web.listen-address string
# Address to listen on for the web interface and API. (default ":9093")
# -web.local-assets string
# Path to static assets/templates directory.
# (default "/usr/share/prometheus/alertmanager/")

2
prometheus/files/default-node_exporter.jinja Normal file

@@ -0,0 +1,2 @@
# Set the command-line arguments to pass to the server.
ARGS=""

178
prometheus/files/default-prometheus.jinja Normal file

@@ -0,0 +1,178 @@
# Set the command-line arguments to pass to the server.
ARGS="-config.file {{ config_file }} -storage.local.path {{ storage_local_path }} -web.console.libraries {{ web_console_libraries }} -web.console.templates {{ web_console_templates }}"
# Prometheus supports the following options:
# -config.file "/etc/prometheus/prometheus.yml"
# Prometheus configuration file name.
#
# == ALERTMANAGER ==
#
# -alertmanager.notification-queue-capacity 10000
# The capacity of the queue for pending alert manager notifications.
#
# -alertmanager.timeout 10s
# Alert manager HTTP API timeout.
#
# -alertmanager.url
# The URL of the alert manager to send notifications to.
#
# == LOG ==
#
# -log.format
# If set use a syslog logger or JSON logging. Example:
# logger:syslog?appname=bob&local=7 or logger:stdout?json=true. Defaults to
# stderr.
#
# -log.level "info"
# Only log messages with the given severity or above. Valid levels:
# [debug, info, warn, error, fatal].
#
# == QUERY ==
#
# -query.max-concurrency 20
# Maximum number of queries executed concurrently.
#
# -query.staleness-delta 5m0s
# Staleness delta allowance during expression evaluations.
#
# -query.timeout 2m0s
# Maximum time a query may take before being aborted.
#
# == STORAGE ==
#
# -storage.local.checkpoint-dirty-series-limit 5000
# If approx. that many time series are in a state that would require
# a recovery operation after a crash, a checkpoint is triggered, even if
# the checkpoint interval hasn't passed yet. A recovery operation requires
# a disk seek. The default limit intends to keep the recovery time below
# 1min even on spinning disks. With SSD, recovery is much faster, so you
# might want to increase this value in that case to avoid overly frequent
# checkpoints.
#
# -storage.local.checkpoint-interval 5m0s
# The period at which the in-memory metrics and the chunks not yet
# persisted to series files are checkpointed.
#
# -storage.local.chunk-encoding-version 1
# Which chunk encoding version to use for newly created chunks.
# Currently supported is 0 (delta encoding) and 1 (double-delta encoding).
#
# -storage.local.dirty false
# If set, the local storage layer will perform crash recovery even if
# the last shutdown appears to be clean.
#
# -storage.local.index-cache-size.fingerprint-to-metric 10485760
# The size in bytes for the fingerprint to metric index cache.
#
# -storage.local.index-cache-size.fingerprint-to-timerange 5242880
# The size in bytes for the metric time range index cache.
#
# -storage.local.index-cache-size.label-name-to-label-values 10485760
# The size in bytes for the label name to label values index cache.
#
# -storage.local.index-cache-size.label-pair-to-fingerprints 20971520
# The size in bytes for the label pair to fingerprints index cache.
#
# -storage.local.max-chunks-to-persist 524288
# How many chunks can be waiting for persistence before sample
# ingestion will be throttled. Many chunks waiting to be persisted will
# increase the checkpoint size.
#
# -storage.local.memory-chunks 1048576
# How many chunks to keep in memory. While the size of a chunk is
# 1kiB, the total memory usage will be significantly higher than this value
# * 1kiB. Furthermore, for various reasons, more chunks might have to be
# kept in memory temporarily. Sample ingestion will be throttled if the
# configured value is exceeded by more than 10%.
#
# -storage.local.path "/var/lib/prometheus/metrics"
# Base path for metrics storage.
#
# -storage.local.pedantic-checks false
# If set, a crash recovery will perform checks on each series file.
# This might take a very long time.
#
# -storage.local.retention 360h0m0s
# How long to retain samples in the local storage.
#
# -storage.local.series-file-shrink-ratio 0.1
# A series file is only truncated (to delete samples that have
# exceeded the retention period) if it shrinks by at least the provided
# ratio. This saves I/O operations while causing only a limited storage
# space overhead. If 0 or smaller, truncation will be performed even for a
# single dropped chunk, while 1 or larger will effectively prevent any
# truncation.
#
# -storage.local.series-sync-strategy "adaptive"
# When to sync series files after modification. Possible values:
# 'never', 'always', 'adaptive'. Sync'ing slows down storage performance
# but reduces the risk of data loss in case of an OS crash. With the
# 'adaptive' strategy, series files are sync'd for as long as the storage
# is not too much behind on chunk persistence.
#
# -storage.remote.graphite-address
# The host:port of the remote Graphite server to send samples to.
# None, if empty.
#
# -storage.remote.graphite-prefix
# The prefix to prepend to all metrics exported to Graphite. None, if
# empty.
#
# -storage.remote.graphite-transport "tcp"
# Transport protocol to use to communicate with Graphite. 'tcp', if
# empty.
#
# -storage.remote.influxdb-url
# The URL of the remote InfluxDB server to send samples to. None, if
# empty.
#
# -storage.remote.influxdb.database "prometheus"
# The name of the database to use for storing samples in InfluxDB.
#
# -storage.remote.influxdb.retention-policy "default"
# The InfluxDB retention policy to use.
#
# -storage.remote.influxdb.username
# The username to use when sending samples to InfluxDB. The
# corresponding password must be provided via the INFLUXDB_PW environment
# variable.
#
# -storage.remote.opentsdb-url
# The URL of the remote OpenTSDB server to send samples to. None, if
# empty.
#
# -storage.remote.timeout 30s
# The timeout to use when sending samples to the remote storage.
#
# == WEB ==
#
# -web.console.libraries "/etc/prometheus/console_libraries"
# Path to the console library directory.
#
# -web.console.templates "/etc/prometheus/consoles"
# Path to the console template directory, available at /consoles.
#
# -web.enable-remote-shutdown false
# Enable remote service shutdown.
#
# -web.external-url
# The URL under which Prometheus is externally reachable (for
# example, if Prometheus is served via a reverse proxy). Used for
# generating relative and absolute links back to Prometheus itself. If the
# URL has a path portion, it will be used to prefix all HTTP endpoints
# served by Prometheus. If omitted, relevant URL components will be derived
# automatically.
#
# -web.listen-address ":9090"
# Address to listen on for the web interface, API, and telemetry.
#
# -web.local-assets "/usr/share/prometheus/web/"
# Path to static assets/templates directory.
#
# -web.telemetry-path "/metrics"
# Path under which to expose metrics.
#
# -web.user-assets
# Path to static asset directory, available at /user.
#

18
prometheus/files/node_exporter.systemd.jinja Normal file

@@ -0,0 +1,18 @@
[Unit]
Description=Prometheus exporter for machine metrics
Documentation=https://github.com/prometheus/node_exporter
Wants=basic.target
After=basic.target network.target

[Service]
User=prometheus
Group=prometheus
EnvironmentFile=/etc/default/node_exporter
ExecStart=/usr/bin/node_exporter $ARGS
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
Restart=always
RestartSec=42s

[Install]
WantedBy=multi-user.target

30
prometheus/files/node_exporter.upstart.jinja Normal file

@@ -0,0 +1,30 @@
# Prometheus Node Exporter (Upstart unit)
description "Prometheus exporter for machine metrics"

start on runlevel [2345]
stop on runlevel [06]

env NODE_EXPORTER=/usr/bin/node_exporter
env USER=prometheus
env GROUP=prometheus
env DEFAULTS=/etc/default/node_exporter
env RUNDIR=/var/run/node_exporter
env PID_FILE=/var/run/node_exporter/node_exporter.pid

pre-start script
  [ -e $DEFAULTS ] && . $DEFAULTS
  mkdir -p $RUNDIR || true
  chmod 0750 $RUNDIR || true
  chown $USER:$GROUP $RUNDIR || true
end script

script
  # read settings like GOMAXPROCS from "/etc/default/node_exporter", if available.
  [ -e $DEFAULTS ] && . $DEFAULTS
  export GOMAXPROCS=${GOMAXPROCS:-2}
  exec start-stop-daemon -c $USER -g $GROUP -p $PID_FILE -x $NODE_EXPORTER -S -- $ARGS
end script

respawn
respawn limit 10 10
kill timeout 10


@@ -0,0 +1,123 @@
global:
  # The smarthost and SMTP sender used for mail notifications.
  smtp_smarthost: 'localhost:25'
  smtp_from: 'alertmanager@example.org'
  smtp_auth_username: 'alertmanager'
  smtp_auth_password: 'password'
  # The auth token for Hipchat.
  hipchat_auth_token: '1234556789'
  # Alternative host for Hipchat.
  hipchat_url: 'https://hipchat.foobar.org/'

# The directory from which notification templates are read.
templates:
  - '/etc/alertmanager/template/*.tmpl'

# The root route on which each incoming alert enters.
route:
  # The labels by which incoming alerts are grouped together. For example,
  # multiple alerts coming in for cluster=A and alertname=LatencyHigh would
  # be batched into a single group.
  group_by: ['alertname', 'cluster', 'service']
  # When a new group of alerts is created by an incoming alert, wait at
  # least 'group_wait' to send the initial notification.
  # This ensures that multiple alerts for the same group that start firing
  # shortly after one another are batched together on the first
  # notification.
  group_wait: 30s
  # When the first notification was sent, wait 'group_interval' to send a batch
  # of new alerts that started firing for that group.
  group_interval: 5m
  # If an alert has successfully been sent, wait 'repeat_interval' to
  # resend it.
  repeat_interval: 3h
  # A default receiver
  receiver: team-X-mails
  # All the above attributes are inherited by all child routes and can be
  # overwritten on each.
  # The child route trees.
  routes:
    # This route performs a regular expression match on alert labels to
    # catch alerts that are related to a list of services.
    - match_re:
        service: ^(foo1|foo2|baz)$
      receiver: team-X-mails
      # The service has a sub-route for critical alerts, any alerts
      # that do not match, i.e. severity != critical, fall-back to the
      # parent node and are sent to 'team-X-mails'
      routes:
        - match:
            severity: critical
          receiver: team-X-pager
    - match:
        service: files
      receiver: team-Y-mails
      routes:
        - match:
            severity: critical
          receiver: team-Y-pager
    # This route handles all alerts coming from a database service. If there's
    # no team to handle it, it defaults to the DB team.
    - match:
        service: database
      receiver: team-DB-pager
      # Also group alerts by affected database.
      group_by: [alertname, cluster, database]
      routes:
        - match:
            owner: team-X
          receiver: team-X-pager
        - match:
            owner: team-Y
          receiver: team-Y-pager

# Inhibition rules allow muting a set of alerts given that another alert is
# firing.
# We use this to mute any warning-level notifications if the same alert is
# already critical.
inhibit_rules:
  - source_match:
      severity: 'critical'
    target_match:
      severity: 'warning'
    # Apply inhibition if the alertname is the same.
    equal: ['alertname', 'cluster', 'service']

receivers:
  - name: 'team-X-mails'
    email_configs:
      - to: 'team-X+alerts@example.org'

  - name: 'team-X-pager'
    email_configs:
      - to: 'team-X+alerts-critical@example.org'
    pagerduty_configs:
      - service_key: <team-X-key>

  - name: 'team-Y-mails'
    email_configs:
      - to: 'team-Y+alerts@example.org'

  - name: 'team-Y-pager'
    pagerduty_configs:
      - service_key: <team-Y-key>

  - name: 'team-DB-pager'
    pagerduty_configs:
      - service_key: <team-DB-key>

  - name: 'team-X-hipchat'
    hipchat_configs:
      - auth_token: <auth_token>
        room_id: 85
        message_format: html
        notify: true


@@ -0,0 +1,30 @@
# my global config
global:
  scrape_interval: 15s      # By default, scrape targets every 15 seconds.
  evaluation_interval: 15s  # By default, evaluate rules every 15 seconds.
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: 'codelab-monitor'

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first.rules"
  # - "second.rules"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 5s
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['localhost:9090']

18
prometheus/files/prometheus.systemd.jinja Normal file

@@ -0,0 +1,18 @@
[Unit]
Description=Monitoring system and time series database
Documentation=https://prometheus.io/docs/introduction/overview/
Wants=basic.target
After=basic.target network.target

[Service]
User=prometheus
Group=prometheus
EnvironmentFile=/etc/default/prometheus
ExecStart=/usr/bin/prometheus $ARGS
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
Restart=always
RestartSec=42s

[Install]
WantedBy=multi-user.target

30
prometheus/files/prometheus.upstart.jinja Normal file

@@ -0,0 +1,30 @@
# Prometheus Monitoring Framework (Upstart unit)
description "Prometheus Monitoring Framework"

start on runlevel [2345]
stop on runlevel [06]

env PROMETHEUS=/usr/bin/prometheus
env USER=prometheus
env GROUP=prometheus
env DEFAULTS=/etc/default/prometheus
env RUNDIR=/var/run/prometheus
env PID_FILE=/var/run/prometheus/prometheus.pid

pre-start script
  [ -e $DEFAULTS ] && . $DEFAULTS
  mkdir -p $RUNDIR || true
  chmod 0750 $RUNDIR || true
  chown $USER:$GROUP $RUNDIR || true
end script

script
  # read settings like GOMAXPROCS from "/etc/default/prometheus", if available.
  [ -e $DEFAULTS ] && . $DEFAULTS
  export GOMAXPROCS=${GOMAXPROCS:-2}
  exec start-stop-daemon -c $USER -g $GROUP -p $PID_FILE -x $PROMETHEUS -S -- $ARGS
end script

respawn
respawn limit 10 10
kill timeout 10

4
prometheus/init.sls Normal file

@@ -0,0 +1,4 @@
{% from "prometheus/map.jinja" import prometheus with context %}
include:
- prometheus.server

3
prometheus/map.jinja Normal file

@@ -0,0 +1,3 @@
{% import_yaml 'prometheus/defaults.yaml' as defaults %}
{% set prometheus = salt['pillar.get']('prometheus', default=defaults.prometheus, merge=True) %}
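{# pillar.get with merge=True recursively merges the pillar's "prometheus"
   key over defaults.yaml, so a pillar only needs the keys it changes, e.g.
   (hypothetical override):
   prometheus:
     server:
       version: 1.2.3.linux-amd64
#}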

77
prometheus/server.sls Normal file

@@ -0,0 +1,77 @@
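{#- Layout sketch: each release is unpacked to <install_dir>/prometheus-<version>,
    and alternatives points /usr/bin/prometheus at the active copy, so a
    version change only has to move the link. -#}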
{% from "prometheus/map.jinja" import prometheus with context %}
{%- set version_path = prometheus.server.install_dir ~ "/prometheus-" ~ prometheus.server.version %}
include:
- prometheus.user
prometheus_server_tarball:
archive.extracted:
- name: {{ prometheus.server.install_dir }}
- source: {{ prometheus.server.source }}
- source_hash: {{ prometheus.server.source_hash }}
- archive_format: tar
- if_missing: {{ version_path }}
prometheus_bin_link:
alternatives.install:
- name: prometheus
- link: /usr/bin/prometheus
- path: {{ version_path }}/prometheus
- priority: 10
- require:
- archive: prometheus_server_tarball
prometheus_server_config:
file.managed:
- name: {{ prometheus.server.args.config_file }}
- source: salt://prometheus/files/config.jinja
- template: jinja
- user: prometheus
- group: prometheus
- makedirs: True
- defaults:
data: {{ prometheus.server.config }}
prometheus_defaults:
file.managed:
- name: /etc/default/prometheus
- source: salt://prometheus/files/default-prometheus.jinja
- template: jinja
- defaults:
config_file: {{ prometheus.server.args.config_file }}
storage_local_path: {{ prometheus.server.args.storage.local_path }}
web_console_libraries: {{ version_path }}/console_libraries
web_console_templates: {{ version_path }}/consoles
{%- if prometheus.server.args.storage.local_path is defined %}
prometheus_storage_local_path:
file.directory:
- name: {{ prometheus.server.args.storage.local_path }}
- user: prometheus
- group: prometheus
- makedirs: True
- watch:
- file: prometheus_defaults
{%- endif %}
prometheus_service_unit:
file.managed:
{%- if grains.get('init') == 'systemd' %}
- name: /etc/systemd/system/prometheus.service
- source: salt://prometheus/files/prometheus.systemd.jinja
{%- elif grains.get('init') == 'upstart' %}
- name: /etc/init/prometheus.conf
- source: salt://prometheus/files/prometheus.upstart.jinja
{%- endif %}
- watch:
- file: prometheus_defaults
- require_in:
- file: prometheus_service
prometheus_service:
service.running:
- name: prometheus
- enable: True
- reload: True
- watch:
- file: prometheus_server_config

9
prometheus/user.sls Normal file

@@ -0,0 +1,9 @@
prometheus_group:
  group.present:
    - name: prometheus

prometheus_user:
  user.present:
    - name: prometheus
    - home: /var/lib/prometheus
    - gid_from_name: True

3
requirements.dev.txt Normal file

@@ -0,0 +1,3 @@
pytest
pytest-xdist
testinfra

65
test/cluster/Vagrantfile vendored Normal file

@@ -0,0 +1,65 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

required_plugins = %w( vagrant-hostmanager )
required_plugins.each do |plugin|
  exec "vagrant plugin install #{plugin};vagrant #{ARGV.join(" ")}" unless Vagrant.has_plugin? plugin || ARGV[0] == 'plugin'
end

Vagrant.configure(2) do |config|
  config.vm.box = "ubuntu/trusty64"

  if Vagrant.has_plugin?("vagrant-cachier")
    config.cache.scope = :box
  end

  config.hostmanager.enabled = true
  config.hostmanager.manage_guest = true
  config.hostmanager.include_offline = true

  config.vm.provision :shell, inline: "sudo locale-gen en_IE.UTF-8"

  (1..3).each do |m|
    config.vm.define "min#{m}" do |min|
      min.vm.network "private_network", ip: "172.16.199.1#{m}"
      min.vm.hostname = "min#{m}"
      min.vm.provider "virtualbox" do |vb|
        vb.name = "prometheus-min#{m}"
        vb.memory = "2048"
      end
      min.vm.provision :salt do |salt|
        salt.minion_config = "conf/minion"
        salt.minion_key = "keys/min#{m}.pem"
        salt.minion_pub = "keys/min#{m}.pub"
      end
    end
  end

  config.vm.define :master do |master|
    master.vm.network "private_network", ip: "172.16.199.10"
    master.vm.hostname = "salt"
    master.vm.synced_folder "../../prometheus", "/srv/formulas/prometheus"
    master.vm.synced_folder "../salt", "/srv/salt"
    master.vm.synced_folder "../pillar", "/srv/pillar"
    master.vm.provider "virtualbox" do |vb|
      vb.name = "prometheus-master"
      vb.memory = "1024"
    end
    master.vm.provision :shell, inline: "sudo apt-get install -y python-git python-pip"
    master.vm.provision :salt do |salt|
      salt.run_highstate = false
      salt.install_master = true
      salt.master_config = "conf/master"
      salt.master_key = "keys/master.pem"
      salt.master_pub = "keys/master.pub"
      salt.seed_master = {"min1": "keys/min1.pub",
                          "min2": "keys/min2.pub",
                          "min3": "keys/min3.pub"}
    end
  end
end

11
test/cluster/conf/master Normal file

@@ -0,0 +1,11 @@
fileserver_backend:
  - roots
#  - git

file_roots:
  base:
    - /srv/salt
    - /srv/formulas

#gitfs_remotes:
#  - https://github.com/repo/something-formula.git

3
test/cluster/conf/minion Normal file

@@ -0,0 +1,3 @@
grains:
  roles:
    - prometheus

27
test/cluster/keys/master.pem Normal file

@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAlbO37bcGrTXwDlHA7Oea2ZYS1e6UP6alvifCBfQNyPAqU/qr
tZmANExHev6jXjkgB8TKbwdZtg21zCsUfJzY2LAYlbj5iT3HDJ4fRnB0JIs5v6Zq
n/u8w2bB+K71TfqpzEFVcEbIQ7bMjfOTSx06XhqnsiDyLqKu7eoWUpCBtJO1+1UF
OYNJwVqnrGkab/4MPDSqnuOlNa0QP/OUSLANiw1nts4MFvdaOtFZ7fFCOuYdsocd
Ec6LFvzhqUjwSnCunIMndo7hiPUgKxyBogxSaTduPE6zCHeGNCkbov8LiuPYThvC
D+ZLZBje09QYPgNU8+iFaoAyOjIrEUja6LAlWwIDAQABAoIBAAR04DHcc/9uDqJu
fp7O2kZF+HOT8affbym1B0qkFFD6DViIqtnyIuXrq2kW8YgZjUGN8zTQ9DO+Pr5A
PhDjnnT88Q5r/km+pBFVF7tCF8MKvgJdkukGVn0+ruGXW8ghgTKKc1+vS8DwTTCg
oq7mxNkOVBWyc2M+BpUgld+JW8MjYuQItbxw8a/PNrJqKAdipFnE3THI0CIZhZEE
ja0ExjIJ4c+0bS5hPpq3LenQkeQbtBgeIFfPEm+1yO7ijtkeUEauwU0TAG4ELNOa
74biw7fyoI1i1PTSdHfBWJDTWsNm5mhi333bmwH84I6tmq6e6MM061cOKSOxRGYv
Z7dAykECgYEAtXQth67J4eLtNuqb4smplbDFLckVPQo5F34/kfy7rFKGWJdUJp4p
EzVOXeQ9J6oXYAHjz7yeb7bTYDvqp/rF6g9Ptui09Vt8oRdUYSlTZVBO/hYZkbMt
3IUOXC+R4+XVJZvvxkQtO8h70s/v4jL4PdchNqDZ30N5Y2CJCXWcuzsCgYEA0zQl
0JS9EB5Xl6U3T9p3I26KQp84TttTeWhUhlraojJZra9BnsXF4bqdzRBVAv0+gmeF
t+M9MjxPvGLOgZA0GQ3rJHjRMj5zAYiYFbxCfbvU0kvQ1lLfNameLtRnkQrLBCSN
4sTjETxd3HFNqRbGaif5OpV94maYOgzjQlbMXGECgYAi/9q8XiGAmYXJ3uzcjKtS
PTt7PlsDYBJtDqPyY75LWcuiEOZhYIZptxF7mMeW+/7wPM0VR7uDrs8Ms+HqLru4
DuYAUNh+rvvWEqrq5s64LwpdhiO+b8I708pWS7KnM7CuRWq1YncbHeuMWSFbDxvE
ydcVM2dy4g/q9DbahT4K+wKBgQCgOkSh6+VO8CX1ltLSkuOljrv3dQFr2bqHNZ5C
W3TYbihYusemo1XPPRtwMf68iNE+bzkj25JWpSi8u5z88CCGDWghwEK8wmHPfQgc
HTm4V6JKdFgX8MED3g1ya5OylqSUHQNDUakOL2Tp009egze921uZlUXuV3UPPS2/
60eYoQKBgQCqOPv/40dW1CE67y+z2qXWye6GNkUcxCM3A0WW3wRFFXuPfy+4lBSX
43+Zq6w6+sAcv4RpKQFF4kjtLGkekAOWgOmQmPafSeUUZ2dtuydVk0Q9GDMaAFHO
n1teSXnYfb2vZMz4kQ6a5PyrqKLhwfk7aCaDXYF4NlQTGokxtTqa5w==
-----END RSA PRIVATE KEY-----

9
test/cluster/keys/master.pub Normal file

@@ -0,0 +1,9 @@
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAlbO37bcGrTXwDlHA7Oea
2ZYS1e6UP6alvifCBfQNyPAqU/qrtZmANExHev6jXjkgB8TKbwdZtg21zCsUfJzY
2LAYlbj5iT3HDJ4fRnB0JIs5v6Zqn/u8w2bB+K71TfqpzEFVcEbIQ7bMjfOTSx06
XhqnsiDyLqKu7eoWUpCBtJO1+1UFOYNJwVqnrGkab/4MPDSqnuOlNa0QP/OUSLAN
iw1nts4MFvdaOtFZ7fFCOuYdsocdEc6LFvzhqUjwSnCunIMndo7hiPUgKxyBogxS
aTduPE6zCHeGNCkbov8LiuPYThvCD+ZLZBje09QYPgNU8+iFaoAyOjIrEUja6LAl
WwIDAQAB
-----END PUBLIC KEY-----

27
test/cluster/keys/min1.pem Normal file

@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAivO8wR28TXAcKJhZmt+8w9Dc/KpqakkjHVea3ujBweLSmajj
qJPwd9DkdzjwygZBLNTYm24DSljBHcolG/tByZxM6/lP8HIWCJqxSaIeW6L0Hwab
fyj/OU/mHbNH51HXzwnRkOsNWp1X+wU7cSJyBncA1IrObMw27A814h6y5CbYG6gc
mDk3WkljDjx29uzNY+Sd7lPpbLy9p2z5eMh2crCi+mgXrJmWZSLtlXHvDKFQhWZo
BJbZzs4hdjAia7y9QScbTra1rIxBYYEwiuOsymCVkGNyX18lCuu4DFop22Qnlbqz
Y0LzlYUlSUndLQraTDqh7D5HScUzj83jKie1owIDAQABAoIBAGNXGk1eeu9KBXeo
Y/drmcLNIhCprWbLXwVtSlWvRkbdaP9eVVSCX9rpR01NMijdC9+HVEza/BM9HOO1
T46PtVr0GbJjDZmzlrkUD0e7VcTh3XIJAUaKAt3wl24IbJGeW5GzTVY0FdoAv5PB
C9w4ahjhH4DUs/vojCN8RGOi2vZtGUfRs0slbRTqyHrIjKvhSCUOwJp7ZFGVLP3X
3aL2j/5CBy0/0m73EXR2YS68UvB+l8h1XUEYk+mi/SmN4x9QloW8reUCbb4CXmNr
3qlYpomQmwBjLsINvXlHgEG+mnqHNBvfCh3mZxcpvcT6wBtc6K1B6o3ThaV1qFbI
9/lDJ/ECgYEAu0QN7o/4PIgHSczUWzaC6hImagvJ4PcPH63hNVJaKborwlUrkT5X
lQNpSLMyK9kRd7TyABC0sZ9+UdRCHUuC7iXfn4RT98DiyuKpmtM/2ATnKuZ6Zzh3
WLRC93/mYz9/mp+GmvDDzeHJxrRK+LP4zsPuk0fXSF6i3Z9PjJ0Rvy8CgYEAvfQC
MXV4i5DhG+FnBMTDV0JGjDdyRaupwatowdQ19n9xvEscglQcY/NvJx+Q+lktWCiJ
F/BEbxe0C5QDZADat99u7x8rwBIl8o6nJHqzYbbCcUT7e6fIS+8gSpFgOaXtD0n0
/SfvYPyJp6aTRZt/aGpwlADqJrjkm2SBGT8I880CgYA59mWNirheHAiNej3cgbHh
u35yYnW5ER94QbysYXIQ6lPzEaOrkC1RrT2gR/7mfYifkb058fHL7L8PmvBf2uiK
x+RAnpfS3rwT6jZu+qYPOe1HZRFL87lVFm6H5umrnJLljC0AtjcU+acXvuRZuHLx
pMmqgpkLCGfGyzG3J0K/9wKBgQCb4SLvnXvn5GOm8okM07+N2vhz1ohy/hVXtidI
cLI774fRQj5KAWYu67U6gOADYa0hXFpsh6JRHD2HBtbleQIDxFqP5p+LhhkX/NLS
z9JnDIhyVW6eoihvj/OK3dP+Wti4JDPkj6IUkAkSyFKw25Nph3eNLVb5od8QWiDL
K/xXvQKBgEWAvAas9yGhgSEhgX9xMyGbll8CLie6Xd9VwqF9gnlkoYLUOcOxME9K
CU1j/dryAED1pjtZbkYS40hbzoWiw/Qvt5deQPzbKAn19C/RZK+5EnCNzkUgHlk6
5KPMzlXZ3nCwrnAYF12ItziAP3gDlpRbJqQGPCkVruY2teQjQOxU
-----END RSA PRIVATE KEY-----

9
test/cluster/keys/min1.pub Normal file

@@ -0,0 +1,9 @@
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAivO8wR28TXAcKJhZmt+8
w9Dc/KpqakkjHVea3ujBweLSmajjqJPwd9DkdzjwygZBLNTYm24DSljBHcolG/tB
yZxM6/lP8HIWCJqxSaIeW6L0Hwabfyj/OU/mHbNH51HXzwnRkOsNWp1X+wU7cSJy
BncA1IrObMw27A814h6y5CbYG6gcmDk3WkljDjx29uzNY+Sd7lPpbLy9p2z5eMh2
crCi+mgXrJmWZSLtlXHvDKFQhWZoBJbZzs4hdjAia7y9QScbTra1rIxBYYEwiuOs
ymCVkGNyX18lCuu4DFop22QnlbqzY0LzlYUlSUndLQraTDqh7D5HScUzj83jKie1
owIDAQAB
-----END PUBLIC KEY-----

27
test/cluster/keys/min2.pem Normal file

@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEA1HA3HZrHBBeS22+ocqidpvY4nymcNjnpnytnRWzbPyLJHPmM
dPYGkmttaeApUtOFKaqmJ9WQoNvyEdrWgkRuMBl++1LavSqL1GDhxkz/nM/JI3vq
VByhyC7EFhMxtvltE8bqIk4CvPM2Ya+1iEL1f8vahigUNQU7TRq6Yi02w4xnL0sb
R5J8w/7DX5EvO8Se4mG6+0mb4EWg2UnYWdZO3BHpv0OrrA9uEzF+j02/R6JclFGg
3h1COc9aOlJ5q8rrd+4q+et/Q1JBo9cVV+9QiuJg62CwpfI2sPAihEts7SYKpOIn
5THt0dCVCv8/JGvBWdBnlcyeNSA/oWPbyMqJOwIDAQABAoIBAEMuHW/iQ2962PlF
7TodMoXbNXq32OuZEtVAD4935jmoEXt1O0K9jsR87oPd3JORynmrupG8Stlj1MBq
uQ1HAKV7Wpm10PCjZMbLwHpKXUdEunHbPPZOe6bniyED/uPDKwhnDiG9E8aXDyqD
O2AU9LfuSnPEdudxeRhgDHMBo2DhEL7ehKLAbpcusOiAQtevunYIKtLmpXKlOZ13
GUVgPa04meJaYu2grXAnC3pTMiGoGUO5HO/eDTPUirbZKBBMCLufiGQJq19C5NS4
FWj9nLDS1Bl8q+gnlLV6/LBC7Ieojkrv1zwKR5D+W4CskXt76dt1DfBmDCWbX+78
dd8biPECgYEA1uB8JCK1QZ+1QxaNzjLqOKLrePJ1O9grOV0NosqALJO6xDch5qnY
Fzr6tN39Yhd1dGP9Gyh4KB4t1RBQN4EmHDdbX3NdPmdYXH9MypXmTsap8cJdDVq+
iemGuyRP5kmbaL0Yobyjv1s9Ttd9oII5LBHdF9RS8tm1KnPN1oesMVkCgYEA/RhB
+6+wqPIMD1vV/iGDc0H8hCFkcn6geN5jui4RlBDNJWvRN5mie097L83Qhm8hxCjX
DXLOoRtf7FARUSHC5jT4rRH/0JDlYHwLYjOZ8IedLhKJNcTHYnRu5gHG3vZjC00J
a4dKaN+enqrgnmXzbAUgPntTvcQle8vDik47SLMCgYBEsGI+0vGKVtmE5rLSFaiW
Dk6jzS34wXOf1MVFIuKyXU845j7qE6VGV85I/yW+s60SVdxVUCyI9ozELYDXHWvn
XTLSYniOOE8g0njj0eNbj1pINE+2sylxwLsXG9aG0UTiyIAVQ848n5gLmgtIwhgw
pE0/DtqtUzb8qSRFzf0XkQKBgQCoOP9icpxRLtClRL0vdMcSTMGycR96mn++1tIi
ZYx5hdFVFhyxgARnQaT3WS1IrYUcBKkr3ytIGhHf2DgqXO5WZM8E+fm2fxh20Pwg
AXxIS1tFzDw26Nw9rHYpmvS8LbgXQtUIC5yBqGCdxaYWolzxZ1bPMNmwQDdJ2V3X
B7iP0QKBgFFNWvsKcTrKmAe+9XOgH2TH9pzpiLxY6k6qo2psRB6JjNB9O9kVXhUc
O49A+IVlhfrTAsT2/ygXhyGLPdDBQUyXu+mquiteg4lgVmzjOveEMXzwgjb94qdH
JsDSYLbuG6MfY6BztXALzTCXlo/mgWjY4NZPpDX5qUyfrgts+Xko
-----END RSA PRIVATE KEY-----

9
test/cluster/keys/min2.pub Normal file

@@ -0,0 +1,9 @@
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1HA3HZrHBBeS22+ocqid
pvY4nymcNjnpnytnRWzbPyLJHPmMdPYGkmttaeApUtOFKaqmJ9WQoNvyEdrWgkRu
MBl++1LavSqL1GDhxkz/nM/JI3vqVByhyC7EFhMxtvltE8bqIk4CvPM2Ya+1iEL1
f8vahigUNQU7TRq6Yi02w4xnL0sbR5J8w/7DX5EvO8Se4mG6+0mb4EWg2UnYWdZO
3BHpv0OrrA9uEzF+j02/R6JclFGg3h1COc9aOlJ5q8rrd+4q+et/Q1JBo9cVV+9Q
iuJg62CwpfI2sPAihEts7SYKpOIn5THt0dCVCv8/JGvBWdBnlcyeNSA/oWPbyMqJ
OwIDAQAB
-----END PUBLIC KEY-----

27
test/cluster/keys/min3.pem Normal file

@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAtir2HZ/CoT93HRHrj1IYPt91X63TAk6u+khbq1mGdf2QmhrW
s+OTGFVZh0pMFax0jJvJUIkii4jaehemJUtZzkmqfyS8Sk/Nlzg7FCokjnwqIftM
Q2+9PBcY2R3YSOoEuaNMpCbrBzTfrih3rKHM/pRJ/hOIPQ7XaKrqh+/L695AJXPH
FYvZx4DOX2RdCoGvuN4utJmGDDpF0jPEp8J/aV9hov/vPC0auuPBiIAGyPsRsI4a
IW5ghBZmGBILqkg7WO8Jyg6CVa0bU3ynr64Wltjz4mocH4vV1uOa7gxShXkyKROw
sDiOS7GRBKPqKkHpbMGRfBXEafIirS+6xzc2ewIDAQABAoIBAA68zwh6gk51Sfeq
GXqQ07FSdrZ045tOLk3FSNVF+uDU0LwG69PF14KidQnC8wDV4N/3QoQndAoB2lsC
Ssi9ndhYkViT6j/A0Qso2264Mhjs5qO1JuhVw+N2ouX9yh+r/xHdqjelhmIg3u/C
ClyHAoSeDpZlkiIOb1LicPLqbX75awpcSOkuMNtfa3XBW6BPPSgQh8QMF9CRs6fJ
xUNBRLjsUFLV6LUESei3DIbZQPwJwX+TzsMVtYTawQHwazNRsTB2yV4JJMA/6NKR
Txhr5VMuDhx9yMbsM/ye2r644Ha7RWS0ZxghaJi6XDpfm7GwJBVtFb8r14z9uTa3
acDsIsECgYEAy404/uKDq5CGMiATIXBQZkmtKpSBZxGalRAfSqZw5WJFv+Z1pc5I
4OxUoq7qS1TRyCXftbZyeo03gzqpCti4CTZovOOtG/+x0UVgUxz7ggpD4h+D/xM4
CfrOKizhhaUHJFJqG5RvehWKb/sTWiPSXmyeq3jJ3ZcvsVp2N8nIfdECgYEA5Rs1
HqcmNg7LJ9OzqjAerNX0SJBP8Y91F+qqfdF+BhPMByxamspzlWabOmBnSpnJaxUx
OX+5JCBhpR/5BtGFKaEgI+ppYHl8iooADWp6SzPd6lXhIJZGVF27qdimvEk73641
540hmJlIAymiCbvkyWdoLV0fvEseEz7NME/oBosCgYEAqc8hmjk7oRfLa+uoKgA5
gmu6Y8hJ/8MG044EnNEnY6VNDN9kjDMAR2vA24rSeI/FBpTaAQy4ihZfFkIMnbjY
Jw9V5yaE1tPD8xJOXmfSRdTsdqrWAih1WpX6AeM5LLXUtG9wh5uYOB/aQrsRlORH
bnBtGh2FQjALIIiYEMhsUtECgYAlVBeFlQpeyG5FYPLnCcwVsQ6KPCkqXIthiDSb
J2T3LIaTrKBelBnc/8jO7CIoP4JX1i4dBjhuMHB4bQswFzcxHzWTZIAi3ehy0iVM
Cks+sYa4xaFWo+V1nYgUFl6kUvJUcZgK0C3nJSJckY37NAgE4LpSmdev4DVYQWVB
N+PAHQKBgHLSijHknPIeXUzqmSpnsGy+QHY5bgVp+0lFOwPdl+jUs9ciJL+D5Agu
WXXbj+57WAKeGjd9rJhAUimAmTitINSlJ9ggNLqqHb3/2oTNKp5pdSjbCVY5S4pp
1lYwCVBwJvXk4SRx11XthNTdCfp3WY1J3chi9srC8P6OXzd9RtQZ
-----END RSA PRIVATE KEY-----

9
test/cluster/keys/min3.pub Normal file

@@ -0,0 +1,9 @@
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtir2HZ/CoT93HRHrj1IY
Pt91X63TAk6u+khbq1mGdf2QmhrWs+OTGFVZh0pMFax0jJvJUIkii4jaehemJUtZ
zkmqfyS8Sk/Nlzg7FCokjnwqIftMQ2+9PBcY2R3YSOoEuaNMpCbrBzTfrih3rKHM
/pRJ/hOIPQ7XaKrqh+/L695AJXPHFYvZx4DOX2RdCoGvuN4utJmGDDpF0jPEp8J/
aV9hov/vPC0auuPBiIAGyPsRsI4aIW5ghBZmGBILqkg7WO8Jyg6CVa0bU3ynr64W
ltjz4mocH4vV1uOa7gxShXkyKROwsDiOS7GRBKPqKkHpbMGRfBXEafIirS+6xzc2
ewIDAQAB
-----END PUBLIC KEY-----

12
test/pillar/prometheus.sls Normal file

@@ -0,0 +1,12 @@
prometheus:
  server:
    config:
      scrape_configs:
        - job_name: 'prometheus'
          scrape_interval: 5s
          static_configs:
            - targets: ['localhost:9090']
        - job_name: 'node'
          scrape_interval: 5s
          static_configs:
            - targets: ['localhost:9100']

3
test/pillar/top.sls Normal file

@@ -0,0 +1,3 @@
base:
  '*':
    - prometheus

3
test/salt/top.sls Normal file

@@ -0,0 +1,3 @@
base:
  '*':
    - prometheus

37
test/single/Vagrantfile vendored Normal file

@@ -0,0 +1,37 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant.configure(2) do |config|
  config.vm.box = "bento/ubuntu-16.04"

  if Vagrant.has_plugin?("vagrant-cachier")
    config.cache.scope = :box
  end

  config.vm.provision :shell, inline: "sudo locale-gen en_IE.UTF-8"

  config.vm.network "forwarded_port", guest: 9090, host: 9090, auto_correct: true
  config.vm.network "forwarded_port", guest: 9100, host: 9100, auto_correct: true

  config.vm.hostname = "prometheus-formula"

  config.vm.synced_folder "../../prometheus", "/srv/formulas/prometheus"
  config.vm.synced_folder "../salt", "/srv/salt"
  config.vm.synced_folder "../pillar", "/srv/pillar/"
  # Testing with tox and testinfra
  # config.vm.synced_folder "../testinfra", "/vagrant/testinfra"

  config.vm.provider "virtualbox" do |vb|
    vb.name = "prometheus-formula"
    vb.memory = "1024"
  end

  config.vm.provision :salt do |salt|
    salt.masterless = true
    salt.minion_config = "conf/minion"
    salt.run_highstate = true
  end

  #config.vm.provision "shell", inline: "sudo pip install tox"
  #config.vm.provision "test", type: "shell" do |t|
  #  t.inline = "tox -c /vagrant/tox.ini"
  #end
end

13
test/single/conf/minion Normal file

@@ -0,0 +1,13 @@
file_client: local

fileserver_backend:
  - roots

file_roots:
  base:
    - /srv/salt
    - /srv/formulas

grains:
  roles:
    - prometheus

6
test/single/tox.ini Normal file

@@ -0,0 +1,6 @@
[tox]
skipsdist = True

[testenv]
deps = testinfra
commands = testinfra -v
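# Intended to be run from inside the test VM, e.g. "tox -c /vagrant/tox.ini"
# (see the commented provisioner in test/single/Vagrantfile).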

2
test/testinfra/test_prometheus.py Normal file

@@ -0,0 +1,2 @@
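# testinfra injects host modules such as User as pytest fixtures by name.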
def test_prometheus(User):
    assert User("prometheus").exists