commit
cb170d8e90
59
README.rst
59
README.rst
|
@ -44,20 +44,36 @@ Server installed with PostgreSQL database
|
|||
user: grafana
|
||||
password: passwd
|
||||
|
||||
Server installed with default StackLight JSON dashboards
|
||||
Server installed with default StackLight JSON dashboards. This will
|
||||
be replaced by the possibility for a service to provide its own dashboard
|
||||
using salt-mine.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
grafana:
|
||||
server:
|
||||
enabled: true
|
||||
admin:
|
||||
user: admin
|
||||
password: passwd
|
||||
dashboards:
|
||||
enabled: true
|
||||
path: /var/lib/grafana/dashboards
|
||||
|
||||
Server with theme overrides
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
grafana:
|
||||
server:
|
||||
enabled: true
|
||||
theme:
|
||||
light:
|
||||
css_override:
|
||||
source: http://path.to.theme
|
||||
source_hash: sha256=xyz
|
||||
build: xyz
|
||||
dark:
|
||||
css_override:
|
||||
source: salt://path.to.theme
|
||||
|
||||
|
||||
Collector setup
|
||||
---------------
|
||||
|
@ -74,7 +90,7 @@ Used to aggregate dashboards from monitoring node.
|
|||
Client setups
|
||||
-------------
|
||||
|
||||
Client enforced data sources
|
||||
Client with token based auth
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
|
@ -86,12 +102,43 @@ Client enforced data sources
|
|||
host: grafana.host
|
||||
port: 3000
|
||||
token: token
|
||||
|
||||
Client with base auth
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
grafana:
|
||||
client:
|
||||
enabled: true
|
||||
server:
|
||||
protocol: https
|
||||
host: grafana.host
|
||||
port: 3000
|
||||
user: admin
|
||||
password: password
|
||||
|
||||
Client enforcing graphite data source
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
grafana:
|
||||
client:
|
||||
enabled: true
|
||||
datasource:
|
||||
graphite:
|
||||
type: graphite
|
||||
host: mtr01.domain.com
|
||||
protocol: https
|
||||
port: 443
|
||||
|
||||
Client enforcing elasticsearch data source
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
grafana:
|
||||
client:
|
||||
enabled: true
|
||||
datasource:
|
||||
elasticsearch:
|
||||
type: elasticsearch
|
||||
host: log01.domain.com
|
||||
|
@ -157,7 +204,7 @@ The default format of Grafana dashboards with lists for rows, panels and targets
|
|||
span: 6
|
||||
editable: false
|
||||
type: graph
|
||||
targets:
|
||||
targets:
|
||||
- refId: A
|
||||
target: "support_prd.cfg01_iot_tcpcloud_eu.cpu.0.idle"
|
||||
datasource: graphite01
|
||||
|
|
|
@ -0,0 +1,584 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Manage Grafana v3.0 Dashboards
|
||||
|
||||
.. versionadded:: 2016.3.0
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
grafana:
|
||||
grafana_timeout: 3
|
||||
grafana_token: qwertyuiop
|
||||
grafana_url: 'https://url.com'
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
Ensure minimum dashboard is managed:
|
||||
grafana_dashboard.present:
|
||||
- name: insightful-dashboard
|
||||
- base_dashboards_from_pillar:
|
||||
- default_dashboard
|
||||
- base_rows_from_pillar:
|
||||
- default_row
|
||||
- base_panels_from_pillar:
|
||||
- default_panel
|
||||
- dashboard:
|
||||
rows:
|
||||
- title: Usage
|
||||
panels:
|
||||
- targets:
|
||||
- target: alias(constantLine(50), 'max')
|
||||
title: Imaginary
|
||||
type: graph
|
||||
|
||||
|
||||
The behavior of this module is to create dashboards if they do not exist, to
|
||||
add rows if they do not exist in existing dashboards, and to update rows if
|
||||
they exist in dashboards. The module will not manage rows that are not defined,
|
||||
allowing users to manage their own custom rows.
|
||||
'''
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import copy
|
||||
import json
|
||||
import requests
|
||||
|
||||
# Import Salt libs
|
||||
import salt.ext.six as six
|
||||
from salt.utils.dictdiffer import DictDiffer
|
||||
|
||||
|
||||
def __virtual__():
    # NOTE: docstring previously said "v2.0" but the guard below loads this
    # module only when grafana_version == 3.
    '''Only load if grafana v3.0 is configured.'''
    return __salt__['config.get']('grafana_version', 1) == 3
|
||||
|
||||
|
||||
# Pillar keys holding the implicitly-inherited default building blocks.
# These are always prepended to the caller-supplied base_*_from_pillar lists.
_DEFAULT_DASHBOARD_PILLAR = 'grafana_dashboards:default'
_DEFAULT_PANEL_PILLAR = 'grafana_panels:default'
_DEFAULT_ROW_PILLAR = 'grafana_rows:default'
# Pillar key listing row titles that should be moved to the top of a dashboard.
_PINNED_ROWS_PILLAR = 'grafana_pinned_rows'
|
||||
|
||||
|
||||
def present(name,
            base_dashboards_from_pillar=None,
            base_panels_from_pillar=None,
            base_rows_from_pillar=None,
            dashboard=None,
            dashboard_format='yaml',
            profile='grafana'):
    '''
    Ensure the grafana dashboard exists and is managed.

    name
        Name of the grafana dashboard.

    base_dashboards_from_pillar
        A pillar key that contains a list of dashboards to inherit from

    base_panels_from_pillar
        A pillar key that contains a list of panels to inherit from

    base_rows_from_pillar
        A pillar key that contains a list of rows to inherit from

    dashboard
        A dict that defines a dashboard that should be managed.

    dashboard_format
        You can use two formats for dashboards. You can use the JSON format
        if you provide a complete dashboard in raw JSON or you can use the YAML
        format (this is the default) and provide a description of the
        dashboard in YAML.

    profile
        A pillar key or dict that contains grafana information
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    dashboard = dashboard or {}

    # A string profile is a config key pointing at the real profile dict.
    if isinstance(profile, six.string_types):
        profile = __salt__['config.option'](profile)

    if dashboard_format == 'json':
        # In this case, a raw JSON of the full dashboard is provided.
        # No inheritance/merging is performed; the payload is pushed as-is.
        # NOTE(review): this path ignores __opts__['test'] — confirm intended.
        response = _update(dashboard, profile)

        if response.get('status') == 'success':
            ret['comment'] = 'Dashboard {0} created.'.format(name)
            ret['changes']['new'] = 'Dashboard {0} created.'.format(name)
        else:
            ret['result'] = False
            ret['comment'] = ("Failed to create dashboard {0}, "
                              "response={1}").format(name, response)

        return ret

    base_dashboards_from_pillar = base_dashboards_from_pillar or []
    base_panels_from_pillar = base_panels_from_pillar or []
    base_rows_from_pillar = base_rows_from_pillar or []

    # Add pillar keys for default configuration
    base_dashboards_from_pillar = ([_DEFAULT_DASHBOARD_PILLAR] +
                                   base_dashboards_from_pillar)
    base_panels_from_pillar = ([_DEFAULT_PANEL_PILLAR] +
                               base_panels_from_pillar)
    base_rows_from_pillar = [_DEFAULT_ROW_PILLAR] + base_rows_from_pillar

    # Build out all dashboard fields
    new_dashboard = _inherited_dashboard(
        dashboard, base_dashboards_from_pillar, ret)
    # The state name always wins as the dashboard title.
    new_dashboard['title'] = name
    rows = new_dashboard.get('rows', [])
    for i, row in enumerate(rows):
        rows[i] = _inherited_row(row, base_rows_from_pillar, ret)
    for row in rows:
        panels = row.get('panels', [])
        for i, panel in enumerate(panels):
            panels[i] = _inherited_panel(panel, base_panels_from_pillar, ret)
    _auto_adjust_panel_spans(new_dashboard)
    _ensure_panel_ids(new_dashboard)
    _ensure_annotations(new_dashboard)

    # Create dashboard if it does not exist
    url = 'db/{0}'.format(name)
    old_dashboard = _get(url, profile)
    if not old_dashboard:
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Dashboard {0} is set to be created.'.format(name)
            return ret

        response = _update(new_dashboard, profile)
        if response.get('status') == 'success':
            ret['comment'] = 'Dashboard {0} created.'.format(name)
            ret['changes']['new'] = 'Dashboard {0} created.'.format(name)
        else:
            ret['result'] = False
            ret['comment'] = ("Failed to create dashboard {0}, "
                              "response={1}").format(name, response)
        return ret

    # Add unmanaged rows to the dashboard. They appear at the top if they are
    # marked as pinned. They appear at the bottom otherwise.
    managed_row_titles = [row.get('title')
                          for row in new_dashboard.get('rows', [])]
    new_rows = new_dashboard.get('rows', [])
    for old_row in old_dashboard.get('rows', []):
        if old_row.get('title') not in managed_row_titles:
            new_rows.append(copy.deepcopy(old_row))
    _ensure_pinned_rows(new_dashboard)
    _ensure_panel_ids(new_dashboard)

    # Update dashboard if it differs
    dashboard_diff = DictDiffer(_cleaned(new_dashboard),
                                _cleaned(old_dashboard))
    updated_needed = (dashboard_diff.changed() or
                      dashboard_diff.added() or
                      dashboard_diff.removed())
    if updated_needed:
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = ('Dashboard {0} is set to be updated, '
                              'changes={1}').format(
                                  name,
                                  json.dumps(
                                      _dashboard_diff(
                                          _cleaned(new_dashboard),
                                          _cleaned(old_dashboard)
                                      ),
                                      indent=4
                                  ))
            return ret

        response = _update(new_dashboard, profile)
        if response.get('status') == 'success':
            # NOTE(review): updated_dashboard and the re-derived
            # dashboard_diff below are never used afterwards — the reported
            # changes come from new vs. old. Looks like dead code; confirm.
            updated_dashboard = _get(url, profile)
            dashboard_diff = DictDiffer(_cleaned(updated_dashboard),
                                        _cleaned(old_dashboard))
            ret['comment'] = 'Dashboard {0} updated.'.format(name)
            ret['changes'] = _dashboard_diff(_cleaned(new_dashboard),
                                             _cleaned(old_dashboard))
        else:
            ret['result'] = False
            ret['comment'] = ("Failed to update dashboard {0}, "
                              "response={1}").format(name, response)
        return ret

    ret['comment'] = 'Dashboard present'
    return ret
|
||||
|
||||
|
||||
def absent(name, profile='grafana'):
    '''
    Ensure the named grafana dashboard is absent.

    name
        Name of the grafana dashboard.

    profile
        A pillar key or dict that contains grafana information
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}

    # Resolve a profile name to its config dict.
    if isinstance(profile, six.string_types):
        profile = __salt__['config.option'](profile)

    dashboard_url = 'db/{0}'.format(name)

    # Guard clause: nothing to do when the dashboard does not exist.
    if not _get(dashboard_url, profile):
        ret['comment'] = 'Dashboard absent'
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Dashboard {0} is set to be deleted.'.format(name)
        return ret

    _delete(dashboard_url, profile)
    ret['comment'] = 'Dashboard {0} deleted.'.format(name)
    ret['changes']['new'] = 'Dashboard {0} deleted.'.format(name)
    return ret
|
||||
|
||||
|
||||
# Fields stripped by _cleaned() before diffing — presumably because the
# server rewrites them on save and they would cause spurious diffs; confirm
# against the Grafana API behavior.
_IGNORED_DASHBOARD_FIELDS = [
    'id',
    'originalTitle',
    'version',
]
_IGNORED_ROW_FIELDS = []
_IGNORED_PANEL_FIELDS = [
    'grid',
    'mode',
    'tooltip',
]
_IGNORED_TARGET_FIELDS = [
    'textEditor',
]
|
||||
|
||||
|
||||
def _cleaned(_dashboard):
    '''Return a deep copy of the dashboard with volatile fields removed.

    Drops the _IGNORED_* fields at every level and strips falsey panel
    entries, so two dashboards can be compared meaningfully.
    '''
    dashboard = copy.deepcopy(_dashboard)

    for field in _IGNORED_DASHBOARD_FIELDS:
        dashboard.pop(field, None)

    for row in dashboard.get('rows', []):
        for field in _IGNORED_ROW_FIELDS:
            row.pop(field, None)
        for index, panel in enumerate(row.get('panels', [])):
            for field in _IGNORED_PANEL_FIELDS:
                panel.pop(field, None)
            for target in panel.get('targets', []):
                for field in _IGNORED_TARGET_FIELDS:
                    target.pop(field, None)
            # Drop falsey panel fields entirely.
            row['panels'][index] = _stripped(panel)

    return dashboard
|
||||
|
||||
|
||||
def _inherited_dashboard(dashboard, base_dashboards_from_pillar, ret):
    '''Merge the given dashboard on top of its pillar-defined parents.

    Later sources win on conflicting keys; 'tags' are unioned across all
    sources. Missing non-default pillar keys add a warning to *ret*.
    '''
    sources = []
    for pillar_key in base_dashboards_from_pillar:
        parent = __salt__['pillar.get'](pillar_key)
        if parent:
            sources.append(parent)
        elif pillar_key != _DEFAULT_DASHBOARD_PILLAR:
            ret.setdefault('warnings', [])
            warning = 'Cannot find dashboard pillar "{0}".'.format(pillar_key)
            if warning not in ret['warnings']:
                ret['warnings'].append(warning)
    # The explicit dashboard goes last so it overrides every parent.
    sources.append(dashboard)

    merged = {}
    tag_set = set()
    for source in sources:
        tag_set.update(source.get('tags', []))
        merged.update(source)
    merged['tags'] = list(tag_set)
    return merged
|
||||
|
||||
|
||||
def _inherited_row(row, base_rows_from_pillar, ret):
    '''Merge the given row on top of its pillar-defined parents.

    Later sources win; missing non-default pillar keys warn via *ret*.
    '''
    sources = []
    for pillar_key in base_rows_from_pillar:
        parent = __salt__['pillar.get'](pillar_key)
        if parent:
            sources.append(parent)
        elif pillar_key != _DEFAULT_ROW_PILLAR:
            ret.setdefault('warnings', [])
            warning = 'Cannot find row pillar "{0}".'.format(pillar_key)
            if warning not in ret['warnings']:
                ret['warnings'].append(warning)
    # The explicit row goes last so it overrides every parent.
    sources.append(row)

    merged = {}
    for source in sources:
        merged.update(source)
    return merged
|
||||
|
||||
|
||||
def _inherited_panel(panel, base_panels_from_pillar, ret):
    '''Merge the given panel on top of its pillar-defined parents.

    Later sources win; missing non-default pillar keys warn via *ret*.
    '''
    sources = []
    for pillar_key in base_panels_from_pillar:
        parent = __salt__['pillar.get'](pillar_key)
        if parent:
            sources.append(parent)
        elif pillar_key != _DEFAULT_PANEL_PILLAR:
            ret.setdefault('warnings', [])
            warning = 'Cannot find panel pillar "{0}".'.format(pillar_key)
            if warning not in ret['warnings']:
                ret['warnings'].append(warning)
    # The explicit panel goes last so it overrides every parent.
    sources.append(panel)

    merged = {}
    for source in sources:
        merged.update(source)
    return merged
|
||||
|
||||
|
||||
_FULL_LEVEL_SPAN = 12
|
||||
_DEFAULT_PANEL_SPAN = 2.5
|
||||
|
||||
|
||||
def _auto_adjust_panel_spans(dashboard):
|
||||
'''Adjust panel spans to take up the available width.
|
||||
|
||||
For each group of panels that would be laid out on the same level, scale up
|
||||
the unspecified panel spans to fill up the level.
|
||||
'''
|
||||
for row in dashboard.get('rows', []):
|
||||
levels = []
|
||||
current_level = []
|
||||
levels.append(current_level)
|
||||
for panel in row.get('panels', []):
|
||||
current_level_span = sum(panel.get('span', _DEFAULT_PANEL_SPAN)
|
||||
for panel in current_level)
|
||||
span = panel.get('span', _DEFAULT_PANEL_SPAN)
|
||||
if current_level_span + span > _FULL_LEVEL_SPAN:
|
||||
current_level = [panel]
|
||||
levels.append(current_level)
|
||||
else:
|
||||
current_level.append(panel)
|
||||
|
||||
for level in levels:
|
||||
specified_panels = [panel for panel in level if 'span' in panel]
|
||||
unspecified_panels = [panel for panel in level
|
||||
if 'span' not in panel]
|
||||
if not unspecified_panels:
|
||||
continue
|
||||
|
||||
specified_span = sum(panel['span'] for panel in specified_panels)
|
||||
available_span = _FULL_LEVEL_SPAN - specified_span
|
||||
auto_span = float(available_span) / len(unspecified_panels)
|
||||
for panel in unspecified_panels:
|
||||
panel['span'] = auto_span
|
||||
|
||||
|
||||
def _ensure_pinned_rows(dashboard):
    '''Pin rows to the top of the dashboard.

    Reads the ``grafana_pinned_rows`` pillar (a list of row titles, matched
    case-insensitively) and reorders ``dashboard['rows']`` in place so pinned
    rows come first, preserving relative order within each group.
    '''
    pinned_row_titles = __salt__['pillar.get'](_PINNED_ROWS_PILLAR)
    if not pinned_row_titles:
        return

    pinned_row_titles_lower = [title.lower() for title in pinned_row_titles]

    rows = dashboard.get('rows', [])
    if not rows:
        return

    # Partition instead of deleting while iterating: the original used
    # ``del rows[i]`` inside ``for i, row in enumerate(rows)``, which skips
    # the element after each pinned row, and then bound the reordered list to
    # a local variable only — the dashboard itself was never updated.
    pinned_rows = [
        row for row in rows
        if row.get('title', '').lower() in pinned_row_titles_lower
    ]
    other_rows = [
        row for row in rows
        if row.get('title', '').lower() not in pinned_row_titles_lower
    ]
    dashboard['rows'] = pinned_rows + other_rows
|
||||
|
||||
|
||||
def _ensure_panel_ids(dashboard):
|
||||
'''Assign panels auto-incrementing IDs.'''
|
||||
panel_id = 1
|
||||
for row in dashboard.get('rows', []):
|
||||
for panel in row.get('panels', []):
|
||||
panel['id'] = panel_id
|
||||
panel_id += 1
|
||||
|
||||
|
||||
def _ensure_annotations(dashboard):
|
||||
'''Explode annotation_tags into annotations.'''
|
||||
if 'annotation_tags' not in dashboard:
|
||||
return
|
||||
tags = dashboard['annotation_tags']
|
||||
annotations = {
|
||||
'enable': True,
|
||||
'list': [],
|
||||
}
|
||||
for tag in tags:
|
||||
annotations['list'].append({
|
||||
'datasource': "graphite",
|
||||
'enable': False,
|
||||
'iconColor': "#C0C6BE",
|
||||
'iconSize': 13,
|
||||
'lineColor': "rgba(255, 96, 96, 0.592157)",
|
||||
'name': tag,
|
||||
'showLine': True,
|
||||
'tags': tag,
|
||||
})
|
||||
del dashboard['annotation_tags']
|
||||
dashboard['annotations'] = annotations
|
||||
|
||||
|
||||
def _get(url, profile):
    '''Fetch one dashboard from the Grafana API.

    Returns the dashboard body, or None when the server reports it missing.
    Uses bearer-token headers when a token is configured, basic auth
    otherwise.
    '''
    request_url = "{0}/api/dashboards/{1}".format(profile.get('grafana_url'),
                                                  url)
    request_kwargs = {'timeout': profile.get('grafana_timeout', 3)}
    if profile.get('grafana_token', False):
        request_kwargs['headers'] = _get_headers(profile)
    else:
        request_kwargs['auth'] = _get_auth(profile)

    data = requests.get(request_url, **request_kwargs).json()
    if data.get('message') == 'Not found':
        return None
    if 'dashboard' not in data:
        return None
    return data['dashboard']
|
||||
|
||||
|
||||
def _delete(url, profile):
    '''Delete a specific dashboard.

    url
        Dashboard path fragment, e.g. ``db/<slug>``.
    profile
        Dict with ``grafana_url`` and auth/timeout settings.

    Returns the decoded JSON response from the API.
    '''
    request_url = "{0}/api/dashboards/{1}".format(profile.get('grafana_url'),
                                                  url)
    response = requests.delete(
        request_url,
        auth=_get_auth(profile),
        headers=_get_headers(profile),
        # Default to 3s like _get(); previously this passed None (no default),
        # i.e. the request could block forever.
        timeout=profile.get('grafana_timeout', 3),
    )
    data = response.json()
    return data
|
||||
|
||||
|
||||
def _update(dashboard, profile):
    '''Create or overwrite a specific dashboard via the API.

    dashboard
        Full dashboard dict to push.
    profile
        Dict with ``grafana_url`` and auth/timeout settings.

    Returns the decoded JSON response from the API.
    '''
    payload = {
        'dashboard': dashboard,
        'overwrite': True
    }
    response = requests.post(
        "{0}/api/dashboards/db".format(profile.get('grafana_url')),
        auth=_get_auth(profile),
        headers=_get_headers(profile),
        json=payload,
        # Previously no timeout was set here at all (unlike _get), so a hung
        # server would block the state run indefinitely.
        timeout=profile.get('grafana_timeout', 3),
    )
    return response.json()
|
||||
|
||||
|
||||
def _get_headers(profile):
|
||||
headers = {'Content-type': 'application/json'}
|
||||
|
||||
if profile.get('grafana_token', False):
|
||||
headers['Authorization'] = 'Bearer {0}'.format(profile['grafana_token'])
|
||||
|
||||
return headers
|
||||
|
||||
|
||||
def _get_auth(profile):
|
||||
if profile.get('grafana_token', False):
|
||||
return None
|
||||
|
||||
return requests.auth.HTTPBasicAuth(
|
||||
profile['grafana_user'],
|
||||
profile['grafana_password']
|
||||
)
|
||||
|
||||
|
||||
def _dashboard_diff(_new_dashboard, _old_dashboard):
    '''Return a dictionary of changes between dashboards.

    The result has three sections — 'dashboard', 'rows', 'panels' — each a
    _stripped dict of 'changed'/'added'/'removed' key lists. Rows are matched
    by title, panels by id. Inputs are deep-copied and not mutated.
    '''
    diff = {}

    # Dashboard diff
    new_dashboard = copy.deepcopy(_new_dashboard)
    old_dashboard = copy.deepcopy(_old_dashboard)
    dashboard_diff = DictDiffer(new_dashboard, old_dashboard)
    diff['dashboard'] = _stripped({
        'changed': list(dashboard_diff.changed()) or None,
        'added': list(dashboard_diff.added()) or None,
        'removed': list(dashboard_diff.removed()) or None,
    })

    # Row diff — rows without a 'title' are silently skipped.
    new_rows = new_dashboard.get('rows', [])
    old_rows = old_dashboard.get('rows', [])
    new_rows_by_title = {}
    old_rows_by_title = {}
    for row in new_rows:
        if 'title' in row:
            new_rows_by_title[row['title']] = row
    for row in old_rows:
        if 'title' in row:
            old_rows_by_title[row['title']] = row
    rows_diff = DictDiffer(new_rows_by_title, old_rows_by_title)
    diff['rows'] = _stripped({
        'added': list(rows_diff.added()) or None,
        'removed': list(rows_diff.removed()) or None,
    })
    for changed_row_title in rows_diff.changed():
        old_row = old_rows_by_title[changed_row_title]
        new_row = new_rows_by_title[changed_row_title]
        row_diff = DictDiffer(new_row, old_row)
        diff['rows'].setdefault('changed', {})
        diff['rows']['changed'][changed_row_title] = _stripped({
            'changed': list(row_diff.changed()) or None,
            'added': list(row_diff.added()) or None,
            'removed': list(row_diff.removed()) or None,
        })

    # Panel diff — panels without an 'id' are silently skipped.
    old_panels_by_id = {}
    new_panels_by_id = {}
    for row in old_dashboard.get('rows', []):
        for panel in row.get('panels', []):
            if 'id' in panel:
                old_panels_by_id[panel['id']] = panel
    for row in new_dashboard.get('rows', []):
        for panel in row.get('panels', []):
            if 'id' in panel:
                new_panels_by_id[panel['id']] = panel
    panels_diff = DictDiffer(new_panels_by_id, old_panels_by_id)
    diff['panels'] = _stripped({
        'added': list(panels_diff.added()) or None,
        'removed': list(panels_diff.removed()) or None,
    })
    for changed_panel_id in panels_diff.changed():
        old_panel = old_panels_by_id[changed_panel_id]
        new_panel = new_panels_by_id[changed_panel_id]
        # NOTE(review): this rebinds 'panels_diff', shadowing the diff the
        # loop iterates over (the row loop uses a distinct 'row_diff' name).
        # Harmless because the for-loop iterable was evaluated once, but
        # worth renaming for clarity.
        panels_diff = DictDiffer(new_panel, old_panel)
        diff['panels'].setdefault('changed', {})
        diff['panels']['changed'][changed_panel_id] = _stripped({
            'changed': list(panels_diff.changed()) or None,
            'added': list(panels_diff.added()) or None,
            'removed': list(panels_diff.removed()) or None,
        })

    return diff
|
||||
|
||||
|
||||
def _stripped(d):
|
||||
'''Strip falsey entries.'''
|
||||
ret = {}
|
||||
for k, v in six.iteritems(d):
|
||||
if v:
|
||||
ret[k] = v
|
||||
return ret
|
|
@ -0,0 +1,260 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Manage Grafana v3.0 data sources
|
||||
|
||||
.. versionadded:: 2016.3.0
|
||||
|
||||
Token auth setup
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
grafana_version: 3
|
||||
grafana:
|
||||
grafana_timeout: 5
|
||||
grafana_token: qwertyuiop
|
||||
grafana_url: 'https://url.com'
|
||||
|
||||
Basic auth setup
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
grafana_version: 3
|
||||
grafana:
|
||||
grafana_timeout: 5
|
||||
grafana_user: grafana
|
||||
grafana_password: qwertyuiop
|
||||
grafana_url: 'https://url.com'
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
Ensure influxdb data source is present:
|
||||
grafana_datasource.present:
|
||||
- name: influxdb
|
||||
- type: influxdb
|
||||
- url: http://localhost:8086
|
||||
- access: proxy
|
||||
- basic_auth: true
|
||||
- basic_auth_user: myuser
|
||||
- basic_auth_password: mypass
|
||||
- is_default: true
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
import json
|
||||
import requests
|
||||
|
||||
from salt.ext.six import string_types
|
||||
|
||||
|
||||
def __virtual__():
    '''Only load if grafana v3.0 is configured.'''
    # The minion/pillar config key 'grafana_version' selects which grafana
    # state implementation Salt loads.
    return __salt__['config.get']('grafana_version', 1) == 3
|
||||
|
||||
|
||||
def present(name,
            type,
            url,
            access='proxy',
            user='',
            password='',
            database='',
            basic_auth=False,
            basic_auth_user='',
            basic_auth_password='',
            is_default=False,
            profile='grafana'):
    '''
    Ensure that a data source is present.

    name
        Name of the data source.

    type
        Which type of data source it is ('graphite', 'influxdb' etc.).

    access
        Use proxy or direct. Default: proxy

    url
        The URL to the data source API.

    user
        Optional - user to authenticate with the data source

    password
        Optional - password to authenticate with the data source

    database
        Optional - database to use with the data source

    basic_auth
        Optional - set to True to use HTTP basic auth to authenticate with the
        data source.

    basic_auth_user
        Optional - HTTP basic auth username.

    basic_auth_password
        Optional - HTTP basic auth password.

    is_default
        Optional - Set data source as default. Default: False
    '''
    # A string profile is a config key pointing at the real profile dict.
    if isinstance(profile, string_types):
        profile = __salt__['config.option'](profile)

    ret = {'name': name, 'result': None, 'comment': None, 'changes': None}
    datasource = _get_datasource(profile, name)
    data = _get_json_data(name, type, url,
                          access=access,
                          user=user,
                          password=password,
                          database=database,
                          basic_auth=basic_auth,
                          basic_auth_user=basic_auth_user,
                          basic_auth_password=basic_auth_password,
                          is_default=is_default)

    if datasource:
        # NOTE(review): the PUT is issued unconditionally, even when the diff
        # below turns out empty, and __opts__['test'] is not honored. The
        # HTTP status of the response is also not checked. Confirm intended.
        requests.put(
            _get_url(profile, datasource['id']),
            data=json.dumps(data),
            auth=_get_auth(profile),
            headers=_get_headers(profile),
            timeout=profile.get('grafana_timeout', 3),
        )
        ret['result'] = True
        ret['changes'] = _diff(datasource, data)
        if ret['changes']['new'] or ret['changes']['old']:
            ret['comment'] = 'Data source {0} updated'.format(name)
        else:
            # No actual difference: report up-to-date with no changes.
            ret['changes'] = None
            ret['comment'] = 'Data source {0} already up-to-date'.format(name)
    else:
        requests.post(
            '{0}/api/datasources'.format(profile['grafana_url']),
            data=json.dumps(data),
            auth=_get_auth(profile),
            headers=_get_headers(profile),
            timeout=profile.get('grafana_timeout', 3),
        )
        ret['result'] = True
        ret['comment'] = 'New data source {0} added'.format(name)
        ret['changes'] = data

    return ret
|
||||
|
||||
|
||||
def absent(name, profile='grafana'):
    '''
    Ensure that a data source is absent.

    name
        Name of the data source to remove.

    profile
        A pillar key or dict that contains grafana information
    '''
    if isinstance(profile, string_types):
        profile = __salt__['config.option'](profile)

    # Include 'name' so state output can attribute the result — it was
    # missing here while present() sets it.
    ret = {'name': name, 'result': None, 'comment': None, 'changes': None}
    datasource = _get_datasource(profile, name)

    if not datasource:
        ret['result'] = True
        ret['comment'] = 'Data source {0} already absent'.format(name)
        return ret

    requests.delete(
        _get_url(profile, datasource['id']),
        auth=_get_auth(profile),
        headers=_get_headers(profile),
        timeout=profile.get('grafana_timeout', 3),
    )

    ret['result'] = True
    ret['comment'] = 'Data source {0} was deleted'.format(name)

    return ret
|
||||
|
||||
|
||||
def _get_url(profile, datasource_id):
|
||||
return '{0}/api/datasources/{1}'.format(
|
||||
profile['grafana_url'],
|
||||
datasource_id
|
||||
)
|
||||
|
||||
|
||||
def _get_datasource(profile, name):
    '''Return the datasource dict named *name*, or None when not found.

    Lists all datasources from the API and matches by exact name.
    '''
    response = requests.get(
        '{0}/api/datasources'.format(profile['grafana_url']),
        auth=_get_auth(profile),
        headers=_get_headers(profile),
        timeout=profile.get('grafana_timeout', 3),
    )
    for candidate in response.json():
        if candidate['name'] == name:
            return candidate
    return None
|
||||
|
||||
|
||||
def _get_headers(profile):
|
||||
|
||||
headers = {'Content-type': 'application/json'}
|
||||
|
||||
if profile.get('grafana_token', False):
|
||||
headers['Authorization'] = 'Bearer {0}'.format(profile['grafana_token'])
|
||||
|
||||
return headers
|
||||
|
||||
|
||||
def _get_auth(profile):
|
||||
if profile.get('grafana_token', False):
|
||||
return None
|
||||
|
||||
return requests.auth.HTTPBasicAuth(
|
||||
profile['grafana_user'],
|
||||
profile['grafana_password']
|
||||
)
|
||||
|
||||
|
||||
def _get_json_data(name,
|
||||
type,
|
||||
url,
|
||||
access='proxy',
|
||||
user='',
|
||||
password='',
|
||||
database='',
|
||||
basic_auth=False,
|
||||
basic_auth_user='',
|
||||
basic_auth_password='',
|
||||
is_default=False,
|
||||
type_logo_url='public/app/plugins/datasource/influxdb/img/influxdb_logo.svg',
|
||||
with_credentials=False):
|
||||
return {
|
||||
'name': name,
|
||||
'type': type,
|
||||
'url': url,
|
||||
'access': access,
|
||||
'user': user,
|
||||
'password': password,
|
||||
'database': database,
|
||||
'basicAuth': basic_auth,
|
||||
'basicAuthUser': basic_auth_user,
|
||||
'basicAuthPassword': basic_auth_password,
|
||||
'isDefault': is_default,
|
||||
'typeLogoUrl': type_logo_url,
|
||||
'withCredentials': with_credentials,
|
||||
}
|
||||
|
||||
|
||||
def _diff(old, new):
|
||||
old_keys = old.keys()
|
||||
old = old.copy()
|
||||
new = new.copy()
|
||||
for key in old_keys:
|
||||
if key == 'id' or key == 'orgId':
|
||||
del old[key]
|
||||
elif old[key] == new[key]:
|
||||
del old[key]
|
||||
del new[key]
|
||||
return {'old': old, 'new': new}
|
|
@ -1,5 +1,5 @@
|
|||
{%- from "grafana/map.jinja" import client with context %}
|
||||
{%- if client.enabled %}
|
||||
{%- if client.get('enabled', False) %}
|
||||
|
||||
/etc/salt/minion.d/_grafana.conf:
|
||||
file.managed:
|
||||
|
@ -11,7 +11,7 @@
|
|||
{%- for datasource_name, datasource in client.datasource.iteritems() %}
|
||||
|
||||
grafana_client_datasource_{{ datasource_name }}:
|
||||
grafana_datasource.present:
|
||||
grafana3_datasource.present:
|
||||
- name: {{ datasource_name }}
|
||||
- type: {{ datasource.type }}
|
||||
- url: http://{{ datasource.host }}:{{ datasource.get('port', 80) }}
|
||||
|
@ -19,9 +19,14 @@ grafana_client_datasource_{{ datasource_name }}:
|
|||
- access: proxy
|
||||
{%- endif %}
|
||||
{%- if datasource.user is defined %}
|
||||
- basic_auth: true
|
||||
- basic_auth_user: {{ datasource.user }}
|
||||
- basic_auth_password: {{ datasource.password }}
|
||||
- user: {{ datasource.user }}
|
||||
- password: {{ datasource.password }}
|
||||
{%- endif %}
|
||||
{%- if datasource.get('is_default', False) %}
|
||||
- is_default: {{ datasource.is_default|lower }}
|
||||
{%- endif %}
|
||||
{%- if datasource.database is defined %}
|
||||
- database: {{ datasource.database }}
|
||||
{%- endif %}
|
||||
|
||||
{%- endfor %}
|
||||
|
@ -31,42 +36,56 @@ grafana_client_datasource_{{ datasource_name }}:
|
|||
|
||||
{%- if client.remote_data.engine == 'salt_mine' %}
|
||||
{%- for node_name, node_grains in salt['mine.get']('*', 'grains.items').iteritems() %}
|
||||
{%- if node_grains.grafana is defined %}
|
||||
{%- set raw_dict = salt['grains.filter_by']({'default': raw_dict}, merge=node_grains.grafana.get('dashboard', {})) %}
|
||||
{%- endif %}
|
||||
{%- if node_grains.grafana is defined %}
|
||||
{%- set raw_dict = salt['grains.filter_by']({'default': raw_dict}, merge=node_grains.grafana.get('dashboard', {})) %}
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
{%- endif %}
|
||||
|
||||
{%- if client.dashboard is defined %}
|
||||
{%- set raw_dict = salt['grains.filter_by']({'default': raw_dict}, merge=client.dashboard) %}
|
||||
{%- set raw_dict = salt['grains.filter_by']({'default': raw_dict}, merge=client.dashboard) %}
|
||||
{%- endif %}
|
||||
|
||||
{%- for dashboard_name, dashboard in raw_dict.iteritems() %}
|
||||
{%- set rows = [] %}
|
||||
{%- for row_name, row in dashboard.get('row', {}).iteritems() %}
|
||||
{%- set panels = [] %}
|
||||
{%- for panel_name, panel in row.get('panel', {}).iteritems() %}
|
||||
{%- set targets = [] %}
|
||||
{%- for target_name, target in panel.get('target', {}).iteritems() %}
|
||||
{%- do targets.extend([target]) %}
|
||||
{%- endfor %}
|
||||
{%- do panel.update({'targets': targets}) %}
|
||||
{%- do panels.extend([panel]) %}
|
||||
{%- endfor %}
|
||||
{%- do row.update({'panels': panels}) %}
|
||||
{%- do rows.extend([row]) %}
|
||||
{%- endfor %}
|
||||
{%- do dashboard.update({'rows': rows}) %}
|
||||
{%- do final_dict.update({dashboard_name: dashboard}) %}
|
||||
{%- if dashboard.get('format', 'yaml')|lower == 'yaml' %}
|
||||
# Dashboards in JSON format are considered as blob
|
||||
{%- set rows = [] %}
|
||||
{%- for row_name, row in dashboard.get('row', {}).iteritems() %}
|
||||
{%- set panels = [] %}
|
||||
{%- for panel_name, panel in row.get('panel', {}).iteritems() %}
|
||||
{%- set targets = [] %}
|
||||
{%- for target_name, target in panel.get('target', {}).iteritems() %}
|
||||
{%- do targets.extend([target]) %}
|
||||
{%- endfor %}
|
||||
{%- do panel.update({'targets': targets}) %}
|
||||
{%- do panels.extend([panel]) %}
|
||||
{%- endfor %}
|
||||
{%- do row.update({'panels': panels}) %}
|
||||
{%- do rows.extend([row]) %}
|
||||
{%- endfor %}
|
||||
{%- do dashboard.update({'rows': rows}) %}
|
||||
{%- endif %}
|
||||
|
||||
{%- do final_dict.update({dashboard_name: dashboard}) %}
|
||||
{%- endfor %}
|
||||
|
||||
{%- for dashboard_name, dashboard in final_dict.iteritems() %}
|
||||
|
||||
{%- if dashboard.get('enabled', True) %}
|
||||
grafana_client_dashboard_{{ dashboard_name }}:
|
||||
grafana_dashboard.present:
|
||||
grafana3_dashboard.present:
|
||||
- name: {{ dashboard_name }}
|
||||
{%- if dashboard.get('format', 'yaml')|lower == 'json' %}
|
||||
{%- import_json dashboard.template as dash %}
|
||||
- dashboard: {{ dash|json }}
|
||||
- dashboard_format: json
|
||||
{%- else %}
|
||||
- dashboard: {{ dashboard }}
|
||||
|
||||
{%- endif %}
|
||||
{%- else %}
|
||||
grafana_client_dashboard_{{ dashboard_name }}:
|
||||
grafana3_dashboard.absent:
|
||||
- name: {{ dashboard_name }}
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
|
||||
{%- endif %}
|
||||
|
|
|
@ -13,6 +13,7 @@ grafana_grains_dir:
|
|||
{# Loading the other service support metadata for localhost #}
|
||||
|
||||
{%- for service_name, service in pillar.iteritems() %}
|
||||
{%- if service.get('_support', {}).get('grafana', {}).get('enabled', False) %}
|
||||
|
||||
{%- macro load_grains_file(grains_fragment_file) %}{% include grains_fragment_file ignore missing %}{% endmacro %}
|
||||
|
||||
|
@ -20,6 +21,7 @@ grafana_grains_dir:
|
|||
{%- set grains_yaml = load_grains_file(grains_fragment_file)|load_yaml %}
|
||||
{%- set service_grains = salt['grains.filter_by']({'default': service_grains}, merge=grains_yaml) %}
|
||||
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
|
||||
grafana_grain:
|
||||
|
|
|
@ -1,8 +1,13 @@
|
|||
{%- from "grafana/map.jinja" import client with context %}
|
||||
|
||||
grafana_version: 2
|
||||
grafana_version: {{ client.server.get('version', 3) }}
|
||||
|
||||
grafana:
|
||||
grafana_timeout: 3
|
||||
{%- if client.server.token is defined %}
|
||||
grafana_token: {{ client.server.token }}
|
||||
grafana_url: 'http://{{ client.server.host }}:{{ client.server.get('port', 80) }}'
|
||||
{%- else %}
|
||||
grafana_user: {{ client.server.user }}
|
||||
grafana_password: {{ client.server.password }}
|
||||
{%- endif %}
|
||||
grafana_url: '{{ client.server.get('protocol', 'http') }}://{{ client.server.host }}:{{ client.server.get('port', 80) }}'
|
||||
|
|
|
@ -219,7 +219,7 @@
|
|||
"groupByTags": [],
|
||||
"measurement": "apache_requests",
|
||||
"policy": "default",
|
||||
"query": "SELECT mean(\"value\") FROM \"apache_requests\" WHERE \"hostname\" = '$server' AND $timeFilter GROUP BY time($interval) fill(0)",
|
||||
"query": "SELECT mean(\"value\") FROM \"apache_requests\" WHERE \"hostname\" =~ /^$server$/ AND $timeFilter GROUP BY time($interval) fill(0)",
|
||||
"rawQuery": false,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -333,7 +333,7 @@
|
|||
"groupByTags": [],
|
||||
"measurement": "apache_bytes",
|
||||
"policy": "default",
|
||||
"query": "SELECT mean(\"value\") FROM \"apache_bytes\" WHERE \"hostname\" = '$server' AND $timeFilter GROUP BY time($interval) fill(0)",
|
||||
"query": "SELECT mean(\"value\") FROM \"apache_bytes\" WHERE \"hostname\" =~ /^$server$/ AND $timeFilter GROUP BY time($interval) fill(0)",
|
||||
"rawQuery": false,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -447,7 +447,7 @@
|
|||
"groupByTags": [],
|
||||
"measurement": "apache_connections",
|
||||
"policy": "default",
|
||||
"query": "SELECT mean(\"value\") FROM \"apache_connections\" WHERE \"hostname\" = '$server' AND $timeFilter GROUP BY time($interval) fill(0)",
|
||||
"query": "SELECT mean(\"value\") FROM \"apache_connections\" WHERE \"hostname\" =~ /^$server$/ AND $timeFilter GROUP BY time($interval) fill(0)",
|
||||
"rawQuery": false,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -561,7 +561,7 @@
|
|||
"groupByTags": [],
|
||||
"measurement": "apache_connections",
|
||||
"policy": "default",
|
||||
"query": "SELECT last(\"value\") FROM \"apache_connections\" WHERE \"hostname\" = '$server' AND $timeFilter GROUP BY time($interval) fill(null)",
|
||||
"query": "SELECT last(\"value\") FROM \"apache_connections\" WHERE \"hostname\" =~ /^$server$/ AND $timeFilter GROUP BY time($interval) fill(null)",
|
||||
"rawQuery": false,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -662,7 +662,7 @@
|
|||
"groupByTags": [],
|
||||
"measurement": "/apache_workers/",
|
||||
"policy": "default",
|
||||
"query": "SELECT mean(\"value\") FROM /apache_workers/ WHERE \"hostname\" = '$server' AND $timeFilter GROUP BY time($interval) fill(0)",
|
||||
"query": "SELECT mean(\"value\") FROM /apache_workers/ WHERE \"hostname\" =~ /^$server$/ AND $timeFilter GROUP BY time($interval) fill(0)",
|
||||
"rawQuery": false,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -777,7 +777,7 @@
|
|||
"groupByTags": [],
|
||||
"measurement": "apache_idle_workers",
|
||||
"policy": "default",
|
||||
"query": "SELECT last(\"value\") FROM \"apache_idle_workers\" WHERE \"hostname\" = '$server' AND $timeFilter GROUP BY time($interval) fill(null)",
|
||||
"query": "SELECT last(\"value\") FROM \"apache_idle_workers\" WHERE \"hostname\" =~ /^$server$/ AND $timeFilter GROUP BY time($interval) fill(null)",
|
||||
"rawQuery": false,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -849,7 +849,7 @@
|
|||
"includeAll": false,
|
||||
"name": "server",
|
||||
"options": [],
|
||||
"query": "show tag values from apache_requests with key = hostname where environment_label = '$environment'",
|
||||
"query": "show tag values from apache_requests with key = hostname where environment_label =~ /^$environment$/",
|
||||
"refresh": 1,
|
||||
"refresh_on_load": true,
|
||||
"regex": "",
|
||||
|
|
|
@ -200,7 +200,7 @@
|
|||
"groupByTags": [],
|
||||
"measurement": "ceph_quorum_count",
|
||||
"policy": "default",
|
||||
"query": "SELECT last(\"value\") FROM \"ceph_quorum_count\" WHERE \"environment_label\" = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
|
||||
"query": "SELECT last(\"value\") FROM \"ceph_quorum_count\" WHERE \"environment_label\" =~ /^$environment$/ AND $timeFilter GROUP BY time($interval) fill(null)",
|
||||
"rawQuery": false,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -299,7 +299,7 @@
|
|||
"groupByTags": [],
|
||||
"measurement": "ceph_monitor_count",
|
||||
"policy": "default",
|
||||
"query": "SELECT last(\"value\") FROM \"ceph_monitor_count\" WHERE \"environment_label\" = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
|
||||
"query": "SELECT last(\"value\") FROM \"ceph_monitor_count\" WHERE \"environment_label\" =~ /^$environment$/ AND $timeFilter GROUP BY time($interval) fill(null)",
|
||||
"rawQuery": false,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -396,7 +396,7 @@
|
|||
"groupByTags": [],
|
||||
"measurement": "ceph_objects_count",
|
||||
"policy": "default",
|
||||
"query": "SELECT last(\"value\") FROM \"ceph_objects_count\" WHERE \"environment_label\" = '$environment' AND \"cluster\" =~ /$cluster/ AND $timeFilter GROUP BY time($interval) fill(null)",
|
||||
"query": "SELECT last(\"value\") FROM \"ceph_objects_count\" WHERE \"environment_label\" =~ /^$environment$/ AND \"cluster\" =~ /$cluster/ AND $timeFilter GROUP BY time($interval) fill(null)",
|
||||
"rawQuery": false,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -498,7 +498,7 @@
|
|||
"groupByTags": [],
|
||||
"measurement": "ceph_pg_count",
|
||||
"policy": "default",
|
||||
"query": "SELECT last(\"value\") FROM \"ceph_pg_count\" WHERE \"environment_label\" = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
|
||||
"query": "SELECT last(\"value\") FROM \"ceph_pg_count\" WHERE \"environment_label\" =~ /^$environment$/ AND $timeFilter GROUP BY time($interval) fill(null)",
|
||||
"rawQuery": false,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -596,7 +596,7 @@
|
|||
"groupByTags": [],
|
||||
"measurement": "ceph_pool_total_percent_free",
|
||||
"policy": "default",
|
||||
"query": "SELECT last(\"value\") FROM \"ceph_pool_total_percent_free\" WHERE \"environment_label\" = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
|
||||
"query": "SELECT last(\"value\") FROM \"ceph_pool_total_percent_free\" WHERE \"environment_label\" =~ /^$environment$/ AND $timeFilter GROUP BY time($interval) fill(null)",
|
||||
"rawQuery": false,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -696,7 +696,7 @@
|
|||
"groupByTags": [],
|
||||
"measurement": "ceph_pool_total_bytes_used",
|
||||
"policy": "default",
|
||||
"query": "SELECT mean(\"value\") FROM \"ceph_pool_total_bytes_used\" WHERE \"environment_label\" = '$environment' AND $timeFilter GROUP BY time($interval) fill(0)",
|
||||
"query": "SELECT mean(\"value\") FROM \"ceph_pool_total_bytes_used\" WHERE \"environment_label\" =~ /^$environment$/ AND $timeFilter GROUP BY time($interval) fill(0)",
|
||||
"rawQuery": false,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -744,7 +744,7 @@
|
|||
"groupByTags": [],
|
||||
"measurement": "ceph_pool_total_bytes_free",
|
||||
"policy": "default",
|
||||
"query": "SELECT mean(\"value\") FROM \"ceph_pool_total_bytes_free\" WHERE \"environment_label\" = '$environment' AND $timeFilter GROUP BY time($interval) fill(0)",
|
||||
"query": "SELECT mean(\"value\") FROM \"ceph_pool_total_bytes_free\" WHERE \"environment_label\" =~ /^$environment$/ AND $timeFilter GROUP BY time($interval) fill(0)",
|
||||
"rawQuery": false,
|
||||
"refId": "B",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -2992,7 +2992,7 @@
|
|||
"includeAll": false,
|
||||
"name": "mon",
|
||||
"options": [],
|
||||
"query": "show tag values from ceph_health with key = hostname where environment_label = '$environment'",
|
||||
"query": "show tag values from ceph_health with key = hostname where environment_label =~ /^$environment$/",
|
||||
"refresh": 1,
|
||||
"refresh_on_load": true,
|
||||
"regex": "",
|
||||
|
@ -3006,7 +3006,7 @@
|
|||
"includeAll": false,
|
||||
"name": "cluster",
|
||||
"options": [],
|
||||
"query": "show tag values from ceph_health with key = cluster where environment_label = '$environment'",
|
||||
"query": "show tag values from ceph_health with key = cluster where environment_label =~ /^$environment$/",
|
||||
"refresh": 1,
|
||||
"refresh_on_load": true,
|
||||
"regex": "",
|
||||
|
@ -3020,7 +3020,7 @@
|
|||
"includeAll": false,
|
||||
"name": "pool",
|
||||
"options": [],
|
||||
"query": "show tag values from ceph_pool_size with key = pool where environment_label = '$environment'",
|
||||
"query": "show tag values from ceph_pool_size with key = pool where environment_label =~ /^$environment$/",
|
||||
"refresh": 1,
|
||||
"refresh_on_load": true,
|
||||
"regex": "/^[^.]/",
|
||||
|
@ -3034,7 +3034,7 @@
|
|||
"includeAll": false,
|
||||
"name": "osd",
|
||||
"options": [],
|
||||
"query": "show tag values from ceph_osd_space_total with key = osd where environment_label = '$environment'",
|
||||
"query": "show tag values from ceph_osd_space_total with key = osd where environment_label =~ /^$environment$/",
|
||||
"refresh": 1,
|
||||
"refresh_on_load": true,
|
||||
"regex": "",
|
||||
|
|
|
@ -191,7 +191,7 @@
|
|||
],
|
||||
"measurement": "ceph_perf_osd_op",
|
||||
"policy": "default",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" = '$environment'GROUP BY time($interval) fill(0)",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" =~ /^$environment$/GROUP BY time($interval) fill(0)",
|
||||
"rawQuery": true,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -309,7 +309,7 @@
|
|||
"interval": "",
|
||||
"measurement": "ceph_perf_osd_op_in_bytes",
|
||||
"policy": "default",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_in_bytes\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" = '$environment'GROUP BY time($interval) fill(0)",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_in_bytes\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" =~ /^$environment$/GROUP BY time($interval) fill(0)",
|
||||
"rawQuery": true,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -358,7 +358,7 @@
|
|||
"interval": "",
|
||||
"measurement": "ceph_perf_osd_op_out_bytes",
|
||||
"policy": "default",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_out_bytes\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" = '$environment'GROUP BY time($interval) fill(0)",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_out_bytes\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" =~ /^$environment$/GROUP BY time($interval) fill(0)",
|
||||
"rawQuery": true,
|
||||
"refId": "B",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -612,7 +612,7 @@
|
|||
"interval": "",
|
||||
"measurement": "ceph_perf_osd_op_r_out_bytes",
|
||||
"policy": "default",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_r_out_bytes\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" = '$environment'GROUP BY time($interval) fill(0)",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_r_out_bytes\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" =~ /^$environment$/GROUP BY time($interval) fill(0)",
|
||||
"rawQuery": true,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -661,7 +661,7 @@
|
|||
"interval": "",
|
||||
"measurement": "ceph_perf_osd_op_r",
|
||||
"policy": "default",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_r\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" = '$environment'GROUP BY time($interval) fill(0)",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_r\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" =~ /^$environment$/GROUP BY time($interval) fill(0)",
|
||||
"rawQuery": true,
|
||||
"refId": "B",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -961,7 +961,7 @@
|
|||
],
|
||||
"measurement": "ceph_perf_osd_op_w_in_bytes",
|
||||
"policy": "default",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_w_in_bytes\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" = '$environment'GROUP BY time($interval) fill(0)",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_w_in_bytes\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" =~ /^$environment$/GROUP BY time($interval) fill(0)",
|
||||
"rawQuery": true,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -1009,7 +1009,7 @@
|
|||
],
|
||||
"measurement": "ceph_perf_osd_op_w",
|
||||
"policy": "default",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_w\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" = '$environment'GROUP BY time($interval) fill(0)",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_w\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" =~ /^$environment$/GROUP BY time($interval) fill(0)",
|
||||
"rawQuery": true,
|
||||
"refId": "B",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -1057,7 +1057,7 @@
|
|||
],
|
||||
"measurement": "ceph_perf_osd_op_w_rlat",
|
||||
"policy": "default",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_w_rlat\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" = '$environment'GROUP BY time($interval) fill(0)",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_w_rlat\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" =~ /^$environment$/GROUP BY time($interval) fill(0)",
|
||||
"rawQuery": true,
|
||||
"refId": "C",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -1366,7 +1366,7 @@
|
|||
],
|
||||
"measurement": "ceph_perf_osd_op_rw_in_bytes",
|
||||
"policy": "default",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_rw_in_bytes\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" = '$environment'GROUP BY time($interval) fill(0)",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_rw_in_bytes\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" =~ /^$environment$/GROUP BY time($interval) fill(0)",
|
||||
"rawQuery": true,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -1414,7 +1414,7 @@
|
|||
],
|
||||
"measurement": "ceph_perf_osd_op_rw",
|
||||
"policy": "default",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_rw\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" = '$environment'GROUP BY time($interval) fill(0)",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_rw\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" =~ /^$environment$/GROUP BY time($interval) fill(0)",
|
||||
"rawQuery": true,
|
||||
"refId": "B",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -1462,7 +1462,7 @@
|
|||
],
|
||||
"measurement": "ceph_perf_osd_op_rw_rlat",
|
||||
"policy": "default",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_rw_rlat\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" = '$environment'GROUP BY time($interval) fill(0)",
|
||||
"query": "SELECT derivative(first(value),1s) FROM \"ceph_perf_osd_op_rw_rlat\" WHERE \"cluster\" =~ /$cluster/ AND \"osd\" =~ /$osd/ AND $timeFilter AND \"environment_label\" =~ /^$environment$/GROUP BY time($interval) fill(0)",
|
||||
"rawQuery": true,
|
||||
"refId": "C",
|
||||
"resultFormat": "time_series",
|
||||
|
@ -1869,7 +1869,7 @@
|
|||
"includeAll": false,
|
||||
"name": "cluster",
|
||||
"options": [],
|
||||
"query": "show tag values from ceph_health with key = cluster where environment_label = '$environment'",
|
||||
"query": "show tag values from ceph_health with key = cluster where environment_label =~ /^$environment$/",
|
||||
"refresh": 1,
|
||||
"refresh_on_load": true,
|
||||
"regex": "",
|
||||
|
@ -1883,7 +1883,7 @@
|
|||
"includeAll": false,
|
||||
"name": "osd",
|
||||
"options": [],
|
||||
"query": "show tag values from ceph_perf_osd_op_latency with key = osd where environment_label = '$environment'",
|
||||
"query": "show tag values from ceph_perf_osd_op_latency with key = osd where environment_label =~ /^$environment$/",
|
||||
"refresh": 1,
|
||||
"refresh_on_load": true,
|
||||
"regex": "",
|
||||
|
|
|
@ -85,7 +85,7 @@
|
|||
"interval": "",
|
||||
"measurement": "cluster_status",
|
||||
"policy": |