...
 
---
version: 2
jobs:
  test:
    docker:
      - image: circleci/golang:1.10
    working_directory: /go/src/github.com/prometheus/mysqld_exporter
    steps:
      - checkout
      - run: make promu
      - run: make
      - run: rm -v mysqld_exporter
  codespell:
    docker:
      - image: circleci/python
    steps:
      - checkout
      - run: sudo pip install codespell
      - run: codespell --skip=".git,./vendor,ttar"
  build:
    machine: true
    working_directory: /home/circleci/.go_workspace/src/github.com/prometheus/mysqld_exporter
    steps:
      - checkout
      - run: make promu
      - run: promu crossbuild -v
      - persist_to_workspace:
          root: .
          paths:
            - .build
  docker_hub_master:
    docker:
      - image: circleci/golang:1.10
    working_directory: /go/src/github.com/prometheus/mysqld_exporter
    environment:
      DOCKER_IMAGE_NAME: prom/mysqld-exporter
      QUAY_IMAGE_NAME: quay.io/prometheus/mysqld-exporter
    steps:
      - checkout
      - setup_remote_docker
      - attach_workspace:
          at: .
      - run: ln -s .build/linux-amd64/mysqld_exporter mysqld_exporter
      - run: make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME
      - run: make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME
      - run: docker images
      - run: docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD
      - run: docker login -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io
      - run: docker push $DOCKER_IMAGE_NAME
      - run: docker push $QUAY_IMAGE_NAME
  docker_hub_release_tags:
    docker:
      - image: circleci/golang:1.10
    working_directory: /go/src/github.com/prometheus/mysqld_exporter
    environment:
      DOCKER_IMAGE_NAME: prom/mysqld-exporter
      QUAY_IMAGE_NAME: quay.io/prometheus/mysqld-exporter
    steps:
      - checkout
      - setup_remote_docker
      - run: mkdir -v -p ${HOME}/bin
      - run: curl -L 'https://github.com/aktau/github-release/releases/download/v0.7.2/linux-amd64-github-release.tar.bz2' | tar xvjf - --strip-components 3 -C ${HOME}/bin
      - run: echo 'export PATH=${HOME}/bin:${PATH}' >> ${BASH_ENV}
      - attach_workspace:
          at: .
      - run: make promu
      - run: promu crossbuild tarballs
      - run: promu checksum .tarballs
      - run: promu release .tarballs
      - store_artifacts:
          path: .tarballs
          destination: releases
      - run: ln -s .build/linux-amd64/mysqld_exporter mysqld_exporter
      - run: make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG
      - run: make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG
      - run: docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD
      - run: docker login -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io
      - run: |
          if [[ "$CIRCLE_TAG" =~ ^v[0-9]+(\.[0-9]+){2}$ ]]; then
            docker tag "$DOCKER_IMAGE_NAME:$CIRCLE_TAG" "$DOCKER_IMAGE_NAME:latest"
            docker tag "$QUAY_IMAGE_NAME:$CIRCLE_TAG" "$QUAY_IMAGE_NAME:latest"
          fi
      - run: docker push $DOCKER_IMAGE_NAME
      - run: docker push $QUAY_IMAGE_NAME
workflows:
  version: 2
  mysqld_exporter:
    jobs:
      - test:
          filters:
            tags:
              only: /.*/
      - build:
          filters:
            tags:
              only: /.*/
      - codespell:
          filters:
            tags:
              only: /.*/
      - docker_hub_master:
          requires:
            - test
            - build
          filters:
            branches:
              only: master
      - docker_hub_release_tags:
          requires:
            - test
            - build
          filters:
            tags:
              only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
            branches:
              ignore: /.*/
@@ -3,9 +3,8 @@ sudo: required
language: go
go:
- 1.8.x
- 1.9.x
- master
- 1.10.x
env:
- MYSQL_IMAGE=mysql/mysql-server:5.5
......
## v0.10.0 / 2018-06-29
### BREAKING CHANGES:
* Flags now use the Kingpin library, and require double-dashes. #222
  This also changes the behavior of boolean flags, as sketched below.
  * Enable: `--collector.global_status`
  * Disable: `--no-collector.global_status`
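A minimal sketch of the new flag style (the variable name and `main` wrapper are illustrative, not exporter code; Kingpin derives the `--no-` negation for boolean flags automatically):

```go
package main

import (
	"fmt"

	"gopkg.in/alecthomas/kingpin.v2"
)

// Declared once, a boolean Kingpin flag answers to both
// --collector.global_status and --no-collector.global_status.
var globalStatusEnabled = kingpin.Flag(
	"collector.global_status",
	"Collect from SHOW GLOBAL STATUS",
).Default("true").Bool()

func main() {
	kingpin.Parse()
	fmt.Println("collector.global_status:", *globalStatusEnabled)
}
```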
### Changes:
* [CHANGE] Limit number and lifetime of connections #208
* [ENHANCEMENT] Move session params to DSN #259
* [ENHANCEMENT] Use native DB.Ping() instead of self-written implementation #210
* [FEATURE] Add collector duration metrics #197
* [FEATURE] Add 'collect[]' URL parameter to filter enabled collectors #235
* [FEATURE] Set a `lock_wait_timeout` on the MySQL connection #252
* [FEATURE] Set `last_scrape_error` when an error occurs #237
* [FEATURE] Collect metrics from `performance_schema.replication_group_member_stats` #271
* [FEATURE] Add innodb compression statistic #275
* [FEATURE] Add metrics for the output of `SHOW SLAVE HOSTS` #279
* [FEATURE] Support custom CA truststore and client SSL keypair. #255
* [BUGFIX] Fix perfEventsStatementsQuery #213
* [BUGFIX] Fix `file_instances` metric collector #205
* [BUGFIX] Fix prefix removal in `perf_schema_file_instances` #257
* [BUGFIX] Fix 32bit compile issue #273
* [BUGFIX] Ignore boolean keys in my.cnf. #283
## v0.9.0 / 2017-04-25
BREAKING CHANGES:
......
@@ -11,55 +11,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
GO := go
FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
PROMU := $(FIRST_GOPATH)/bin/promu
pkgs = $(shell $(GO) list ./... | grep -v /vendor/)
all: vet
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_NAME ?= mysqld-exporter
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
include Makefile.common
STATICCHECK_IGNORE = \
	github.com/prometheus/mysqld_exporter/mysqld_exporter.go:SA1019
all: format build test-short
DOCKER_IMAGE_NAME ?= mysqld-exporter
style:
	@echo ">> checking code style"
	@! gofmt -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^'
test-docker:
	@echo ">> testing docker image"
	./test_image.sh "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" 9104
test-short:
	@echo ">> running short tests"
	@$(GO) test -short -race $(pkgs)
test:
	@echo ">> running tests"
	@$(GO) test -race $(pkgs)
format:
	@echo ">> formatting code"
	@$(GO) fmt $(pkgs)
vet:
	@echo ">> vetting code"
	@$(GO) vet $(pkgs)
build: promu
	@echo ">> building binaries"
	@$(PROMU) build --prefix $(PREFIX)
tarball: promu
	@echo ">> building release tarball"
	@$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
docker:
	@echo ">> building docker image"
	@docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .
promu:
	@GOOS=$(shell uname -s | tr A-Z a-z) \
	GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m))) \
	$(GO) get -u github.com/prometheus/promu
.PHONY: all style format build test vet tarball docker promu
.PHONY: test-docker
# Copyright 2018 The Prometheus Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A common Makefile that includes rules to be reused in different prometheus projects.
# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository!
# Example usage :
# Create the main Makefile in the root project directory.
# include Makefile.common
# customTarget:
# @echo ">> Running customTarget"
#
# Ensure GOBIN is not set during build so that promu is installed to the correct path
unexport GOBIN
GO ?= go
GOFMT ?= $(GO)fmt
FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
PROMU := $(FIRST_GOPATH)/bin/promu
STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
GOVENDOR := $(FIRST_GOPATH)/bin/govendor
pkgs = ./...
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
all: style staticcheck unused build test
style:
	@echo ">> checking code style"
	! $(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print) | grep '^'

check_license:
	@echo ">> checking license header"
	@licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
		awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \
	done); \
	if [ -n "$${licRes}" ]; then \
		echo "license header checking failed:"; echo "$${licRes}"; \
		exit 1; \
	fi

test-short:
	@echo ">> running short tests"
	$(GO) test -short $(pkgs)

test:
	@echo ">> running all tests"
	$(GO) test -race $(pkgs)

format:
	@echo ">> formatting code"
	$(GO) fmt $(pkgs)

vet:
	@echo ">> vetting code"
	$(GO) vet $(pkgs)

staticcheck: $(STATICCHECK)
	@echo ">> running staticcheck"
	$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)

unused: $(GOVENDOR)
	@echo ">> running check for unused packages"
	@$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'

build: promu
	@echo ">> building binaries"
	$(PROMU) build --prefix $(PREFIX)

tarball: promu
	@echo ">> building release tarball"
	$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)

docker:
	docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .

promu:
	GOOS= GOARCH= $(GO) get -u github.com/prometheus/promu

$(FIRST_GOPATH)/bin/staticcheck:
	GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck

$(FIRST_GOPATH)/bin/govendor:
	GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor

.PHONY: all style check_license format build test vet assets tarball docker promu staticcheck $(FIRST_GOPATH)/bin/staticcheck govendor $(FIRST_GOPATH)/bin/govendor
\ No newline at end of file
@@ -28,7 +28,7 @@ NOTE: It is recommended to set a max connection limit for the user to avoid over
Running using an environment variable:
export DATA_SOURCE_NAME='login:password@(hostname:port)/'
export DATA_SOURCE_NAME='user:password@(hostname:3306)/'
./mysqld_exporter <flags>
Running using ~/.my.cnf:
@@ -58,6 +58,8 @@ collect.global_variables | 5.1 | Collect
collect.info_schema.clientstats | 5.5 | If running with userstat=1, set to true to collect client statistics.
collect.info_schema.innodb_metrics | 5.6 | Collect metrics from information_schema.innodb_metrics.
collect.info_schema.innodb_tablespaces | 5.7 | Collect metrics from information_schema.innodb_sys_tablespaces.
collect.info_schema.innodb_cmp | 5.5 | Collect InnoDB compressed tables metrics from information_schema.innodb_cmp.
collect.info_schema.innodb_cmpmem | 5.5 | Collect InnoDB buffer pool compression metrics from information_schema.innodb_cmpmem.
collect.info_schema.processlist | 5.1 | Collect thread state counts from information_schema.processlist.
collect.info_schema.processlist.min_time | 5.1 | Minimum time a thread must be in each state to be counted. (default: 0)
collect.info_schema.query_response_time | 5.5 | Collect query response time distribution if query_response_time_stats is ON.
@@ -75,7 +77,9 @@ collect.perf_schema.file_instances | 5.5 | Collect
collect.perf_schema.indexiowaits | 5.6 | Collect metrics from performance_schema.table_io_waits_summary_by_index_usage.
collect.perf_schema.tableiowaits | 5.6 | Collect metrics from performance_schema.table_io_waits_summary_by_table.
collect.perf_schema.tablelocks | 5.6 | Collect metrics from performance_schema.table_lock_waits_summary_by_table.
collect.perf_schema.replication_group_member_stats | 5.7 | Collect metrics from performance_schema.replication_group_member_stats.
collect.slave_status | 5.1 | Collect from SHOW SLAVE STATUS (Enabled by default)
collect.slave_hosts | 5.1 | Collect from SHOW SLAVE HOSTS
collect.heartbeat | 5.1 | Collect from [heartbeat](#heartbeat).
collect.heartbeat.database | 5.1 | Database from where to collect heartbeat data. (default: heartbeat)
collect.heartbeat.table | 5.1 | Table from where to collect heartbeat data. (default: heartbeat)
@@ -98,6 +102,24 @@ The MySQL server's [data source name](http://en.wikipedia.org/wiki/Data_source_n
must be set via the `DATA_SOURCE_NAME` environment variable.
The format of this variable is described at https://github.com/go-sql-driver/mysql#dsn-data-source-name.
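A minimal sketch of how such a DSN gets consumed, assuming only the go-sql-driver/mysql package documented above (host and credentials are placeholders):

```go
package main

import (
	"database/sql"
	"log"
	"os"

	// The driver registers itself as "mysql" with database/sql.
	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Same format the exporter reads, e.g. "user:password@(hostname:3306)/".
	dsn := os.Getenv("DATA_SOURCE_NAME")

	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatalf("invalid DSN: %v", err)
	}
	defer db.Close()

	// Ping forces a real connection, verifying the DSN end to end.
	if err := db.Ping(); err != nil {
		log.Fatalf("cannot connect: %v", err)
	}
	log.Println("connected")
}
```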
## Customizing Configuration for an SSL Connection
If the MySQL server supports SSL, you may need to specify a CA truststore to verify the server's chain of trust. You may also need to specify an SSL keypair for the client side of the SSL connection. To configure the mysqld exporter to use a custom CA certificate, add the following to the MySQL cnf file:
```
ssl-ca=/path/to/ca/file
```
To specify the client SSL keypair, add the following to the cnf file:
```
ssl-key=/path/to/ssl/client/key
ssl-cert=/path/to/ssl/client/cert
```
Customizing the SSL configuration is only supported via the MySQL cnf file; it is not supported when the MySQL server's data source name is set in the `DATA_SOURCE_NAME` environment variable.
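Under the hood this maps onto go-sql-driver/mysql's custom TLS support. A rough sketch of the equivalent client-side setup, assuming the same driver; the file paths and the `"custom"` config name are illustrative, not the exporter's actual internals:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// CA truststore, as in ssl-ca.
	pem, err := ioutil.ReadFile("/path/to/ca/file")
	if err != nil {
		log.Fatal(err)
	}
	roots := x509.NewCertPool()
	if !roots.AppendCertsFromPEM(pem) {
		log.Fatal("failed to parse CA PEM")
	}

	// Client keypair, as in ssl-cert / ssl-key.
	cert, err := tls.LoadX509KeyPair("/path/to/ssl/client/cert", "/path/to/ssl/client/key")
	if err != nil {
		log.Fatal(err)
	}

	// Registered under a name a DSN can reference with ?tls=custom.
	if err := mysql.RegisterTLSConfig("custom", &tls.Config{
		RootCAs:      roots,
		Certificates: []tls.Certificate{cert},
	}); err != nil {
		log.Fatal(err)
	}
}
```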
## Using Docker
You can deploy this exporter using the [prom/mysqld-exporter](https://registry.hub.docker.com/u/prom/mysqld-exporter/) Docker image.
@@ -105,10 +127,14 @@ You can deploy this exporter using the [prom/mysqld-exporter](https://registry.h
For example:
```bash
docker network create my-mysql-network
docker pull prom/mysqld-exporter
docker run -d -p 9104:9104 --link=my_mysql_container:bdd \
-e DATA_SOURCE_NAME="user:password@(bdd:3306)/database" prom/mysqld-exporter
docker run -d \
-p 9104:9104 \
--network my-mysql-network \
-e DATA_SOURCE_NAME="user:password@(my-mysql-network:3306)/" \
prom/mysqld-exporter
```
## heartbeat
......
machine:
  environment:
    DOCKER_IMAGE_NAME: prom/mysqld-exporter
    QUAY_IMAGE_NAME: quay.io/prometheus/mysqld-exporter
    DOCKER_TEST_IMAGE_NAME: quay.io/prometheus/golang-builder:1.8-base
    REPO_PATH: github.com/prometheus/mysqld_exporter
  pre:
    - sudo curl -L -o /usr/bin/docker 'https://s3-external-1.amazonaws.com/circle-downloads/docker-1.9.1-circleci'
    - sudo chmod 0755 /usr/bin/docker
    - sudo curl -L 'https://github.com/aktau/github-release/releases/download/v0.6.2/linux-amd64-github-release.tar.bz2' | tar xvjf - --strip-components 3 -C $HOME/bin
  services:
    - docker

dependencies:
  pre:
    - make promu
    - docker info
  override:
    - promu crossbuild
    - ln -s .build/linux-amd64/mysqld_exporter mysqld_exporter
    - |
      if [ -n "$CIRCLE_TAG" ]; then
        make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG
        make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG
      else
        make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME
        make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME
      fi
  post:
    - mkdir $CIRCLE_ARTIFACTS/binaries/ && cp -a .build/* $CIRCLE_ARTIFACTS/binaries/
    - docker images

test:
  override:
    - docker run --rm -t -v "$(pwd):/app" "${DOCKER_TEST_IMAGE_NAME}" -i "${REPO_PATH}" -T

deployment:
  hub_branch:
    branch: master
    owner: prometheus
    commands:
      - docker login -e $DOCKER_EMAIL -u $DOCKER_LOGIN -p $DOCKER_PASSWORD
      - docker login -e $QUAY_EMAIL -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io
      - docker push $DOCKER_IMAGE_NAME
      - docker push $QUAY_IMAGE_NAME
  hub_tag:
    tag: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
    owner: prometheus
    commands:
      - promu crossbuild tarballs
      - promu checksum .tarballs
      - promu release .tarballs
      - mkdir $CIRCLE_ARTIFACTS/releases/ && cp -a .tarballs/* $CIRCLE_ARTIFACTS/releases/
      - docker login -e $DOCKER_EMAIL -u $DOCKER_LOGIN -p $DOCKER_PASSWORD
      - docker login -e $QUAY_EMAIL -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io
      - |
        if [[ "$CIRCLE_TAG" =~ ^v[0-9]+(\.[0-9]+){2}$ ]]; then
          docker tag "$DOCKER_IMAGE_NAME:$CIRCLE_TAG" "$DOCKER_IMAGE_NAME:latest"
          docker tag "$QUAY_IMAGE_NAME:$CIRCLE_TAG" "$QUAY_IMAGE_NAME:latest"
        fi
      - docker push $DOCKER_IMAGE_NAME
      - docker push $QUAY_IMAGE_NAME
@@ -38,7 +38,20 @@ var (
)
// ScrapeBinlogSize collects from `SHOW BINARY LOGS`.
func ScrapeBinlogSize(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeBinlogSize struct{}
// Name of the Scraper. Should be unique.
func (ScrapeBinlogSize) Name() string {
	return "binlog_size"
}
// Help describes the role of the Scraper.
func (ScrapeBinlogSize) Help() string {
	return "Collect the current size of all registered binlog files"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeBinlogSize) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
	var logBin uint8
	err := db.QueryRow(logbinQuery).Scan(&logBin)
	if err != nil {
......
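The same three-method shape repeats in every collector below, which suggests a shared interface. Pieced together from the method sets in these diffs, it is presumably close to the following sketch (the doc comments are taken from the diffs; the exact definition lives elsewhere in the collector package):

```go
package collector

import (
	"database/sql"

	"github.com/prometheus/client_golang/prometheus"
)

// Scraper, as reconstructed from the method sets in these diffs.
type Scraper interface {
	// Name of the Scraper. Should be unique.
	Name() string
	// Help describes the role of the Scraper.
	Help() string
	// Scrape collects data from the database connection and sends it
	// over the channel as Prometheus metrics.
	Scrape(db *sql.DB, ch chan<- prometheus.Metric) error
}
```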
@@ -27,7 +27,7 @@ func TestScrapeBinlogSize(t *testing.T) {
	ch := make(chan prometheus.Metric)
	go func() {
		if err = ScrapeBinlogSize(db, ch); err != nil {
		if err = (ScrapeBinlogSize{}).Scrape(db, ch); err != nil {
			t.Errorf("error calling function on test: %s", err)
		}
		close(ch)
@@ -47,6 +47,6 @@ func TestScrapeBinlogSize(t *testing.T) {
	// Ensure all SQL queries were executed
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expections: %s", err)
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
@@ -19,7 +19,20 @@ const (
)
// ScrapeEngineInnodbStatus scrapes from `SHOW ENGINE INNODB STATUS`.
func ScrapeEngineInnodbStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeEngineInnodbStatus struct{}
// Name of the Scraper. Should be unique.
func (ScrapeEngineInnodbStatus) Name() string {
	return "engine_innodb_status"
}
// Help describes the role of the Scraper.
func (ScrapeEngineInnodbStatus) Help() string {
	return "Collect from SHOW ENGINE INNODB STATUS"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeEngineInnodbStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
	rows, err := db.Query(engineInnodbStatusQuery)
	if err != nil {
		return err
......
@@ -140,7 +140,7 @@ END OF INNODB MONITOR OUTPUT
	ch := make(chan prometheus.Metric)
	go func() {
		if err = ScrapeEngineInnodbStatus(db, ch); err != nil {
		if err = (ScrapeEngineInnodbStatus{}).Scrape(db, ch); err != nil {
			t.Errorf("error calling function on test: %s", err)
		}
		close(ch)
@@ -160,6 +160,6 @@ END OF INNODB MONITOR OUTPUT
	// Ensure all SQL queries were executed
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expections: %s", err)
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
@@ -16,26 +16,21 @@ const (
	engineTokudbStatusQuery = `SHOW ENGINE TOKUDB STATUS`
)
func sanitizeTokudbMetric(metricName string) string {
	replacements := map[string]string{
		">": "",
		",": "",
		":": "",
		"(": "",
		")": "",
		" ": "_",
		"-": "_",
		"+": "and",
		"/": "and",
	}
	for r := range replacements {
		metricName = strings.Replace(metricName, r, replacements[r], -1)
	}
	return metricName
// ScrapeEngineTokudbStatus scrapes from `SHOW ENGINE TOKUDB STATUS`.
type ScrapeEngineTokudbStatus struct{}
// Name of the Scraper. Should be unique.
func (ScrapeEngineTokudbStatus) Name() string {
	return "engine_tokudb_status"
}
// ScrapeEngineTokudbStatus scrapes from `SHOW ENGINE TOKUDB STATUS`.
func ScrapeEngineTokudbStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
// Help describes the role of the Scraper.
func (ScrapeEngineTokudbStatus) Help() string {
	return "Collect from SHOW ENGINE TOKUDB STATUS"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeEngineTokudbStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
	tokudbRows, err := db.Query(engineTokudbStatusQuery)
	if err != nil {
		return err
@@ -60,3 +55,21 @@ func ScrapeEngineTokudbStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
	}
	return nil
}
func sanitizeTokudbMetric(metricName string) string {
	replacements := map[string]string{
		">": "",
		",": "",
		":": "",
		"(": "",
		")": "",
		" ": "_",
		"-": "_",
		"+": "and",
		"/": "and",
	}
	for r := range replacements {
		metricName = strings.Replace(metricName, r, replacements[r], -1)
	}
	return metricName
}
@@ -44,7 +44,7 @@ func TestScrapeEngineTokudbStatus(t *testing.T) {
	ch := make(chan prometheus.Metric)
	go func() {
		if err = ScrapeEngineTokudbStatus(db, ch); err != nil {
		if err = (ScrapeEngineTokudbStatus{}).Scrape(db, ch); err != nil {
			t.Errorf("error calling function on test: %s", err)
		}
		close(ch)
@@ -64,6 +64,6 @@ func TestScrapeEngineTokudbStatus(t *testing.T) {
	// Ensure all SQL queries were executed
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expections: %s", err)
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
......
@@ -15,9 +15,12 @@ func TestExporter(t *testing.T) {
		t.Skip("-short is passed, skipping test")
	}
	exporter := New(dsn, Collect{
		GlobalStatus: true,
	})
	exporter := New(
		dsn,
		NewMetrics(),
		[]Scraper{
			ScrapeGlobalStatus{},
		})
	convey.Convey("Metrics describing", t, func() {
		ch := make(chan *prometheus.Desc)
......
@@ -11,15 +11,16 @@ import (
)
const (
	// Scrape query
	// Scrape query.
	globalStatusQuery = `SHOW GLOBAL STATUS`
	// Subsytem.
	// Subsystem.
	globalStatus = "global_status"
)
// Regexp to match various groups of status vars.
var globalStatusRE = regexp.MustCompile(`^(com|handler|connection_errors|innodb_buffer_pool_pages|innodb_rows|performance_schema)_(.*)$`)
// Metric descriptors.
var (
	globalCommandsDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, globalStatus, "commands_total"),
@@ -59,7 +60,20 @@ var (
)
// ScrapeGlobalStatus collects from `SHOW GLOBAL STATUS`.
func ScrapeGlobalStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeGlobalStatus struct{}
// Name of the Scraper. Should be unique.
func (ScrapeGlobalStatus) Name() string {
	return globalStatus
}
// Help describes the role of the Scraper.
func (ScrapeGlobalStatus) Help() string {
	return "Collect from SHOW GLOBAL STATUS"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeGlobalStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
	globalStatusRows, err := db.Query(globalStatusQuery)
	if err != nil {
		return err
......
@@ -38,7 +38,7 @@ func TestScrapeGlobalStatus(t *testing.T) {
	ch := make(chan prometheus.Metric)
	go func() {
		if err = ScrapeGlobalStatus(db, ch); err != nil {
		if err = (ScrapeGlobalStatus{}).Scrape(db, ch); err != nil {
			t.Errorf("error calling function on test: %s", err)
		}
		close(ch)
@@ -68,6 +68,6 @@ func TestScrapeGlobalStatus(t *testing.T) {
	// Ensure all SQL queries were executed
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expections: %s", err)
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
@@ -19,7 +19,20 @@ const (
)
// ScrapeGlobalVariables collects from `SHOW GLOBAL VARIABLES`.
func ScrapeGlobalVariables(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeGlobalVariables struct{}
// Name of the Scraper. Should be unique.
func (ScrapeGlobalVariables) Name() string {
	return globalVariables
}
// Help describes the role of the Scraper.
func (ScrapeGlobalVariables) Help() string {
	return "Collect from SHOW GLOBAL VARIABLES"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeGlobalVariables) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
	globalVariablesRows, err := db.Query(globalVariablesQuery)
	if err != nil {
		return err
......
@@ -37,7 +37,7 @@ func TestScrapeGlobalVariables(t *testing.T) {
	ch := make(chan prometheus.Metric)
	go func() {
		if err = ScrapeGlobalVariables(db, ch); err != nil {
		if err = (ScrapeGlobalVariables{}).Scrape(db, ch); err != nil {
			t.Errorf("error calling function on test: %s", err)
		}
		close(ch)
@@ -64,7 +64,7 @@ func TestScrapeGlobalVariables(t *testing.T) {
	// Ensure all SQL queries were executed
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expections: %s", err)
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
@@ -76,7 +76,7 @@ func TestParseWsrepProviderOptions(t *testing.T) {
	convey.Convey("Parse wsrep_provider_options", t, func() {
		convey.So(parseWsrepProviderOptions(testE), convey.ShouldEqual, 0)
		convey.So(parseWsrepProviderOptions(testM), convey.ShouldEqual, 128*1024*1024)
		convey.So(parseWsrepProviderOptions(testG), convey.ShouldEqual, 2*1024*1024*1024)
		convey.So(parseWsrepProviderOptions(testG), convey.ShouldEqual, int64(2*1024*1024*1024))
		convey.So(parseWsrepProviderOptions(testB), convey.ShouldEqual, 131072)
	})
}
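The added `int64` conversion is not cosmetic: `2*1024*1024*1024` equals 2147483648, one past the largest 32-bit `int`, and an untyped constant passed to an `interface{}` parameter such as `convey.ShouldEqual`'s defaults to `int`, so the test stopped compiling on 32-bit platforms (presumably the "Fix 32bit compile issue #273" entry in the changelog above). A standalone illustration of the rule:

```go
package main

import "fmt"

// takesAny mimics an interface{} parameter like convey.ShouldEqual's:
// an untyped constant argument defaults to type int here.
func takesAny(v interface{}) {
	fmt.Printf("%T %v\n", v, v)
}

func main() {
	// On 32-bit platforms the next line would not compile:
	// "constant 2147483648 overflows int".
	// takesAny(2 * 1024 * 1024 * 1024)

	// Converting to int64 keeps the constant representable everywhere.
	takesAny(int64(2 * 1024 * 1024 * 1024)) // prints: int64 2147483648
}
```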
@@ -8,6 +8,7 @@ import (
"strconv"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/alecthomas/kingpin.v2"
)
const (
@@ -20,6 +21,17 @@ const (
	heartbeatQuery = "SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(NOW(6)), server_id from `%s`.`%s`"
)
var (
	collectHeartbeatDatabase = kingpin.Flag(
		"collect.heartbeat.database",
		"Database from where to collect heartbeat data",
	).Default("heartbeat").String()
	collectHeartbeatTable = kingpin.Flag(
		"collect.heartbeat.table",
		"Table from where to collect heartbeat data",
	).Default("heartbeat").String()
)
// Metric descriptors.
var (
	HeartbeatStoredDesc = prometheus.NewDesc(
@@ -41,8 +53,21 @@ var (
// ts varchar(26) NOT NULL,
// server_id int unsigned NOT NULL PRIMARY KEY,
// );
func ScrapeHeartbeat(db *sql.DB, ch chan<- prometheus.Metric, collectDatabase, collectTable string) error {
	query := fmt.Sprintf(heartbeatQuery, collectDatabase, collectTable)
type ScrapeHeartbeat struct{}
// Name of the Scraper. Should be unique.
func (ScrapeHeartbeat) Name() string {
	return "heartbeat"
}
// Help describes the role of the Scraper.
func (ScrapeHeartbeat) Help() string {
	return "Collect from heartbeat"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeHeartbeat) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
	query := fmt.Sprintf(heartbeatQuery, *collectHeartbeatDatabase, *collectHeartbeatTable)
	heartbeatRows, err := db.Query(query)
	if err != nil {
		return err
......
@@ -7,9 +7,18 @@ import (
dto "github.com/prometheus/client_model/go"
"github.com/smartystreets/goconvey/convey"
"gopkg.in/DATA-DOG/go-sqlmock.v1"
"gopkg.in/alecthomas/kingpin.v2"
)
func TestScrapeHeartbeat(t *testing.T) {
_, err := kingpin.CommandLine.Parse([]string{
"--collect.heartbeat.database", "heartbeat-test",
"--collect.heartbeat.table", "heartbeat-test",
})
if err != nil {
t.Fatal(err)
}
db, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("error opening a stub database connection: %s", err)
@@ -19,13 +28,11 @@ func TestScrapeHeartbeat(t *testing.T) {
	columns := []string{"UNIX_TIMESTAMP(ts)", "UNIX_TIMESTAMP(NOW(6))", "server_id"}
	rows := sqlmock.NewRows(columns).
		AddRow("1487597613.001320", "1487598113.448042", 1)
	mock.ExpectQuery(sanitizeQuery("SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(NOW(6)), server_id from `heartbeat`.`heartbeat`")).WillReturnRows(rows)
	mock.ExpectQuery(sanitizeQuery("SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(NOW(6)), server_id from `heartbeat-test`.`heartbeat-test`")).WillReturnRows(rows)
	ch := make(chan prometheus.Metric)
	go func() {
		database := "heartbeat"
		table := "heartbeat"
		if err = ScrapeHeartbeat(db, ch, database, table); err != nil {
		if err = (ScrapeHeartbeat{}).Scrape(db, ch); err != nil {
			t.Errorf("error calling function on test: %s", err)
		}
		close(ch)
@@ -44,6 +51,6 @@ func TestScrapeHeartbeat(t *testing.T) {
	// Ensure all SQL queries were executed
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expections: %s", err)
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
@@ -22,6 +22,7 @@ const infoSchemaAutoIncrementQuery = `
WHERE c.extra = 'auto_increment' AND t.auto_increment IS NOT NULL
`
// Metric descriptors.
var (
	globalInfoSchemaAutoIncrementDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, informationSchema, "auto_increment_column"),
@@ -36,7 +37,20 @@ var (
)
// ScrapeAutoIncrementColumns collects auto_increment column information.
func ScrapeAutoIncrementColumns(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeAutoIncrementColumns struct{}
// Name of the Scraper. Should be unique.
func (ScrapeAutoIncrementColumns) Name() string {
	return "auto_increment.columns"
}
// Help describes the role of the Scraper.
func (ScrapeAutoIncrementColumns) Help() string {
	return "Collect auto_increment columns and max values from information_schema"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeAutoIncrementColumns) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
	autoIncrementRows, err := db.Query(infoSchemaAutoIncrementQuery)
	if err != nil {
		return err
......
@@ -128,7 +128,20 @@ var (
)
// ScrapeClientStat collects from `information_schema.client_statistics`.
func ScrapeClientStat(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeClientStat struct{}
// Name of the Scraper. Should be unique.
func (ScrapeClientStat) Name() string {
	return "info_schema.clientstats"
}
// Help describes the role of the Scraper.
func (ScrapeClientStat) Help() string {
	return "If running with userstat=1, set to true to collect client statistics"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeClientStat) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
	var varName, varVal string
	err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal)
	if err != nil {
......
@@ -26,7 +26,7 @@ func TestScrapeClientStat(t *testing.T) {
	ch := make(chan prometheus.Metric)
	go func() {
		if err = ScrapeClientStat(db, ch); err != nil {
		if err = (ScrapeClientStat{}).Scrape(db, ch); err != nil {
			t.Errorf("error calling function on test: %s", err)
		}
		close(ch)
@@ -65,6 +65,6 @@ func TestScrapeClientStat(t *testing.T) {
	// Ensure all SQL queries were executed
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expections: %s", err)
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
// Scrape `information_schema.INNODB_CMP`.

package collector

import (
	"database/sql"

	"github.com/prometheus/client_golang/prometheus"
)

const innodbCmpQuery = `
	SELECT
		page_size, compress_ops, compress_ops_ok, compress_time, uncompress_ops, uncompress_time
	FROM information_schema.innodb_cmp
	`

// Metric descriptors.
var (
	infoSchemaInnodbCmpCompressOps = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_ops_total"),
		"Number of times a B-tree page of the size PAGE_SIZE has been compressed.",
		[]string{"page_size"}, nil,
	)
	infoSchemaInnodbCmpCompressOpsOk = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_ops_ok_total"),
		"Number of times a B-tree page of the size PAGE_SIZE has been successfully compressed.",
		[]string{"page_size"}, nil,
	)
	infoSchemaInnodbCmpCompressTime = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_time_seconds_total"),
		"Total time in seconds spent in attempts to compress B-tree pages.",
		[]string{"page_size"}, nil,
	)
	infoSchemaInnodbCmpUncompressOps = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_uncompress_ops_total"),
		"Number of times a B-tree page of the size PAGE_SIZE has been uncompressed.",
		[]string{"page_size"}, nil,
	)
	infoSchemaInnodbCmpUncompressTime = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_uncompress_time_seconds_total"),
		"Total time in seconds spent in uncompressing B-tree pages.",
		[]string{"page_size"}, nil,
	)
)

// ScrapeInnodbCmp collects from `information_schema.innodb_cmp`.
type ScrapeInnodbCmp struct{}

// Name of the Scraper. Should be unique.
func (ScrapeInnodbCmp) Name() string {
	return informationSchema + ".innodb_cmp"
}

// Help describes the role of the Scraper.
func (ScrapeInnodbCmp) Help() string {
	return "Collect metrics from information_schema.innodb_cmp"
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeInnodbCmp) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
	informationSchemaInnodbCmpRows, err := db.Query(innodbCmpQuery)
	if err != nil {
		return err
	}
	defer informationSchemaInnodbCmpRows.Close()

	var (
		page_size string
		compress_ops, compress_ops_ok, compress_time, uncompress_ops, uncompress_time float64
	)

	for informationSchemaInnodbCmpRows.Next() {
		if err := informationSchemaInnodbCmpRows.Scan(
			&page_size, &compress_ops, &compress_ops_ok, &compress_time, &uncompress_ops, &uncompress_time,
		); err != nil {
			return err
		}
		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressOps, prometheus.CounterValue, compress_ops, page_size)
		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressOpsOk, prometheus.CounterValue, compress_ops_ok, page_size)
		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressTime, prometheus.CounterValue, compress_time, page_size)
		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpUncompressOps, prometheus.CounterValue, uncompress_ops, page_size)
		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpUncompressTime, prometheus.CounterValue, uncompress_time, page_size)
	}

	return nil
}
package collector

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"github.com/smartystreets/goconvey/convey"
	"gopkg.in/DATA-DOG/go-sqlmock.v1"
)

func TestScrapeInnodbCmp(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("error opening a stub database connection: %s", err)
	}
	defer db.Close()

	columns := []string{"page_size", "compress_ops", "compress_ops_ok", "compress_time", "uncompress_ops", "uncompress_time"}
	rows := sqlmock.NewRows(columns).
		AddRow("1024", 10, 20, 30, 40, 50)
	mock.ExpectQuery(sanitizeQuery(innodbCmpQuery)).WillReturnRows(rows)

	ch := make(chan prometheus.Metric)
	go func() {
		if err = (ScrapeInnodbCmp{}).Scrape(db, ch); err != nil {
			t.Errorf("error calling function on test: %s", err)
		}
		close(ch)
	}()

	expected := []MetricResult{
		{labels: labelMap{"page_size": "1024"}, value: 10, metricType: dto.MetricType_COUNTER},
		{labels: labelMap{"page_size": "1024"}, value: 20, metricType: dto.MetricType_COUNTER},
		{labels: labelMap{"page_size": "1024"}, value: 30, metricType: dto.MetricType_COUNTER},
		{labels: labelMap{"page_size": "1024"}, value: 40, metricType: dto.MetricType_COUNTER},
		{labels: labelMap{"page_size": "1024"}, value: 50, metricType: dto.MetricType_COUNTER},
	}
	convey.Convey("Metrics comparison", t, func() {
		for _, expect := range expected {
			got := readMetric(<-ch)
			convey.So(expect, convey.ShouldResemble, got)
		}
	})

	// Ensure all SQL queries were executed
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
// Scrape `information_schema.INNODB_CMPMEM`.

package collector

import (
	"database/sql"

	"github.com/prometheus/client_golang/prometheus"
)

const innodbCmpMemQuery = `
	SELECT
		page_size, buffer_pool_instance, pages_used, pages_free, relocation_ops, relocation_time
	FROM information_schema.innodb_cmpmem
	`

// Metric descriptors.
var (
	infoSchemaInnodbCmpMemPagesRead = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_pages_used_total"),
		"Number of blocks of the size PAGE_SIZE that are currently in use.",
		[]string{"page_size", "buffer_pool"}, nil,
	)
	infoSchemaInnodbCmpMemPagesFree = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_pages_free_total"),
		"Number of blocks of the size PAGE_SIZE that are currently available for allocation.",
		[]string{"page_size", "buffer_pool"}, nil,
	)
	infoSchemaInnodbCmpMemRelocationOps = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_relocation_ops_total"),
		"Number of times a block of the size PAGE_SIZE has been relocated.",
		[]string{"page_size", "buffer_pool"}, nil,
	)
	infoSchemaInnodbCmpMemRelocationTime = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_relocation_time_seconds_total"),
		"Total time in seconds spent in relocating blocks.",
		[]string{"page_size", "buffer_pool"}, nil,
	)
)

// ScrapeInnodbCmpMem collects from `information_schema.innodb_cmpmem`.
type ScrapeInnodbCmpMem struct{}

// Name of the Scraper. Should be unique.
func (ScrapeInnodbCmpMem) Name() string {
	return informationSchema + ".innodb_cmpmem"
}

// Help describes the role of the Scraper.
func (ScrapeInnodbCmpMem) Help() string {
	return "Collect metrics from information_schema.innodb_cmpmem"
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.