---
version: 2
jobs:
  test:
    docker:
      - image: circleci/golang:1.10
    working_directory: /go/src/github.com/prometheus/mysqld_exporter
    steps:
      - checkout
      - run: make promu
      - run: make
      - run: rm -v mysqld_exporter
  codespell:
    docker:
      - image: circleci/python
    steps:
      - checkout
      - run: sudo pip install codespell
      - run: codespell --skip=".git,./vendor,ttar"
  build:
    machine: true
    working_directory: /home/circleci/.go_workspace/src/github.com/prometheus/mysqld_exporter
    steps:
      - checkout
      - run: make promu
      - run: promu crossbuild -v
      - persist_to_workspace:
          root: .
          paths:
            - .build
  docker_hub_master:
    docker:
      - image: circleci/golang:1.10
    working_directory: /go/src/github.com/prometheus/mysqld_exporter
    environment:
      DOCKER_IMAGE_NAME: prom/mysqld-exporter
      QUAY_IMAGE_NAME: quay.io/prometheus/mysqld-exporter
    steps:
      - checkout
      - setup_remote_docker
      - attach_workspace:
          at: .
      - run: ln -s .build/linux-amd64/mysqld_exporter mysqld_exporter
      - run: make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME
      - run: make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME
      - run: docker images
      - run: docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD
      - run: docker login -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io
      - run: docker push $DOCKER_IMAGE_NAME
      - run: docker push $QUAY_IMAGE_NAME
  docker_hub_release_tags:
    docker:
      - image: circleci/golang:1.10
    working_directory: /go/src/github.com/prometheus/mysqld_exporter
    environment:
      DOCKER_IMAGE_NAME: prom/mysqld-exporter
      QUAY_IMAGE_NAME: quay.io/prometheus/mysqld-exporter
    steps:
      - checkout
      - setup_remote_docker
      - run: mkdir -v -p ${HOME}/bin
      - run: curl -L 'https://github.com/aktau/github-release/releases/download/v0.7.2/linux-amd64-github-release.tar.bz2' | tar xvjf - --strip-components 3 -C ${HOME}/bin
      - run: echo 'export PATH=${HOME}/bin:${PATH}' >> ${BASH_ENV}
      - attach_workspace:
          at: .
      - run: make promu
      - run: promu crossbuild tarballs
      - run: promu checksum .tarballs
      - run: promu release .tarballs
      - store_artifacts:
          path: .tarballs
          destination: releases
      - run: ln -s .build/linux-amd64/mysqld_exporter mysqld_exporter
      - run: make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG
      - run: make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG
      - run: docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD
      - run: docker login -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io
      - run: |
          if [[ "$CIRCLE_TAG" =~ ^v[0-9]+(\.[0-9]+){2}$ ]]; then
            docker tag "$DOCKER_IMAGE_NAME:$CIRCLE_TAG" "$DOCKER_IMAGE_NAME:latest"
            docker tag "$QUAY_IMAGE_NAME:$CIRCLE_TAG" "$QUAY_IMAGE_NAME:latest"
          fi
      - run: docker push $DOCKER_IMAGE_NAME
      - run: docker push $QUAY_IMAGE_NAME
workflows:
  version: 2
  mysqld_exporter:
    jobs:
      - test:
          filters:
            tags:
              only: /.*/
      - build:
          filters:
            tags:
              only: /.*/
      - codespell:
          filters:
            tags:
              only: /.*/
      - docker_hub_master:
          requires:
            - test
            - build
          filters:
            branches:
              only: master
      - docker_hub_release_tags:
          requires:
            - test
            - build
          filters:
            tags:
              only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
            branches:
              ignore: /.*/
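To sanity-check which git tags the `docker_hub_release_tags` filter accepts, and which of those the shell step above additionally promotes to `:latest`, here is a small standalone Go sketch. The two regular expressions are copied from the config; the sample tags are purely illustrative:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Tag filter that triggers the release workflow (from the config above).
	releaseTag := regexp.MustCompile(`^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$`)
	// Stricter pattern that additionally retags the image as :latest.
	latestTag := regexp.MustCompile(`^v[0-9]+(\.[0-9]+){2}$`)

	for _, tag := range []string{"v0.11.0", "v0.11.0-rc.0", "v0.11", "release-0.11"} {
		fmt.Printf("%-14s release=%-5v latest=%v\n",
			tag, releaseTag.MatchString(tag), latestTag.MatchString(tag))
	}
}
```

So `v0.11.0` builds a release and becomes `:latest`, `v0.11.0-rc.0` builds a release without touching `:latest`, and the other two trigger nothing.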
<!--
Please note: GitHub issues should only be used for feature requests and
bug reports. For general usage/help/discussions, please refer to one of:
- #prometheus on freenode
- the Prometheus Users list: https://groups.google.com/forum/#!forum/prometheus-users
For bug reports, please fill out the below fields and provide as much detail
as possible about your issue. For feature requests, you may omit the
following template.
-->
### Host operating system: output of `uname -a`
### mysqld_exporter version: output of `mysqld_exporter --version`
<!-- If building from source, run `make` first. -->
### MySQL server version
### mysqld_exporter command line flags
<!-- Please list all of the command line flags -->
### What did you do that produced an error?
### What did you expect to see?
### What did you see instead?
go: 1.6.2
repository:
  path: github.com/prometheus/mysqld_exporter
build:
......
sudo: false
dist: trusty
sudo: required
language: go
go:
- 1.5.4
- 1.6.2
- 1.9.x
- 1.10.x
env:
- MYSQL_IMAGE=mysql/mysql-server:5.5
- MYSQL_IMAGE=mysql/mysql-server:5.6
- MYSQL_IMAGE=mysql/mysql-server:5.7
- MYSQL_IMAGE=mysql/mysql-server:8.0
services:
- docker
go_import_path: github.com/prometheus/mysqld_exporter
before_script:
- sudo service mysql stop
- docker --version
- docker-compose --version
- docker-compose up -d
script:
- make
- make test
The Prometheus project was started by Matt T. Proud (emeritus) and
Julius Volz in 2012.
Maintainers of this repository:
* Julius Volz <julius.volz@gmail.com>
* Brian Brazil <brian.brazil@boxever.com>
The following individuals have contributed code to this repository
(listed in alphabetical order):
* Eugene Chertikhin <e.chertikhin@crestwavetech.ru>
## v0.11.0 / 2018-06-29
### BREAKING CHANGES:
* Flags now use the Kingpin library, and require double-dashes. #222
This also changes the behavior of boolean flags.
* Enable: `--collector.global_status`
* Disable: `--no-collector.global_status` (see the sketch below)
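A minimal standalone sketch (not part of the exporter) of the Kingpin behavior described above; Kingpin boolean flags automatically accept a `--no-` prefixed form to negate them:

```go
package main

import (
	"fmt"

	"gopkg.in/alecthomas/kingpin.v2"
)

// Defaults to enabled; passing --no-collector.global_status turns it off.
var globalStatus = kingpin.Flag(
	"collector.global_status",
	"Collect from SHOW GLOBAL STATUS",
).Default("true").Bool()

func main() {
	kingpin.Parse()
	fmt.Println("collector.global_status:", *globalStatus)
}
```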
### Changes:
* [CHANGE] Limit number and lifetime of connections #208
* [ENHANCEMENT] Move session params to DSN #259
* [ENHANCEMENT] Use native DB.Ping() instead of self-written implementation #210
* [FEATURE] Add collector duration metrics #197
* [FEATURE] Add 'collect[]' URL parameter to filter enabled collectors #235
* [FEATURE] Set a `lock_wait_timeout` on the MySQL connection #252
* [FEATURE] Set `last_scrape_error` when an error occurs #237
* [FEATURE] Collect metrics from `performance_schema.replication_group_member_stats` #271
* [FEATURE] Add innodb compression statistic #275
* [FEATURE] Add metrics for the output of `SHOW SLAVE HOSTS` #279
* [FEATURE] Support custom CA truststore and client SSL keypair. #255
* [BUGFIX] Fix perfEventsStatementsQuery #213
* [BUGFIX] Fix `file_instances` metric collector #205
* [BUGFIX] Fix prefix removal in `perf_schema_file_instances` #257
* [BUGFIX] Fix 32bit compile issue #273
* [BUGFIX] Ignore boolean keys in my.cnf. #283
## v0.10.0 / 2017-04-25
BREAKING CHANGES:
* `mysql_slave_...` metrics now include an additional `connection_name` label to support mariadb multi-source replication. (#178)
* [FEATURE] Add read/write query response time #166
* [FEATURE] Add Galera gcache size metric #169
* [FEATURE] Add MariaDB multi source replication support #178
* [FEATURE] Implement heartbeat metrics #183
* [FEATURE] Add basic file_summary_by_instance metrics #189
* [BUGFIX] Workaround MySQL bug 79533 #173
## 0.9.0 / 2016-09-26
BREAKING CHANGES:
* InnoDB buffer pool page stats have been renamed/fixed to better support aggregations (#130)
* [FEATURE] scrape slave status for multisource replication #134
* [FEATURE] Add client statistics support (+ add tests on users & clients statistics) #138
* [IMPROVEMENT] Consistency of error logging. #144
* [IMPROVEMENT] Add label aggregation for innodb buffer metrics #130
* [IMPROVEMENT] Improved and fixed user/client statistics #149
* [FEATURE] Added the last binlog file number metric. #152
* [MISC] Add an example recording rules file #156
* [FEATURE] Added PXC/Galera info metrics. #155
* [FEATURE] Added metrics from SHOW ENGINE INNODB STATUS. #160
* [IMPROVEMENT] Fix wsrep_cluster_status #146
## 0.8.1 / 2016-05-05
* [BUGFIX] Fix collect.info_schema.innodb_tablespaces #119
@@ -2,9 +2,9 @@
Prometheus uses GitHub to manage reviews of pull requests.
* If you have a trivial fix or improvement, go ahead and create a pull
request, addressing (with `@...`) one or more of the maintainers
(see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
* If you have a trivial fix or improvement, go ahead and create a pull request,
addressing (with `@...`) the maintainer of this repository (see
[MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
* If you plan to do something more involved, first discuss your ideas
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
@@ -16,3 +16,14 @@ Prometheus uses GitHub to manage reviews of pull requests.
and the _Formatting and style_ section of Peter Bourgon's [Go: Best
Practices for Production
Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
## Local setup
The easiest way to get a local development setup running is to use Docker Compose.
```
docker-compose up
make
make test
```
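Running `docker-compose up` brings up the MySQL containers the test suite runs against, mirroring what the Travis configuration above does with `docker-compose up -d` before `make test`.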
* Ben Kochie <superq@gmail.com>
@@ -11,50 +11,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
GO := GO15VENDOREXPERIMENT=1 go
PROMU := $(GOPATH)/bin/promu
pkgs = $(shell $(GO) list ./... | grep -v /vendor/)
all: vet
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_NAME ?= mysqld-exporter
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
include Makefile.common
STATICCHECK_IGNORE = \
github.com/prometheus/mysqld_exporter/mysqld_exporter.go:SA1019
all: format build test
DOCKER_IMAGE_NAME ?= mysqld-exporter
style:
@echo ">> checking code style"
@! gofmt -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^'
test-docker:
@echo ">> testing docker image"
./test_image.sh "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" 9104
test:
@echo ">> running tests"
@$(GO) test -short -race $(pkgs)
format:
@echo ">> formatting code"
@$(GO) fmt $(pkgs)
vet:
@echo ">> vetting code"
@$(GO) vet $(pkgs)
build: promu
@echo ">> building binaries"
@$(PROMU) build --prefix $(PREFIX)
tarball: promu
@echo ">> building release tarball"
@$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
docker:
@echo ">> building docker image"
@docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .
promu:
@GOOS=$(shell uname -s | tr A-Z a-z) \
GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m))) \
$(GO) get -u github.com/prometheus/promu
.PHONY: all style format build test vet tarball docker promu
.PHONY: test-docker
# Copyright 2018 The Prometheus Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A common Makefile that includes rules to be reused in different prometheus projects.
# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository!
# Example usage:
# Create the main Makefile in the root project directory.
# include Makefile.common
# customTarget:
# @echo ">> Running customTarget"
#
# Ensure GOBIN is not set during build so that promu is installed to the correct path
unexport GOBIN
GO ?= go
GOFMT ?= $(GO)fmt
FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
PROMU := $(FIRST_GOPATH)/bin/promu
STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
GOVENDOR := $(FIRST_GOPATH)/bin/govendor
pkgs = ./...
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
all: style staticcheck unused build test
style:
@echo ">> checking code style"
! $(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print) | grep '^'
check_license:
@echo ">> checking license header"
@licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \
done); \
if [ -n "$${licRes}" ]; then \
echo "license header checking failed:"; echo "$${licRes}"; \
exit 1; \
fi
test-short:
@echo ">> running short tests"
$(GO) test -short $(pkgs)
test:
@echo ">> running all tests"
$(GO) test -race $(pkgs)
format:
@echo ">> formatting code"
$(GO) fmt $(pkgs)
vet:
@echo ">> vetting code"
$(GO) vet $(pkgs)
staticcheck: $(STATICCHECK)
@echo ">> running staticcheck"
$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
unused: $(GOVENDOR)
@echo ">> running check for unused packages"
@$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'
build: promu
@echo ">> building binaries"
$(PROMU) build --prefix $(PREFIX)
tarball: promu
@echo ">> building release tarball"
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
docker:
docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .
promu:
GOOS= GOARCH= $(GO) get -u github.com/prometheus/promu
$(FIRST_GOPATH)/bin/staticcheck:
GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck
$(FIRST_GOPATH)/bin/govendor:
GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor
.PHONY: all style check_license format build test vet assets tarball docker promu staticcheck $(FIRST_GOPATH)/bin/staticcheck govendor $(FIRST_GOPATH)/bin/govendor
\ No newline at end of file
machine:
  environment:
    DOCKER_IMAGE_NAME: prom/mysqld-exporter
    QUAY_IMAGE_NAME: quay.io/prometheus/mysqld-exporter
    DOCKER_TEST_IMAGE_NAME: quay.io/prometheus/golang-builder:1.6.2-main
    REPO_PATH: github.com/prometheus/mysqld_exporter
  pre:
    - sudo curl -L -o /usr/bin/docker 'https://s3-external-1.amazonaws.com/circle-downloads/docker-1.9.1-circleci'
    - sudo chmod 0755 /usr/bin/docker
    - sudo curl -L 'https://github.com/aktau/github-release/releases/download/v0.6.2/linux-amd64-github-release.tar.bz2' | tar xvjf - --strip-components 3 -C $HOME/bin
  services:
    - docker

dependencies:
  pre:
    - make promu
    - docker info
  override:
    - promu crossbuild
    - ln -s .build/linux-amd64/mysqld_exporter mysqld_exporter
    - |
      if [ -n "$CIRCLE_TAG" ]; then
        make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG
        make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG
      else
        make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME
        make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME
      fi
  post:
    - mkdir $CIRCLE_ARTIFACTS/binaries/ && cp -a .build/* $CIRCLE_ARTIFACTS/binaries/
    - docker images

test:
  override:
    - docker run --rm -t -v "$(pwd):/app" "${DOCKER_TEST_IMAGE_NAME}" -i "${REPO_PATH}" -T

deployment:
  hub_branch:
    branch: master
    owner: prometheus
    commands:
      - docker login -e $DOCKER_EMAIL -u $DOCKER_LOGIN -p $DOCKER_PASSWORD
      - docker login -e $QUAY_EMAIL -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io
      - docker push $DOCKER_IMAGE_NAME
      - docker push $QUAY_IMAGE_NAME
  hub_tag:
    tag: /^[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
    owner: prometheus
    commands:
      - promu crossbuild tarballs
      - promu release .tarballs
      - mkdir $CIRCLE_ARTIFACTS/releases/ && cp -a .tarballs/* $CIRCLE_ARTIFACTS/releases/
      - docker login -e $DOCKER_EMAIL -u $DOCKER_LOGIN -p $DOCKER_PASSWORD
      - docker login -e $QUAY_EMAIL -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io
      - |
        if [[ "$CIRCLE_TAG" =~ ^[0-9]+(\.[0-9]+){2}$ ]]; then
          docker tag "$DOCKER_IMAGE_NAME:$CIRCLE_TAG" "$DOCKER_IMAGE_NAME:latest"
          docker tag "$QUAY_IMAGE_NAME:$CIRCLE_TAG" "$QUAY_IMAGE_NAME:latest"
        fi
      - docker push $DOCKER_IMAGE_NAME
      - docker push $QUAY_IMAGE_NAME
@@ -4,6 +4,8 @@ package collector
import (
"database/sql"
"strconv"
"strings"
"github.com/prometheus/client_golang/prometheus"
)
@@ -28,10 +30,28 @@ var (
"Number of registered binlog files.",
[]string{}, nil,
)
binlogFileNumberDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, binlog, "file_number"),
"The last binlog file number.",
[]string{}, nil,
)
)
// ScrapeBinlogSize collects from `SHOW BINARY LOGS`.
func ScrapeBinlogSize(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeBinlogSize struct{}
// Name of the Scraper. Should be unique.
func (ScrapeBinlogSize) Name() string {
return "binlog_size"
}
// Help describes the role of the Scraper.
func (ScrapeBinlogSize) Help() string {
return "Collect the current size of all registered binlog files"
}
// Scrape collects data from the database connection and sends it over the channel as Prometheus metrics.
func (ScrapeBinlogSize) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
var logBin uint8
err := db.QueryRow(logbinQuery).Scan(&logBin)
if err != nil {
@@ -71,6 +91,11 @@ func ScrapeBinlogSize(db *sql.DB, ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
binlogFilesDesc, prometheus.GaugeValue, float64(count),
)
// The last row contains the last binlog file number.
value, _ := strconv.ParseFloat(strings.Split(filename, ".")[1], 64)
ch <- prometheus.MustNewConstMetric(
binlogFileNumberDesc, prometheus.GaugeValue, value,
)
return nil
}
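The diff above converts `ScrapeBinlogSize` from a plain function into a type with `Name`, `Help`, and `Scrape` methods. The common `Scraper` interface those methods satisfy is not shown in this hunk; reconstructed from the method signatures used throughout the diff, it presumably looks like this:

```go
package collector

import (
	"database/sql"

	"github.com/prometheus/client_golang/prometheus"
)

// Scraper is the interface each collector implements (a reconstruction
// from the signatures in this diff, not a copy of the repository file).
type Scraper interface {
	// Name of the Scraper. Should be unique.
	Name() string
	// Help describes the role of the Scraper.
	Help() string
	// Scrape collects data from the database connection and sends it
	// over the channel as Prometheus metrics.
	Scrape(db *sql.DB, ch chan<- prometheus.Metric) error
}
```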
package collector
import (
"testing"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/smartystreets/goconvey/convey"
"gopkg.in/DATA-DOG/go-sqlmock.v1"
)
func TestScrapeBinlogSize(t *testing.T) {
db, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("error opening a stub database connection: %s", err)
}
defer db.Close()
mock.ExpectQuery(logbinQuery).WillReturnRows(sqlmock.NewRows([]string{""}).AddRow(1))
columns := []string{"Log_name", "File_size"}
rows := sqlmock.NewRows(columns).
AddRow("centos6-bin.000001", "1813").
AddRow("centos6-bin.000002", "120").
AddRow("centos6-bin.000444", "573009")
mock.ExpectQuery(sanitizeQuery(binlogQuery)).WillReturnRows(rows)
ch := make(chan prometheus.Metric)
go func() {
if err = (ScrapeBinlogSize{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
}()
counterExpected := []MetricResult{
{labels: labelMap{}, value: 574942, metricType: dto.MetricType_GAUGE},
{labels: labelMap{}, value: 3, metricType: dto.MetricType_GAUGE},
{labels: labelMap{}, value: 444, metricType: dto.MetricType_GAUGE},
}
convey.Convey("Metrics comparison", t, func() {
for _, expect := range counterExpected {
got := readMetric(<-ch)
convey.So(got, convey.ShouldResemble, expect)
}
})
// Ensure all SQL queries were executed
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unfulfilled exceptions: %s", err)
}
}
@@ -14,6 +14,9 @@ const (
namespace = "mysql"
// Math constant for picoseconds to seconds.
picoSeconds = 1e12
// Query to check whether user/table/client stats are enabled.
userstatCheckQuery = `SHOW VARIABLES WHERE Variable_Name='userstat'
OR Variable_Name='userstat_running'`
)
var logRE = regexp.MustCompile(`.+\.(\d+)$`)
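The `userstatCheckQuery` above is what a collector can run to find out whether the `userstat`/`userstat_running` server variables are switched on before scraping the statistics tables. A hedged sketch of such a check; the helper name is ours for illustration, not the exporter's:

```go
package collector

import "database/sql"

// userstatEnabled reports whether user/client/table statistics are
// enabled, treating either variable returned by userstatCheckQuery
// with value "ON" as enabled. (Illustrative helper only.)
func userstatEnabled(db *sql.DB) (bool, error) {
	rows, err := db.Query(userstatCheckQuery)
	if err != nil {
		return false, err
	}
	defer rows.Close()

	var name, value string
	for rows.Next() {
		if err := rows.Scan(&name, &value); err != nil {
			return false, err
		}
		if value == "ON" {
			return true, nil
		}
	}
	return false, rows.Err()
}
```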
@@ -36,6 +39,13 @@ func parseStatus(data sql.RawBytes) (float64, bool) {
if bytes.Compare(data, []byte("Connecting")) == 0 {
return 0, true
}
// SHOW GLOBAL STATUS like 'wsrep_cluster_status' can return "Primary" or "Non-Primary"/"Disconnected"
if bytes.Compare(data, []byte("Primary")) == 0 {
return 1, true
}
if bytes.Compare(data, []byte("Non-Primary")) == 0 || bytes.Compare(data, []byte("Disconnected")) == 0 {
return 0, true
}
if logNum := logRE.Find(data); logNum != nil {
value, err := strconv.ParseFloat(string(logNum), 64)
return value, err == nil
@@ -38,5 +38,6 @@ func sanitizeQuery(q string) string {
q = strings.Join(strings.Fields(q), " ")
q = strings.Replace(q, "(", "\\(", -1)
q = strings.Replace(q, ")", "\\)", -1)
q = strings.Replace(q, "*", "\\*", -1)
return q
}
// Scrape `SHOW ENGINE INNODB STATUS`.
package collector
import (
"database/sql"
"regexp"
"strconv"
"strings"
"github.com/prometheus/client_golang/prometheus"
)
const (
// Subsystem.
innodb = "engine_innodb"
// Query.
engineInnodbStatusQuery = `SHOW ENGINE INNODB STATUS`
)
// ScrapeEngineInnodbStatus scrapes from `SHOW ENGINE INNODB STATUS`.
type ScrapeEngineInnodbStatus struct{}
// Name of the Scraper. Should be unique.
func (ScrapeEngineInnodbStatus) Name() string {
return "engine_innodb_status"
}
// Help describes the role of the Scraper.
func (ScrapeEngineInnodbStatus) Help() string {
return "Collect from SHOW ENGINE INNODB STATUS"
}
// Scrape collects data from the database connection and sends it over the channel as Prometheus metrics.
func (ScrapeEngineInnodbStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
rows, err := db.Query(engineInnodbStatusQuery)
if err != nil {
return err
}
defer rows.Close()
var typeCol, nameCol, statusCol string
// The first row should contain the necessary info. If more than one row is returned, the case is unexpected.
if rows.Next() {
if err := rows.Scan(&typeCol, &nameCol, &statusCol); err != nil {
return err
}
}
// 0 queries inside InnoDB, 0 queries in queue
// 0 read views open inside InnoDB
rQueries, _ := regexp.Compile(`(\d+) queries inside InnoDB, (\d+) queries in queue`)
rViews, _ := regexp.Compile(`(\d+) read views open inside InnoDB`)
for _, line := range strings.Split(statusCol, "\n") {
if data := rQueries.FindStringSubmatch(line); data != nil {
value, _ := strconv.ParseFloat(data[1], 64)
ch <- prometheus.MustNewConstMetric(
newDesc(innodb, "queries_inside_innodb", "Queries inside InnoDB."),
prometheus.GaugeValue,
value,
)
value, _ = strconv.ParseFloat(data[2], 64)
ch <- prometheus.MustNewConstMetric(
newDesc(innodb, "queries_in_queue", "Queries in queue."),
prometheus.GaugeValue,
value,
)
} else if data := rViews.FindStringSubmatch(line); data != nil {
value, _ := strconv.ParseFloat(data[1], 64)
ch <- prometheus.MustNewConstMetric(
newDesc(innodb, "read_views_open_inside_innodb", "Read views open inside InnoDB."),
prometheus.GaugeValue,
value,
)
}
}
return nil
}
package collector
import (
"testing"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/smartystreets/goconvey/convey"
"gopkg.in/DATA-DOG/go-sqlmock.v1"
)
func TestScrapeEngineInnodbStatus(t *testing.T) {
db, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("error opening a stub database connection: %s", err)
}
defer db.Close()
sample := `
=====================================
2016-09-14 19:04:38 0x7fed21462700 INNODB MONITOR OUTPUT
=====================================
Per second averages calculated from the last 30 seconds
-----------------
BACKGROUND THREAD
-----------------
srv_master_thread loops: 1 srv_active, 0 srv_shutdown, 49166 srv_idle
srv_master_thread log flush and writes: 49165
----------
SEMAPHORES
----------
OS WAIT ARRAY INFO: reservation count 15
OS WAIT ARRAY INFO: signal count 12
RW-shared spins 0, rounds 4, OS waits 2
RW-excl spins 0, rounds 0, OS waits 0
RW-sx spins 0, rounds 0, OS waits 0
Spin rounds per wait: 4.00 RW-shared, 0.00 RW-excl, 0.00 RW-sx
------------
TRANSACTIONS
------------
Trx id counter 67843
Purge done for trx's n:o < 55764 undo n:o < 0 state: running but idle
History list length 779
LIST OF TRANSACTIONS FOR EACH SESSION:
---TRANSACTION 422131596298608, not started
0 lock struct(s), heap size 1136, 0 row lock(s)
--------
FILE I/O
--------
I/O thread 0 state: waiting for completed aio requests (insert buffer thread)
I/O thread 1 state: waiting for completed aio requests (log thread)
I/O thread 2 state: waiting for completed aio requests (read thread)
I/O thread 3 state: waiting for completed aio requests (read thread)
I/O thread 4 state: waiting for completed aio requests (read thread)
I/O thread 5 state: waiting for completed aio requests (read thread)
I/O thread 6 state: waiting for completed aio requests (write thread)
I/O thread 7 state: waiting for completed aio requests (write thread)
I/O thread 8 state: waiting for completed aio requests (write thread)
I/O thread 9 state: waiting for completed aio requests (write thread)
Pending normal aio reads: [0, 0, 0, 0] , aio writes: [0, 0, 0, 0] ,
ibuf aio reads:, log i/o's:, sync i/o's:
Pending flushes (fsync) log: 0; buffer pool: 0
512 OS file reads, 57 OS file writes, 8 OS fsyncs
0.00 reads/s, 0 avg bytes/read, 0.00 writes/s, 0.00 fsyncs/s
-------------------------------------
INSERT BUFFER AND ADAPTIVE HASH INDEX
-------------------------------------
Ibuf: size 1, free list len 0, seg size 2, 0 merges
merged operations:
insert 0, delete mark 0, delete 0
discarded operations:
insert 0, delete mark 0, delete 0
Hash table size 34673, node heap has 0 buffer(s)
Hash table size 34673, node heap has 0 buffer(s)
Hash table size 34673, node heap has 0 buffer(s)
Hash table size 34673, node heap has 0 buffer(s)
Hash table size 34673, node heap has 0 buffer(s)
Hash table size 34673, node heap has 0 buffer(s)
Hash table size 34673, node heap has 0 buffer(s)
Hash table size 34673, node heap has 0 buffer(s)
0.00 hash searches/s, 0.00 non-hash searches/s
---
LOG
---
Log sequence number 37771171
Log flushed up to 37771171
Pages flushed up to 37771171
Last checkpoint at 37771162
Max checkpoint age 80826164
Checkpoint age target 78300347
Modified age 0
Checkpoint age 9
0 pending log flushes, 0 pending chkp writes
10 log i/o's done, 0.00 log i/o's/second
----------------------
BUFFER POOL AND MEMORY
----------------------
Total large memory allocated 139722752
Dictionary memory allocated 367821
Internal hash tables (constant factor + variable factor)
Adaptive hash index 2252736 (2219072 + 33664)
Page hash 139112 (buffer pool 0 only)
Dictionary cache 922589 (554768 + 367821)
File system 839328 (812272 + 27056)
Lock system 334008 (332872 + 1136)
Recovery system 0 (0 + 0)
Buffer pool size 8191
Buffer pool size, bytes 0
Free buffers 7684
Database pages 507
Old database pages 0
Modified db pages 0
Pending reads 0
Pending writes: LRU 0, flush list 0, single page 0
Pages made young 0, not young 0
0.00 youngs/s, 0.00 non-youngs/s
Pages read 473, created 34, written 36
0.00 reads/s, 0.00 creates/s, 0.00 writes/s
No buffer pool page gets since the last printout
Pages read ahead 0.00/s, evicted without access 0.00/s, Random read ahead 0.00/s
LRU len: 507, unzip_LRU len: 0
I/O sum[0]:cur[0], unzip sum[0]:cur[0]
--------------
ROW OPERATIONS
--------------
661 queries inside InnoDB, 10 queries in queue
15 read views open inside InnoDB
0 RW transactions active inside InnoDB
Process ID=1, Main thread ID=140656308950784, state: sleeping
Number of rows inserted 0, updated 0, deleted 0, read 12
0.00 inserts/s, 0.00 updates/s, 0.00 deletes/s, 0.00 reads/s
----------------------------
END OF INNODB MONITOR OUTPUT
============================
`
columns := []string{"Type", "Name", "Status"}
rows := sqlmock.NewRows(columns).AddRow("InnoDB", "", sample)
mock.ExpectQuery(sanitizeQuery(engineInnodbStatusQuery)).WillReturnRows(rows)
ch := make(chan prometheus.Metric)
go func() {
if err = (ScrapeEngineInnodbStatus{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
}()
metricsExpected := []MetricResult{
{labels: labelMap{}, value: 661, metricType: dto.MetricType_GAUGE},
{labels: labelMap{}, value: 10, metricType: dto.MetricType_GAUGE},
{labels: labelMap{}, value: 15, metricType: dto.MetricType_GAUGE},
}
convey.Convey("Metrics comparison", t, func() {
for _, expect := range metricsExpected {
got := readMetric(<-ch)
convey.So(got, convey.ShouldResemble, expect)
}
})
// Ensure all SQL queries were executed
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unfulfilled exceptions: %s", err)
}
}
@@ -16,26 +16,21 @@ const (
engineTokudbStatusQuery = `SHOW ENGINE TOKUDB STATUS`
)
func sanitizeTokudbMetric(metricName string) string {
replacements := map[string]string{
">": "",
",": "",
":": "",
"(": "",
")": "",
" ": "_",
"-": "_",
"+": "and",
"/": "and",
}
for r := range replacements {
metricName = strings.Replace(metricName, r, replacements[r], -1)
}
return metricName
// ScrapeEngineTokudbStatus scrapes from `SHOW ENGINE TOKUDB STATUS`.
type ScrapeEngineTokudbStatus struct{}
// Name of the Scraper. Should be unique.
func (ScrapeEngineTokudbStatus) Name() string {
return "engine_tokudb_status"
}
// ScrapeEngineTokudbStatus scrapes from `SHOW ENGINE TOKUDB STATUS`.
func ScrapeEngineTokudbStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
// Help describes the role of the Scraper.
func (ScrapeEngineTokudbStatus) Help() string {
return "Collect from SHOW ENGINE TOKUDB STATUS"
}
// Scrape collects data from the database connection and sends it over the channel as Prometheus metrics.
func (ScrapeEngineTokudbStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
tokudbRows, err := db.Query(engineTokudbStatusQuery)
if err != nil {
return err
@@ -60,3 +55,21 @@ func ScrapeEngineTokudbStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
}
return nil
}
func sanitizeTokudbMetric(metricName string) string {
replacements := map[string]string{
">": "",
",": "",
":": "",
"(": "",
")": "",
" ": "_",
"-": "_",
"+": "and",
"/": "and",
}
for r := range replacements {
metricName = strings.Replace(metricName, r, replacements[r], -1)
}
return metricName
}
@@ -44,7 +44,7 @@ func TestScrapeEngineTokudbStatus(t *testing.T) {
ch := make(chan prometheus.Metric)
go func() {
if err = ScrapeEngineTokudbStatus(db, ch); err != nil {
if err = (ScrapeEngineTokudbStatus{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
@@ -64,6 +64,6 @@ func TestScrapeEngineTokudbStatus(t *testing.T) {
// Ensure all SQL queries were executed
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unfulfilled expections: %s", err)
t.Errorf("there were unfulfilled exceptions: %s", err)
}
}
package collector
import (
"database/sql"
"fmt"
"strings"
"sync"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"gopkg.in/alecthomas/kingpin.v2"
)
// Metric name parts.
const (
// Subsystem(s).
exporter = "exporter"
)
// SQL Queries.
const (
// System variable params formatting.
// See: https://github.com/go-sql-driver/mysql#system-variables
sessionSettingsParam = `log_slow_filter=%27tmp_table_on_disk,filesort_on_disk%27`
timeoutParam = `lock_wait_timeout=%d`
)
// Tunable flags.
var (
exporterLockTimeout = kingpin.Flag(
"exporter.lock_wait_timeout",
"Set a lock_wait_timeout on the connection to avoid long metadata locking.",
).Default("2").Int()
slowLogFilter = kingpin.Flag(
"exporter.log_slow_filter",
"Add a log_slow_filter to avoid slow query logging of scrapes. NOTE: Not supported by Oracle MySQL.",
).Default("false").Bool()
)
// Metric descriptors.
var (
scrapeDurationDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, exporter, "collector_duration_seconds"),
"Collector time duration.",
[]string{"collector"}, nil,
)
)
// Exporter collects MySQL metrics. It implements prometheus.Collector.
type Exporter struct {
dsn string
scrapers []Scraper
metrics Metrics
}
// New returns a new MySQL exporter for the provided DSN.
func New(dsn string, metrics Metrics, scrapers []Scraper) *Exporter {
// Setup extra params for the DSN, default to having a lock timeout.
dsnParams := []string{fmt.Sprintf(timeoutParam, *exporterLockTimeout)}
if *slowLogFilter {
dsnParams = append(dsnParams, sessionSettingsParam)
}
if strings.Contains(dsn, "?") {
dsn = dsn + "&"
} else {
dsn = dsn + "?"
}
dsn += strings.Join(dsnParams, "&")
return &Exporter{
dsn: dsn,
scrapers: scrapers,
metrics: metrics,
}
}
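With the default flag values, for instance, `New` turns a DSN like `user:pass@tcp(localhost:3306)/` into `user:pass@tcp(localhost:3306)/?lock_wait_timeout=2` (the `--exporter.lock_wait_timeout` default is 2), and with `--exporter.log_slow_filter` it additionally appends the URL-encoded `log_slow_filter` session parameter defined above.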
// Describe implements prometheus.Collector.
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
ch <- e.metrics.TotalScrapes.Desc()
ch <- e.metrics.Error.Desc()
e.metrics.ScrapeErrors.Describe(ch)
ch <- e.metrics.MySQLUp.Desc()
}
// Collect implements prometheus.Collector.
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
e.scrape(ch)
ch <- e.metrics.TotalScrapes
ch <- e.metrics.Error
e.metrics.ScrapeErrors.Collect(ch)
ch <- e.metrics.MySQLUp
}
func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
e.metrics.TotalScrapes.Inc()
var err error
scrapeTime := time.Now()
db, err := sql.Open("mysql", e.dsn)
if err != nil {
log.Errorln("Error opening connection to database:", err)
e.metrics.Error.Set(1)
return
}
defer db.Close()
// By design, the exporter should use at most one connection per request.
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(1)
// Set max lifetime for a connection.
db.SetConnMaxLifetime(1 * time.Minute)
if err := db.Ping(); err != nil {
log.Errorln("Error pinging mysqld:", err)
e.metrics.MySQLUp.Set(0)
e.metrics.Error.Set(1)
return
}
e.metrics.MySQLUp.Set(1)
ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "connection")
wg := &sync.WaitGroup{}
defer wg.Wait()
for _, scraper := range e.scrapers {
wg.Add(1)
go func(scraper Scraper) {
defer wg.Done()
label := "collect." + scraper.Name()
scrapeTime := time.Now()
if err := scraper.Scrape(db, ch); err != nil {
log.Errorln("Error scraping for "+label+":", err)
e.metrics.ScrapeErrors.WithLabelValues(label).Inc()
e.metrics.Error.Set(1)
}
ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), label)
}(scraper)
}
}
// Metrics represents exporter metrics whose values can be carried between HTTP requests.
type Metrics struct {
TotalScrapes prometheus.Counter
ScrapeErrors *prometheus.CounterVec
Error prometheus.Gauge
MySQLUp prometheus.Gauge
}
// NewMetrics creates new Metrics instance.
func NewMetrics() Metrics {
subsystem := exporter
return Metrics{
TotalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "scrapes_total",
Help: "Total number of times MySQL was scraped for metrics.",
}),
ScrapeErrors: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "scrape_errors_total",
Help: "Total number of times an error occurred scraping a MySQL.",
}, []string{"collector"}),
Error: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "last_scrape_error",
Help: "Whether the last scrape of metrics from MySQL resulted in an error (1 for error, 0 for success).",
}),
MySQLUp: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "up",
Help: "Whether the MySQL server is up.",
}),
}
}
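Putting `New`, `NewMetrics`, and the `Scraper` implementations together, a minimal hypothetical main program could look like the following. The exporter's real main package adds flag parsing and many more scrapers; only the names shown in this diff are used here, while the HTTP handler and port 9104 are the conventional choices rather than something this diff establishes:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/prometheus/mysqld_exporter/collector"
)

func main() {
	// Wire a couple of the scrapers from this diff into the exporter.
	exporter := collector.New("root@/mysql", collector.NewMetrics(), []collector.Scraper{
		collector.ScrapeGlobalStatus{},
		collector.ScrapeBinlogSize{},
	})
	prometheus.MustRegister(exporter)

	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9104", nil))
}
```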
package collector
import (
"testing"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/smartystreets/goconvey/convey"
)
const dsn = "root@/mysql"
func TestExporter(t *testing.T) {
if testing.Short() {
t.Skip("-short is passed, skipping test")
}
exporter := New(
dsn,
NewMetrics(),
[]Scraper{
ScrapeGlobalStatus{},
})
convey.Convey("Metrics describing", t, func() {
ch := make(chan *prometheus.Desc)
go func() {
exporter.Describe(ch)
close(ch)
}()
for range ch {
}
})
convey.Convey("Metrics collection", t, func() {
ch := make(chan prometheus.Metric)
go func() {
exporter.Collect(ch)
close(ch)
}()
for m := range ch {
got := readMetric(m)
if got.labels[model.MetricNameLabel] == "mysql_up" {
convey.So(got.value, convey.ShouldEqual, 1)
}
}
})
}
@@ -11,15 +11,16 @@ import (
)
const (
// Scrape query
// Scrape query.
globalStatusQuery = `SHOW GLOBAL STATUS`
// Subsytem.
// Subsystem.
globalStatus = "global_status"
)
// Regexp to match various groups of status vars.
var globalStatusRE = regexp.MustCompile(`^(com|handler|connection_errors|innodb_buffer_pool_pages|innodb_rows|performance_schema)_(.*)$`)
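For example, after a status variable name is lower-cased (as the all-lowercase pattern implies), `com_select` matches with groups `com` and `select` and is emitted as `mysql_global_status_commands_total{command="select"}`, collapsing the many `Com_*` counters into a single labeled metric family.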
// Metric descriptors.
var (
globalCommandsDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, globalStatus, "commands_total"),
@@ -59,7 +60,20 @@
)
// ScrapeGlobalStatus collects from `SHOW GLOBAL STATUS`.
func ScrapeGlobalStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeGlobalStatus struct{}
// Name of the Scraper. Should be unique.
func (ScrapeGlobalStatus) Name() string {
return globalStatus
}
// Help describes the role of the Scraper.
func (ScrapeGlobalStatus) Help() string {
return "Collect from SHOW GLOBAL STATUS"
}
// Scrape collects data from the database connection and sends it over the channel as Prometheus metrics.
func (ScrapeGlobalStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
globalStatusRows, err := db.Query(globalStatusQuery)
if err != nil {
return err
@@ -68,6 +82,11 @@ func ScrapeGlobalStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
var key string
var val sql.RawBytes
var textItems = map[string]string{
"wsrep_local_state_uuid": "",
"wsrep_cluster_state_uuid": "",
"wsrep_provider_version": "",
}
for globalStatusRows.Next() {
if err := globalStatusRows.Scan(&key, &val); err != nil {
@@ -117,7 +136,19 @@ func ScrapeGlobalStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
globalPerformanceSchemaLostDesc, prometheus.CounterValue, floatVal, match[2],
)
}
} else if _, ok := textItems[key]; ok {
textItems[key] = string(val)
}
}
// mysql_galera_status_info metric.
if textItems["wsrep_local_state_uuid"] != "" {
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(prometheus.BuildFQName(namespace, "galera", "status_info"), "PXC/Galera status information.",
[]string{"wsrep_local_state_uuid", "wsrep_cluster_state_uuid", "wsrep_provider_version"}, nil),
prometheus.GaugeValue, 1, textItems["wsrep_local_state_uuid"], textItems["wsrep_cluster_state_uuid"], textItems["wsrep_provider_version"],
)
}
return nil
}
@@ -29,12 +29,16 @@ func TestScrapeGlobalStatus(t *testing.T) {
AddRow("Performance_schema_users_lost", "9").
AddRow("Slave_running", "OFF").
AddRow("Ssl_version", "").
AddRow("Uptime", "10")
AddRow("Uptime", "10").
AddRow("wsrep_cluster_status", "Primary").
AddRow("wsrep_local_state_uuid", "6c06e583-686f-11e6-b9e3-8336ad58138c").
AddRow("wsrep_cluster_state_uuid", "6c06e583-686f-11e6-b9e3-8336ad58138c").
AddRow("wsrep_provider_version", "3.16(r5c765eb)")
mock.ExpectQuery(sanitizeQuery(globalStatusQuery)).WillReturnRows(rows)
ch := make(chan prometheus.Metric)
go func() {
if err = ScrapeGlobalStatus(db, ch); err != nil {
if err = (ScrapeGlobalStatus{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
@@ -52,6 +56,8 @@ func TestScrapeGlobalStatus(t *testing.T) {
{labels: labelMap{"instrumentation": "users_lost"}, value: 9, metricType: dto.MetricType_COUNTER},
{labels: labelMap{}, value: 0, metricType: dto.MetricType_UNTYPED},
{labels: labelMap{}, value: 10, metricType: dto.MetricType_UNTYPED},
{labels: labelMap{}, value: 1, metricType: dto.MetricType_UNTYPED},
{labels: labelMap{"wsrep_local_state_uuid": "6c06e583-686f-11e6-b9e3-8336ad58138c", "wsrep_cluster_state_uuid": "6c06e583-686f-11e6-b9e3-8336ad58138c", "wsrep_provider_version": "3.16(r5c765eb)"}, value: 1, metricType: dto.MetricType_GAUGE},
}
convey.Convey("Metrics comparison", t, func() {
for _, expect := range counterExpected {
@@ -62,6 +68,6 @@ func TestScrapeGlobalStatus(t *testing.T) {
// Ensure all SQL queries were executed
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unfulfilled expections: %s", err)
t.Errorf("there were unfulfilled exceptions: %s", err)
}
}
@@ -4,6 +4,8 @@ package collector
import (
"database/sql"
"regexp"
"strconv"
"strings"
"github.com/prometheus/client_golang/prometheus"
@@ -17,7 +19,20 @@ const (
)
// ScrapeGlobalVariables collects from `SHOW GLOBAL VARIABLES`.
func ScrapeGlobalVariables(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeGlobalVariables struct{}
// Name of the Scraper. Should be unique.
func (ScrapeGlobalVariables) Name() string {
return globalVariables
}
// Help describes the role of the Scraper.
func (ScrapeGlobalVariables) Help() string {
return "Collect from SHOW GLOBAL VARIABLES"
}
// Scrape collects data from the database connection and sends it over the channel as Prometheus metrics.
func (ScrapeGlobalVariables) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
globalVariablesRows, err := db.Query(globalVariablesQuery)
if err != nil {
return err
@@ -26,10 +41,12 @@ func ScrapeGlobalVariables(db *sql.DB, ch chan<- prometheus.Metric) error {
var key string
var val sql.RawBytes
var mysqlVersion = map[string]string{
"innodb_version": "",
"version": "",
"version_comment": "",
var textItems = map[string]string{
"innodb_version": "",
"version": "",
"version_comment": "",
"wsrep_cluster_name": "",
"wsrep_provider_options": "",
}