Introduce Scraper interface

Signed-off-by: Kamil Dziedzic <arvenil@klecza.pl>
parent f76ef420
......@@ -38,7 +38,20 @@ var (
)
// ScrapeBinlogSize collects from `SHOW BINARY LOGS`.
func ScrapeBinlogSize(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeBinlogSize struct{}
// Name of the Scraper. Should be unique.
func (ScrapeBinlogSize) Name() string {
return "binlog_size"
}
// Help describes the role of the Scraper.
func (ScrapeBinlogSize) Help() string {
return "Collect the current size of all registered binlog files"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeBinlogSize) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
var logBin uint8
err := db.QueryRow(logbinQuery).Scan(&logBin)
if err != nil {
......
......@@ -27,7 +27,7 @@ func TestScrapeBinlogSize(t *testing.T) {
ch := make(chan prometheus.Metric)
go func() {
if err = ScrapeBinlogSize(db, ch); err != nil {
if err = (ScrapeBinlogSize{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
......
......@@ -19,7 +19,20 @@ const (
)
// ScrapeEngineInnodbStatus scrapes from `SHOW ENGINE INNODB STATUS`.
func ScrapeEngineInnodbStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeEngineInnodbStatus struct{}
// Name of the Scraper. Should be unique.
func (ScrapeEngineInnodbStatus) Name() string {
return "engine_innodb_status"
}
// Help describes the role of the Scraper.
func (ScrapeEngineInnodbStatus) Help() string {
return "Collect from SHOW ENGINE INNODB STATUS"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeEngineInnodbStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
rows, err := db.Query(engineInnodbStatusQuery)
if err != nil {
return err
......
......@@ -140,7 +140,7 @@ END OF INNODB MONITOR OUTPUT
ch := make(chan prometheus.Metric)
go func() {
if err = ScrapeEngineInnodbStatus(db, ch); err != nil {
if err = (ScrapeEngineInnodbStatus{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
......
......@@ -16,26 +16,21 @@ const (
engineTokudbStatusQuery = `SHOW ENGINE TOKUDB STATUS`
)
func sanitizeTokudbMetric(metricName string) string {
replacements := map[string]string{
">": "",
",": "",
":": "",
"(": "",
")": "",
" ": "_",
"-": "_",
"+": "and",
"/": "and",
}
for r := range replacements {
metricName = strings.Replace(metricName, r, replacements[r], -1)
}
return metricName
// ScrapeEngineTokudbStatus scrapes from `SHOW ENGINE TOKUDB STATUS`.
type ScrapeEngineTokudbStatus struct{}
// Name of the Scraper. Should be unique.
func (ScrapeEngineTokudbStatus) Name() string {
return "engine_tokudb_status"
}
// ScrapeEngineTokudbStatus scrapes from `SHOW ENGINE TOKUDB STATUS`.
func ScrapeEngineTokudbStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
// Help describes the role of the Scraper.
func (ScrapeEngineTokudbStatus) Help() string {
return "Collect from SHOW ENGINE TOKUDB STATUS"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeEngineTokudbStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
tokudbRows, err := db.Query(engineTokudbStatusQuery)
if err != nil {
return err
......@@ -60,3 +55,21 @@ func ScrapeEngineTokudbStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
}
return nil
}
func sanitizeTokudbMetric(metricName string) string {
replacements := map[string]string{
">": "",
",": "",
":": "",
"(": "",
")": "",
" ": "_",
"-": "_",
"+": "and",
"/": "and",
}
for r := range replacements {
metricName = strings.Replace(metricName, r, replacements[r], -1)
}
return metricName
}
......@@ -44,7 +44,7 @@ func TestScrapeEngineTokudbStatus(t *testing.T) {
ch := make(chan prometheus.Metric)
go func() {
if err = ScrapeEngineTokudbStatus(db, ch); err != nil {
if err = (ScrapeEngineTokudbStatus{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
......
This diff is collapsed.
......@@ -15,8 +15,8 @@ func TestExporter(t *testing.T) {
t.Skip("-short is passed, skipping test")
}
exporter := New(dsn, Collect{
GlobalStatus: true,
exporter := New(dsn, []Scraper{
ScrapeGlobalStatus{},
})
convey.Convey("Metrics describing", t, func() {
......
......@@ -11,7 +11,7 @@ import (
)
const (
// Scrape query
// Scrape query.
globalStatusQuery = `SHOW GLOBAL STATUS`
// Subsystem.
globalStatus = "global_status"
......@@ -20,6 +20,7 @@ const (
// Regexp to match various groups of status vars.
var globalStatusRE = regexp.MustCompile(`^(com|handler|connection_errors|innodb_buffer_pool_pages|innodb_rows|performance_schema)_(.*)$`)
// Metric descriptors.
var (
globalCommandsDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, globalStatus, "commands_total"),
......@@ -59,7 +60,20 @@ var (
)
// ScrapeGlobalStatus collects from `SHOW GLOBAL STATUS`.
func ScrapeGlobalStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeGlobalStatus struct{}
// Name of the Scraper. Should be unique.
func (ScrapeGlobalStatus) Name() string {
return globalStatus
}
// Help describes the role of the Scraper.
func (ScrapeGlobalStatus) Help() string {
return "Collect from SHOW GLOBAL STATUS"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeGlobalStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
globalStatusRows, err := db.Query(globalStatusQuery)
if err != nil {
return err
......
......@@ -38,7 +38,7 @@ func TestScrapeGlobalStatus(t *testing.T) {
ch := make(chan prometheus.Metric)
go func() {
if err = ScrapeGlobalStatus(db, ch); err != nil {
if err = (ScrapeGlobalStatus{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
......
......@@ -19,7 +19,20 @@ const (
)
// ScrapeGlobalVariables collects from `SHOW GLOBAL VARIABLES`.
func ScrapeGlobalVariables(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeGlobalVariables struct{}
// Name of the Scraper. Should be unique.
func (ScrapeGlobalVariables) Name() string {
return globalVariables
}
// Help describes the role of the Scraper.
func (ScrapeGlobalVariables) Help() string {
return "Collect from SHOW GLOBAL VARIABLES"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeGlobalVariables) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
globalVariablesRows, err := db.Query(globalVariablesQuery)
if err != nil {
return err
......
......@@ -37,7 +37,7 @@ func TestScrapeGlobalVariables(t *testing.T) {
ch := make(chan prometheus.Metric)
go func() {
if err = ScrapeGlobalVariables(db, ch); err != nil {
if err = (ScrapeGlobalVariables{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
......
......@@ -8,6 +8,7 @@ import (
"strconv"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/alecthomas/kingpin.v2"
)
const (
......@@ -20,6 +21,17 @@ const (
heartbeatQuery = "SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(NOW(6)), server_id from `%s`.`%s`"
)
var (
collectHeartbeatDatabase = kingpin.Flag(
"collect.heartbeat.database",
"Database from where to collect heartbeat data",
).Default("heartbeat").String()
collectHeartbeatTable = kingpin.Flag(
"collect.heartbeat.table",
"Table from where to collect heartbeat data",
).Default("heartbeat").String()
)
// Metric descriptors.
var (
HeartbeatStoredDesc = prometheus.NewDesc(
......@@ -41,8 +53,21 @@ var (
// ts varchar(26) NOT NULL,
// server_id int unsigned NOT NULL PRIMARY KEY,
// );
func ScrapeHeartbeat(db *sql.DB, ch chan<- prometheus.Metric, collectDatabase, collectTable string) error {
query := fmt.Sprintf(heartbeatQuery, collectDatabase, collectTable)
type ScrapeHeartbeat struct{}
// Name of the Scraper. Should be unique.
func (ScrapeHeartbeat) Name() string {
return "heartbeat"
}
// Help describes the role of the Scraper.
func (ScrapeHeartbeat) Help() string {
return "Collect from heartbeat"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeHeartbeat) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
query := fmt.Sprintf(heartbeatQuery, *collectHeartbeatDatabase, *collectHeartbeatTable)
heartbeatRows, err := db.Query(query)
if err != nil {
return err
......
......@@ -7,9 +7,18 @@ import (
dto "github.com/prometheus/client_model/go"
"github.com/smartystreets/goconvey/convey"
"gopkg.in/DATA-DOG/go-sqlmock.v1"
"gopkg.in/alecthomas/kingpin.v2"
)
func TestScrapeHeartbeat(t *testing.T) {
_, err := kingpin.CommandLine.Parse([]string{
"--collect.heartbeat.database", "heartbeat-test",
"--collect.heartbeat.table", "heartbeat-test",
})
if err != nil {
t.Fatal(err)
}
db, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("error opening a stub database connection: %s", err)
......@@ -19,13 +28,11 @@ func TestScrapeHeartbeat(t *testing.T) {
columns := []string{"UNIX_TIMESTAMP(ts)", "UNIX_TIMESTAMP(NOW(6))", "server_id"}
rows := sqlmock.NewRows(columns).
AddRow("1487597613.001320", "1487598113.448042", 1)
mock.ExpectQuery(sanitizeQuery("SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(NOW(6)), server_id from `heartbeat`.`heartbeat`")).WillReturnRows(rows)
mock.ExpectQuery(sanitizeQuery("SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(NOW(6)), server_id from `heartbeat-test`.`heartbeat-test`")).WillReturnRows(rows)
ch := make(chan prometheus.Metric)
go func() {
database := "heartbeat"
table := "heartbeat"
if err = ScrapeHeartbeat(db, ch, database, table); err != nil {
if err = (ScrapeHeartbeat{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
......
......@@ -22,6 +22,7 @@ const infoSchemaAutoIncrementQuery = `
WHERE c.extra = 'auto_increment' AND t.auto_increment IS NOT NULL
`
// Metric descriptors.
var (
globalInfoSchemaAutoIncrementDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, informationSchema, "auto_increment_column"),
......@@ -36,7 +37,20 @@ var (
)
// ScrapeAutoIncrementColumns collects auto_increment column information.
func ScrapeAutoIncrementColumns(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeAutoIncrementColumns struct{}
// Name of the Scraper. Should be unique.
func (ScrapeAutoIncrementColumns) Name() string {
return "auto_increment.columns"
}
// Help describes the role of the Scraper.
func (ScrapeAutoIncrementColumns) Help() string {
return "Collect auto_increment columns and max values from information_schema"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeAutoIncrementColumns) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
autoIncrementRows, err := db.Query(infoSchemaAutoIncrementQuery)
if err != nil {
return err
......
......@@ -128,7 +128,20 @@ var (
)
// ScrapeClientStat collects from `information_schema.client_statistics`.
func ScrapeClientStat(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeClientStat struct{}
// Name of the Scraper. Should be unique.
func (ScrapeClientStat) Name() string {
return "info_schema.clientstats"
}
// Help describes the role of the Scraper.
func (ScrapeClientStat) Help() string {
return "If running with userstat=1, set to true to collect client statistics"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeClientStat) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
var varName, varVal string
err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal)
if err != nil {
......
......@@ -26,7 +26,7 @@ func TestScrapeClientStat(t *testing.T) {
ch := make(chan prometheus.Metric)
go func() {
if err = ScrapeClientStat(db, ch); err != nil {
if err = (ScrapeClientStat{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
......
......@@ -14,6 +14,7 @@ const innodbCmpQuery = `
FROM information_schema.innodb_cmp
`
// Metric descriptors.
var (
infoSchemaInnodbCmpCompressOps = prometheus.NewDesc(
prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_ops_total"),
......@@ -43,7 +44,20 @@ var (
)
// ScrapeInnodbCmp collects from `information_schema.innodb_cmp`.
func ScrapeInnodbCmp(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeInnodbCmp struct{}
// Name of the Scraper. Should be unique.
func (ScrapeInnodbCmp) Name() string {
return informationSchema + ".innodb_cmp"
}
// Help describes the role of the Scraper.
func (ScrapeInnodbCmp) Help() string {
return "Collect metrics from information_schema.innodb_cmp"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeInnodbCmp) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
informationSchemaInnodbCmpRows, err := db.Query(innodbCmpQuery)
if err != nil {
......
......@@ -23,7 +23,7 @@ func TestScrapeInnodbCmp(t *testing.T) {
ch := make(chan prometheus.Metric)
go func() {
if err = ScrapeInnodbCmp(db, ch); err != nil {
if err = (ScrapeInnodbCmp{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
......
......@@ -14,6 +14,7 @@ const innodbCmpMemQuery = `
FROM information_schema.innodb_cmpmem
`
// Metric descriptors.
var (
infoSchemaInnodbCmpMemPagesRead = prometheus.NewDesc(
prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_pages_used_total"),
......@@ -38,7 +39,20 @@ var (
)
// ScrapeInnodbCmpMem collects from `information_schema.innodb_cmpmem`.
func ScrapeInnodbCmpMem(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeInnodbCmpMem struct{}
// Name of the Scraper. Should be unique.
func (ScrapeInnodbCmpMem) Name() string {
return informationSchema + ".innodb_cmpmem"
}
// Help describes the role of the Scraper.
func (ScrapeInnodbCmpMem) Help() string {
return "Collect metrics from information_schema.innodb_cmpmem"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeInnodbCmpMem) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
informationSchemaInnodbCmpMemRows, err := db.Query(innodbCmpMemQuery)
if err != nil {
......
......@@ -23,7 +23,7 @@ func TestScrapeInnodbCmpMem(t *testing.T) {
ch := make(chan prometheus.Metric)
go func() {
if err = ScrapeInnodbCmpMem(db, ch); err != nil {
if err = (ScrapeInnodbCmpMem{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
......
......@@ -49,7 +49,20 @@ var (
)
// ScrapeInnodbMetrics collects from `information_schema.innodb_metrics`.
func ScrapeInnodbMetrics(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeInnodbMetrics struct{}
// Name of the Scraper. Should be unique.
func (ScrapeInnodbMetrics) Name() string {
return informationSchema + ".innodb_metrics"
}
// Help describes the role of the Scraper.
func (ScrapeInnodbMetrics) Help() string {
return "Collect metrics from information_schema.innodb_metrics"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeInnodbMetrics) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
innodbMetricsRows, err := db.Query(infoSchemaInnodbMetricsQuery)
if err != nil {
return err
......
......@@ -40,7 +40,7 @@ func TestScrapeInnodbMetrics(t *testing.T) {
ch := make(chan prometheus.Metric)
go func() {
if err = ScrapeInnodbMetrics(db, ch); err != nil {
if err = (ScrapeInnodbMetrics{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
......
......@@ -20,6 +20,7 @@ const innodbTablespacesQuery = `
FROM information_schema.innodb_sys_tablespaces
`
// Metric descriptors.
var (
infoSchemaInnodbTablesspaceInfoDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, informationSchema, "innodb_tablespace_space_info"),
......@@ -39,7 +40,20 @@ var (
)
// ScrapeInfoSchemaInnodbTablespaces collects from `information_schema.innodb_sys_tablespaces`.
func ScrapeInfoSchemaInnodbTablespaces(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeInfoSchemaInnodbTablespaces struct{}
// Name of the Scraper. Should be unique.
func (ScrapeInfoSchemaInnodbTablespaces) Name() string {
return informationSchema + ".innodb_tablespaces"
}
// Help describes the role of the Scraper.
func (ScrapeInfoSchemaInnodbTablespaces) Help() string {
return "Collect metrics from information_schema.innodb_sys_tablespaces"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeInfoSchemaInnodbTablespaces) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
tablespacesRows, err := db.Query(innodbTablespacesQuery)
if err != nil {
return err
......
......@@ -24,7 +24,7 @@ func TestScrapeInfoSchemaInnodbTablespaces(t *testing.T) {
ch := make(chan prometheus.Metric)
go func() {
if err = ScrapeInfoSchemaInnodbTablespaces(db, ch); err != nil {
if err = (ScrapeInfoSchemaInnodbTablespaces{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
......
......@@ -20,13 +20,16 @@ const infoSchemaProcesslistQuery = `
ORDER BY null
`
// Tunable flags.
var (
// Tunable flags.
processlistMinTime = kingpin.Flag(
"collect.info_schema.processlist.min_time",
"Minimum time a thread must be in each state to be counted",
).Default("0").Int()
// Prometheus descriptors.
)
// Metric descriptors.
var (
processlistCountDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, informationSchema, "threads"),
"The number of threads (connections) split by current state.",
......@@ -118,37 +121,21 @@ var (
}
)
func deriveThreadState(command string, state string) string {
var normCmd = strings.Replace(strings.ToLower(command), "_", " ", -1)
var normState = strings.Replace(strings.ToLower(state), "_", " ", -1)
// check if it's already a valid state
_, knownState := threadStateCounterMap[normState]
if knownState {
return normState
}
// check if plain mapping applies
mappedState, canMap := threadStateMapping[normState]
if canMap {
return mappedState
}
// check special waiting for XYZ lock
if strings.Contains(normState, "waiting for") && strings.Contains(normState, "lock") {
return "waiting for lock"
}
if normCmd == "sleep" && normState == "" {
return "idle"
}
if normCmd == "query" {
return "executing"
}
if normCmd == "binlog dump" {
return "replication master"
}
return "other"
// ScrapeProcesslist collects from `information_schema.processlist`.
type ScrapeProcesslist struct{}
// Name of the Scraper. Should be unique.
func (ScrapeProcesslist) Name() string {
return informationSchema + ".processlist"
}
// ScrapeProcesslist collects from `information_schema.processlist`.
func ScrapeProcesslist(db *sql.DB, ch chan<- prometheus.Metric) error {
// Help describes the role of the Scraper.
func (ScrapeProcesslist) Help() string {
return "Collect current thread state counts from the information_schema.processlist"
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeProcesslist) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
processQuery := fmt.Sprintf(
infoSchemaProcesslistQuery,
*processlistMinTime,
......@@ -191,3 +178,32 @@ func ScrapeProcesslist(db *sql.DB, ch chan<- prometheus.Metric) error {
return nil
}
func deriveThreadState(command string, state string) string {
var normCmd = strings.Replace(strings.ToLower(command), "_", " ", -1)
var normState = strings.Replace(strings.ToLower(state), "_", " ", -1)
// check if it's already a valid state
_, knownState := threadStateCounterMap[normState]
if knownState {
return normState
}
// check if plain mapping applies
mappedState, canMap := threadStateMapping[normState]
if canMap {
return mappedState
}
// check special waiting for XYZ lock
if strings.Contains(normState, "waiting for") && strings.Contains(normState, "lock") {
return "waiting for lock"
}
if normCmd == "sleep" && normState == "" {
return "idle"
}
if normCmd == "query" {
return "executing"
}
if normCmd == "binlog dump" {
return "replication master"
}
return "other"
}
......@@ -86,7 +86,20 @@ func processQueryResponseTimeTable(db *sql.DB, ch chan<- prometheus.Metric, quer
}
// ScrapeQueryResponseTime collects from `information_schema.query_response_time`.
func ScrapeQueryResponseTime(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeQueryResponseTime struct{}
// Name of the Scraper. Should be unique.
func (ScrapeQueryResponseTime) Name() string {
return "info_schema.query_response_time"
}
// Help describes the role of the Scraper.
func (ScrapeQueryResponseTime) Help() string {
return "Collect query response time distribution if query_response_time_stats is ON."
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeQueryResponseTime) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
var queryStats uint8
err := db.QueryRow(queryResponseCheckQuery).Scan(&queryStats)
if err != nil {
......
......@@ -37,7 +37,7 @@ func TestScrapeQueryResponseTime(t *testing.T) {
ch := make(chan prometheus.Metric)
go func() {
if err = ScrapeQueryResponseTime(db, ch); err != nil {
if err = (ScrapeQueryResponseTime{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
......
......@@ -36,11 +36,16 @@ const (
`
)
// Tunable flags.
var (
tableSchemaDatabases = kingpin.Flag(
"collect.info_schema.tables.databases",
"The list of databases to collect table stats for, or '*' for all",
).Default("*").String()
)
// Metric descriptors.
var (
infoSchemaTablesVersionDesc = prometheus<