Renamed influxdb to influxdata in Godeps

This commit is contained in:
Piotr Szczesniak
2016-08-26 14:02:44 +02:00
parent 94ca825a19
commit 2fb43eb68c
39 changed files with 10 additions and 10 deletions

vendor/github.com/influxdata/influxdb/LICENSE

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2013-2015 Errplane Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/influxdata/influxdb/client/README.md

@@ -0,0 +1,206 @@
# InfluxDB Client
[![GoDoc](https://godoc.org/github.com/influxdb/influxdb?status.svg)](http://godoc.org/github.com/influxdb/influxdb/client)
## Description
A Go client library written and maintained by the **InfluxDB** team.
This package provides convenience functions to read and write time series data.
It uses the HTTP protocol to communicate with your **InfluxDB** cluster.
## Getting Started
### Connecting To Your Database
Connecting to an **InfluxDB** database is straightforward. You will need a host
name, a port and the cluster user credentials if applicable. The default port is 8086.
You can customize these settings to your specific installation via the
**InfluxDB** configuration file.
Though not necessary for experimentation, you may want to create a new user
and authenticate the connection to your database.
For more information please check out the
[Cluster Admin Docs](http://influxdb.com/docs/v0.9/query_language/database_administration.html).
For the impatient, you can create a new admin user _bubba_ by firing off the
[InfluxDB CLI](https://github.com/influxdb/influxdb/blob/master/cmd/influx/main.go).
```shell
influx
> create user bubba with password 'bumblebeetuna'
> grant all privileges to bubba
```
And now for good measure set the credentials in your shell environment.
In the example below we will use $INFLUX_USER and $INFLUX_PWD.
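For example (a sketch, assuming the _bubba_ user created above):
```shell
export INFLUX_USER=bubba
export INFLUX_PWD=bumblebeetuna
```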
Now with the administrivia out of the way, let's connect to our database.
NOTE: If you've opted out of creating a user, you can omit Username and Password in
the configuration below.
```go
package main
import "github.com/influxdb/influxdb/client"
const (
MyHost = "localhost"
MyPort = 8086
MyDB = "square_holes"
MyMeasurement = "shapes"
)
func main() {
u, err := url.Parse(fmt.Sprintf("http://%s:%d", MyHost, MyPort))
if err != nil {
log.Fatal(err)
}
conf := client.Config{
URL: *u,
Username: os.Getenv("INFLUX_USER"),
Password: os.Getenv("INFLUX_PWD"),
}
con, err := client.NewClient(conf)
if err != nil {
log.Fatal(err)
}
dur, ver, err := con.Ping()
if err != nil {
log.Fatal(err)
}
log.Printf("Happy as a Hippo! %v, %s", dur, ver)
}
```
### Inserting Data
Time series data aka *points* are written to the database using batch inserts.
The mechanism is to create one or more points and then create a batch aka *batch points*
and write these to a given database and series. A series is a combination of a
measurement (time/values) and a set of tags.
In this sample we will create a batch of 1,000 points. Each point has a time and
a single value as well as 2 tags indicating a shape and color. We write these points
to a database called _square_holes_ using a measurement named _shapes_.
NOTE: You can specify a RetentionPolicy as part of the batch points. If not
provided InfluxDB will use the database _default_ retention policy. By default, the _default_
retention policy never deletes any data it contains.
```go
func writePoints(con *client.Client) {
var (
shapes = []string{"circle", "rectangle", "square", "triangle"}
colors = []string{"red", "blue", "green"}
sampleSize = 1000
pts = make([]client.Point, sampleSize)
)
rand.Seed(42)
for i := 0; i < sampleSize; i++ {
pts[i] = client.Point{
Measurement: "shapes",
Tags: map[string]string{
"color": strconv.Itoa(rand.Intn(len(colors))),
"shape": strconv.Itoa(rand.Intn(len(shapes))),
},
Fields: map[string]interface{}{
"value": rand.Intn(sampleSize),
},
Time: time.Now(),
Precision: "s",
}
}
bps := client.BatchPoints{
Points: pts,
Database: MyDB,
RetentionPolicy: "default",
}
_, err := con.Write(bps)
if err != nil {
log.Fatal(err)
}
}
```
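A `Point` also has a `Raw` field; when it is set, `Write` sends that string verbatim instead of marshaling the other fields. A minimal sketch, assuming the `con` client and `MyDB` constant from the earlier examples and that the string is already in the server's expected wire format:
```go
func writeRawPoint(con *client.Client) {
	bps := client.BatchPoints{
		// Raw is written as-is, so it must already be marshaled correctly.
		Points:   []client.Point{{Raw: "shapes,color=red,shape=circle value=42"}},
		Database: MyDB,
	}
	if _, err := con.Write(bps); err != nil {
		log.Fatal(err)
	}
}
```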
### Querying Data
One nice advantage of using **InfluxDB** is the ability to query your data using familiar
SQL constructs. In this example we can create a convenience function to query the database
as follows:
```go
// queryDB convenience function to query the database
func queryDB(con *client.Client, cmd string) (res []client.Result, err error) {
q := client.Query{
Command: cmd,
Database: MyDB,
}
if response, err := con.Query(q); err == nil {
if response.Error() != nil {
return res, response.Error()
}
res = response.Results
}
return
}
```
#### Creating a Database
```go
_, err := queryDB(con, fmt.Sprintf("create database %s", MyDB))
if err != nil {
log.Fatal(err)
}
```
#### Count Records
```go
q := fmt.Sprintf("select count(%s) from %s", "value", MyMeasurement)
res, err := queryDB(con, q)
if err != nil {
log.Fatal(err)
}
count := res[0].Series[0].Values[0][1]
log.Printf("Found a total of `%v records", count)
```
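Note that because the client decodes responses with `UseNumber()`, numeric values come back as `json.Number` rather than `float64`. A short sketch of converting the count before doing arithmetic with it:
```go
n, err := count.(json.Number).Int64()
if err != nil {
	log.Fatal(err)
}
log.Printf("count as int64: %d", n)
```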
#### Find the last 10 _shapes_ records
```go
q := fmt.Sprintf("select * from %s limit %d", MyMeasurement, 20)
res, err = queryDB(con, q)
if err != nil {
log.Fatal(err)
}
for i, row := range res[0].Series[0].Values {
t, err := time.Parse(time.RFC3339, row[0].(string))
if err != nil {
log.Fatal(err)
}
val, err := row[1].(json.Number).Int64()
if err != nil {
log.Fatal(err)
}
log.Printf("[%2d] %s: %03d\n", i, t.Format(time.Stamp), val)
}
```
## Go Docs
Please refer to
[http://godoc.org/github.com/influxdb/influxdb/client](http://godoc.org/github.com/influxdb/influxdb/client)
for documentation.
## See Also
You can also examine how the client library is used by the
[InfluxDB CLI](https://github.com/influxdb/influxdb/blob/master/cmd/influx/main.go).


@@ -0,0 +1,583 @@
package client
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"time"
"github.com/influxdb/influxdb/influxql"
"github.com/influxdb/influxdb/tsdb"
)
// Query is used to send a command to the server. Both Command and Database are required.
type Query struct {
Command string
Database string
}
// Config is used to specify what server to connect to.
// URL: the URL of the server to connect to.
// Username/Password are optional. They will be passed via basic auth if provided.
// UserAgent: if not provided, defaults to "InfluxDBClient".
// Timeout: if not provided, defaults to 0 (no timeout).
type Config struct {
URL url.URL
Username string
Password string
UserAgent string
Timeout time.Duration
}
// Client is used to make calls to the server.
type Client struct {
url url.URL
username string
password string
httpClient *http.Client
userAgent string
}
const (
// ConsistencyOne requires a write to be acknowledged by at least one node.
ConsistencyOne = "one"
// ConsistencyAll requires a write to be acknowledged by all nodes.
ConsistencyAll = "all"
// ConsistencyQuorum requires a write to be acknowledged by a quorum of nodes.
ConsistencyQuorum = "quorum"
// ConsistencyAny allows a write to be acknowledged by any node.
ConsistencyAny = "any"
)
// NewClient will instantiate and return a connected client to issue commands to the server.
func NewClient(c Config) (*Client, error) {
client := Client{
url: c.URL,
username: c.Username,
password: c.Password,
httpClient: &http.Client{Timeout: c.Timeout},
userAgent: c.UserAgent,
}
if client.userAgent == "" {
client.userAgent = "InfluxDBClient"
}
return &client, nil
}
// SetAuth will update the username and password
func (c *Client) SetAuth(u, p string) {
c.username = u
c.password = p
}
// Query sends a command to the server and returns the Response
func (c *Client) Query(q Query) (*Response, error) {
u := c.url
u.Path = "query"
values := u.Query()
values.Set("q", q.Command)
values.Set("db", q.Database)
u.RawQuery = values.Encode()
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", c.userAgent)
if c.username != "" {
req.SetBasicAuth(c.username, c.password)
}
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var response Response
dec := json.NewDecoder(resp.Body)
dec.UseNumber()
decErr := dec.Decode(&response)
// ignore this error if we got an invalid status code
if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK {
decErr = nil
}
// If we got a valid decode error, send that back
if decErr != nil {
return nil, decErr
}
// If we don't have an error in our json response, and didn't get statusOK, then send back an error
if resp.StatusCode != http.StatusOK && response.Error() == nil {
return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
}
return &response, nil
}
// Write takes BatchPoints and allows for writing of multiple points with defaults
// If successful, error is nil and Response is nil
// If an error occurs, Response may contain additional information if populated.
func (c *Client) Write(bp BatchPoints) (*Response, error) {
u := c.url
u.Path = "write"
var b bytes.Buffer
for _, p := range bp.Points {
if p.Raw != "" {
if _, err := b.WriteString(p.Raw); err != nil {
return nil, err
}
} else {
for k, v := range bp.Tags {
if p.Tags == nil {
p.Tags = make(map[string]string, len(bp.Tags))
}
p.Tags[k] = v
}
if _, err := b.WriteString(p.MarshalString()); err != nil {
return nil, err
}
}
if err := b.WriteByte('\n'); err != nil {
return nil, err
}
}
req, err := http.NewRequest("POST", c.url.String(), &b)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "")
req.Header.Set("User-Agent", c.userAgent)
if c.username != "" {
req.SetBasicAuth(c.username, c.password)
}
params := req.URL.Query()
params.Add("db", bp.Database)
params.Add("rp", bp.RetentionPolicy)
params.Add("precision", bp.Precision)
params.Add("consistency", bp.WriteConsistency)
req.URL.RawQuery = params.Encode()
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var response Response
body, err := ioutil.ReadAll(resp.Body)
if err != nil && err.Error() != "EOF" {
return nil, err
}
if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
err := errors.New(string(body))
response.Err = err
return &response, err
}
return nil, nil
}
// Ping will check to see if the server is up
// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
func (c *Client) Ping() (time.Duration, string, error) {
now := time.Now()
u := c.url
u.Path = "ping"
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return 0, "", err
}
req.Header.Set("User-Agent", c.userAgent)
if c.username != "" {
req.SetBasicAuth(c.username, c.password)
}
resp, err := c.httpClient.Do(req)
if err != nil {
return 0, "", err
}
defer resp.Body.Close()
version := resp.Header.Get("X-Influxdb-Version")
return time.Since(now), version, nil
}
// Dump connects to server and retrieves all data stored for specified database.
// If successful, Dump returns the entire response body, which is an io.ReadCloser
func (c *Client) Dump(db string) (io.ReadCloser, error) {
u := c.url
u.Path = "dump"
values := u.Query()
values.Set("db", db)
u.RawQuery = values.Encode()
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", c.userAgent)
if c.username != "" {
req.SetBasicAuth(c.username, c.password)
}
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return resp.Body, fmt.Errorf("HTTP Protocol error %d", resp.StatusCode)
}
return resp.Body, nil
}
// Structs
// Result represents a resultset returned from a single statement.
type Result struct {
Series []influxql.Row
Err error
}
// MarshalJSON encodes the result into JSON.
func (r *Result) MarshalJSON() ([]byte, error) {
// Define a struct that outputs "error" as a string.
var o struct {
Series []influxql.Row `json:"series,omitempty"`
Err string `json:"error,omitempty"`
}
// Copy fields to output struct.
o.Series = r.Series
if r.Err != nil {
o.Err = r.Err.Error()
}
return json.Marshal(&o)
}
// UnmarshalJSON decodes the data into the Result struct
func (r *Result) UnmarshalJSON(b []byte) error {
var o struct {
Series []influxql.Row `json:"series,omitempty"`
Err string `json:"error,omitempty"`
}
dec := json.NewDecoder(bytes.NewBuffer(b))
dec.UseNumber()
err := dec.Decode(&o)
if err != nil {
return err
}
r.Series = o.Series
if o.Err != "" {
r.Err = errors.New(o.Err)
}
return nil
}
// Response represents a list of statement results.
type Response struct {
Results []Result
Err error
}
// MarshalJSON encodes the response into JSON.
func (r *Response) MarshalJSON() ([]byte, error) {
// Define a struct that outputs "error" as a string.
var o struct {
Results []Result `json:"results,omitempty"`
Err string `json:"error,omitempty"`
}
// Copy fields to output struct.
o.Results = r.Results
if r.Err != nil {
o.Err = r.Err.Error()
}
return json.Marshal(&o)
}
// UnmarshalJSON decodes the data into the Response struct
func (r *Response) UnmarshalJSON(b []byte) error {
var o struct {
Results []Result `json:"results,omitempty"`
Err string `json:"error,omitempty"`
}
dec := json.NewDecoder(bytes.NewBuffer(b))
dec.UseNumber()
err := dec.Decode(&o)
if err != nil {
return err
}
r.Results = o.Results
if o.Err != "" {
r.Err = errors.New(o.Err)
}
return nil
}
// Error returns the first error from any statement.
// Returns nil if no errors occurred on any statements.
func (r Response) Error() error {
if r.Err != nil {
return r.Err
}
for _, result := range r.Results {
if result.Err != nil {
return result.Err
}
}
return nil
}
// Point defines the fields that will be written to the database
// Measurement, Time, and Fields are required
// Precision can be specified if the time is in epoch format (integer).
// Valid values for Precision are n, u, ms, s, m, and h
type Point struct {
Measurement string
Tags map[string]string
Time time.Time
Fields map[string]interface{}
Precision string
Raw string
}
// MarshalJSON will format the time in RFC3339Nano.
// Precision is ignored here; it is only used for writing, not reading.
// In other words, times are always sent back in nanosecond precision.
func (p *Point) MarshalJSON() ([]byte, error) {
point := struct {
Measurement string `json:"measurement,omitempty"`
Tags map[string]string `json:"tags,omitempty"`
Time string `json:"time,omitempty"`
Fields map[string]interface{} `json:"fields,omitempty"`
Precision string `json:"precision,omitempty"`
}{
Measurement: p.Measurement,
Tags: p.Tags,
Fields: p.Fields,
Precision: p.Precision,
}
// Let it omit empty if it's really zero
if !p.Time.IsZero() {
point.Time = p.Time.UTC().Format(time.RFC3339Nano)
}
return json.Marshal(&point)
}
func (p *Point) MarshalString() string {
return tsdb.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time).String()
}
// UnmarshalJSON decodes the data into the Point struct
func (p *Point) UnmarshalJSON(b []byte) error {
var normal struct {
Measurement string `json:"measurement"`
Tags map[string]string `json:"tags"`
Time time.Time `json:"time"`
Precision string `json:"precision"`
Fields map[string]interface{} `json:"fields"`
}
var epoch struct {
Measurement string `json:"measurement"`
Tags map[string]string `json:"tags"`
Time *int64 `json:"time"`
Precision string `json:"precision"`
Fields map[string]interface{} `json:"fields"`
}
if err := func() error {
var err error
dec := json.NewDecoder(bytes.NewBuffer(b))
dec.UseNumber()
if err = dec.Decode(&epoch); err != nil {
return err
}
// Convert from epoch to time.Time, but only if Time
// was actually set.
var ts time.Time
if epoch.Time != nil {
ts, err = EpochToTime(*epoch.Time, epoch.Precision)
if err != nil {
return err
}
}
p.Measurement = epoch.Measurement
p.Tags = epoch.Tags
p.Time = ts
p.Precision = epoch.Precision
p.Fields = normalizeFields(epoch.Fields)
return nil
}(); err == nil {
return nil
}
dec := json.NewDecoder(bytes.NewBuffer(b))
dec.UseNumber()
if err := dec.Decode(&normal); err != nil {
return err
}
normal.Time = SetPrecision(normal.Time, normal.Precision)
p.Measurement = normal.Measurement
p.Tags = normal.Tags
p.Time = normal.Time
p.Precision = normal.Precision
p.Fields = normalizeFields(normal.Fields)
return nil
}
// Remove any notion of json.Number
func normalizeFields(fields map[string]interface{}) map[string]interface{} {
newFields := map[string]interface{}{}
for k, v := range fields {
switch v := v.(type) {
case json.Number:
jv, e := v.Float64()
if e != nil {
panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e))
}
newFields[k] = jv
default:
newFields[k] = v
}
}
return newFields
}
// BatchPoints is used to send batched data in a single write.
// Database and Points are required
// If no retention policy is specified, it will use the database's default retention policy.
// If tags are specified, they will be "merged" with all points. If a point already has that tag, it is ignored.
// If time is specified, it will be applied to any point with an empty time.
// Precision can be specified if the time is in epoch format (integer).
// Valid values for Precision are n, u, ms, s, m, and h
type BatchPoints struct {
Points []Point `json:"points,omitempty"`
Database string `json:"database,omitempty"`
RetentionPolicy string `json:"retentionPolicy,omitempty"`
Tags map[string]string `json:"tags,omitempty"`
Time time.Time `json:"time,omitempty"`
Precision string `json:"precision,omitempty"`
WriteConsistency string `json:"-"`
}
// UnmarshalJSON decodes the data into the BatchPoints struct
func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
var normal struct {
Points []Point `json:"points"`
Database string `json:"database"`
RetentionPolicy string `json:"retentionPolicy"`
Tags map[string]string `json:"tags"`
Time time.Time `json:"time"`
Precision string `json:"precision"`
}
var epoch struct {
Points []Point `json:"points"`
Database string `json:"database"`
RetentionPolicy string `json:"retentionPolicy"`
Tags map[string]string `json:"tags"`
Time *int64 `json:"time"`
Precision string `json:"precision"`
}
if err := func() error {
var err error
if err = json.Unmarshal(b, &epoch); err != nil {
return err
}
// Convert from epoch to time.Time
var ts time.Time
if epoch.Time != nil {
ts, err = EpochToTime(*epoch.Time, epoch.Precision)
if err != nil {
return err
}
}
bp.Points = epoch.Points
bp.Database = epoch.Database
bp.RetentionPolicy = epoch.RetentionPolicy
bp.Tags = epoch.Tags
bp.Time = ts
bp.Precision = epoch.Precision
return nil
}(); err == nil {
return nil
}
if err := json.Unmarshal(b, &normal); err != nil {
return err
}
normal.Time = SetPrecision(normal.Time, normal.Precision)
bp.Points = normal.Points
bp.Database = normal.Database
bp.RetentionPolicy = normal.RetentionPolicy
bp.Tags = normal.Tags
bp.Time = normal.Time
bp.Precision = normal.Precision
return nil
}
// utility functions
// Addr provides the current url as a string of the server the client is connected to.
func (c *Client) Addr() string {
return c.url.String()
}
// helper functions
// EpochToTime takes a unix epoch time and uses precision to return back a time.Time
func EpochToTime(epoch int64, precision string) (time.Time, error) {
if precision == "" {
precision = "s"
}
var t time.Time
switch precision {
case "h":
t = time.Unix(0, epoch*int64(time.Hour))
case "m":
t = time.Unix(0, epoch*int64(time.Minute))
case "s":
t = time.Unix(0, epoch*int64(time.Second))
case "ms":
t = time.Unix(0, epoch*int64(time.Millisecond))
case "u":
t = time.Unix(0, epoch*int64(time.Microsecond))
case "n":
t = time.Unix(0, epoch)
default:
return time.Time{}, fmt.Errorf("Unknown precision %q", precision)
}
return t, nil
}
// SetPrecision will round a time to the specified precision
func SetPrecision(t time.Time, precision string) time.Time {
switch precision {
case "n":
case "u":
return t.Round(time.Microsecond)
case "ms":
return t.Round(time.Millisecond)
case "s":
return t.Round(time.Second)
case "m":
return t.Round(time.Minute)
case "h":
return t.Round(time.Hour)
}
return t
}


@@ -0,0 +1,650 @@
# The Influx Query Language Specification
## Introduction
This is a reference for the Influx Query Language ("InfluxQL").
InfluxQL is a SQL-like query language for interacting with InfluxDB. It has been lovingly crafted to feel familiar to those coming from other SQL or SQL-like environments while providing features specific to storing and analyzing time series data.
## Notation
The syntax is specified using Extended Backus-Naur Form ("EBNF"). EBNF is the same notation used in the [Go](http://golang.org) programming language specification, which can be found [here](https://golang.org/ref/spec). Not so coincidentally, InfluxDB is written in Go.
```
Production = production_name "=" [ Expression ] "." .
Expression = Alternative { "|" Alternative } .
Alternative = Term { Term } .
Term = production_name | token [ "…" token ] | Group | Option | Repetition .
Group = "(" Expression ")" .
Option = "[" Expression "]" .
Repetition = "{" Expression "}" .
```
Notation operators in order of increasing precedence:
```
| alternation
() grouping
[] option (0 or 1 times)
{} repetition (0 to n times)
```
## Query representation
### Characters
InfluxQL is Unicode text encoded in [UTF-8](http://en.wikipedia.org/wiki/UTF-8).
```
newline = /* the Unicode code point U+000A */ .
unicode_char = /* an arbitrary Unicode code point except newline */ .
```
## Letters and digits
Letters are the set of ASCII letters; the underscore character _ (U+005F) is also considered a letter.
Only decimal digits are supported.
```
letter = ascii_letter | "_" .
ascii_letter = "A" … "Z" | "a" … "z" .
digit = "0" … "9" .
```
## Identifiers
Identifiers are tokens which refer to database names, retention policy names, user names, measurement names, tag keys, and field names.
The rules:
- double quoted identifiers can contain any unicode character other than a new line
- double quoted identifiers can contain escaped `"` characters (i.e., `\"`)
- unquoted identifiers must start with an upper or lowercase ASCII character or "_"
- unquoted identifiers may contain only ASCII letters, decimal digits, and "_"
```
identifier = unquoted_identifier | quoted_identifier .
unquoted_identifier = ( letter ) { letter | digit } .
quoted_identifier = `"` unicode_char { unicode_char } `"` .
```
#### Examples:
```
cpu
_cpu_stats
"1h"
"anything really"
"1_Crazy-1337.identifier>NAME👍"
```
## Keywords
```
ALL ALTER AS ASC BEGIN BY
CREATE CONTINUOUS DATABASE DATABASES DEFAULT DELETE
DESC DROP DURATION END EXISTS EXPLAIN
FIELD FROM GRANT GROUP IF IN
INNER INSERT INTO KEY KEYS LIMIT
SHOW MEASUREMENT MEASUREMENTS OFFSET ON ORDER
PASSWORD POLICY POLICIES PRIVILEGES QUERIES QUERY
READ REPLICATION RETENTION REVOKE SELECT SERIES
SLIMIT SOFFSET TAG TO USER USERS
VALUES WHERE WITH WRITE
```
## Literals
### Integers
InfluxQL supports decimal integer literals. Hexadecimal and octal literals are not currently supported.
```
int_lit = ( "1" … "9" ) { digit } .
```
### Floats
InfluxQL supports floating-point literals. Exponents are not currently supported.
```
float_lit = int_lit "." int_lit .
```
### Strings
String literals must be surrounded by single quotes. Strings may contain `'` characters as long as they are escaped (i.e., `\'`).
```
string_lit = `'` { unicode_char } `'` .
```
### Durations
Duration literals specify a length of time. An integer literal followed immediately (with no spaces) by a duration unit listed below is interpreted as a duration literal.
```
Duration unit definitions
-------------------------
| Units | Meaning |
|--------|-----------------------------------------|
| u or µ | microseconds (1 millionth of a second) |
| ms | milliseconds (1 thousandth of a second) |
| s | second |
| m | minute |
| h | hour |
| d | day |
| w | week |
```
```
duration_lit = int_lit duration_unit .
duration_unit = "u" | "µ" | "ms" | "s" | "m" | "h" | "d" | "w" .
```
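Duration literals most often appear in `WHERE` and `GROUP BY` clauses; an illustrative example:
```sql
-- select the last 30 minutes of data, averaged over 5 minute intervals
SELECT mean(value) FROM cpu WHERE time > now() - 30m GROUP BY time(5m);
```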
### Dates & Times
The date and time literal format is not specified in EBNF like the rest of this document. It is specified using Go's date / time parsing format, which is a reference date written in the format required by InfluxQL. The reference date time is:
InfluxQL reference date time: January 2nd, 2006 at 3:04:05 PM
```
time_lit = "2006-01-02 15:04:05.999999" | "2006-01-02"
```
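An illustrative example of a date / time literal used in a query:
```sql
SELECT value FROM cpu WHERE time > '2006-01-02 15:04:05.999999';
```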
### Booleans
```
bool_lit = TRUE | FALSE .
```
### Regular Expressions
```
regex_lit = "/" { unicode_char } "/" .
```
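Regex literals can stand in for measurement names; an illustrative sketch (the lexer also tokenizes `=~` and `!~` for regex matching in conditions):
```sql
-- select from every measurement whose name matches /cpu.*/
SELECT value FROM /cpu.*/ WHERE region =~ /us.*/;
```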
## Queries
A query is composed of one or more statements separated by a semicolon.
```
query = statement { ";" statement } .
statement = alter_retention_policy_stmt |
create_continuous_query_stmt |
create_database_stmt |
create_retention_policy_stmt |
create_user_stmt |
delete_stmt |
drop_continuous_query_stmt |
drop_database_stmt |
drop_measurement_stmt |
drop_retention_policy_stmt |
drop_series_stmt |
drop_user_stmt |
grant_stmt |
show_continuous_queries_stmt |
show_databases_stmt |
show_field_keys_stmt |
show_measurements_stmt |
show_retention_policies |
show_series_stmt |
show_tag_keys_stmt |
show_tag_values_stmt |
show_users_stmt |
revoke_stmt |
select_stmt .
```
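For example, two statements joined into one query by a semicolon:
```sql
CREATE DATABASE mydb; SHOW DATABASES;
```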
## Statements
### ALTER RETENTION POLICY
```
alter_retention_policy_stmt = "ALTER RETENTION POLICY" policy_name "ON"
db_name retention_policy_option
[ retention_policy_option ]
[ retention_policy_option ] .
db_name = identifier .
policy_name = identifier .
retention_policy_option = retention_policy_duration |
retention_policy_replication |
"DEFAULT" .
retention_policy_duration = "DURATION" duration_lit .
retention_policy_replication = "REPLICATION" int_lit .
```
#### Examples:
```sql
-- Set default retention policy for mydb to 1h.cpu.
ALTER RETENTION POLICY "1h.cpu" ON mydb DEFAULT;
-- Change duration and replication factor.
ALTER RETENTION POLICY policy1 ON somedb DURATION 1h REPLICATION 4
```
### CREATE CONTINUOUS QUERY
```
create_continuous_query_stmt = "CREATE CONTINUOUS QUERY" query_name "ON" db_name
"BEGIN" select_stmt "END" .
query_name = identifier .
```
#### Examples:
```sql
-- selects from default retention policy and writes into 6_months retention policy
CREATE CONTINUOUS QUERY "10m_event_count"
ON db_name
BEGIN
SELECT count(value)
INTO "6_months".events
FROM events
GROUP BY time(10m)
END;
-- this selects from the output of one continuous query in one retention policy and outputs to another series in another retention policy
CREATE CONTINUOUS QUERY "1h_event_count"
ON db_name
BEGIN
SELECT sum(count) as count
INTO "2_years".events
FROM "6_months".events
GROUP BY time(1h)
END;
```
### CREATE DATABASE
```
create_database_stmt = "CREATE DATABASE" db_name .
```
#### Example:
```sql
CREATE DATABASE foo
```
### CREATE RETENTION POLICY
```
create_retention_policy_stmt = "CREATE RETENTION POLICY" policy_name "ON"
db_name retention_policy_duration
retention_policy_replication
[ "DEFAULT" ] .
```
#### Examples
```sql
-- Create a retention policy.
CREATE RETENTION POLICY "10m.events" ON somedb DURATION 10m REPLICATION 2;
-- Create a retention policy and set it as the default.
CREATE RETENTION POLICY "10m.events" ON somedb DURATION 10m REPLICATION 2 DEFAULT;
```
### CREATE USER
```
create_user_stmt = "CREATE USER" user_name "WITH PASSWORD" password
[ "WITH ALL PRIVILEGES" ] .
```
#### Examples:
```sql
-- Create a normal database user.
CREATE USER jdoe WITH PASSWORD '1337password';
-- Create a cluster admin.
-- Note: Unlike the GRANT statement, the "PRIVILEGES" keyword is required here.
CREATE USER jdoe WITH PASSWORD '1337password' WITH ALL PRIVILEGES;
```
### DELETE
```
delete_stmt = "DELETE" from_clause where_clause .
```
#### Example:
```sql
-- delete data points from the cpu measurement where the region tag
-- equals 'uswest'
DELETE FROM cpu WHERE region = 'uswest';
```
### DROP CONTINUOUS QUERY
```
drop_continuous_query_stmt = "DROP CONTINUOUS QUERY" query_name .
```
#### Example:
```sql
DROP CONTINUOUS QUERY myquery;
```
### DROP DATABASE
```
drop_database_stmt = "DROP DATABASE" db_name .
```
#### Example:
```sql
DROP DATABASE mydb;
```
### DROP MEASUREMENT
```
drop_measurement_stmt = "DROP MEASUREMENT" measurement .
```
#### Examples:
```sql
-- drop the cpu measurement
DROP MEASUREMENT cpu;
```
### DROP RETENTION POLICY
```
drop_retention_policy_stmt = "DROP RETENTION POLICY" policy_name "ON" db_name .
```
#### Example:
```sql
-- drop the retention policy named 1h.cpu from mydb
DROP RETENTION POLICY "1h.cpu" ON mydb;
```
### DROP SERIES
```
drop_series_stmt = "DROP SERIES" [ from_clause ] [ where_clause ] .
```
#### Example:
```sql
-- drop series from the cpu measurement where region = 'uswest'
DROP SERIES FROM cpu WHERE region = 'uswest';
```
### DROP USER
```
drop_user_stmt = "DROP USER" user_name .
```
#### Example:
```sql
DROP USER jdoe;
```
### GRANT
NOTE: Users can be granted privileges on databases that do not exist.
```
grant_stmt = "GRANT" privilege [ on_clause ] to_clause .
```
#### Examples:
```sql
-- grant cluster admin privileges
GRANT ALL TO jdoe;
-- grant read access to a database
GRANT READ ON mydb TO jdoe;
```
### SHOW CONTINUOUS QUERIES
```
show_continuous_queries_stmt = "SHOW CONTINUOUS QUERIES" .
```
#### Example:
```sql
-- show all continuous queries
SHOW CONTINUOUS QUERIES;
```
### SHOW DATABASES
```
show_databases_stmt = "SHOW DATABASES" .
```
#### Example:
```sql
-- show all databases
SHOW DATABASES;
```
### SHOW FIELD KEYS
```
show_field_keys_stmt = "SHOW FIELD KEYS" [ from_clause ] .
```
#### Examples:
```sql
-- show field keys from all measurements
SHOW FIELD KEYS;
-- show field keys from specified measurement
SHOW FIELD KEYS FROM cpu;
```
### SHOW MEASUREMENTS
```
show_measurements_stmt = "SHOW MEASUREMENTS" [ where_clause ] [ group_by_clause ] [ limit_clause ]
                         [ offset_clause ] .
```
#### Examples:
```sql
-- show all measurements
SHOW MEASUREMENTS;
-- show measurements where region tag = 'uswest' AND host tag = 'serverA'
SHOW MEASUREMENTS WHERE region = 'uswest' AND host = 'serverA';
```
### SHOW RETENTION POLICIES
```
show_retention_policies = "SHOW RETENTION POLICIES" db_name .
```
#### Example:
```sql
-- show all retention policies on a database
SHOW RETENTION POLICIES mydb;
```
### SHOW SERIES
```
show_series_stmt = "SHOW SERIES" [ from_clause ] [ where_clause ] [ group_by_clause ]
[ limit_clause ] [ offset_clause ] .
```
#### Example:
```sql
-- show all series from the cpu measurement where region = 'uswest'
SHOW SERIES FROM cpu WHERE region = 'uswest';
```
### SHOW TAG KEYS
```
show_tag_keys_stmt = "SHOW TAG KEYS" [ from_clause ] [ where_clause ] [ group_by_clause ]
[ limit_clause ] [ offset_clause ] .
```
#### Examples:
```sql
-- show all tag keys
SHOW TAG KEYS;
-- show all tag keys from the cpu measurement
SHOW TAG KEYS FROM cpu;
-- show all tag keys from the cpu measurement where the region key = 'uswest'
SHOW TAG KEYS FROM cpu WHERE region = 'uswest';
-- show all tag keys where the host key = 'serverA'
SHOW TAG KEYS WHERE host = 'serverA';
```
### SHOW TAG VALUES
```
show_tag_values_stmt = "SHOW TAG VALUES" [ from_clause ] with_tag_clause [ where_clause ]
[ group_by_clause ] [ limit_clause ] [ offset_clause ] .
```
#### Examples:
```sql
-- show all tag values across all measurements for the region tag
SHOW TAG VALUES WITH TAG = 'region';
-- show tag values from the cpu measurement for the region tag
SHOW TAG VALUES FROM cpu WITH TAG = 'region';
-- show tag values from the cpu measurement for region & host tag keys where service = 'redis'
SHOW TAG VALUES FROM cpu WITH TAG IN (region, host) WHERE service = 'redis';
```
### SHOW USERS
```
show_users_stmt = "SHOW USERS" .
```
#### Example:
```sql
-- show all users
SHOW USERS;
```
### REVOKE
```
revoke_stmt = "REVOKE" privilege [ "ON" db_name ] "FROM" user_name .
```
#### Examples:
```sql
-- revoke cluster admin from jdoe
REVOKE ALL PRIVILEGES FROM jdoe;
-- revoke read privileges from jdoe on mydb
REVOKE READ ON mydb FROM jdoe;
```
### SELECT
```
select_stmt = fields from_clause [ into_clause ] [ where_clause ]
[ group_by_clause ] [ order_by_clause ] [ limit_clause ]
[ offset_clause ] [ slimit_clause ] [ soffset_clause ].
```
#### Examples:
```sql
-- select mean value from the cpu measurement where region = 'uswest' grouped by 10 minute intervals
SELECT mean(value) FROM cpu WHERE region = 'uswest' GROUP BY time(10m) fill(0);
```
## Clauses
```
from_clause = "FROM" measurements .
group_by_clause = "GROUP BY" dimensions fill(<option>).
limit_clause = "LIMIT" int_lit .
offset_clause = "OFFSET" int_lit .
slimit_clause = "SLIMIT" int_lit .
soffset_clause = "SOFFSET" int_lit .
on_clause = "ON" db_name .
order_by_clause = "ORDER BY" sort_fields .
to_clause = "TO" user_name .
where_clause = "WHERE" expr .
```
## Expressions
```
binary_op = "+" | "-" | "*" | "/" | "AND" | "OR" | "=" | "!=" | "<" |
"<=" | ">" | ">=" .
expr = unary_expr { binary_op unary_expr } .
unary_expr = "(" expr ")" | var_ref | time_lit | string_lit | int_lit |
float_lit | bool_lit | duration_lit | regex_lit .
```
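For example, a compound condition built from these productions, with parentheses used as a `unary_expr`:
```sql
SELECT value FROM cpu WHERE (region = 'uswest' OR region = 'useast') AND value > 10;
```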
## Other
```
dimension = expr .
dimensions = dimension { "," dimension } .
field = expr [ alias ] .
fields = field { "," field } .
measurement = measurement_name |
( policy_name "." measurement_name ) |
( db_name "." [ policy_name ] "." measurement_name ) .
measurements = measurement { "," measurement } .
measurement_name = identifier .
password = identifier .
policy_name = identifier .
privilege = "ALL" [ "PRIVILEGES" ] | "READ" | "WRITE" .
series_id = int_lit .
sort_field = field_name [ ASC | DESC ] .
sort_fields = sort_field { "," sort_field } .
user_name = identifier .
```
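Note that the `measurement` production allows fully qualified names. An illustrative example selecting from measurement `cpu` under retention policy `1h.cpu` in database `mydb`:
```sql
SELECT value FROM mydb."1h.cpu".cpu;
```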

vendor/github.com/influxdata/influxdb/influxql/NOTES

@@ -0,0 +1,682 @@
SELECT mean(value) FROM cpu
WHERE service = 'redis'
GROUP BY region, time(10m)
based on group by, get unique tag sets for region
cpu region=uswest -> get series ids from cpu where <tagset> and <where cond>
cpu region=useast -> get series ids from cpu where <tagset> and <where cond>
for each shard group in time range {
for each group by tagset {
shardItrs := map[shard]itr
for id := range seriesIds {
shard := group.shardForId(id)
shardItrs[shard].addId(id)
}
for _, itr := range shardItrs {
itr.tags = tagset
itr.name = cpu
}
}
}
(host = 'serverA' AND value > 100) OR (region = 'uswest' AND value < 10)
value > 100 OR value < 10 (host=serverA, region=uswest)
value < 10 (host!=serverA, region=uswest)
value > 100
filters := make(map[whereCond]seriesIds)
filters := make(map[uint32]whereCond)
seriesIds
select mean(value) from foo WHERE someField = 'important' group by time(5m)
===================
select derivative(mean(value))
from cpu
group by time(5m)
select mean(value) from cpu group by time(5m)
select top(10, value) from cpu group by host where time > now() - 1h
this query uses this type of cycle
-------REMOTE HOST ------------- -----HOST THAT GOT QUERY ---
map -> reduce -> combine -> map -> reduce -> combine -> user
select mean(value) cpu group by time(5m), host where time > now() -4h
map -> reduce -> combine -> user
map -> reduce -> map -> reduce -> combine -> user
map -> reduce -> combine -> map -> reduce -> combine -> user
select value from
(
select mean(value) AS value FROM cpu GROUP BY time(5m)
)
[
{
name: cpu,
tags: {
host: servera,
},
columns: [time, mean],
values : [
[23423423, 88.8]
]
},
{
name: cpu,
tags: {
host: serverb,
}
}
]
================================================================================
// list series ->
/*
[
{
"name": "cpu",
"columns": ["id", "region", "host"],
"values": [
1, "uswest", "servera",
2, "uswest", "serverb"
]
},
{
""
}
]
list series where region = 'uswest'
list tags where name = 'cpu'
list tagKeys where name = 'cpu'
list series where name = 'cpu' and region = 'uswest'
select distinct(region) from cpu
list names
list tagKeys
list tagValues where tagKey = 'region' and time > now() -1h
select a.value, b.value from a join b where a.user_id == 100
select a.value from a where a.user_id == 100
select b.value from b
3 1 2
select sum(a.value) + (sum(b.value) / min(b.value)) from a join b group by region
select suM(a.value) from a group by time(5m)
select sum(b.value) from b group by time(5m)
execute sum MR on series [23, 65, 88, 99, 101, 232]
map -> 1 tick per 5m
reduce -> combines ticks per 5m interval -> outputs
planner -> take reduce output per 5m interval from the two reducers
and combine with the join function, which is +
[1,/,2,+,3]
for v := s[0].Next(); v != nil; v = s[0].Next() {
var result interface{}
for i := 1; i < len(s); i += 2 {
// it's an operator
if i % 2 == 1 {
}
}
}
select count(distinct(host)) from cpu where time > now() - 5m
type mapper interface {
Map(iterator)
}
type floatCountMapper struct {}
func(m *floatCountMapper) Map(i Iterator) {
itr := i.(*floatIterator)
}
type Iterator interface {
itr()
}
type iterator struct {
cursor *bolt.Cursor
timeBucket time.Time
name string
seriesID uint32
tags map[string]string
fieldID uint8
where *WhereClause
}
func (i *intIterator) itr() {}
func (i *intIterator) Next() (k int64, v float64) {
// loop through bolt cursor applying where clause and yield next point
// if cursor is at end or time is out of range, yield nil
}
*/
field: ipaddress
select top(10, count, ipaddress) from hits group by time(5m), host
map -> 10 records, <key(time,host)>, <value(count,ipaddresses)>
reducer -> take in all map outputs for each 5m bucket
combine them, sort, take out the top 10
output -> 10 records, count, ipaddresses, time
==========
select top(10, count, host) from hits group by time(5m)
select host, value from cpu where time > now() - 1h
select last(value) from cpu group by time(auto), host fill(previous) where time > now() - 1h
select sum(value) from cpu group by host where time > now() - 1h
select sum(value) from cpu where time > now() - 1h
select * from a;
[
{
"name": "cpu",
"tags": {
"host": "servera"
},
"fields": [
"time",
"count",
"ipaddress"
]
"values": [
[t, v, "123.23.22.2"],
[t, v, "192.232.2.2"],
]
},
{
"name": "cpu",
"tags": {
"host": "serverb"
},
"values": [
[t, v],
[t + 1, v],
]
},
]
[t, v, "servera"]
[t, v, "serverb"]
[t+1, v, "servera"]
[t+1, v, "serverb"]
======
a INNER JOIN b
- planner always has "group by"
select count(errors.value) / count(requests.value) as error_rate
from errors join requests as "mysuperseries"
group by time(5m)
fill(previous)
where time > now() - 4h
select mean(value) as cpu_mean from cpu group by time(5m) where host = 'servera'
select count(value) from errors group by time(5m) fill(previous) where..
select count(value) from requests group by time(5m) fill(previ...
{
"name": "errors.requests",
"tags": {},
"fields": ["time", "errors.count", "requests.count"],
"values": [
[t, n, m]
]
}
a MERGE b
a - t
b - t
a - t + 1
b - t + 1
b - t + 2
a - t + 3
<cpu, host>
select value from cpu
select mean(value) from cpu group by time(5m)
select first(value) from cpu
=====
1. Group by time
2. Group by
3. Raw
======
SELECT sum(value) FROM myseries
host=servera
host=serverb
{"host":"servera", "value":100}
{"host":"serverb", "value":"hello!"}
series = <name, tags>
series = seriesID
seriesID -> name
name has_many seriesIDs
name has_many fields
field -> (type, id)
<seriesName,fieldID> -> (type, id)
<seriesID, time> -> fieldValues
field
type topMapper struct {
count int
}
func newTopMapper(count int) {
}
func (t *topCountMapper) Map(i Iterator) {
topValues := make(map[string]int)
for p := i.Next(); p != nil; p = i.Next() {
topValues[p.String()] += 1
}
for k, v := range topValues {
t.job.Emit(k, v)
}
}
type topCountReducer struct {
count int
}
func (r *topCountReducer) Reduce(i Iterator) {
realzTop10 := make(map[string]int)
for v := i.Next(); v != nil; v = i.Next() {
top10 := v.(map[string]int)
for k, n := range top10 {
realzTop10[k] += n
}
}
realyrealTop10 := make(map[string]int)
// do sorty magic on reazTop10 and set realyreal
r.job.Emit(realyrealTop10)
}
type Transformer interface {
Transform(interface{}) Series
}
type ReduceOutput struct {
values [][]interface{}
fieldIDs []
}
// for topCountReducer ReduceOutput would look like
// values = [t, c, "some string"]
// fieldIDs = [0, 0, 3]
SELECT val1, val2 FROM abc
select mean(value) from cpu where region='uswest' group by time(5m), host
2000 series
200 series to each machine
================================================================================
type Mapper interface {
Map(Iterator)
}
type countMapper struct {}
// Iterator is the entire series if not an aggregate query
// or iterator is the entire time bucket if an aggregate query
func (m *sumMapper) Map(i Iterator) {
var sum int
for p := i.Next(); p != nil; p = i.Next() {
sum += p.Float()
}
m.Emitter.Emit(k, sum)
}
type Point interface {
String(name)
Int(name)
}
type cursorIterator struct {
Cursor *bolt.Cursor
FieldID uint8
Value []byte
}
func (i cursorIterator) Next() Point {
_, i.Value = i.Cursor.Next()
return byteSlicePoint(i.Value)
}
type byteSlicePoint []byte
func (p byteSlicePoint) String() string {
// unmarshal from byte slice.
}
/*
{
"name": "foo",
"fields": {
"value": 23.2,
"user_id": 23
},
"tags": {
}
}
*/
CNT ID0 VALUEVALUEVALUEVALUEVALUEVALUEVALUEVALU
0001 0000 0000 0000 0000 0000 0000 0000 0000 0000
CNT ID0 ID1 ID2 FLOATFLOA STRINGSTR STRINGSTR
0002 0001 0002 0003 0000 0000 0000 0000 0000 0000
// SELECT count() FROM cpu GROUP BY host
// SELECT mean(value) from cpu where region = 'uswest'
// SELECT derivative(value) from redis_key_count GROUP BY time(5m)
// SELECT host, mean(value)
// FROM cpu
// GROUP BY host
// HAVING top(20, mean)
// WHERE time > now() - 1h
// AND region = 'uswest'
// SELECT ipaddress, count(ipaddress)
// FROM hits
// GROUP BY ipaddress
// HAVING top(10, count)
// WHERE time > now() - 1h
series := meta.DistinctTagValues("cpu", "host")
type Series struct {
name string
fields map[uint8]string
}
type SeriesData struct {
ID
tags map[string]string
}
<id, time, value>
mrJobs := make([]*MRJob, 0, len(series))
for _, s := range series {
j := NewMRJob(s)
mrJobs = append(mrJobs, j)
j.Execute()
}
for _, j := range mrJobs {
// pull in results
// construct series object with same tags as series
}
================================================================================
type Iterator interface {
Next() (interface{}, bool)
}
type iteratorCounter struct {
iterator Iterator
}
func (iteratorCounter) Next() {
}
SELECT max(a.value), min(a.value), max(b.value)
FROM a, b
WHERE a.host = 'influxdb.org'
grouper {
[]Iterator
}
SELECT max(a.value) FROM a WHERE a.host = 'influxdb.org' --> 1 value
SELECT min(a.value) FROM a WHERE a.host = 'influxdb.org' --> 1 value
SELECT max(b.value) FROM b --> 1 value
SELECT max(a.value) FROM a GROUP BY time WHERE a.host = 'influxdb.org' --> key,value
timeGrouper {
[]Iterator
}
type maxMapper struct {
}
IntervalIterator {
}
maxMapper.Map(Iterator)
- GROUP BY time
- GROUP BY time, <tag>
- GROUP BY <tag>
COUNT(field)
MIN(field)
MAX(field)
MEAN(field)
MODE(field)
MEDIAN(field)
COUNT(DISTINCT field)
PERCENTILE(field, N)
HISTOGRAM(field [, bucketSize])
DERIVATIVE(field)
SUM(field)
STDDEV(field)
FIRST(field)
LAST(field)
DIFFERENCE(field)
TOP(field, N)
BOTTOM(field, N) <----- multivalue
================================================================================

vendor/github.com/influxdata/influxdb/influxql/ast.go

File diff suppressed because it is too large

vendor/github.com/influxdata/influxdb/influxql/doc.go

@@ -0,0 +1,64 @@
/*
Package influxql implements a parser for the InfluxDB query language.
InfluxQL is a DML and DDL language for the InfluxDB time series database.
It provides the ability to query for aggregate statistics as well as create
and configure the InfluxDB server.
Selecting data
The SELECT query is used for retrieving data from one or more series. It allows
for a list of columns followed by a list of series to select from.
SELECT value FROM cpu_load
You can also add a conditional expression to limit the results of the query:
SELECT value FROM cpu_load WHERE host = 'influxdb.com'
Two or more series can be combined into a single query and executed together:
SELECT cpu0.value + cpu1.value
FROM cpu_load AS cpu0 INNER JOIN cpu_load cpu1 ON cpu0.host = cpu1.host
Limits and ordering can be set on selection queries as well:
SELECT value FROM cpu_load LIMIT 100 ORDER DESC;
Removing data
The DELETE query is available to remove time series data points from the
database. This query will delete "cpu_load" values older than an hour:
DELETE FROM cpu_load WHERE time < now() - 1h
Continuous Queries
Queries can be run indefinitely on the server in order to generate new series.
This is done by running a "SELECT INTO" query. For example, this query computes
the hourly mean for cpu_load and stores it into a "cpu_load" series in the
"daily" shard space.
SELECT mean(value) AS value FROM cpu_load GROUP BY 1h
INTO daily.cpu_load
If there is existing data on the source series then this query will be run for
all historic data. To only execute the query on new incoming data you can append
"NO BACKFILL" to the end of the query:
SELECT mean(value) AS value FROM cpu_load GROUP BY 1h
INTO daily.cpu_load NO BACKFILL
Continuous queries will return an id that can be used to remove them in the
future. To remove a continuous query, use the DROP CONTINUOUS QUERY statement:
DROP CONTINUOUS QUERY 12
You can also list all continuous queries by running:
LIST CONTINUOUS QUERIES
*/
package influxql

File diff suppressed because it is too large

vendor/github.com/influxdata/influxdb/influxql/parser.go

File diff suppressed because it is too large


@@ -0,0 +1,223 @@
package influxql
import (
"encoding/json"
"errors"
"hash/fnv"
"sort"
)
// TagSet is a fundamental concept within the query system. It represents a composite series,
// composed of multiple individual series that share a set of tag attributes.
type TagSet struct {
Tags map[string]string
Filters []Expr
SeriesKeys []string
Key []byte
}
// AddFilter adds a series-level filter to the Tagset.
func (t *TagSet) AddFilter(key string, filter Expr) {
t.SeriesKeys = append(t.SeriesKeys, key)
t.Filters = append(t.Filters, filter)
}
// Row represents a single row returned from the execution of a statement.
type Row struct {
Name string `json:"name,omitempty"`
Tags map[string]string `json:"tags,omitempty"`
Columns []string `json:"columns,omitempty"`
Values [][]interface{} `json:"values,omitempty"`
Err error `json:"err,omitempty"`
}
// tagsHash returns a hash of tag key/value pairs.
func (r *Row) tagsHash() uint64 {
h := fnv.New64a()
keys := r.tagsKeys()
for _, k := range keys {
h.Write([]byte(k))
h.Write([]byte(r.Tags[k]))
}
return h.Sum64()
}
// tagsKeys returns a sorted list of tag keys.
func (r *Row) tagsKeys() []string {
a := make([]string, 0, len(r.Tags))
for k := range r.Tags {
a = append(a, k)
}
sort.Strings(a)
return a
}
// Rows represents a list of rows that can be sorted consistently by name/tag.
type Rows []*Row
func (p Rows) Len() int { return len(p) }
func (p Rows) Less(i, j int) bool {
// Sort by name first.
if p[i].Name != p[j].Name {
return p[i].Name < p[j].Name
}
// Sort by tag set hash. Tags don't have a meaningful sort order so we
// just compute a hash and sort by that instead. This allows the tests
// to receive rows in a predictable order every time.
return p[i].tagsHash() < p[j].tagsHash()
}
func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Result represents a resultset returned from a single statement.
type Result struct {
// StatementID is just the statement's position in the query. It's used
// to combine statement results if they're being buffered in memory.
StatementID int `json:"-"`
Series Rows
Err error
}
// MarshalJSON encodes the result into JSON.
func (r *Result) MarshalJSON() ([]byte, error) {
// Define a struct that outputs "error" as a string.
var o struct {
Series []*Row `json:"series,omitempty"`
Err string `json:"error,omitempty"`
}
// Copy fields to output struct.
o.Series = r.Series
if r.Err != nil {
o.Err = r.Err.Error()
}
return json.Marshal(&o)
}
// UnmarshalJSON decodes the data into the Result struct
func (r *Result) UnmarshalJSON(b []byte) error {
var o struct {
Series []*Row `json:"series,omitempty"`
Err string `json:"error,omitempty"`
}
err := json.Unmarshal(b, &o)
if err != nil {
return err
}
r.Series = o.Series
if o.Err != "" {
r.Err = errors.New(o.Err)
}
return nil
}
func GetProcessor(expr Expr, startIndex int) (Processor, int) {
switch expr := expr.(type) {
case *VarRef:
return newEchoProcessor(startIndex), startIndex + 1
case *Call:
return newEchoProcessor(startIndex), startIndex + 1
case *BinaryExpr:
return getBinaryProcessor(expr, startIndex)
case *ParenExpr:
return GetProcessor(expr.Expr, startIndex)
case *NumberLiteral:
return newLiteralProcessor(expr.Val), startIndex
case *StringLiteral:
return newLiteralProcessor(expr.Val), startIndex
case *BooleanLiteral:
return newLiteralProcessor(expr.Val), startIndex
case *TimeLiteral:
return newLiteralProcessor(expr.Val), startIndex
case *DurationLiteral:
return newLiteralProcessor(expr.Val), startIndex
}
panic("unreachable")
}
type Processor func(values []interface{}) interface{}
func newEchoProcessor(index int) Processor {
return func(values []interface{}) interface{} {
return values[index]
}
}
func newLiteralProcessor(val interface{}) Processor {
return func(values []interface{}) interface{} {
return val
}
}
func getBinaryProcessor(expr *BinaryExpr, startIndex int) (Processor, int) {
lhs, index := GetProcessor(expr.LHS, startIndex)
rhs, index := GetProcessor(expr.RHS, index)
return newBinaryExprEvaluator(expr.Op, lhs, rhs), index
}
func newBinaryExprEvaluator(op Token, lhs, rhs Processor) Processor {
switch op {
case ADD:
return func(values []interface{}) interface{} {
l := lhs(values)
r := rhs(values)
if lv, ok := l.(float64); ok {
if rv, ok := r.(float64); ok {
return lv + rv
}
}
return nil
}
case SUB:
return func(values []interface{}) interface{} {
l := lhs(values)
r := rhs(values)
if lv, ok := l.(float64); ok {
if rv, ok := r.(float64); ok {
return lv - rv
}
}
return nil
}
case MUL:
return func(values []interface{}) interface{} {
l := lhs(values)
r := rhs(values)
if lv, ok := l.(float64); ok {
if rv, ok := r.(float64); ok {
return lv * rv
}
}
return nil
}
case DIV:
return func(values []interface{}) interface{} {
l := lhs(values)
r := rhs(values)
if lv, ok := l.(float64); ok {
if rv, ok := r.(float64); ok {
if rv != 0 {
return lv / rv
}
}
}
return nil
}
default:
// we shouldn't get here, but give them back nils if it goes this way
return func(values []interface{}) interface{} {
return nil
}
}
}


@@ -0,0 +1,561 @@
package influxql
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"strings"
)
// Scanner represents a lexical scanner for InfluxQL.
type Scanner struct {
r *reader
}
// NewScanner returns a new instance of Scanner.
func NewScanner(r io.Reader) *Scanner {
return &Scanner{r: &reader{r: bufio.NewReader(r)}}
}
// Scan returns the next token and position from the underlying reader.
// Also returns the literal text read for strings, numbers, and duration tokens
// since these token types can have different literal representations.
func (s *Scanner) Scan() (tok Token, pos Pos, lit string) {
// Read next code point.
ch0, pos := s.r.read()
// If we see whitespace then consume all contiguous whitespace.
// If we see a letter, or certain acceptable special characters, then consume
// as an ident or reserved word.
if isWhitespace(ch0) {
return s.scanWhitespace()
} else if isLetter(ch0) || ch0 == '_' {
s.r.unread()
return s.scanIdent()
} else if isDigit(ch0) {
return s.scanNumber()
}
// Otherwise parse individual characters.
switch ch0 {
case eof:
return EOF, pos, ""
case '"':
s.r.unread()
return s.scanIdent()
case '\'':
return s.scanString()
case '.':
ch1, _ := s.r.read()
s.r.unread()
if isDigit(ch1) {
return s.scanNumber()
}
return DOT, pos, ""
case '+', '-':
return s.scanNumber()
case '*':
return MUL, pos, ""
case '/':
return DIV, pos, ""
case '=':
if ch1, _ := s.r.read(); ch1 == '~' {
return EQREGEX, pos, ""
}
s.r.unread()
return EQ, pos, ""
case '!':
if ch1, _ := s.r.read(); ch1 == '=' {
return NEQ, pos, ""
} else if ch1 == '~' {
return NEQREGEX, pos, ""
}
s.r.unread()
case '>':
if ch1, _ := s.r.read(); ch1 == '=' {
return GTE, pos, ""
}
s.r.unread()
return GT, pos, ""
case '<':
if ch1, _ := s.r.read(); ch1 == '=' {
return LTE, pos, ""
} else if ch1 == '>' {
return NEQ, pos, ""
}
s.r.unread()
return LT, pos, ""
case '(':
return LPAREN, pos, ""
case ')':
return RPAREN, pos, ""
case ',':
return COMMA, pos, ""
case ';':
return SEMICOLON, pos, ""
}
return ILLEGAL, pos, string(ch0)
}
// scanWhitespace consumes the current rune and all contiguous whitespace.
func (s *Scanner) scanWhitespace() (tok Token, pos Pos, lit string) {
// Create a buffer and read the current character into it.
var buf bytes.Buffer
ch, pos := s.r.curr()
_, _ = buf.WriteRune(ch)
// Read every subsequent whitespace character into the buffer.
// Non-whitespace characters and EOF will cause the loop to exit.
for {
ch, _ = s.r.read()
if ch == eof {
break
} else if !isWhitespace(ch) {
s.r.unread()
break
} else {
_, _ = buf.WriteRune(ch)
}
}
return WS, pos, buf.String()
}
func (s *Scanner) scanIdent() (tok Token, pos Pos, lit string) {
// Save the starting position of the identifier.
_, pos = s.r.read()
s.r.unread()
var buf bytes.Buffer
for {
if ch, _ := s.r.read(); ch == eof {
break
} else if ch == '"' {
tok0, pos0, lit0 := s.scanString()
if tok0 == BADSTRING || tok0 == BADESCAPE {
return tok0, pos0, lit0
}
return IDENT, pos, lit0
} else if isIdentChar(ch) {
s.r.unread()
buf.WriteString(ScanBareIdent(s.r))
} else {
s.r.unread()
break
}
}
lit = buf.String()
// If the literal matches a keyword then return that keyword.
if tok = Lookup(lit); tok != IDENT {
return tok, pos, ""
}
return IDENT, pos, lit
}
// scanString consumes a contiguous string of non-quote characters.
// Quote characters can be consumed if they're first escaped with a backslash.
func (s *Scanner) scanString() (tok Token, pos Pos, lit string) {
s.r.unread()
_, pos = s.r.curr()
var err error
lit, err = ScanString(s.r)
if err == errBadString {
return BADSTRING, pos, lit
} else if err == errBadEscape {
_, pos = s.r.curr()
return BADESCAPE, pos, lit
}
return STRING, pos, lit
}
func (s *Scanner) ScanRegex() (tok Token, pos Pos, lit string) {
_, pos = s.r.curr()
// Start & end sentinels.
start, end := '/', '/'
// Valid escape chars.
escapes := map[rune]rune{'/': '/'}
b, err := ScanDelimited(s.r, start, end, escapes, true)
if err == errBadEscape {
_, pos = s.r.curr()
return BADESCAPE, pos, lit
} else if err != nil {
return BADREGEX, pos, lit
}
return REGEX, pos, string(b)
}
// scanNumber consumes anything that looks like the start of a number.
// Numbers start with a digit, full stop, plus sign or minus sign.
// This function can return non-number tokens if a scan is a false positive.
// For example, a minus sign followed by a letter will just return a minus sign.
func (s *Scanner) scanNumber() (tok Token, pos Pos, lit string) {
var buf bytes.Buffer
// Check if the initial rune is a "+" or "-".
ch, pos := s.r.curr()
if ch == '+' || ch == '-' {
// Peek at the next two runes.
ch1, _ := s.r.read()
ch2, _ := s.r.read()
s.r.unread()
s.r.unread()
// This rune must be followed by a digit or a full stop and a digit.
if isDigit(ch1) || (ch1 == '.' && isDigit(ch2)) {
_, _ = buf.WriteRune(ch)
} else if ch == '+' {
return ADD, pos, ""
} else if ch == '-' {
return SUB, pos, ""
}
} else if ch == '.' {
// Peek and see if the next rune is a digit.
ch1, _ := s.r.read()
s.r.unread()
if !isDigit(ch1) {
return ILLEGAL, pos, "."
}
// Unread the full stop so we can read it later.
s.r.unread()
} else {
s.r.unread()
}
// Read as many digits as possible.
_, _ = buf.WriteString(s.scanDigits())
// If next code points are a full stop and digit then consume them.
if ch0, _ := s.r.read(); ch0 == '.' {
if ch1, _ := s.r.read(); isDigit(ch1) {
_, _ = buf.WriteRune(ch0)
_, _ = buf.WriteRune(ch1)
_, _ = buf.WriteString(s.scanDigits())
} else {
s.r.unread()
s.r.unread()
}
} else {
s.r.unread()
}
// Attempt to read as a duration if it doesn't have a fractional part.
if !strings.Contains(buf.String(), ".") {
// If the next rune is a duration unit (u, µ, s, h, d, w, m, ms) then return a duration token.
if ch0, _ := s.r.read(); ch0 == 'u' || ch0 == 'µ' || ch0 == 's' || ch0 == 'h' || ch0 == 'd' || ch0 == 'w' {
_, _ = buf.WriteRune(ch0)
return DURATION_VAL, pos, buf.String()
} else if ch0 == 'm' {
_, _ = buf.WriteRune(ch0)
if ch1, _ := s.r.read(); ch1 == 's' {
_, _ = buf.WriteRune(ch1)
} else {
s.r.unread()
}
return DURATION_VAL, pos, buf.String()
}
s.r.unread()
}
return NUMBER, pos, buf.String()
}
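For intuition, a minimal in-package test sketch of the tokens this logic produces (assumes `strings` and `testing` imports; not part of the vendored file):
```go
func TestScanNumberSketch(t *testing.T) {
	cases := []struct {
		in  string
		tok Token
	}{
		{"12345", NUMBER},      // bare digits
		{"10.5", NUMBER},       // fractional part consumed
		{"10s", DURATION_VAL},  // single-rune duration unit
		{"10ms", DURATION_VAL}, // two-rune unit "ms"
		{"10.5s", NUMBER},      // fractional values are never durations
		{"-", SUB},             // false positive: a bare minus sign
	}
	for _, c := range cases {
		if tok, _, _ := NewScanner(strings.NewReader(c.in)).Scan(); tok != c.tok {
			t.Errorf("%q: got %v, want %v", c.in, tok, c.tok)
		}
	}
}
```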
// scanDigits consumes a contiguous series of digits.
func (s *Scanner) scanDigits() string {
var buf bytes.Buffer
for {
ch, _ := s.r.read()
if !isDigit(ch) {
s.r.unread()
break
}
_, _ = buf.WriteRune(ch)
}
return buf.String()
}
// isWhitespace returns true if the rune is a space, tab, or newline.
func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' }
// isLetter returns true if the rune is a letter.
func isLetter(ch rune) bool { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') }
// isDigit returns true if the rune is a digit.
func isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') }
// isIdentChar returns true if the rune can be used in an unquoted identifier.
func isIdentChar(ch rune) bool { return isLetter(ch) || isDigit(ch) || ch == '_' }
// isIdentFirstChar returns true if the rune can be used as the first char in an unquoted identifier.
func isIdentFirstChar(ch rune) bool { return isLetter(ch) || ch == '_' }
// bufScanner represents a wrapper for scanner to add a buffer.
// It provides a fixed-length circular buffer that can be unread.
type bufScanner struct {
s *Scanner
i int // buffer index
n int // buffer size
buf [3]struct {
tok Token
pos Pos
lit string
}
}
// newBufScanner returns a new buffered scanner for a reader.
func newBufScanner(r io.Reader) *bufScanner {
return &bufScanner{s: NewScanner(r)}
}
// Scan reads the next token from the scanner.
func (s *bufScanner) Scan() (tok Token, pos Pos, lit string) {
return s.scanFunc(s.s.Scan)
}
// ScanRegex reads a regex token from the scanner.
func (s *bufScanner) ScanRegex() (tok Token, pos Pos, lit string) {
return s.scanFunc(s.s.ScanRegex)
}
// scanFunc uses the provided function to scan the next token.
func (s *bufScanner) scanFunc(scan func() (Token, Pos, string)) (tok Token, pos Pos, lit string) {
// If we have unread tokens then read them off the buffer first.
if s.n > 0 {
s.n--
return s.curr()
}
// Move buffer position forward and save the token.
s.i = (s.i + 1) % len(s.buf)
buf := &s.buf[s.i]
buf.tok, buf.pos, buf.lit = scan()
return s.curr()
}
// Unscan pushes the previously read token back onto the buffer.
func (s *bufScanner) Unscan() { s.n++ }
// curr returns the last read token.
func (s *bufScanner) curr() (tok Token, pos Pos, lit string) {
buf := &s.buf[(s.i-s.n+len(s.buf))%len(s.buf)]
return buf.tok, buf.pos, buf.lit
}
// reader represents a buffered rune reader used by the scanner.
// It provides a fixed-length circular buffer that can be unread.
type reader struct {
r io.RuneScanner
i int // buffer index
n int // buffer char count
pos Pos // last read rune position
buf [3]struct {
ch rune
pos Pos
}
eof bool // true if reader has ever seen eof.
}
// ReadRune reads the next rune from the reader.
// This is a wrapper function to implement the io.RuneReader interface.
// Note that this function does not return size.
func (r *reader) ReadRune() (ch rune, size int, err error) {
ch, _ = r.read()
if ch == eof {
err = io.EOF
}
return
}
// UnreadRune pushes the previously read rune back onto the buffer.
// This is a wrapper function to implement the io.RuneScanner interface.
func (r *reader) UnreadRune() error {
r.unread()
return nil
}
// read reads the next rune from the reader.
func (r *reader) read() (ch rune, pos Pos) {
// If we have unread characters then read them off the buffer first.
if r.n > 0 {
r.n--
return r.curr()
}
// Read next rune from underlying reader.
// Any error (including io.EOF) should return as EOF.
ch, _, err := r.r.ReadRune()
if err != nil {
ch = eof
} else if ch == '\r' {
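// Normalize line endings: fold a bare '\r' or a "\r\n" pair into a single '\n'.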
if ch, _, err := r.r.ReadRune(); err != nil {
// nop
} else if ch != '\n' {
_ = r.r.UnreadRune()
}
ch = '\n'
}
// Save character and position to the buffer.
r.i = (r.i + 1) % len(r.buf)
buf := &r.buf[r.i]
buf.ch, buf.pos = ch, r.pos
// Update position.
// Only count EOF once.
if ch == '\n' {
r.pos.Line++
r.pos.Char = 0
} else if !r.eof {
r.pos.Char++
}
// Mark the reader as EOF.
// This is used so we don't double count EOF characters.
if ch == eof {
r.eof = true
}
return r.curr()
}
// unread pushes the previously read rune back onto the buffer.
func (r *reader) unread() {
r.n++
}
// curr returns the last read character and position.
func (r *reader) curr() (ch rune, pos Pos) {
i := (r.i - r.n + len(r.buf)) % len(r.buf)
buf := &r.buf[i]
return buf.ch, buf.pos
}
// eof is a marker code point to signify that the reader can't read any more.
const eof = rune(0)
// ScanDelimited reads a delimited set of runes, unescaping any escape sequences
// listed in escapes. If escapesPassThru is true, unknown escape sequences are
// passed through verbatim instead of returning errBadEscape.
func ScanDelimited(r io.RuneScanner, start, end rune, escapes map[rune]rune, escapesPassThru bool) ([]byte, error) {
// Scan start delimiter.
if ch, _, err := r.ReadRune(); err != nil {
return nil, err
} else if ch != start {
return nil, fmt.Errorf("expected %s; found %s", string(start), string(ch))
}
var buf bytes.Buffer
for {
ch0, _, err := r.ReadRune()
if ch0 == end {
return buf.Bytes(), nil
} else if err != nil {
return buf.Bytes(), err
} else if ch0 == '\n' {
return nil, errors.New("delimited text contains new line")
} else if ch0 == '\\' {
// If the next character is an escape then write the escaped char.
// If it's not a valid escape then return an error.
ch1, _, err := r.ReadRune()
if err != nil {
return nil, err
}
c, ok := escapes[ch1]
if !ok {
if escapesPassThru {
// Unread ch1 (char after the \)
_ = r.UnreadRune()
// Write ch0 (\) to the output buffer.
_, _ = buf.WriteRune(ch0)
continue
} else {
buf.Reset()
_, _ = buf.WriteRune(ch0)
_, _ = buf.WriteRune(ch1)
return buf.Bytes(), errBadEscape
}
}
_, _ = buf.WriteRune(c)
} else {
_, _ = buf.WriteRune(ch0)
}
}
}
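A quick sketch of `ScanDelimited` with the regex-style arguments used by `ScanRegex` above (assumes a `strings` import):
```go
// An escaped delimiter is unescaped in the output.
b, _ := ScanDelimited(strings.NewReader(`/a\/b/`), '/', '/', map[rune]rune{'/': '/'}, true)
// b == []byte("a/b")

// With escapesPassThru set, unknown escapes such as `\d` pass through verbatim.
b, _ = ScanDelimited(strings.NewReader(`/a\d+/`), '/', '/', map[rune]rune{'/': '/'}, true)
// b == []byte(`a\d+`)
```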
// ScanString reads a quoted string from a rune reader.
func ScanString(r io.RuneScanner) (string, error) {
ending, _, err := r.ReadRune()
if err != nil {
return "", errBadString
}
var buf bytes.Buffer
for {
ch0, _, err := r.ReadRune()
if ch0 == ending {
return buf.String(), nil
} else if err != nil || ch0 == '\n' {
return buf.String(), errBadString
} else if ch0 == '\\' {
// If the next character is an escape then write the escaped char.
// If it's not a valid escape then return an error.
ch1, _, _ := r.ReadRune()
if ch1 == 'n' {
_, _ = buf.WriteRune('\n')
} else if ch1 == '\\' {
_, _ = buf.WriteRune('\\')
} else if ch1 == '"' {
_, _ = buf.WriteRune('"')
} else {
return string(ch0) + string(ch1), errBadEscape
}
} else {
_, _ = buf.WriteRune(ch0)
}
}
}
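A sketch of `ScanString` behavior (assumes a `strings` import); note the first rune read is taken as the quote character:
```go
s, err := ScanString(strings.NewReader(`"hello\nworld"`))
// s == "hello\nworld" with a real newline; err == nil

_, err = ScanString(strings.NewReader(`"unterminated`))
// err == errBadString: the closing quote was never found
```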
var errBadString = errors.New("bad string")
var errBadEscape = errors.New("bad escape")
var errBadRegex = errors.New("bad regex")
// ScanBareIdent reads a bare identifier from a rune reader.
func ScanBareIdent(r io.RuneScanner) string {
// Read every ident character into the buffer.
// Non-ident characters and EOF will cause the loop to exit.
var buf bytes.Buffer
for {
ch, _, err := r.ReadRune()
if err != nil {
break
} else if !isIdentChar(ch) {
r.UnreadRune()
break
} else {
_, _ = buf.WriteRune(ch)
}
}
return buf.String()
}
var errInvalidIdentifier = errors.New("invalid identifier")
// IsRegexOp returns true if the operator accepts a regex operand.
func IsRegexOp(t Token) bool {
return (t == EQREGEX || t == NEQREGEX)
}
// assert will panic with a given formatted message if the given condition is false.
func assert(condition bool, msg string, v ...interface{}) {
if !condition {
panic(fmt.Sprintf("assert failed: "+msg, v...))
}
}

296
vendor/github.com/influxdata/influxdb/influxql/token.go generated vendored Normal file

@@ -0,0 +1,296 @@
package influxql
import (
"strings"
)
// Token is a lexical token of the InfluxQL language.
type Token int
const (
// Special tokens
ILLEGAL Token = iota
EOF
WS
literal_beg
// Literals
IDENT // main
NUMBER // 12345.67
DURATION_VAL // 13h
STRING // "abc"
BADSTRING // "abc
BADESCAPE // \q
TRUE // true
FALSE // false
REGEX // Regular expressions
BADREGEX // `.*
literal_end
operator_beg
// Operators
ADD // +
SUB // -
MUL // *
DIV // /
AND // AND
OR // OR
EQ // =
NEQ // !=
EQREGEX // =~
NEQREGEX // !~
LT // <
LTE // <=
GT // >
GTE // >=
operator_end
LPAREN // (
RPAREN // )
COMMA // ,
SEMICOLON // ;
DOT // .
keyword_beg
// Keywords
ALL
ALTER
AS
ASC
BEGIN
BY
CREATE
CONTINUOUS
DATABASE
DATABASES
DEFAULT
DELETE
DESC
DISTINCT
DROP
DURATION
END
EXISTS
EXPLAIN
FIELD
FOR
FROM
GRANT
GRANTS
GROUP
IF
IN
INF
INNER
INSERT
INTO
KEY
KEYS
LIMIT
MEASUREMENT
MEASUREMENTS
OFFSET
ON
ORDER
PASSWORD
POLICY
POLICIES
PRIVILEGES
QUERIES
QUERY
READ
REPLICATION
RETENTION
REVOKE
SELECT
SERIES
SERVERS
SET
SHOW
SLIMIT
STATS
DIAGNOSTICS
SOFFSET
TAG
TO
USER
USERS
VALUES
WHERE
WITH
WRITE
keyword_end
)
var tokens = [...]string{
ILLEGAL: "ILLEGAL",
EOF: "EOF",
WS: "WS",
IDENT: "IDENT",
NUMBER: "NUMBER",
DURATION_VAL: "DURATION_VAL",
STRING: "STRING",
BADSTRING: "BADSTRING",
BADESCAPE: "BADESCAPE",
TRUE: "TRUE",
FALSE: "FALSE",
REGEX: "REGEX",
ADD: "+",
SUB: "-",
MUL: "*",
DIV: "/",
AND: "AND",
OR: "OR",
EQ: "=",
NEQ: "!=",
EQREGEX: "=~",
NEQREGEX: "!~",
LT: "<",
LTE: "<=",
GT: ">",
GTE: ">=",
LPAREN: "(",
RPAREN: ")",
COMMA: ",",
SEMICOLON: ";",
DOT: ".",
ALL: "ALL",
ALTER: "ALTER",
AS: "AS",
ASC: "ASC",
BEGIN: "BEGIN",
BY: "BY",
CREATE: "CREATE",
CONTINUOUS: "CONTINUOUS",
DATABASE: "DATABASE",
DATABASES: "DATABASES",
DEFAULT: "DEFAULT",
DELETE: "DELETE",
DESC: "DESC",
DROP: "DROP",
DISTINCT: "DISTINCT",
DURATION: "DURATION",
END: "END",
EXISTS: "EXISTS",
EXPLAIN: "EXPLAIN",
FIELD: "FIELD",
FOR: "FOR",
FROM: "FROM",
GRANT: "GRANT",
GRANTS: "GRANTS",
GROUP: "GROUP",
IF: "IF",
IN: "IN",
INF: "INF",
INNER: "INNER",
INSERT: "INSERT",
INTO: "INTO",
KEY: "KEY",
KEYS: "KEYS",
LIMIT: "LIMIT",
MEASUREMENT: "MEASUREMENT",
MEASUREMENTS: "MEASUREMENTS",
OFFSET: "OFFSET",
ON: "ON",
ORDER: "ORDER",
PASSWORD: "PASSWORD",
POLICY: "POLICY",
POLICIES: "POLICIES",
PRIVILEGES: "PRIVILEGES",
QUERIES: "QUERIES",
QUERY: "QUERY",
READ: "READ",
REPLICATION: "REPLICATION",
RETENTION: "RETENTION",
REVOKE: "REVOKE",
SELECT: "SELECT",
SERIES: "SERIES",
SERVERS: "SERVERS",
SET: "SET",
SHOW: "SHOW",
SLIMIT: "SLIMIT",
SOFFSET: "SOFFSET",
STATS: "STATS",
DIAGNOSTICS: "DIAGNOSTICS",
TAG: "TAG",
TO: "TO",
USER: "USER",
USERS: "USERS",
VALUES: "VALUES",
WHERE: "WHERE",
WITH: "WITH",
WRITE: "WRITE",
}
var keywords map[string]Token
func init() {
keywords = make(map[string]Token)
for tok := keyword_beg + 1; tok < keyword_end; tok++ {
keywords[strings.ToLower(tokens[tok])] = tok
}
for _, tok := range []Token{AND, OR} {
keywords[strings.ToLower(tokens[tok])] = tok
}
keywords["true"] = TRUE
keywords["false"] = FALSE
}
// String returns the string representation of the token.
func (tok Token) String() string {
if tok >= 0 && tok < Token(len(tokens)) {
return tokens[tok]
}
return ""
}
// Precedence returns the operator precedence of the binary operator token.
func (tok Token) Precedence() int {
switch tok {
case OR:
return 1
case AND:
return 2
case EQ, NEQ, EQREGEX, NEQREGEX, LT, LTE, GT, GTE:
return 3
case ADD, SUB:
return 4
case MUL, DIV:
return 5
}
return 0
}
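Higher precedence binds tighter in the parser, so in `a + b * c` the `b * c` subexpression is reduced first. A small sketch (assumes an `fmt` import):
```go
fmt.Println(MUL.Precedence() > ADD.Precedence()) // true: 5 > 4
fmt.Println(OR.Precedence())                     // 1, the loosest binary operator
fmt.Println(COMMA.Precedence())                  // 0: not a binary operator
```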
// isOperator returns true for operator tokens.
func (tok Token) isOperator() bool { return tok > operator_beg && tok < operator_end }
// tokstr returns a literal if provided, otherwise returns the token string.
func tokstr(tok Token, lit string) string {
if lit != "" {
return lit
}
return tok.String()
}
// Lookup returns the token associated with a given string.
func Lookup(ident string) Token {
if tok, ok := keywords[strings.ToLower(ident)]; ok {
return tok
}
return IDENT
}
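Keyword lookup is case-insensitive; anything not in the keyword map scans as a plain identifier. For example (assumes an `fmt` import):
```go
fmt.Println(Lookup("select")) // SELECT
fmt.Println(Lookup("SeLeCt")) // SELECT: matching ignores case
fmt.Println(Lookup("host"))   // IDENT: not a keyword
```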
// Pos specifies the line and character position of a token.
// The Char and Line are both zero-based indexes.
type Pos struct {
Line int
Char int
}

52
vendor/github.com/influxdata/influxdb/meta/config.go generated vendored Normal file

@@ -0,0 +1,52 @@
package meta
import (
"time"
"github.com/influxdb/influxdb/toml"
)
const (
// DefaultHostname is the default hostname if one is not provided.
DefaultHostname = "localhost"
// DefaultBindAddress is the default address to bind to.
DefaultBindAddress = ":8088"
// DefaultHeartbeatTimeout is the default heartbeat timeout for the store.
DefaultHeartbeatTimeout = 1000 * time.Millisecond
// DefaultElectionTimeout is the default election timeout for the store.
DefaultElectionTimeout = 1000 * time.Millisecond
// DefaultLeaderLeaseTimeout is the default leader lease for the store.
DefaultLeaderLeaseTimeout = 500 * time.Millisecond
// DefaultCommitTimeout is the default commit timeout for the store.
DefaultCommitTimeout = 50 * time.Millisecond
)
// Config represents the meta configuration.
type Config struct {
Dir string `toml:"dir"`
Hostname string `toml:"hostname"`
BindAddress string `toml:"bind-address"`
Peers []string `toml:"peers"`
RetentionAutoCreate bool `toml:"retention-autocreate"`
ElectionTimeout toml.Duration `toml:"election-timeout"`
HeartbeatTimeout toml.Duration `toml:"heartbeat-timeout"`
LeaderLeaseTimeout toml.Duration `toml:"leader-lease-timeout"`
CommitTimeout toml.Duration `toml:"commit-timeout"`
}
// NewConfig builds a new meta configuration with default values.
func NewConfig() Config {
return Config{
Hostname: DefaultHostname,
BindAddress: DefaultBindAddress,
RetentionAutoCreate: true,
ElectionTimeout: toml.Duration(DefaultElectionTimeout),
HeartbeatTimeout: toml.Duration(DefaultHeartbeatTimeout),
LeaderLeaseTimeout: toml.Duration(DefaultLeaderLeaseTimeout),
CommitTimeout: toml.Duration(DefaultCommitTimeout),
}
}
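A sketch of typical use, starting from the defaults and overriding per deployment (paths and peers are hypothetical; assumes `time` and the vendored `toml` package are imported):
```go
cfg := meta.NewConfig()
cfg.Dir = "/var/lib/influxdb/meta"                   // hypothetical data directory
cfg.Peers = []string{"10.0.0.1:8088"}                // hypothetical peer address
cfg.ElectionTimeout = toml.Duration(2 * time.Second) // override the 1s default
```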

1055
vendor/github.com/influxdata/influxdb/meta/data.go generated vendored Normal file

File diff suppressed because it is too large

116
vendor/github.com/influxdata/influxdb/meta/errors.go generated vendored Normal file

@@ -0,0 +1,116 @@
package meta
import (
"errors"
"fmt"
)
var (
// ErrStoreOpen is returned when opening an already open store.
ErrStoreOpen = errors.New("store already open")
// ErrStoreClosed is returned when closing an already closed store.
ErrStoreClosed = errors.New("raft store already closed")
// ErrTooManyPeers is returned when more than 3 peers are used.
ErrTooManyPeers = errors.New("too many peers; influxdb v0.9.0 is limited to 3 nodes in a cluster")
)
var (
// ErrNodeExists is returned when creating an already existing node.
ErrNodeExists = errors.New("node already exists")
// ErrNodeNotFound is returned when mutating a node that doesn't exist.
ErrNodeNotFound = errors.New("node not found")
// ErrNodesRequired is returned when at least one node is required for an operation.
// This occurs when creating a shard group.
ErrNodesRequired = errors.New("at least one node required")
)
var (
// ErrDatabaseExists is returned when creating an already existing database.
ErrDatabaseExists = errors.New("database already exists")
// ErrDatabaseNotFound is returned when mutating a database that doesn't exist.
ErrDatabaseNotFound = errors.New("database not found")
// ErrDatabaseNameRequired is returned when creating a database without a name.
ErrDatabaseNameRequired = errors.New("database name required")
)
var (
// ErrRetentionPolicyExists is returned when creating an already existing policy.
ErrRetentionPolicyExists = errors.New("retention policy already exists")
// ErrRetentionPolicyNotFound is returned when mutating a policy that doesn't exist.
ErrRetentionPolicyNotFound = errors.New("retention policy not found")
// ErrRetentionPolicyNameRequired is returned when creating a policy without a name.
ErrRetentionPolicyNameRequired = errors.New("retention policy name required")
// ErrRetentionPolicyNameExists is returned when renaming a policy to
// the same name as another existing policy.
ErrRetentionPolicyNameExists = errors.New("retention policy name already exists")
// ErrRetentionPolicyDurationTooLow is returned when updating a retention
// policy that has a duration lower than the allowed minimum.
ErrRetentionPolicyDurationTooLow = errors.New(fmt.Sprintf("retention policy duration must be at least %s",
RetentionPolicyMinDuration))
// ErrReplicationFactorMismatch is returned when the replication factor
// does not match the number of nodes in the cluster. This is a temporary
// restriction until v0.9.1 is released.
ErrReplicationFactorMismatch = errors.New("replication factor must match cluster size; this limitation will be lifted in v0.9.1")
)
var (
// ErrShardGroupExists is returned when creating an already existing shard group.
ErrShardGroupExists = errors.New("shard group already exists")
// ErrShardGroupNotFound is returned when mutating a shard group that doesn't exist.
ErrShardGroupNotFound = errors.New("shard group not found")
)
var (
// ErrContinuousQueryExists is returned when creating an already existing continuous query.
ErrContinuousQueryExists = errors.New("continuous query already exists")
// ErrContinuousQueryNotFound is returned when removing a continuous query that doesn't exist.
ErrContinuousQueryNotFound = errors.New("continuous query not found")
)
var (
// ErrUserExists is returned when creating an already existing user.
ErrUserExists = errors.New("user already exists")
// ErrUserNotFound is returned when mutating a user that doesn't exist.
ErrUserNotFound = errors.New("user not found")
// ErrUsernameRequired is returned when creating a user without a username.
ErrUsernameRequired = errors.New("username required")
)
var errs = [...]error{
ErrStoreOpen, ErrStoreClosed,
ErrNodeExists, ErrNodeNotFound,
ErrDatabaseExists, ErrDatabaseNotFound, ErrDatabaseNameRequired,
}
// errLookup stores a mapping of error strings to well defined error types.
var errLookup = make(map[string]error)
func init() {
for _, err := range errs {
errLookup[err.Error()] = err
}
}
// lookupError returns a known error reference, if one exists.
// Otherwise returns err.
func lookupError(err error) error {
if e, ok := errLookup[err.Error()]; ok {
return e
}
return err
}
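A sketch from inside the `meta` package (assumes `errors` and `fmt` imports): an error that round-tripped through a string, e.g. over the wire, maps back to its canonical value.
```go
err := errors.New("database not found")
fmt.Println(lookupError(err) == ErrDatabaseNotFound) // true: remapped to the canonical error
fmt.Println(lookupError(errors.New("boom")))         // "boom": unknown errors pass through
```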

File diff suppressed because it is too large


@@ -0,0 +1,257 @@
package internal;
//========================================================================
//
// Metadata
//
//========================================================================
message Data {
required uint64 Term = 1;
required uint64 Index = 2;
required uint64 ClusterID = 3;
repeated NodeInfo Nodes = 4;
repeated DatabaseInfo Databases = 5;
repeated UserInfo Users = 6;
required uint64 MaxNodeID = 7;
required uint64 MaxShardGroupID = 8;
required uint64 MaxShardID = 9;
}
message NodeInfo {
required uint64 ID = 1;
required string Host = 2;
}
message DatabaseInfo {
required string Name = 1;
required string DefaultRetentionPolicy = 2;
repeated RetentionPolicyInfo RetentionPolicies = 3;
repeated ContinuousQueryInfo ContinuousQueries = 4;
}
message RetentionPolicyInfo {
required string Name = 1;
required int64 Duration = 2;
required int64 ShardGroupDuration = 3;
required uint32 ReplicaN = 4;
repeated ShardGroupInfo ShardGroups = 5;
}
message ShardGroupInfo {
required uint64 ID = 1;
required int64 StartTime = 2;
required int64 EndTime = 3;
required int64 DeletedAt = 4;
repeated ShardInfo Shards = 5;
}
message ShardInfo {
required uint64 ID = 1;
repeated uint64 OwnerIDs = 2;
}
message ContinuousQueryInfo {
required string Name = 1;
required string Query = 2;
}
message UserInfo {
required string Name = 1;
required string Hash = 2;
required bool Admin = 3;
repeated UserPrivilege Privileges = 4;
}
message UserPrivilege {
required string Database = 1;
required int32 Privilege = 2;
}
//========================================================================
//
// COMMANDS
//
//========================================================================
message Command {
extensions 100 to max;
enum Type {
CreateNodeCommand = 1;
DeleteNodeCommand = 2;
CreateDatabaseCommand = 3;
DropDatabaseCommand = 4;
CreateRetentionPolicyCommand = 5;
DropRetentionPolicyCommand = 6;
SetDefaultRetentionPolicyCommand = 7;
UpdateRetentionPolicyCommand = 8;
CreateShardGroupCommand = 9;
DeleteShardGroupCommand = 10;
CreateContinuousQueryCommand = 11;
DropContinuousQueryCommand = 12;
CreateUserCommand = 13;
DropUserCommand = 14;
UpdateUserCommand = 15;
SetPrivilegeCommand = 16;
SetDataCommand = 17;
SetAdminPrivilegeCommand = 18;
}
required Type type = 1;
}
message CreateNodeCommand {
extend Command {
optional CreateNodeCommand command = 101;
}
required string Host = 1;
required uint64 Rand = 2;
}
message DeleteNodeCommand {
extend Command {
optional DeleteNodeCommand command = 102;
}
required uint64 ID = 1;
}
message CreateDatabaseCommand {
extend Command {
optional CreateDatabaseCommand command = 103;
}
required string Name = 1;
}
message DropDatabaseCommand {
extend Command {
optional DropDatabaseCommand command = 104;
}
required string Name = 1;
}
message CreateRetentionPolicyCommand {
extend Command {
optional CreateRetentionPolicyCommand command = 105;
}
required string Database = 1;
required RetentionPolicyInfo RetentionPolicy = 2;
}
message DropRetentionPolicyCommand {
extend Command {
optional DropRetentionPolicyCommand command = 106;
}
required string Database = 1;
required string Name = 2;
}
message SetDefaultRetentionPolicyCommand {
extend Command {
optional SetDefaultRetentionPolicyCommand command = 107;
}
required string Database = 1;
required string Name = 2;
}
message UpdateRetentionPolicyCommand {
extend Command {
optional UpdateRetentionPolicyCommand command = 108;
}
required string Database = 1;
required string Name = 2;
optional string NewName = 3;
optional int64 Duration = 4;
optional uint32 ReplicaN = 5;
}
message CreateShardGroupCommand {
extend Command {
optional CreateShardGroupCommand command = 109;
}
required string Database = 1;
required string Policy = 2;
required int64 Timestamp = 3;
}
message DeleteShardGroupCommand {
extend Command {
optional DeleteShardGroupCommand command = 110;
}
required string Database = 1;
required string Policy = 2;
required uint64 ShardGroupID = 3;
}
message CreateContinuousQueryCommand {
extend Command {
optional CreateContinuousQueryCommand command = 111;
}
required string Database = 1;
required string Name = 2;
required string Query = 3;
}
message DropContinuousQueryCommand {
extend Command {
optional DropContinuousQueryCommand command = 112;
}
required string Database = 1;
required string Name = 2;
}
message CreateUserCommand {
extend Command {
optional CreateUserCommand command = 113;
}
required string Name = 1;
required string Hash = 2;
required bool Admin = 3;
}
message DropUserCommand {
extend Command {
optional DropUserCommand command = 114;
}
required string Name = 1;
}
message UpdateUserCommand {
extend Command {
optional UpdateUserCommand command = 115;
}
required string Name = 1;
required string Hash = 2;
}
message SetPrivilegeCommand {
extend Command {
optional SetPrivilegeCommand command = 116;
}
required string Username = 1;
required string Database = 2;
required int32 Privilege = 3;
}
message SetDataCommand {
extend Command {
optional SetDataCommand command = 117;
}
required Data Data = 1;
}
message SetAdminPrivilegeCommand {
extend Command {
optional SetAdminPrivilegeCommand command = 118;
}
required string Username = 1;
required bool Admin = 2;
}
message Response {
required bool OK = 1;
optional string Error = 2;
optional uint64 Index = 3;
}


@@ -0,0 +1,280 @@
package meta
import (
"fmt"
"github.com/influxdb/influxdb/influxql"
)
// StatementExecutor translates InfluxQL queries to meta store methods.
type StatementExecutor struct {
Store interface {
Nodes() ([]NodeInfo, error)
Database(name string) (*DatabaseInfo, error)
Databases() ([]DatabaseInfo, error)
CreateDatabase(name string) (*DatabaseInfo, error)
DropDatabase(name string) error
DefaultRetentionPolicy(database string) (*RetentionPolicyInfo, error)
CreateRetentionPolicy(database string, rpi *RetentionPolicyInfo) (*RetentionPolicyInfo, error)
UpdateRetentionPolicy(database, name string, rpu *RetentionPolicyUpdate) error
SetDefaultRetentionPolicy(database, name string) error
DropRetentionPolicy(database, name string) error
Users() ([]UserInfo, error)
CreateUser(name, password string, admin bool) (*UserInfo, error)
UpdateUser(name, password string) error
DropUser(name string) error
SetPrivilege(username, database string, p influxql.Privilege) error
SetAdminPrivilege(username string, admin bool) error
UserPrivileges(username string) (map[string]influxql.Privilege, error)
UserPrivilege(username, database string) (*influxql.Privilege, error)
CreateContinuousQuery(database, name, query string) error
DropContinuousQuery(database, name string) error
}
}
// ExecuteStatement executes stmt against the meta store as user.
func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement) *influxql.Result {
switch stmt := stmt.(type) {
case *influxql.CreateDatabaseStatement:
return e.executeCreateDatabaseStatement(stmt)
case *influxql.DropDatabaseStatement:
return e.executeDropDatabaseStatement(stmt)
case *influxql.ShowDatabasesStatement:
return e.executeShowDatabasesStatement(stmt)
case *influxql.ShowGrantsForUserStatement:
return e.executeShowGrantsForUserStatement(stmt)
case *influxql.ShowServersStatement:
return e.executeShowServersStatement(stmt)
case *influxql.CreateUserStatement:
return e.executeCreateUserStatement(stmt)
case *influxql.SetPasswordUserStatement:
return e.executeSetPasswordUserStatement(stmt)
case *influxql.DropUserStatement:
return e.executeDropUserStatement(stmt)
case *influxql.ShowUsersStatement:
return e.executeShowUsersStatement(stmt)
case *influxql.GrantStatement:
return e.executeGrantStatement(stmt)
case *influxql.GrantAdminStatement:
return e.executeGrantAdminStatement(stmt)
case *influxql.RevokeStatement:
return e.executeRevokeStatement(stmt)
case *influxql.RevokeAdminStatement:
return e.executeRevokeAdminStatement(stmt)
case *influxql.CreateRetentionPolicyStatement:
return e.executeCreateRetentionPolicyStatement(stmt)
case *influxql.AlterRetentionPolicyStatement:
return e.executeAlterRetentionPolicyStatement(stmt)
case *influxql.DropRetentionPolicyStatement:
return e.executeDropRetentionPolicyStatement(stmt)
case *influxql.ShowRetentionPoliciesStatement:
return e.executeShowRetentionPoliciesStatement(stmt)
case *influxql.CreateContinuousQueryStatement:
return e.executeCreateContinuousQueryStatement(stmt)
case *influxql.DropContinuousQueryStatement:
return e.executeDropContinuousQueryStatement(stmt)
case *influxql.ShowContinuousQueriesStatement:
return e.executeShowContinuousQueriesStatement(stmt)
case *influxql.ShowStatsStatement:
return e.executeShowStatsStatement(stmt)
default:
panic(fmt.Sprintf("unsupported statement type: %T", stmt))
}
}
func (e *StatementExecutor) executeCreateDatabaseStatement(q *influxql.CreateDatabaseStatement) *influxql.Result {
_, err := e.Store.CreateDatabase(q.Name)
return &influxql.Result{Err: err}
}
func (e *StatementExecutor) executeDropDatabaseStatement(q *influxql.DropDatabaseStatement) *influxql.Result {
return &influxql.Result{Err: e.Store.DropDatabase(q.Name)}
}
func (e *StatementExecutor) executeShowDatabasesStatement(q *influxql.ShowDatabasesStatement) *influxql.Result {
dis, err := e.Store.Databases()
if err != nil {
return &influxql.Result{Err: err}
}
row := &influxql.Row{Name: "databases", Columns: []string{"name"}}
for _, di := range dis {
row.Values = append(row.Values, []interface{}{di.Name})
}
return &influxql.Result{Series: []*influxql.Row{row}}
}
func (e *StatementExecutor) executeShowGrantsForUserStatement(q *influxql.ShowGrantsForUserStatement) *influxql.Result {
priv, err := e.Store.UserPrivileges(q.Name)
if err != nil {
return &influxql.Result{Err: err}
}
row := &influxql.Row{Columns: []string{"database", "privilege"}}
for d, p := range priv {
row.Values = append(row.Values, []interface{}{d, p.String()})
}
return &influxql.Result{Series: []*influxql.Row{row}}
}
func (e *StatementExecutor) executeShowServersStatement(q *influxql.ShowServersStatement) *influxql.Result {
nis, err := e.Store.Nodes()
if err != nil {
return &influxql.Result{Err: err}
}
row := &influxql.Row{Columns: []string{"id", "url"}}
for _, ni := range nis {
row.Values = append(row.Values, []interface{}{ni.ID, "http://" + ni.Host})
}
return &influxql.Result{Series: []*influxql.Row{row}}
}
func (e *StatementExecutor) executeCreateUserStatement(q *influxql.CreateUserStatement) *influxql.Result {
_, err := e.Store.CreateUser(q.Name, q.Password, q.Admin)
return &influxql.Result{Err: err}
}
func (e *StatementExecutor) executeSetPasswordUserStatement(q *influxql.SetPasswordUserStatement) *influxql.Result {
return &influxql.Result{Err: e.Store.UpdateUser(q.Name, q.Password)}
}
func (e *StatementExecutor) executeDropUserStatement(q *influxql.DropUserStatement) *influxql.Result {
return &influxql.Result{Err: e.Store.DropUser(q.Name)}
}
func (e *StatementExecutor) executeShowUsersStatement(q *influxql.ShowUsersStatement) *influxql.Result {
uis, err := e.Store.Users()
if err != nil {
return &influxql.Result{Err: err}
}
row := &influxql.Row{Columns: []string{"user", "admin"}}
for _, ui := range uis {
row.Values = append(row.Values, []interface{}{ui.Name, ui.Admin})
}
return &influxql.Result{Series: []*influxql.Row{row}}
}
func (e *StatementExecutor) executeGrantStatement(stmt *influxql.GrantStatement) *influxql.Result {
return &influxql.Result{Err: e.Store.SetPrivilege(stmt.User, stmt.On, stmt.Privilege)}
}
func (e *StatementExecutor) executeGrantAdminStatement(stmt *influxql.GrantAdminStatement) *influxql.Result {
return &influxql.Result{Err: e.Store.SetAdminPrivilege(stmt.User, true)}
}
func (e *StatementExecutor) executeRevokeStatement(stmt *influxql.RevokeStatement) *influxql.Result {
priv := influxql.NoPrivileges
// Revoking all privileges means there's no need to look at existing user privileges.
if stmt.Privilege != influxql.AllPrivileges {
p, err := e.Store.UserPrivilege(stmt.User, stmt.On)
if err != nil {
return &influxql.Result{Err: err}
}
// Bit clear (AND NOT) the user's privilege with the revoked privilege.
priv = *p &^ stmt.Privilege
}
return &influxql.Result{Err: e.Store.SetPrivilege(stmt.User, stmt.On, priv)}
}
func (e *StatementExecutor) executeRevokeAdminStatement(stmt *influxql.RevokeAdminStatement) *influxql.Result {
return &influxql.Result{Err: e.Store.SetAdminPrivilege(stmt.User, false)}
}
func (e *StatementExecutor) executeCreateRetentionPolicyStatement(stmt *influxql.CreateRetentionPolicyStatement) *influxql.Result {
rpi := NewRetentionPolicyInfo(stmt.Name)
rpi.Duration = stmt.Duration
rpi.ReplicaN = stmt.Replication
// Create new retention policy.
_, err := e.Store.CreateRetentionPolicy(stmt.Database, rpi)
if err != nil {
return &influxql.Result{Err: err}
}
// If requested, set new policy as the default.
if stmt.Default {
err = e.Store.SetDefaultRetentionPolicy(stmt.Database, stmt.Name)
}
return &influxql.Result{Err: err}
}
func (e *StatementExecutor) executeAlterRetentionPolicyStatement(stmt *influxql.AlterRetentionPolicyStatement) *influxql.Result {
rpu := &RetentionPolicyUpdate{
Duration: stmt.Duration,
ReplicaN: stmt.Replication,
}
// Update the retention policy.
err := e.Store.UpdateRetentionPolicy(stmt.Database, stmt.Name, rpu)
if err != nil {
return &influxql.Result{Err: err}
}
// If requested, set as default retention policy.
if stmt.Default {
err = e.Store.SetDefaultRetentionPolicy(stmt.Database, stmt.Name)
}
return &influxql.Result{Err: err}
}
func (e *StatementExecutor) executeDropRetentionPolicyStatement(q *influxql.DropRetentionPolicyStatement) *influxql.Result {
return &influxql.Result{Err: e.Store.DropRetentionPolicy(q.Database, q.Name)}
}
func (e *StatementExecutor) executeShowRetentionPoliciesStatement(q *influxql.ShowRetentionPoliciesStatement) *influxql.Result {
di, err := e.Store.Database(q.Database)
if err != nil {
return &influxql.Result{Err: err}
} else if di == nil {
return &influxql.Result{Err: ErrDatabaseNotFound}
}
row := &influxql.Row{Columns: []string{"name", "duration", "replicaN", "default"}}
for _, rpi := range di.RetentionPolicies {
row.Values = append(row.Values, []interface{}{rpi.Name, rpi.Duration.String(), rpi.ReplicaN, di.DefaultRetentionPolicy == rpi.Name})
}
return &influxql.Result{Series: []*influxql.Row{row}}
}
func (e *StatementExecutor) executeCreateContinuousQueryStatement(q *influxql.CreateContinuousQueryStatement) *influxql.Result {
return &influxql.Result{
Err: e.Store.CreateContinuousQuery(q.Database, q.Name, q.String()),
}
}
func (e *StatementExecutor) executeDropContinuousQueryStatement(q *influxql.DropContinuousQueryStatement) *influxql.Result {
return &influxql.Result{
Err: e.Store.DropContinuousQuery(q.Database, q.Name),
}
}
func (e *StatementExecutor) executeShowContinuousQueriesStatement(stmt *influxql.ShowContinuousQueriesStatement) *influxql.Result {
dis, err := e.Store.Databases()
if err != nil {
return &influxql.Result{Err: err}
}
rows := []*influxql.Row{}
for _, di := range dis {
row := &influxql.Row{Columns: []string{"name", "query"}, Name: di.Name}
for _, cqi := range di.ContinuousQueries {
row.Values = append(row.Values, []interface{}{cqi.Name, cqi.Query})
}
rows = append(rows, row)
}
return &influxql.Result{Series: rows}
}
func (e *StatementExecutor) executeShowStatsStatement(stmt *influxql.ShowStatsStatement) *influxql.Result {
return &influxql.Result{Err: fmt.Errorf("SHOW STATS is not implemented yet")}
}

1876
vendor/github.com/influxdata/influxdb/meta/store.go generated vendored Normal file

File diff suppressed because it is too large


@@ -0,0 +1,529 @@
package snapshot
import (
"archive/tar"
"encoding/json"
"fmt"
"io"
"os"
"sort"
"time"
)
// manifestName is the name of the manifest file in the snapshot.
const manifestName = "manifest"
// Manifest represents a list of files in a snapshot.
type Manifest struct {
Files []File `json:"files"`
}
// Diff returns a Manifest of files that are newer in m than other.
func (m *Manifest) Diff(other *Manifest) *Manifest {
diff := &Manifest{}
// Find versions of files that are newer in m.
loop:
for _, a := range m.Files {
// Try to find a newer version of the file in other.
// If found then don't append this file and move to the next file.
for _, b := range other.Files {
if a.Name != b.Name {
continue
} else if !a.ModTime.After(b.ModTime) {
continue loop
} else {
break
}
}
// Append the newest version.
diff.Files = append(diff.Files, a)
}
// Sort files.
sort.Sort(Files(diff.Files))
return diff
}
// Merge returns a Manifest that combines m with other.
// Only the newest file between the two snapshots is returned.
func (m *Manifest) Merge(other *Manifest) *Manifest {
ret := &Manifest{}
ret.Files = make([]File, len(m.Files))
copy(ret.Files, m.Files)
// Update/insert versions of files that are newer in other.
loop:
for _, a := range other.Files {
for i, b := range ret.Files {
// Ignore if it doesn't match.
if a.Name != b.Name {
continue
}
// Update if it's newer and then start the next file.
if a.ModTime.After(b.ModTime) {
ret.Files[i] = a
}
continue loop
}
// If the file wasn't found then append it.
ret.Files = append(ret.Files, a)
}
// Sort files.
sort.Sort(Files(ret.Files))
return ret
}
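A sketch of the two operations (in-package style, assumes a `time` import); Merge keeps the newer copy of a shared file, while Diff reports only files that are newer in the receiver:
```go
older, newer := time.Now().Add(-time.Hour), time.Now()
m := &Manifest{Files: []File{{Name: "db/shard1", ModTime: newer}}}
other := &Manifest{Files: []File{
	{Name: "db/shard1", ModTime: older},
	{Name: "meta", ModTime: older},
}}

merged := m.Merge(other) // "db/shard1" at newer, plus "meta"
diff := m.Diff(other)    // only "db/shard1": m's copy is newer
_, _ = merged, diff
```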
// File represents a single file in a manifest.
type File struct {
Name string `json:"name"` // filename
Size int64 `json:"size"` // file size
ModTime time.Time `json:"lastModified"` // last modified time
}
// Files represents a sortable list of files.
type Files []File
func (p Files) Len() int { return len(p) }
func (p Files) Less(i, j int) bool { return p[i].Name < p[j].Name }
func (p Files) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Reader reads a snapshot from an underlying io.Reader.
// This type is not safe for concurrent use.
type Reader struct {
tr *tar.Reader
manifest *Manifest
}
// NewReader returns a new Reader reading from r.
func NewReader(r io.Reader) *Reader {
return &Reader{
tr: tar.NewReader(r),
}
}
// Manifest returns the snapshot manifest.
func (sr *Reader) Manifest() (*Manifest, error) {
if err := sr.readManifest(); err != nil {
return nil, err
}
return sr.manifest, nil
}
// readManifest reads the first entry from the snapshot and materializes the manifest.
// This is skipped if the snapshot manifest has already been read.
func (sr *Reader) readManifest() error {
// Already read, ignore.
if sr.manifest != nil {
return nil
}
// Read manifest header.
hdr, err := sr.tr.Next()
if err != nil {
return fmt.Errorf("snapshot header: %s", err)
} else if hdr.Name != manifestName {
return fmt.Errorf("invalid snapshot header: expected manifest")
}
// Materialize manifest.
var manifest Manifest
if err := json.NewDecoder(sr.tr).Decode(&manifest); err != nil {
return fmt.Errorf("decode manifest: %s", err)
}
sr.manifest = &manifest
return nil
}
// Next returns the next file in the snapshot.
func (sr *Reader) Next() (File, error) {
// Read manifest if it hasn't been read yet.
if err := sr.readManifest(); err != nil {
return File{}, err
}
// Read next header.
hdr, err := sr.tr.Next()
if err != nil {
return File{}, err
}
// Match header to file in snapshot.
for i := range sr.manifest.Files {
if sr.manifest.Files[i].Name == hdr.Name {
return sr.manifest.Files[i], nil
}
}
// Return error if file is not in the manifest.
return File{}, fmt.Errorf("snapshot entry not found in manifest: %s", hdr.Name)
}
// Read reads the current entry in the snapshot.
func (sr *Reader) Read(b []byte) (n int, err error) {
// Read manifest if it hasn't been read yet.
if err := sr.readManifest(); err != nil {
return 0, err
}
// Pass read through to the tar reader.
return sr.tr.Read(b)
}
// MultiReader reads from a collection of snapshots.
// Only files with the highest index are read from the reader.
// This type is not safe for concurrent use.
type MultiReader struct {
readers []*Reader // underlying snapshot readers
files []*File // current file for each reader
manifest *Manifest // combined manifest from all readers
index int // index of file in snapshot to read
curr *Reader // current reader
}
// NewMultiReader returns a new MultiReader reading from a list of readers.
func NewMultiReader(readers ...io.Reader) *MultiReader {
r := &MultiReader{
readers: make([]*Reader, len(readers)),
files: make([]*File, len(readers)),
index: -1,
}
for i := range readers {
r.readers[i] = NewReader(readers[i])
}
return r
}
// Manifest returns the combined manifest from all readers.
func (ssr *MultiReader) Manifest() (*Manifest, error) {
// Use manifest if it's already been calculated.
if ssr.manifest != nil {
return ssr.manifest, nil
}
// Build manifest from other readers.
ss := &Manifest{}
for i, sr := range ssr.readers {
other, err := sr.Manifest()
if err != nil {
return nil, fmt.Errorf("manifest: idx=%d, err=%s", i, err)
}
ss = ss.Merge(other)
}
// Cache manifest and return.
ssr.manifest = ss
return ss, nil
}
// Next returns the next file in the reader.
func (ssr *MultiReader) Next() (File, error) {
ss, err := ssr.Manifest()
if err != nil {
return File{}, fmt.Errorf("manifest: %s", err)
}
// Return EOF if there are no more files in snapshot.
if ssr.index == len(ss.Files)-1 {
ssr.curr = nil
return File{}, io.EOF
}
// Queue up next files.
if err := ssr.nextFiles(); err != nil {
return File{}, fmt.Errorf("next files: %s", err)
}
// Increment the file index.
ssr.index++
sf := ss.Files[ssr.index]
// Find the matching reader. Clear other readers.
var sr *Reader
for i, f := range ssr.files {
if f == nil || f.Name != sf.Name {
continue
}
// Set reader to the first match.
if sr == nil && *f == sf {
sr = ssr.readers[i]
}
ssr.files[i] = nil
}
// Return an error if file doesn't match.
// This shouldn't happen unless the underlying snapshot is altered.
if sr == nil {
return File{}, fmt.Errorf("snapshot file not found in readers: %s", sf.Name)
}
// Set current reader.
ssr.curr = sr
// Return file.
return sf, nil
}
// nextFiles queues up a next file for all readers.
func (ssr *MultiReader) nextFiles() error {
for i, sr := range ssr.readers {
if ssr.files[i] == nil {
// Read next file.
sf, err := sr.Next()
if err == io.EOF {
ssr.files[i] = nil
continue
} else if err != nil {
return fmt.Errorf("next: reader=%d, err=%s", i, err)
}
// Cache file.
ssr.files[i] = &sf
}
}
return nil
}
// nextIndex returns the index of the next reader to read from.
// Returns -1 if all readers are at EOF.
func (ssr *MultiReader) nextIndex() int {
// Find the next file by name and lowest index.
index := -1
for i, f := range ssr.files {
if f == nil {
continue
} else if index == -1 {
index = i
} else if f.Name < ssr.files[index].Name {
index = i
} else if f.Name == ssr.files[index].Name && f.ModTime.After(ssr.files[index].ModTime) {
index = i
}
}
return index
}
// Read reads the current entry in the reader.
func (ssr *MultiReader) Read(b []byte) (n int, err error) {
if ssr.curr == nil {
return 0, io.EOF
}
return ssr.curr.Read(b)
}
// OpenFileMultiReader returns a MultiReader based on the path of the base snapshot.
// Returns the underlying files which need to be closed separately.
func OpenFileMultiReader(path string) (*MultiReader, []io.Closer, error) {
var readers []io.Reader
var closers []io.Closer
if err := func() error {
// Open original snapshot file.
f, err := os.Open(path)
if os.IsNotExist(err) {
return err
} else if err != nil {
return fmt.Errorf("open snapshot: %s", err)
}
readers = append(readers, f)
closers = append(closers, f)
// Open all incremental snapshots.
for i := 0; ; i++ {
filename := path + fmt.Sprintf(".%d", i)
f, err := os.Open(filename)
if os.IsNotExist(err) {
break
} else if err != nil {
return fmt.Errorf("open incremental snapshot: file=%s, err=%s", filename, err)
}
readers = append(readers, f)
closers = append(closers, f)
}
return nil
}(); err != nil {
closeAll(closers)
return nil, nil, err
}
return NewMultiReader(readers...), closers, nil
}
// ReadFileManifest returns a Manifest for a given base snapshot path.
// This merges all incremental backup manifests as well.
func ReadFileManifest(path string) (*Manifest, error) {
// Open a multi-snapshot reader.
ssr, files, err := OpenFileMultiReader(path)
if os.IsNotExist(err) {
return nil, err
} else if err != nil {
return nil, fmt.Errorf("open file multi reader: %s", err)
}
defer closeAll(files)
// Read manifest.
ss, err := ssr.Manifest()
if err != nil {
return nil, fmt.Errorf("manifest: %s", err)
}
return ss, nil
}
func closeAll(a []io.Closer) {
for _, c := range a {
_ = c.Close()
}
}
// Writer writes a snapshot and the underlying files to disk as a tar archive.
type Writer struct {
// The manifest to write from.
// Removing files from the manifest after creation will cause those files to be ignored.
Manifest *Manifest
// Writers for each file by filename.
// Writers will be closed as they're processed and will close by the end of WriteTo().
FileWriters map[string]FileWriter
}
// NewWriter returns a new instance of Writer.
func NewWriter() *Writer {
return &Writer{
Manifest: &Manifest{},
FileWriters: make(map[string]FileWriter),
}
}
// Close closes all file writers on the snapshot.
func (sw *Writer) Close() error {
for _, fw := range sw.FileWriters {
_ = fw.Close()
}
return nil
}
// closeUnusedWriters closes all file writers not on the manifest.
// This allows transactions on these files to be short lived.
func (sw *Writer) closeUnusedWriters() {
loop:
for name, fw := range sw.FileWriters {
// Find writer in manifest.
for _, f := range sw.Manifest.Files {
if f.Name == name {
continue loop
}
}
// If not found then close it.
_ = fw.Close()
}
}
// WriteTo writes the snapshot to the writer.
// File writers are closed as they are written.
// This function will always return n == 0.
func (sw *Writer) WriteTo(w io.Writer) (n int64, err error) {
// Close any file writers that aren't required.
sw.closeUnusedWriters()
// Sort manifest files.
// This is required for combining multiple snapshots together.
sort.Sort(Files(sw.Manifest.Files))
// Begin writing a tar file to the output.
tw := tar.NewWriter(w)
defer tw.Close()
// Write manifest file.
if err := sw.writeManifestTo(tw); err != nil {
return 0, fmt.Errorf("write manifest: %s", err)
}
// Write each backup file.
for _, f := range sw.Manifest.Files {
if err := sw.writeFileTo(tw, &f); err != nil {
return 0, fmt.Errorf("write file: %s", err)
}
}
// Close tar writer and check error.
if err := tw.Close(); err != nil {
return 0, fmt.Errorf("tar close: %s", err)
}
return 0, nil
}
// writeManifestTo writes a manifest to the archive.
func (sw *Writer) writeManifestTo(tw *tar.Writer) error {
// Convert manifest to JSON.
b, err := json.Marshal(sw.Manifest)
if err != nil {
return fmt.Errorf("marshal json: %s", err)
}
// Write header & file.
if err := tw.WriteHeader(&tar.Header{
Name: manifestName,
Size: int64(len(b)),
Mode: 0666,
ModTime: time.Now(),
}); err != nil {
return fmt.Errorf("write header: %s", err)
}
if _, err := tw.Write(b); err != nil {
return fmt.Errorf("write: %s", err)
}
return nil
}
// writeFileTo writes a single file to the archive.
func (sw *Writer) writeFileTo(tw *tar.Writer, f *File) error {
// Retrieve the file writer by filename.
fw := sw.FileWriters[f.Name]
if fw == nil {
return fmt.Errorf("file writer not found: name=%s", f.Name)
}
// Write file header.
if err := tw.WriteHeader(&tar.Header{
Name: f.Name,
Size: f.Size,
Mode: 0666,
ModTime: time.Now(),
}); err != nil {
return fmt.Errorf("write header: file=%s, err=%s", f.Name, err)
}
// Copy the database to the writer.
if nn, err := fw.WriteTo(tw); err != nil {
return fmt.Errorf("write: file=%s, err=%s", f.Name, err)
} else if nn != f.Size {
return fmt.Errorf("short write: file=%s", f.Name)
}
// Close the writer.
if err := fw.Close(); err != nil {
return fmt.Errorf("close: file=%s, err=%s", f.Name, err)
}
return nil
}
// FileWriter is the interface used for writing a file to a snapshot.
type FileWriter interface {
io.WriterTo
io.Closer
}

72
vendor/github.com/influxdata/influxdb/toml/toml.go generated vendored Normal file

@@ -0,0 +1,72 @@
package toml
import (
"fmt"
"strconv"
"time"
)
// maxInt is the largest integer representable by a word (architecture dependent).
const maxInt = int64(^uint(0) >> 1)
// Duration is a TOML wrapper type for time.Duration.
type Duration time.Duration
func (d Duration) String() string {
return time.Duration(d).String()
}
// UnmarshalText parses a TOML value into a duration value.
func (d *Duration) UnmarshalText(text []byte) error {
// Ignore if there is no value set.
if len(text) == 0 {
return nil
}
// Otherwise parse as a duration formatted string.
duration, err := time.ParseDuration(string(text))
if err != nil {
return err
}
// Set duration and return.
*d = Duration(duration)
return nil
}
// MarshalText converts a duration to a string for encoding as TOML.
func (d Duration) MarshalText() (text []byte, err error) {
return []byte(d.String()), nil
}
// Size represents a TOML parseable file size.
// Users can specify size using "m" for megabytes and "g" for gigabytes.
type Size int
// UnmarshalText parses a byte size from text.
func (s *Size) UnmarshalText(text []byte) error {
// Parse numeric portion of value.
length := len(string(text))
size, err := strconv.ParseInt(string(text[:length-1]), 10, 64)
if err != nil {
return err
}
// Parse unit of measure ("m", "g", etc).
switch suffix := text[len(text)-1]; suffix {
case 'm':
size *= 1 << 20 // MB
case 'g':
size *= 1 << 30 // GB
default:
return fmt.Errorf("unknown size suffix: %c", suffix)
}
// Check for overflow.
if size > maxInt {
return fmt.Errorf("size %d cannot be represented by an int", size)
}
*s = Size(size)
return nil
}
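A sketch of both wrappers in action (in-package style, assumes an `fmt` import):
```go
var d Duration
_ = d.UnmarshalText([]byte("90s")) // d == Duration(90 * time.Second)

var s Size
_ = s.UnmarshalText([]byte("10m")) // s == 10 << 20 (10 megabytes)
if err := s.UnmarshalText([]byte("10k")); err != nil {
	fmt.Println(err) // unknown size suffix: k
}
```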

85
vendor/github.com/influxdata/influxdb/tsdb/README.md generated vendored Normal file

@@ -0,0 +1,85 @@
# Line Protocol
The line protocol is a text based format for writing points to InfluxDB. Each line defines a single point.
Multiple lines must be separated by the newline character `\n`. The format of the line consists of three parts:
```
[key] [fields] [timestamp]
```
Each section is separated by spaces. The minimum required point consists of a measurement name and at least one field. Points without a specified timestamp will be written using the server's local timestamp. Timestamps are assumed to be in nanoseconds unless a `precision` value is passed in the query string.
## Key
The key is the measurement name and any optional tags separated by commas. Measurement names, tag keys, and tag values must escape any spaces or commas using a backslash (`\`). For example: `\ ` and `\,`. All tag values are stored as strings and should not be surrounded in quotes.
Tags should be sorted by key before being sent for best performance. The sort should match that from the Go `bytes.Compare` function (http://golang.org/pkg/bytes/#Compare).
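For example, a minimal Go sketch that orders tags this way before building a line (the `tags` map and `line` string are hypothetical):
```go
// Go string comparison is byte-wise, so sort.Strings matches bytes.Compare ordering.
keys := make([]string, 0, len(tags))
for k := range tags {
	keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
	line += "," + k + "=" + tags[k] // append tags in sorted key order
}
```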
### Examples
```
# measurement only
cpu
# measurement and tags
cpu,host=serverA,region=us-west
# measurement with commas
cpu\,01,host=serverA,region=us-west
# tag value with spaces
cpu,host=server\ A,region=us\ west
```
## Fields
Fields are key-value metrics associated with the measurement. Every line must have at least one field. Multiple fields must be separated with commas and not spaces.
Field keys are always strings and follow the same syntactical rules as described above for tag keys and values. Field values can be one of four types. The first value written for a given field on a given measurement defines the type of that field for all series under that measurement.
* _integer_ - Numeric values that do not include a decimal. (e.g. 1, 345, 2015, -10)
* _float_ - Numeric values that include a decimal. (e.g. 1.0, -3.14, 6.0e+5). Note that all values _must_ have a decimal even if the decimal value is zero (1 is an _integer_, 1.0 is a _float_).
* _boolean_ - A value indicating true or false. Valid boolean strings are (t, T, true, TRUE, f, F, false, and FALSE).
* _string_ - A text value. All string values _must_ be surrounded in double-quotes `"`. If the string contains
a double-quote, it must be escaped with a backslash, e.g. `\"`.
```
# integer value
cpu value=1
# float value
cpu_load value=1.2
# boolean value
error fatal=true
# string value
event msg="logged out"
# multiple values
cpu load=10.0,alert=true,reason="value above maximum threshold"
```
## Timestamp
The timestamp section is optional but should be specified if possible. The value is an integer representing nanoseconds since the epoch. If the timestamp is not provided the point will inherit the server's local timestamp.
Some write APIs allow passing a lower precision. If the API supports a lower precision, the timestamp may also be
an integer epoch in microseconds, milliseconds, seconds, minutes or hours.
## Full Example
A full example is shown below.
```
cpu,host=server01,region=uswest value=1.0 1434055562000000000
cpu,host=server02,region=uswest value=3.0 1434055562000010000
```
In this example the first line shows a `measurement` of "cpu", two tags ("host" and "region"), a `value` of 1.0, and a `timestamp` of 1434055562000000000. Following this is a second line, also a point in the `measurement` "cpu" but belonging to a different "host".
```
cpu,host=server\ 01,region=uswest value=1.0,msg="all systems nominal"
cpu,host=server\ 01,region=us\,west value_int=1
```
In these examples, the "host" is set to `server 01`. The field value associated with field key `msg` is double-quoted, as it is a string. The second example shows a region of `us,west` with the comma properly escaped. In the first example `value` is written as a floating point number. In the second, `value_int` is an integer.
# Distributed Queries

142
vendor/github.com/influxdata/influxdb/tsdb/batcher.go generated vendored Normal file

@@ -0,0 +1,142 @@
package tsdb
import (
"sync"
"sync/atomic"
"time"
)
// PointBatcher accepts Points and will emit a batch of those points when either
// a) the batch reaches a certain size, or b) a certain time passes.
type PointBatcher struct {
size int
duration time.Duration
stop chan struct{}
in chan Point
out chan []Point
flush chan struct{}
stats PointBatcherStats
wg *sync.WaitGroup
}
// NewPointBatcher returns a new PointBatcher.
func NewPointBatcher(sz int, d time.Duration) *PointBatcher {
return &PointBatcher{
size: sz,
duration: d,
stop: make(chan struct{}),
in: make(chan Point),
out: make(chan []Point),
flush: make(chan struct{}),
}
}
// PointBatcherStats are the statistics each batcher tracks.
type PointBatcherStats struct {
BatchTotal uint64 // Total count of batches transmitted.
PointTotal uint64 // Total count of points processed.
SizeTotal uint64 // Number of batches that reached size threshold.
TimeoutTotal uint64 // Number of timeouts that occurred.
}
// Start starts the batching process. Points are sent via the In() channel and
// batches are received via the Out() channel.
func (b *PointBatcher) Start() {
// Already running?
if b.wg != nil {
return
}
var timer *time.Timer
var batch []Point
var timerCh <-chan time.Time
emit := func() {
b.out <- batch
atomic.AddUint64(&b.stats.BatchTotal, 1)
batch = nil
}
b.wg = &sync.WaitGroup{}
b.wg.Add(1)
go func() {
defer b.wg.Done()
for {
select {
case <-b.stop:
if len(batch) > 0 {
emit()
timerCh = nil
}
return
case p := <-b.in:
atomic.AddUint64(&b.stats.PointTotal, 1)
if batch == nil {
batch = make([]Point, 0, b.size)
if b.duration > 0 {
timer = time.NewTimer(b.duration)
timerCh = timer.C
}
}
batch = append(batch, p)
if len(batch) >= b.size { // 0 means send immediately.
atomic.AddUint64(&b.stats.SizeTotal, 1)
emit()
timerCh = nil
}
case <-b.flush:
if len(batch) > 0 {
emit()
timerCh = nil
}
case <-timerCh:
atomic.AddUint64(&b.stats.TimeoutTotal, 1)
emit()
}
}
}()
}
// Stop stops the batching process and waits for the batching goroutine to finish.
func (b *PointBatcher) Stop() {
// If not running, nothing to stop.
if b.wg == nil {
return
}
close(b.stop)
b.wg.Wait()
}
// In returns the channel to which points should be written.
func (b *PointBatcher) In() chan<- Point {
return b.in
}
// Out returns the channel from which batches should be read.
func (b *PointBatcher) Out() <-chan []Point {
return b.out
}
// Flush instructs the batcher to emit any pending points in a batch, regardless of batch size.
// If there are no pending points, no batch is emitted.
func (b *PointBatcher) Flush() {
b.flush <- struct{}{}
}
// Stats returns a PointBatcherStats object for the PointBatcher. While each statistic should be
// closely correlated with every other statistic, this is not guaranteed.
func (b *PointBatcher) Stats() *PointBatcherStats {
stats := PointBatcherStats{}
stats.BatchTotal = atomic.LoadUint64(&b.stats.BatchTotal)
stats.PointTotal = atomic.LoadUint64(&b.stats.PointTotal)
stats.SizeTotal = atomic.LoadUint64(&b.stats.SizeTotal)
stats.TimeoutTotal = atomic.LoadUint64(&b.stats.TimeoutTotal)
return &stats
}
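A usage sketch (the `points` slice and `writeBatch` sink are hypothetical; assumes a `time` import):
```go
b := NewPointBatcher(1000, 10*time.Second)
b.Start()

go func() {
	for _, p := range points {
		b.In() <- p
	}
	b.Flush() // emit any partial batch without waiting for the timer
}()

batch := <-b.Out() // blocks until 1000 points arrive, the timer fires, or Flush runs
writeBatch(batch)
b.Stop()
```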

34
vendor/github.com/influxdata/influxdb/tsdb/config.go generated vendored Normal file

@@ -0,0 +1,34 @@
package tsdb
import (
"time"
"github.com/influxdb/influxdb/toml"
)
const (
// DefaultMaxWALSize is the default size of the WAL before it is flushed.
DefaultMaxWALSize = 100 * 1024 * 1024 // 100MB
// DefaultWALFlushInterval is the frequency the WAL will get flushed if
// it doesn't reach its size threshold.
DefaultWALFlushInterval = 10 * time.Minute
// DefaultWALPartitionFlushDelay is the sleep time between WAL partition flushes.
DefaultWALPartitionFlushDelay = 2 * time.Second
)
// Config represents the configuration for the tsdb package.
type Config struct {
Dir string `toml:"dir"`
MaxWALSize int `toml:"max-wal-size"`
WALFlushInterval toml.Duration `toml:"wal-flush-interval"`
WALPartitionFlushDelay toml.Duration `toml:"wal-partition-flush-delay"`
}
// NewConfig returns a tsdb configuration with default values.
func NewConfig() Config {
return Config{
MaxWALSize: DefaultMaxWALSize,
WALFlushInterval: toml.Duration(DefaultWALFlushInterval),
WALPartitionFlushDelay: toml.Duration(DefaultWALPartitionFlushDelay),
}
}
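
NewConfig fills in the WAL defaults but leaves Dir empty, so callers are expected to set it. A minimal sketch (the data path is hypothetical):

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdb/influxdb/toml"
	"github.com/influxdb/influxdb/tsdb"
)

func main() {
	cfg := tsdb.NewConfig()
	cfg.Dir = "/var/lib/influxdb/data" // hypothetical data directory
	// toml.Duration wraps time.Duration so intervals round-trip through TOML.
	cfg.WALFlushInterval = toml.Duration(5 * time.Minute)
	fmt.Printf("%+v\n", cfg)
}
```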

5
vendor/github.com/influxdata/influxdb/tsdb/doc.go generated vendored Normal file

@@ -0,0 +1,5 @@
/*
Package tsdb implements a durable time series database.
*/
package tsdb

961
vendor/github.com/influxdata/influxdb/tsdb/engine.go generated vendored Normal file

@@ -0,0 +1,961 @@
package tsdb
import (
"fmt"
"math"
"sort"
"time"
"github.com/influxdb/influxdb/influxql"
)
const (
// Return an error if the user is trying to select more than this number of points in a group by statement.
// Most likely they specified a group by interval without time boundaries.
MaxGroupByPoints = 100000
// Since time is always selected, the column count when selecting only a single other value will be 2
SelectColumnCountWithOneValue = 2
// IgnoredChunkSize is the chunk size used for aggregate queries, since they don't chunk points out
IgnoredChunkSize = 0
)
// MapperResponse is the structure responses from mappers take over the network. TagSets
// is only set with the first response. Data will be nil when the Mapper has no more data.
type MapperResponse struct {
TagSets []string `json:"tagSets,omitempty"`
Data []byte `json:"data"`
}
// Mapper is the interface all Mapper types must implement.
type Mapper interface {
Open() error
TagSets() []string
NextChunk() (interface{}, error)
Close()
}
// StatefulMapper encapsulates a Mapper and some state that the executor needs to
// track for that mapper.
type StatefulMapper struct {
Mapper
bufferedChunk *mapperOutput // Last read chunk.
drained bool
}
// Executor is the interface all Executor types must implement.
type Executor interface {
Execute() <-chan *influxql.Row
}
// NextChunk returns the next chunk from the wrapped Mapper, cast to a *mapperOutput.
func (srm *StatefulMapper) NextChunk() (*mapperOutput, error) {
c, err := srm.Mapper.NextChunk()
if err != nil {
return nil, err
}
chunk, ok := c.(*mapperOutput)
if !ok {
if chunk == interface{}(nil) {
return nil, nil
}
}
return chunk, nil
}
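
The interface{}(nil) dance above is about Go's typed-nil semantics: a nil *mapperOutput stored in an interface does not compare equal to a nil interface, because the interface still carries the type. A self-contained illustration:

```go
package main

import "fmt"

type mapperOutput struct{}

func main() {
	var mo *mapperOutput
	var i interface{} = mo // holds (type=*mapperOutput, value=nil)
	fmt.Println(i == nil)  // false: the interface carries a type
	var j interface{}      // holds (no type, no value)
	fmt.Println(j == nil)  // true
}
```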
// RawExecutor is an executor for RawMappers.
type RawExecutor struct {
stmt *influxql.SelectStatement
mappers []*StatefulMapper
chunkSize int
limitedTagSets map[string]struct{} // Set tagsets for which data has reached the LIMIT.
}
// NewRawExecutor returns a new RawExecutor.
func NewRawExecutor(stmt *influxql.SelectStatement, mappers []Mapper, chunkSize int) *RawExecutor {
a := []*StatefulMapper{}
for _, m := range mappers {
a = append(a, &StatefulMapper{m, nil, false})
}
return &RawExecutor{
stmt: stmt,
mappers: a,
chunkSize: chunkSize,
limitedTagSets: make(map[string]struct{}),
}
}
// Execute begins execution of the query and returns a channel to receive rows.
func (re *RawExecutor) Execute() <-chan *influxql.Row {
// Create output channel and stream data in a separate goroutine.
out := make(chan *influxql.Row)
go re.execute(out)
return out
}
func (re *RawExecutor) execute(out chan *influxql.Row) {
// It's important that all resources are released when execution completes.
defer re.close()
// Open the mappers.
for _, m := range re.mappers {
if err := m.Open(); err != nil {
out <- &influxql.Row{Err: err}
return
}
}
// Used to read ahead chunks from mappers.
var rowWriter *limitedRowWriter
var currTagset string
// Keep looping until all mappers drained.
var err error
for {
// Get the next chunk from each Mapper.
for _, m := range re.mappers {
if m.drained {
continue
}
// Set the next buffered chunk on the mapper, or mark it drained.
for {
if m.bufferedChunk == nil {
m.bufferedChunk, err = m.NextChunk()
if err != nil {
out <- &influxql.Row{Err: err}
return
}
if m.bufferedChunk == nil {
// Mapper can do no more for us.
m.drained = true
break
}
}
if re.tagSetIsLimited(m.bufferedChunk.Name) {
// chunk's tagset is limited, so no good. Try again.
m.bufferedChunk = nil
continue
}
// This mapper has a chunk available, and it is not limited.
break
}
}
// All Mappers done?
if re.mappersDrained() {
rowWriter.Flush()
break
}
// Send out data for the next alphabetically-lowest tagset. All Mappers emit data in this order,
// so by always continuing with the lowest tagset until it is finished, we process all data in
// the required order, and don't "miss" any.
tagset := re.nextMapperTagSet()
if tagset != currTagset {
currTagset = tagset
// Tagset has changed, time for a new rowWriter. Be sure to kick out any residual values.
rowWriter.Flush()
rowWriter = nil
}
// Process the mapper outputs. We can send out everything up to the min of the last time
// of the chunks for the next tagset.
minTime := re.nextMapperLowestTime(tagset)
// Now empty out all the chunks up to the min time. Create new output struct for this data.
var chunkedOutput *mapperOutput
for _, m := range re.mappers {
if m.drained {
continue
}
// This mapper's next chunk is not for the next tagset, or the very first value of
// the chunk is at a higher acceptable timestamp. Skip it.
if m.bufferedChunk.key() != tagset || m.bufferedChunk.Values[0].Time > minTime {
continue
}
// Find the index of the point up to the min.
ind := len(m.bufferedChunk.Values)
for i, mo := range m.bufferedChunk.Values {
if mo.Time > minTime {
ind = i
break
}
}
// Add up to the index to the values
if chunkedOutput == nil {
chunkedOutput = &mapperOutput{
Name: m.bufferedChunk.Name,
Tags: m.bufferedChunk.Tags,
}
chunkedOutput.Values = m.bufferedChunk.Values[:ind]
} else {
chunkedOutput.Values = append(chunkedOutput.Values, m.bufferedChunk.Values[:ind]...)
}
// Clear out the values being sent out, keep the remainder.
m.bufferedChunk.Values = m.bufferedChunk.Values[ind:]
// If we emptied out all the values, clear the mapper's buffered chunk.
if len(m.bufferedChunk.Values) == 0 {
m.bufferedChunk = nil
}
}
// Sort the values by time first so we can then handle offset and limit
sort.Sort(mapperValues(chunkedOutput.Values))
// Now that we have full name and tag details, initialize the rowWriter.
// The Name and Tags will be the same for all mappers.
if rowWriter == nil {
rowWriter = &limitedRowWriter{
limit: re.stmt.Limit,
offset: re.stmt.Offset,
chunkSize: re.chunkSize,
name: chunkedOutput.Name,
tags: chunkedOutput.Tags,
selectNames: re.stmt.NamesInSelect(),
fields: re.stmt.Fields,
c: out,
}
}
if re.stmt.HasDerivative() {
interval, err := derivativeInterval(re.stmt)
if err != nil {
out <- &influxql.Row{Err: err}
return
}
rowWriter.transformer = &rawQueryDerivativeProcessor{
isNonNegative: re.stmt.FunctionCalls()[0].Name == "non_negative_derivative",
derivativeInterval: interval,
}
}
// Emit the data via the limiter.
if limited := rowWriter.Add(chunkedOutput.Values); limited {
// Limit for this tagset was reached, mark it and start draining a new tagset.
re.limitTagSet(chunkedOutput.key())
continue
}
}
close(out)
}
// mappersDrained returns whether all the executor's Mappers have been drained of data.
func (re *RawExecutor) mappersDrained() bool {
for _, m := range re.mappers {
if !m.drained {
return false
}
}
return true
}
// nextMapperTagSet returns the alphabetically lowest tagset across all Mappers.
func (re *RawExecutor) nextMapperTagSet() string {
tagset := ""
for _, m := range re.mappers {
if m.bufferedChunk != nil {
if tagset == "" {
tagset = m.bufferedChunk.key()
} else if m.bufferedChunk.key() < tagset {
tagset = m.bufferedChunk.key()
}
}
}
return tagset
}
// nextMapperLowestTime returns the lowest minimum time across all Mappers, for the given tagset.
func (re *RawExecutor) nextMapperLowestTime(tagset string) int64 {
minTime := int64(math.MaxInt64)
for _, m := range re.mappers {
if !m.drained && m.bufferedChunk != nil {
if m.bufferedChunk.key() != tagset {
continue
}
t := m.bufferedChunk.Values[len(m.bufferedChunk.Values)-1].Time
if t < minTime {
minTime = t
}
}
}
return minTime
}
// tagSetIsLimited returns whether data for the given tagset has been LIMITed.
func (re *RawExecutor) tagSetIsLimited(tagset string) bool {
_, ok := re.limitedTagSets[tagset]
return ok
}
// limitTagSet marks the given tagset as LIMITed.
func (re *RawExecutor) limitTagSet(tagset string) {
re.limitedTagSets[tagset] = struct{}{}
}
// Close closes the executor such that all resources are released. Once closed,
// an executor may not be re-used.
func (re *RawExecutor) close() {
if re != nil {
for _, m := range re.mappers {
m.Close()
}
}
}
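
Either executor is consumed the same way: range over the channel returned by Execute until it closes, checking each row's Err. A hedged fragment, not a complete program; stmt and mappers are assumed to have been built elsewhere (e.g. by parsing a SELECT statement and opening shard mappers):

```go
var exec tsdb.Executor = tsdb.NewRawExecutor(stmt, mappers, 100)
for row := range exec.Execute() {
	if row.Err != nil {
		log.Fatal(row.Err)
	}
	fmt.Println(row.Name, row.Tags, len(row.Values))
}
```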
// AggregateExecutor is an executor for AggregateMappers.
type AggregateExecutor struct {
stmt *influxql.SelectStatement
queryTMin int64 // Needed?
queryTMax int64 // Needed?
mappers []*StatefulMapper
}
// NewAggregateExecutor returns a new AggregateExecutor.
func NewAggregateExecutor(stmt *influxql.SelectStatement, mappers []Mapper) *AggregateExecutor {
a := []*StatefulMapper{}
for _, m := range mappers {
a = append(a, &StatefulMapper{m, nil, false})
}
return &AggregateExecutor{
stmt: stmt,
mappers: a,
}
}
// Execute begins execution of the query and returns a channel to receive rows.
func (ae *AggregateExecutor) Execute() <-chan *influxql.Row {
// Create output channel and stream data in a separate goroutine.
out := make(chan *influxql.Row)
go ae.execute(out)
return out
}
func (ae *AggregateExecutor) execute(out chan *influxql.Row) {
// It's important to close all resources when execution completes.
defer ae.close()
// Create the functions which will reduce values from mappers for
// a given interval. The function offsets within this slice match
// the offsets within the value slices that are returned by the
// mapper.
aggregates := ae.stmt.FunctionCalls()
reduceFuncs := make([]influxql.ReduceFunc, len(aggregates))
for i, c := range aggregates {
reduceFunc, err := influxql.InitializeReduceFunc(c)
if err != nil {
out <- &influxql.Row{Err: err}
return
}
reduceFuncs[i] = reduceFunc
}
// Put together the rows to return, starting with columns.
columnNames := make([]string, len(ae.stmt.Fields)+1)
columnNames[0] = "time"
for i, f := range ae.stmt.Fields {
columnNames[i+1] = f.Name()
}
// Open the mappers.
for _, m := range ae.mappers {
if err := m.Open(); err != nil {
out <- &influxql.Row{Err: err}
return
}
}
// Build the set of available tagsets across all mappers. This is used for
// later checks.
availTagSets := newStringSet()
for _, m := range ae.mappers {
for _, t := range m.TagSets() {
availTagSets.add(t)
}
}
// Prime each mapper's chunk buffer.
var err error
for _, m := range ae.mappers {
m.bufferedChunk, err = m.NextChunk()
if err != nil {
out <- &influxql.Row{Err: err}
return
}
if m.bufferedChunk == nil {
m.drained = true
}
}
// Keep looping until all mappers drained.
for !ae.mappersDrained() {
// Send out data for the next alphabetically-lowest tagset. All Mappers emit data in this order,
// so collect data for this tagset and ignore all others.
tagset := ae.nextMapperTagSet()
chunks := []*mapperOutput{}
// Pull as much as possible from each mapper. Stop when a mapper offers
// data for a new tagset, or empties completely.
for _, m := range ae.mappers {
if m.drained {
continue
}
for {
if m.bufferedChunk == nil {
m.bufferedChunk, err = m.NextChunk()
if err != nil {
out <- &influxql.Row{Err: err}
return
}
if m.bufferedChunk == nil {
m.drained = true
break
}
}
// Got a chunk. Can we use it?
if m.bufferedChunk.key() != tagset {
// No, so just leave it in the buffer.
break
}
// We can, take it.
chunks = append(chunks, m.bufferedChunk)
m.bufferedChunk = nil
}
}
// Prep a row, ready for kicking out.
var row *influxql.Row
// Prep for bucketing data by start time of the interval.
buckets := map[int64][][]interface{}{}
for _, chunk := range chunks {
if row == nil {
row = &influxql.Row{
Name: chunk.Name,
Tags: chunk.Tags,
Columns: columnNames,
}
}
startTime := chunk.Values[0].Time
_, ok := buckets[startTime]
values := chunk.Values[0].Value.([]interface{})
if !ok {
buckets[startTime] = make([][]interface{}, len(values))
}
for i, v := range values {
buckets[startTime][i] = append(buckets[startTime][i], v)
}
}
// Now, after the loop above, within each time bucket is a slice. Within the element of each
// slice is another slice of interface{}, ready for passing to the reducer functions.
// Work each bucket of time, in time ascending order.
tMins := make(int64arr, 0, len(buckets))
for k := range buckets {
tMins = append(tMins, k)
}
sort.Sort(tMins)
values := make([][]interface{}, len(tMins))
for i, t := range tMins {
values[i] = make([]interface{}, 0, len(columnNames))
values[i] = append(values[i], time.Unix(0, t).UTC()) // Time value is always first.
for j, f := range reduceFuncs {
reducedVal := f(buckets[t][j])
values[i] = append(values[i], reducedVal)
}
}
// Perform any mathematics.
values = processForMath(ae.stmt.Fields, values)
// Handle any fill options
values = ae.processFill(values)
// process derivatives
values = ae.processDerivative(values)
// If we have multiple tag sets we'll want to filter out the empty ones
if len(availTagSets.list()) > 1 && resultsEmpty(values) {
continue
}
row.Values = values
out <- row
}
close(out)
}
// processFill will take the results and return new results (or the same if no fill modifications are needed)
// with whatever fill options the query has.
func (ae *AggregateExecutor) processFill(results [][]interface{}) [][]interface{} {
// don't do anything if we're supposed to leave the nulls
if ae.stmt.Fill == influxql.NullFill {
return results
}
if ae.stmt.Fill == influxql.NoFill {
// remove any rows that have even one nil value. This one is tricky because they could have multiple
// aggregates, but this option means that any row that has even one nil gets purged.
newResults := make([][]interface{}, 0, len(results))
for _, vals := range results {
hasNil := false
// start at 1 because the first value is always time
for j := 1; j < len(vals); j++ {
if vals[j] == nil {
hasNil = true
break
}
}
if !hasNil {
newResults = append(newResults, vals)
}
}
return newResults
}
// They're either filling with previous values or a specific number
for i, vals := range results {
// start at 1 because the first value is always time
for j := 1; j < len(vals); j++ {
if vals[j] == nil {
switch ae.stmt.Fill {
case influxql.PreviousFill:
if i != 0 {
vals[j] = results[i-1][j]
}
case influxql.NumberFill:
vals[j] = ae.stmt.FillValue
}
}
}
}
return results
}
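
The three non-null fill modes behave as follows on a two-row result whose second value is missing; the times are placeholders:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t0 := time.Unix(0, 0).UTC()
	t1 := t0.Add(time.Minute)
	results := [][]interface{}{{t0, 10.0}, {t1, nil}}
	// fill(none):     rows with any nil are dropped      -> [[t0 10]]
	// fill(previous): nil takes the previous row's value -> [[t0 10] [t1 10]]
	// fill(5):        nil takes the literal fill value   -> [[t0 10] [t1 5]]
	fmt.Println(results)
}
```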
// processDerivative returns the derivatives of the results
func (ae *AggregateExecutor) processDerivative(results [][]interface{}) [][]interface{} {
// Return early if we're not supposed to process the derivatives
if ae.stmt.HasDerivative() {
interval, err := derivativeInterval(ae.stmt)
if err != nil {
return results // XXX need to handle this better.
}
// Determines whether to drop negative differences
isNonNegative := ae.stmt.FunctionCalls()[0].Name == "non_negative_derivative"
return processAggregateDerivative(results, isNonNegative, interval)
}
return results
}
// mappersDrained returns whether all the executor's Mappers have been drained of data.
func (ae *AggregateExecutor) mappersDrained() bool {
for _, m := range ae.mappers {
if !m.drained {
return false
}
}
return true
}
// nextMapperTagSet returns the alphabetically lowest tagset across all Mappers.
func (ae *AggregateExecutor) nextMapperTagSet() string {
tagset := ""
for _, m := range ae.mappers {
if m.bufferedChunk != nil {
if tagset == "" {
tagset = m.bufferedChunk.key()
} else if m.bufferedChunk.key() < tagset {
tagset = m.bufferedChunk.key()
}
}
}
return tagset
}
// Close closes the executor such that all resources are released. Once closed,
// an executor may not be re-used.
func (ae *AggregateExecutor) close() {
for _, m := range ae.mappers {
m.Close()
}
}
// limitedRowWriter accepts raw mapper values, and will emit those values as rows in chunks
// of the given size. If the chunk size is 0, no chunking will be performed. In addition, if the
// limit is reached, outstanding values will be emitted. If limit is zero, no limit is enforced.
type limitedRowWriter struct {
chunkSize int
limit int
offset int
name string
tags map[string]string
selectNames []string
fields influxql.Fields
c chan *influxql.Row
currValues []*mapperValue
totalOffSet int
totalSent int
transformer interface {
process(input []*mapperValue) []*mapperValue
}
}
// Add accepts a slice of values, and will emit those values as per chunking requirements.
// If limited is returned as true, the limit was also reached and no more values should be
// added. In that case only up to the limit of values are emitted.
func (r *limitedRowWriter) Add(values []*mapperValue) (limited bool) {
if r.currValues == nil {
r.currValues = make([]*mapperValue, 0, r.chunkSize)
}
// Enforce offset.
if r.totalOffSet < r.offset {
// Still some offsetting to do.
offsetRequired := r.offset - r.totalOffSet
if offsetRequired >= len(values) {
r.totalOffSet += len(values)
return false
} else {
// Drop leading values and keep going.
values = values[offsetRequired:]
r.totalOffSet += offsetRequired
}
}
r.currValues = append(r.currValues, values...)
// Check limit.
limitReached := r.limit > 0 && r.totalSent+len(r.currValues) >= r.limit
if limitReached {
// Limit will be satisfied with the current values. Truncate 'em.
r.currValues = r.currValues[:r.limit-r.totalSent]
}
// Is chunking in effect?
if r.chunkSize != IgnoredChunkSize {
// Chunking level reached?
for len(r.currValues) >= r.chunkSize {
index := r.chunkSize // equivalent to len(currValues) - (len(currValues) - chunkSize)
r.c <- r.processValues(r.currValues[:index])
r.currValues = r.currValues[index:]
}
// After values have been sent out by chunking, there may still be some
// values left, if the remainder is less than the chunk size. But if the
// limit has been reached, kick them out.
if len(r.currValues) > 0 && limitReached {
r.c <- r.processValues(r.currValues)
r.currValues = nil
}
} else if limitReached {
// No chunking in effect, but the limit has been reached.
r.c <- r.processValues(r.currValues)
r.currValues = nil
}
return limitReached
}
// Flush instructs the limitedRowWriter to emit any pending values as a single row,
// adhering to any limits. Chunking is not enforced.
func (r *limitedRowWriter) Flush() {
if r == nil {
return
}
// If at least some rows were sent, and no values are pending, then don't
// emit anything, since at least 1 row was previously emitted. This ensures
// that if no rows were ever sent, at least 1 will be emitted, even an empty row.
if r.totalSent != 0 && len(r.currValues) == 0 {
return
}
if r.limit > 0 && len(r.currValues) > r.limit {
r.currValues = r.currValues[:r.limit]
}
r.c <- r.processValues(r.currValues)
r.currValues = nil
}
// processValues emits the given values in a single row.
func (r *limitedRowWriter) processValues(values []*mapperValue) *influxql.Row {
defer func() {
r.totalSent += len(values)
}()
selectNames := r.selectNames
if r.transformer != nil {
values = r.transformer.process(values)
}
// ensure that time is in the select names and in the first position
hasTime := false
for i, n := range selectNames {
if n == "time" {
// Swap time to the first argument for names
if i != 0 {
selectNames[0], selectNames[i] = selectNames[i], selectNames[0]
}
hasTime = true
break
}
}
// time should always be in the list of names they get back
if !hasTime {
selectNames = append([]string{"time"}, selectNames...)
}
// since selectNames can contain tags, we need to strip them out
selectFields := make([]string, 0, len(selectNames))
for _, n := range selectNames {
if _, found := r.tags[n]; !found {
selectFields = append(selectFields, n)
}
}
row := &influxql.Row{
Name: r.name,
Tags: r.tags,
Columns: selectFields,
}
// Kick out an empty row if no results are available.
if len(values) == 0 {
return row
}
// if they've selected only a single value we have to handle things a little differently
singleValue := len(selectFields) == SelectColumnCountWithOneValue
// the results will have all of the raw mapper results, convert into the row
for _, v := range values {
vals := make([]interface{}, len(selectFields))
if singleValue {
vals[0] = time.Unix(0, v.Time).UTC()
vals[1] = v.Value.(interface{})
} else {
fields := v.Value.(map[string]interface{})
// time is always the first value
vals[0] = time.Unix(0, v.Time).UTC()
// populate the other values
for i := 1; i < len(selectFields); i++ {
vals[i] = fields[selectFields[i]]
}
}
row.Values = append(row.Values, vals)
}
// Perform any mathematical post-processing.
row.Values = processForMath(r.fields, row.Values)
return row
}
type rawQueryDerivativeProcessor struct {
lastValueFromPreviousChunk *mapperValue
isNonNegative bool // Whether to drop negative differences
derivativeInterval time.Duration
}
func (rqdp *rawQueryDerivativeProcessor) process(input []*mapperValue) []*mapperValue {
if len(input) == 0 {
return input
}
// If we only have 1 value, then the value did not change, so return
// a single row with 0.0
if len(input) == 1 {
return []*mapperValue{
&mapperValue{
Time: input[0].Time,
Value: 0.0,
},
}
}
if rqdp.lastValueFromPreviousChunk == nil {
rqdp.lastValueFromPreviousChunk = input[0]
}
derivativeValues := []*mapperValue{}
for i := 1; i < len(input); i++ {
v := input[i]
// Calculate the derivative of successive points by dividing the difference
// of each value by the elapsed time normalized to the interval
diff := int64toFloat64(v.Value) - int64toFloat64(rqdp.lastValueFromPreviousChunk.Value)
elapsed := v.Time - rqdp.lastValueFromPreviousChunk.Time
value := 0.0
if elapsed > 0 {
value = diff / (float64(elapsed) / float64(rqdp.derivativeInterval))
}
rqdp.lastValueFromPreviousChunk = v
// Drop negative values for non-negative derivatives
if rqdp.isNonNegative && diff < 0 {
continue
}
derivativeValues = append(derivativeValues, &mapperValue{
Time: v.Time,
Value: value,
})
}
return derivativeValues
}
// processForMath will apply any math that was specified in the select statement
// against the passed in results
func processForMath(fields influxql.Fields, results [][]interface{}) [][]interface{} {
hasMath := false
for _, f := range fields {
if _, ok := f.Expr.(*influxql.BinaryExpr); ok {
hasMath = true
} else if _, ok := f.Expr.(*influxql.ParenExpr); ok {
hasMath = true
}
}
if !hasMath {
return results
}
processors := make([]influxql.Processor, len(fields))
startIndex := 1
for i, f := range fields {
processors[i], startIndex = influxql.GetProcessor(f.Expr, startIndex)
}
mathResults := make([][]interface{}, len(results))
for i := range mathResults {
mathResults[i] = make([]interface{}, len(fields)+1)
// put the time in
mathResults[i][0] = results[i][0]
for j, p := range processors {
mathResults[i][j+1] = p(results[i])
}
}
return mathResults
}
// processAggregateDerivative returns the derivatives of an aggregate result set
func processAggregateDerivative(results [][]interface{}, isNonNegative bool, interval time.Duration) [][]interface{} {
// Return early if we can't calculate derivatives
if len(results) == 0 {
return results
}
// If we only have 1 value, then the value did not change, so return
// a single row w/ 0.0
if len(results) == 1 {
return [][]interface{}{
[]interface{}{results[0][0], 0.0},
}
}
// Otherwise calculate the derivatives as the difference between consecutive
// points divided by the elapsed time. Then normalize to the requested
// interval.
derivatives := [][]interface{}{}
for i := 1; i < len(results); i++ {
prev := results[i-1]
cur := results[i]
if cur[1] == nil || prev[1] == nil {
continue
}
elapsed := cur[0].(time.Time).Sub(prev[0].(time.Time))
diff := int64toFloat64(cur[1]) - int64toFloat64(prev[1])
value := 0.0
if elapsed > 0 {
value = float64(diff) / (float64(elapsed) / float64(interval))
}
// Drop negative values for non-negative derivatives
if isNonNegative && diff < 0 {
continue
}
val := []interface{}{
cur[0],
value,
}
derivatives = append(derivatives, val)
}
return derivatives
}
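
The normalization above divides the raw difference by elapsed/interval, yielding a per-interval rate. A worked example: values 10 and 25 taken 10 seconds apart with a 1-second derivative interval give (25-10)/(10s/1s) = 1.5 per second:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	prev, cur := 10.0, 25.0
	elapsed := 10 * time.Second // time between the two aggregate buckets
	interval := time.Second     // requested derivative interval
	value := (cur - prev) / (float64(elapsed) / float64(interval))
	fmt.Println(value) // 1.5
}
```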
// derivativeInterval returns the time interval for the one (and only) derivative func
func derivativeInterval(stmt *influxql.SelectStatement) (time.Duration, error) {
if len(stmt.FunctionCalls()[0].Args) == 2 {
return stmt.FunctionCalls()[0].Args[1].(*influxql.DurationLiteral).Val, nil
}
interval, err := stmt.GroupByInterval()
if err != nil {
return 0, err
}
if interval > 0 {
return interval, nil
}
return time.Second, nil
}
// resultsEmpty will return true if all the result values are empty or contain only nulls
func resultsEmpty(resultValues [][]interface{}) bool {
for _, vals := range resultValues {
// start the loop at 1 because we want to skip over the time value
for i := 1; i < len(vals); i++ {
if vals[i] != nil {
return false
}
}
}
return true
}
func int64toFloat64(v interface{}) float64 {
switch v := v.(type) {
case int64:
return float64(v)
case float64:
return v
}
panic(fmt.Sprintf("expected either int64 or float64, got %v", v))
}
type int64arr []int64
func (a int64arr) Len() int { return len(a) }
func (a int64arr) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a int64arr) Less(i, j int) bool { return a[i] < a[j] }


@@ -0,0 +1,123 @@
// Code generated by protoc-gen-go.
// source: meta.proto
// DO NOT EDIT!
/*
Package internal is a generated protocol buffer package.
It is generated from these files:
meta.proto
It has these top-level messages:
Series
Tag
MeasurementFields
Field
*/
package internal
import proto "github.com/golang/protobuf/proto"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = math.Inf
type Series struct {
Key *string `protobuf:"bytes,1,req" json:"Key,omitempty"`
Tags []*Tag `protobuf:"bytes,2,rep" json:"Tags,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Series) Reset() { *m = Series{} }
func (m *Series) String() string { return proto.CompactTextString(m) }
func (*Series) ProtoMessage() {}
func (m *Series) GetKey() string {
if m != nil && m.Key != nil {
return *m.Key
}
return ""
}
func (m *Series) GetTags() []*Tag {
if m != nil {
return m.Tags
}
return nil
}
type Tag struct {
Key *string `protobuf:"bytes,1,req" json:"Key,omitempty"`
Value *string `protobuf:"bytes,2,req" json:"Value,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Tag) Reset() { *m = Tag{} }
func (m *Tag) String() string { return proto.CompactTextString(m) }
func (*Tag) ProtoMessage() {}
func (m *Tag) GetKey() string {
if m != nil && m.Key != nil {
return *m.Key
}
return ""
}
func (m *Tag) GetValue() string {
if m != nil && m.Value != nil {
return *m.Value
}
return ""
}
type MeasurementFields struct {
Fields []*Field `protobuf:"bytes,1,rep" json:"Fields,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *MeasurementFields) Reset() { *m = MeasurementFields{} }
func (m *MeasurementFields) String() string { return proto.CompactTextString(m) }
func (*MeasurementFields) ProtoMessage() {}
func (m *MeasurementFields) GetFields() []*Field {
if m != nil {
return m.Fields
}
return nil
}
type Field struct {
ID *int32 `protobuf:"varint,1,req" json:"ID,omitempty"`
Name *string `protobuf:"bytes,2,req" json:"Name,omitempty"`
Type *int32 `protobuf:"varint,3,req" json:"Type,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Field) Reset() { *m = Field{} }
func (m *Field) String() string { return proto.CompactTextString(m) }
func (*Field) ProtoMessage() {}
func (m *Field) GetID() int32 {
if m != nil && m.ID != nil {
return *m.ID
}
return 0
}
func (m *Field) GetName() string {
if m != nil && m.Name != nil {
return *m.Name
}
return ""
}
func (m *Field) GetType() int32 {
if m != nil && m.Type != nil {
return *m.Type
}
return 0
}
func init() {
}


@@ -0,0 +1,27 @@
package internal;
//========================================================================
//
// Metadata
//
//========================================================================
message Series {
required string Key = 1;
repeated Tag Tags = 2;
}
message Tag {
required string Key = 1;
required string Value = 2;
}
message MeasurementFields {
repeated Field Fields = 1;
}
message Field {
required int32 ID = 1;
required string Name = 2;
required int32 Type = 3;
}

751
vendor/github.com/influxdata/influxdb/tsdb/mapper.go generated vendored Normal file

@@ -0,0 +1,751 @@
package tsdb
import (
"encoding/binary"
"errors"
"fmt"
"math"
"sort"
"strings"
"github.com/boltdb/bolt"
"github.com/influxdb/influxdb/influxql"
)
// mapperValue is a complex type, which can encapsulate data from both raw and aggregate
// mappers. This currently keeps the marshalling and network layers simpler. For
// aggregate output Time is ignored, and the actual time-value pairs are contained solely
// within the Value field.
type mapperValue struct {
Time int64 `json:"time,omitempty"` // Ignored for aggregate output.
Value interface{} `json:"value,omitempty"` // For aggregate, contains interval time multiple values.
}
type mapperValues []*mapperValue
func (a mapperValues) Len() int { return len(a) }
func (a mapperValues) Less(i, j int) bool { return a[i].Time < a[j].Time }
func (a mapperValues) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type mapperOutput struct {
Name string `json:"name,omitempty"`
Tags map[string]string `json:"tags,omitempty"`
Values []*mapperValue `json:"values,omitempty"` // For aggregates contains a single value at [0]
}
func (mo *mapperOutput) key() string {
return formMeasurementTagSetKey(mo.Name, mo.Tags)
}
// RawMapper is for retrieving data, for a raw query, for a single shard.
type RawMapper struct {
shard *Shard
stmt *influxql.SelectStatement
chunkSize int
tx *bolt.Tx // Read transaction for this shard.
queryTMin int64
queryTMax int64
whereFields []string // field names that occur in the where clause
selectFields []string // field names that occur in the select clause
selectTags []string // tag keys that occur in the select clause
fieldName string // the field name being read.
decoders map[string]*FieldCodec // byte decoder per measurement
cursors []*tagSetCursor // Cursors per tag sets.
currCursorIndex int // Current tagset cursor being drained.
}
// NewRawMapper returns a mapper for the given shard, which will return data for the SELECT statement.
func NewRawMapper(shard *Shard, stmt *influxql.SelectStatement, chunkSize int) *RawMapper {
return &RawMapper{
shard: shard,
stmt: stmt,
chunkSize: chunkSize,
cursors: make([]*tagSetCursor, 0),
}
}
// Open opens the raw mapper.
func (rm *RawMapper) Open() error {
// Get a read-only transaction.
tx, err := rm.shard.DB().Begin(false)
if err != nil {
return err
}
rm.tx = tx
// Set all time-related parameters on the mapper.
rm.queryTMin, rm.queryTMax = influxql.TimeRangeAsEpochNano(rm.stmt.Condition)
// Create the TagSet cursors for the Mapper.
for _, src := range rm.stmt.Sources {
mm, ok := src.(*influxql.Measurement)
if !ok {
return fmt.Errorf("invalid source type: %#v", src)
}
m := rm.shard.index.Measurement(mm.Name)
if m == nil {
// This shard has never received data for the measurement, so no Mapper
// is required.
return nil
}
// Create tagset cursors and determine various field types within SELECT statement.
tsf, err := createTagSetsAndFields(m, rm.stmt)
if err != nil {
return err
}
tagSets := tsf.tagSets
rm.selectFields = tsf.selectFields
rm.selectTags = tsf.selectTags
rm.whereFields = tsf.whereFields
if len(rm.selectFields) == 0 {
return fmt.Errorf("select statement must include at least one field")
}
// SLIMIT and SOFFSET the unique series
if rm.stmt.SLimit > 0 || rm.stmt.SOffset > 0 {
if rm.stmt.SOffset > len(tagSets) {
tagSets = nil
} else {
if rm.stmt.SOffset+rm.stmt.SLimit > len(tagSets) {
rm.stmt.SLimit = len(tagSets) - rm.stmt.SOffset
}
tagSets = tagSets[rm.stmt.SOffset : rm.stmt.SOffset+rm.stmt.SLimit]
}
}
// Create all cursors for reading the data from this shard.
for _, t := range tagSets {
cursors := []*seriesCursor{}
for i, key := range t.SeriesKeys {
c := createCursorForSeries(rm.tx, rm.shard, key)
if c == nil {
// No data exists for this key.
continue
}
cm := newSeriesCursor(c, t.Filters[i])
cursors = append(cursors, cm)
}
tsc := newTagSetCursor(m.Name, t.Tags, cursors, rm.shard.FieldCodec(m.Name))
// Prime the buffers.
for i := 0; i < len(tsc.cursors); i++ {
k, v := tsc.cursors[i].SeekTo(rm.queryTMin)
tsc.keyBuffer[i] = k
tsc.valueBuffer[i] = v
}
rm.cursors = append(rm.cursors, tsc)
}
sort.Sort(tagSetCursors(rm.cursors))
}
return nil
}
// TagSets returns the list of TagSets for which this mapper has data.
func (rm *RawMapper) TagSets() []string {
return tagSetCursors(rm.cursors).Keys()
}
// NextChunk returns the next chunk of data. Data comes in the same order as the
// tagsets returned by TagSets. A chunk never contains data for more than 1 tagset.
// If there is no more data for any tagset, nil will be returned.
func (rm *RawMapper) NextChunk() (interface{}, error) {
var output *mapperOutput
for {
if rm.currCursorIndex == len(rm.cursors) {
// All tagset cursors processed. NextChunk'ing complete.
return nil, nil
}
cursor := rm.cursors[rm.currCursorIndex]
k, v := cursor.Next(rm.queryTMin, rm.queryTMax, rm.selectFields, rm.whereFields)
if v == nil {
// Tagset cursor is empty, move to next one.
rm.currCursorIndex++
if output != nil {
// There is data, so return it and continue when next called.
return output, nil
} else {
// Just go straight to the next cursor.
continue
}
}
if output == nil {
output = &mapperOutput{
Name: cursor.measurement,
Tags: cursor.tags,
}
}
value := &mapperValue{Time: k, Value: v}
output.Values = append(output.Values, value)
if len(output.Values) == rm.chunkSize {
return output, nil
}
}
}
// Close closes the mapper.
func (rm *RawMapper) Close() {
if rm != nil && rm.tx != nil {
_ = rm.tx.Rollback()
}
}
// AggMapper is for retrieving data, for an aggregate query, from a given shard.
type AggMapper struct {
shard *Shard
stmt *influxql.SelectStatement
tx *bolt.Tx // Read transaction for this shard.
queryTMin int64 // Minimum time of the query.
queryTMinWindow int64 // Minimum time of the query floored to start of interval.
queryTMax int64 // Maximum time of the query.
intervalSize int64 // Size of each interval.
mapFuncs []influxql.MapFunc // The mapping functions.
fieldNames []string // the field name being read for mapping.
whereFields []string // field names that occur in the where clause
selectFields []string // field names that occur in the select clause
selectTags []string // tag keys that occur in the select clause
numIntervals int // Maximum number of intervals to return.
currInterval int // Current interval for which data is being fetched.
cursors []*tagSetCursor // Cursors per tag sets.
currCursorIndex int // Current tagset cursor being drained.
}
// NewAggMapper returns a mapper for the given shard, which will return data for the SELECT statement.
func NewAggMapper(shard *Shard, stmt *influxql.SelectStatement) *AggMapper {
return &AggMapper{
shard: shard,
stmt: stmt,
cursors: make([]*tagSetCursor, 0),
}
}
// Open opens the aggregate mapper.
func (am *AggMapper) Open() error {
var err error
// Get a read-only transaction.
tx, err := am.shard.DB().Begin(false)
if err != nil {
return err
}
am.tx = tx
// Set up each mapping function for this statement.
aggregates := am.stmt.FunctionCalls()
am.mapFuncs = make([]influxql.MapFunc, len(aggregates))
am.fieldNames = make([]string, len(am.mapFuncs))
for i, c := range aggregates {
am.mapFuncs[i], err = influxql.InitializeMapFunc(c)
if err != nil {
return err
}
// Check for calls like `derivative(mean(value), 1d)`
var nested *influxql.Call = c
if fn, ok := c.Args[0].(*influxql.Call); ok {
nested = fn
}
switch lit := nested.Args[0].(type) {
case *influxql.VarRef:
am.fieldNames[i] = lit.Val
case *influxql.Distinct:
if c.Name != "count" {
return fmt.Errorf("aggregate call didn't contain a field %s", c.String())
}
am.fieldNames[i] = lit.Val
default:
return fmt.Errorf("aggregate call didn't contain a field %s", c.String())
}
}
// Set all time-related parameters on the mapper.
am.queryTMin, am.queryTMax = influxql.TimeRangeAsEpochNano(am.stmt.Condition)
// For GROUP BY time queries, limit the number of data points returned by the limit and offset
d, err := am.stmt.GroupByInterval()
if err != nil {
return err
}
am.intervalSize = d.Nanoseconds()
if am.queryTMin == 0 || am.intervalSize == 0 {
am.numIntervals = 1
am.intervalSize = am.queryTMax - am.queryTMin
} else {
intervalTop := am.queryTMax/am.intervalSize*am.intervalSize + am.intervalSize
intervalBottom := am.queryTMin / am.intervalSize * am.intervalSize
am.numIntervals = int((intervalTop - intervalBottom) / am.intervalSize)
}
if am.stmt.Limit > 0 || am.stmt.Offset > 0 {
// ensure that the offset isn't higher than the number of points we'd get
if am.stmt.Offset > am.numIntervals {
return nil
}
// Take the lesser of either the pre computed number of GROUP BY buckets that
// will be in the result or the limit passed in by the user
if am.stmt.Limit < am.numIntervals {
am.numIntervals = am.stmt.Limit
}
}
// If we are exceeding our MaxGroupByPoints error out
if am.numIntervals > MaxGroupByPoints {
return errors.New("too many points in the group by interval. maybe you forgot to specify a where time clause?")
}
// Ensure that the start time for the results is on the start of the window.
am.queryTMinWindow = am.queryTMin
if am.intervalSize > 0 && am.numIntervals > 1 {
am.queryTMinWindow = am.queryTMinWindow / am.intervalSize * am.intervalSize
}
// Create the TagSet cursors for the Mapper.
for _, src := range am.stmt.Sources {
mm, ok := src.(*influxql.Measurement)
if !ok {
return fmt.Errorf("invalid source type: %#v", src)
}
m := am.shard.index.Measurement(mm.Name)
if m == nil {
// This shard has never received data for the measurement, so no Mapper
// is required.
return nil
}
// Create tagset cursors and determine various field types within SELECT statement.
tsf, err := createTagSetsAndFields(m, am.stmt)
if err != nil {
return err
}
tagSets := tsf.tagSets
am.selectFields = tsf.selectFields
am.selectTags = tsf.selectTags
am.whereFields = tsf.whereFields
// Validate that group by is not a field
if err := m.ValidateGroupBy(am.stmt); err != nil {
return err
}
// SLIMIT and SOFFSET the unique series
if am.stmt.SLimit > 0 || am.stmt.SOffset > 0 {
if am.stmt.SOffset > len(tagSets) {
tagSets = nil
} else {
if am.stmt.SOffset+am.stmt.SLimit > len(tagSets) {
am.stmt.SLimit = len(tagSets) - am.stmt.SOffset
}
tagSets = tagSets[am.stmt.SOffset : am.stmt.SOffset+am.stmt.SLimit]
}
}
// Create all cursors for reading the data from this shard.
for _, t := range tagSets {
cursors := []*seriesCursor{}
for i, key := range t.SeriesKeys {
c := createCursorForSeries(am.tx, am.shard, key)
if c == nil {
// No data exists for this key.
continue
}
cm := newSeriesCursor(c, t.Filters[i])
cursors = append(cursors, cm)
}
tsc := newTagSetCursor(m.Name, t.Tags, cursors, am.shard.FieldCodec(m.Name))
am.cursors = append(am.cursors, tsc)
}
sort.Sort(tagSetCursors(am.cursors))
}
return nil
}
// NextChunk returns the next chunk of data, which is the next interval of data
// for the current tagset. Tagsets are always processed in the same order as that
// returned by TagSets(). When there is no more data for any tagset, nil
// is returned.
func (am *AggMapper) NextChunk() (interface{}, error) {
var output *mapperOutput
for {
if am.currCursorIndex == len(am.cursors) {
// All tagset cursors processed. NextChunk'ing complete.
return nil, nil
}
tsc := am.cursors[am.currCursorIndex]
tmin, tmax := am.nextInterval()
if tmin < 0 {
// All intervals complete for this tagset. Move to the next tagset.
am.resetIntervals()
am.currCursorIndex++
continue
}
// Prep the return data for this tagset. This will hold data for a single interval
// for a single tagset.
if output == nil {
output = &mapperOutput{
Name: tsc.measurement,
Tags: tsc.tags,
Values: make([]*mapperValue, 1),
}
// Aggregate values only use the first entry in the Values field. Set the time
// to the start of the interval.
output.Values[0] = &mapperValue{
Time: tmin,
Value: make([]interface{}, 0)}
}
// Always clamp tmin. This can happen as bucket-times are bucketed to the nearest
// interval, and this can be less than the times in the query.
qmin := tmin
if qmin < am.queryTMin {
qmin = am.queryTMin
}
for i := range am.mapFuncs {
// Prime the tagset cursor for the start of the interval. This is not ideal, as
// it should really calculate the values all in 1 pass, but that would require
// changes to the mapper functions, which can come later.
// Prime the buffers.
for i := 0; i < len(tsc.cursors); i++ {
k, v := tsc.cursors[i].SeekTo(tmin)
tsc.keyBuffer[i] = k
tsc.valueBuffer[i] = v
}
// Wrap the tagset cursor so it implements the mapping functions interface.
f := func() (time int64, value interface{}) {
return tsc.Next(qmin, tmax, []string{am.fieldNames[i]}, am.whereFields)
}
tagSetCursor := &aggTagSetCursor{
nextFunc: f,
}
// Execute the map function which walks the entire interval, and aggregates
// the result.
values := output.Values[0].Value.([]interface{})
output.Values[0].Value = append(values, am.mapFuncs[i](tagSetCursor))
}
return output, nil
}
}
// nextInterval returns the next interval for which to return data. If start is less than 0
// there are no more intervals.
func (am *AggMapper) nextInterval() (start, end int64) {
t := am.queryTMinWindow + int64(am.currInterval+am.stmt.Offset)*am.intervalSize
// Onto next interval.
am.currInterval++
if t > am.queryTMax || am.currInterval > am.numIntervals {
start, end = -1, 1
} else {
start, end = t, t+am.intervalSize
}
return
}
// resetIntervals starts the Mapper at the first interval. Subsequent intervals
// should be retrieved via nextInterval().
func (am *AggMapper) resetIntervals() {
am.currInterval = 0
}
// TagSets returns the list of TagSets for which this mapper has data.
func (am *AggMapper) TagSets() []string {
return tagSetCursors(am.cursors).Keys()
}
// Close closes the mapper.
func (am *AggMapper) Close() {
if am != nil && am.tx != nil {
_ = am.tx.Rollback()
}
}
// aggTagSetCursor wraps a standard tagSetCursor, such that the values it emits are aggregated
// by intervals.
type aggTagSetCursor struct {
nextFunc func() (time int64, value interface{})
}
// Next returns the next value for the aggTagSetCursor. It implements the interface expected
// by the mapping functions.
func (a *aggTagSetCursor) Next() (time int64, value interface{}) {
return a.nextFunc()
}
// tagSetCursor is a virtual cursor that iterates over multiple series cursors, as though it were
// a single series.
type tagSetCursor struct {
measurement string // Measurement name
tags map[string]string // Tag key-value pairs
cursors []*seriesCursor // Underlying series cursors.
decoder *FieldCodec // decoder for the raw data bytes
// Lookahead buffers for the cursors. Performance analysis shows that it is critical
// that these buffers are part of the tagSetCursor type and not part of the
// cursors type.
keyBuffer []int64 // The current timestamp key for each cursor
valueBuffer [][]byte // The current value for each cursor
}
// tagSetCursors represents a sortable slice of tagSetCursors.
type tagSetCursors []*tagSetCursor
func (a tagSetCursors) Len() int { return len(a) }
func (a tagSetCursors) Less(i, j int) bool { return a[i].key() < a[j].key() }
func (a tagSetCursors) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a tagSetCursors) Keys() []string {
keys := []string{}
for i := range a {
keys = append(keys, a[i].key())
}
sort.Strings(keys)
return keys
}
// newTagSetCursor returns a tagSetCursor
func newTagSetCursor(m string, t map[string]string, c []*seriesCursor, d *FieldCodec) *tagSetCursor {
return &tagSetCursor{
measurement: m,
tags: t,
cursors: c,
decoder: d,
keyBuffer: make([]int64, len(c)),
valueBuffer: make([][]byte, len(c)),
}
}
func (tsc *tagSetCursor) key() string {
return formMeasurementTagSetKey(tsc.measurement, tsc.tags)
}
// Next returns the next matching series-key, timestamp and byte slice for the tagset. Filtering
// is enforced on the values. If there is no matching value, then a nil result is returned.
func (tsc *tagSetCursor) Next(tmin, tmax int64, selectFields, whereFields []string) (int64, interface{}) {
for {
// Find the next lowest timestamp
min := -1
minKey := int64(math.MaxInt64)
for i, k := range tsc.keyBuffer {
if k != -1 && (k == tmin) || k < minKey && k >= tmin && k < tmax {
min = i
minKey = k
}
}
// Return if there is no more data for this tagset.
if min == -1 {
return -1, nil
}
// set the current timestamp and seriesID
timestamp := tsc.keyBuffer[min]
var value interface{}
if len(selectFields) > 1 {
if fieldsWithNames, err := tsc.decoder.DecodeFieldsWithNames(tsc.valueBuffer[min]); err == nil {
value = fieldsWithNames
// if there's a where clause, make sure we don't need to filter this value
if tsc.cursors[min].filter != nil && !matchesWhere(tsc.cursors[min].filter, fieldsWithNames) {
value = nil
}
}
} else {
// With only 1 field SELECTed, decoding all fields may be avoidable, which is faster.
var err error
value, err = tsc.decoder.DecodeByName(selectFields[0], tsc.valueBuffer[min])
if err != nil {
value = nil
} else {
// If there's a WHERE clause, see if we need to filter
if tsc.cursors[min].filter != nil {
// See if the WHERE is only on this field or on one or more other fields.
// If the latter, we'll have to decode everything
if len(whereFields) == 1 && whereFields[0] == selectFields[0] {
if !matchesWhere(tsc.cursors[min].filter, map[string]interface{}{selectFields[0]: value}) {
value = nil
}
} else { // Decode everything
fieldsWithNames, err := tsc.decoder.DecodeFieldsWithNames(tsc.valueBuffer[min])
if err != nil || !matchesWhere(tsc.cursors[min].filter, fieldsWithNames) {
value = nil
}
}
}
}
}
// Advance the cursor
nextKey, nextVal := tsc.cursors[min].Next()
tsc.keyBuffer[min] = nextKey
tsc.valueBuffer[min] = nextVal
// Value didn't match, look for the next one.
if value == nil {
continue
}
return timestamp, value
}
}
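
The scan over keyBuffer above is a k-way merge: each iteration picks the cursor holding the lowest buffered timestamp inside the window. A simplified, self-contained sketch of that selection (it ignores the k == tmin special case in the original condition):

```go
package main

import "fmt"

// lowest returns the index of the cursor whose buffered timestamp is the
// smallest within [tmin, tmax); -1 marks an exhausted cursor and no match.
func lowest(keys []int64, tmin, tmax int64) int {
	min, minKey := -1, int64(1)<<62
	for i, k := range keys {
		if k != -1 && k >= tmin && k < tmax && k < minKey {
			min, minKey = i, k
		}
	}
	return min
}

func main() {
	keys := []int64{100, -1, 40, 70}
	fmt.Println(lowest(keys, 0, 1000)) // 2: the cursor at timestamp 40 goes first
}
```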
// seriesCursor is a cursor that walks a single series. It provides lookahead functionality.
type seriesCursor struct {
cursor *shardCursor // BoltDB cursor for a series
filter influxql.Expr
}
// newSeriesCursor returns a new instance of a series cursor.
func newSeriesCursor(b *shardCursor, filter influxql.Expr) *seriesCursor {
return &seriesCursor{
cursor: b,
filter: filter,
}
}
// SeekTo positions the cursor at the given key, returning the timestamp and value found there.
func (sc *seriesCursor) SeekTo(key int64) (timestamp int64, value []byte) {
k, v := sc.cursor.Seek(u64tob(uint64(key)))
if k == nil {
timestamp = -1
} else {
timestamp, value = int64(btou64(k)), v
}
return
}
// Next returns the next timestamp and value from the cursor.
func (sc *seriesCursor) Next() (key int64, value []byte) {
k, v := sc.cursor.Next()
if k == nil {
key = -1
} else {
key, value = int64(btou64(k)), v
}
return
}
// createCursorForSeries creates a cursor for walking the given series key. The cursor
// consolidates both the Bolt store and any WAL cache.
func createCursorForSeries(tx *bolt.Tx, shard *Shard, key string) *shardCursor {
// Retrieve key bucket.
b := tx.Bucket([]byte(key))
// Ignore if there is no bucket or points in the cache.
partitionID := WALPartition([]byte(key))
if b == nil && len(shard.cache[partitionID][key]) == 0 {
return nil
}
// Retrieve a copy of the in-cache points for the key.
cache := make([][]byte, len(shard.cache[partitionID][key]))
copy(cache, shard.cache[partitionID][key])
// Build a cursor that merges the bucket and cache together.
cur := &shardCursor{cache: cache}
if b != nil {
cur.cursor = b.Cursor()
}
return cur
}
type tagSetsAndFields struct {
tagSets []*influxql.TagSet
selectFields []string
selectTags []string
whereFields []string
}
// createTagSetsAndFields returns the tagsets and various fields given a measurement and
// SELECT statement. It also ensures that the fields and tags exist.
func createTagSetsAndFields(m *Measurement, stmt *influxql.SelectStatement) (*tagSetsAndFields, error) {
_, tagKeys, err := stmt.Dimensions.Normalize()
if err != nil {
return nil, err
}
sfs := newStringSet()
sts := newStringSet()
wfs := newStringSet()
// Validate the fields and tags asked for exist and keep track of which are in the select vs the where
for _, n := range stmt.NamesInSelect() {
if m.HasField(n) {
sfs.add(n)
continue
}
if !m.HasTagKey(n) {
return nil, fmt.Errorf("unknown field or tag name in select clause: %s", n)
}
sts.add(n)
tagKeys = append(tagKeys, n)
}
for _, n := range stmt.NamesInWhere() {
if n == "time" {
continue
}
if m.HasField(n) {
wfs.add(n)
continue
}
if !m.HasTagKey(n) {
return nil, fmt.Errorf("unknown field or tag name in where clause: %s", n)
}
}
// Get the sorted unique tag sets for this statement.
tagSets, err := m.TagSets(stmt, tagKeys)
if err != nil {
return nil, err
}
return &tagSetsAndFields{
tagSets: tagSets,
selectFields: sfs.list(),
selectTags: sts.list(),
whereFields: wfs.list(),
}, nil
}
// matchesWhere returns true if the value matches the where clause
func matchesWhere(f influxql.Expr, fields map[string]interface{}) bool {
if ok, _ := influxql.Eval(f, fields).(bool); !ok {
return false
}
return true
}
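
influxql.Eval returns an interface{}, so the bool assertion above treats any non-boolean result as a non-match. A hedged fragment, as it might appear in a test in this package (influxql.ParseExpr is assumed available in the vendored influxql package):

```go
expr, err := influxql.ParseExpr("value > 90")
if err != nil {
	log.Fatal(err)
}
fmt.Println(matchesWhere(expr, map[string]interface{}{"value": float64(97)})) // true
fmt.Println(matchesWhere(expr, map[string]interface{}{"value": float64(12)})) // false
```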
func formMeasurementTagSetKey(name string, tags map[string]string) string {
if len(tags) == 0 {
return name
}
return strings.Join([]string{name, string(marshalTags(tags))}, "|")
}
// btou64 converts an 8-byte slice into an uint64.
func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) }

1279
vendor/github.com/influxdata/influxdb/tsdb/meta.go generated vendored Normal file

File diff suppressed because it is too large

83
vendor/github.com/influxdata/influxdb/tsdb/monitor.go generated vendored Normal file

@@ -0,0 +1,83 @@
package tsdb
// Monitor represents a TSDB monitoring service.
type Monitor struct {
Store interface{}
}
func (m *Monitor) Open() error { return nil }
func (m *Monitor) Close() error { return nil }
// StartSelfMonitoring starts a goroutine which monitors the InfluxDB server
// itself and stores the results in the specified database at a given interval.
/*
func (s *Server) StartSelfMonitoring(database, retention string, interval time.Duration) error {
if interval == 0 {
return fmt.Errorf("statistics check interval must be non-zero")
}
go func() {
tick := time.NewTicker(interval)
for {
<-tick.C
// Create the batch and tags
tags := map[string]string{"serverID": strconv.FormatUint(s.ID(), 10)}
if h, err := os.Hostname(); err == nil {
tags["host"] = h
}
batch := pointsFromStats(s.stats, tags)
// Shard-level stats.
tags["shardID"] = strconv.FormatUint(s.id, 10)
s.mu.RLock()
for _, sh := range s.shards {
if !sh.HasDataNodeID(s.id) {
// No stats for non-local shards.
continue
}
batch = append(batch, pointsFromStats(sh.stats, tags)...)
}
s.mu.RUnlock()
// Server diagnostics.
for _, row := range s.DiagnosticsAsRows() {
points, err := s.convertRowToPoints(row.Name, row)
if err != nil {
s.Logger.Printf("failed to write diagnostic row for %s: %s", row.Name, err.Error())
continue
}
for _, p := range points {
p.AddTag("serverID", strconv.FormatUint(s.ID(), 10))
}
batch = append(batch, points...)
}
s.WriteSeries(database, retention, batch)
}
}()
return nil
}
// Function for local use turns stats into a slice of points
func pointsFromStats(st *Stats, tags map[string]string) []tsdb.Point {
var points []tsdb.Point
now := time.Now()
st.Walk(func(k string, v int64) {
point := tsdb.NewPoint(
st.name+"_"+k,
make(map[string]string),
map[string]interface{}{"value": int(v)},
now,
)
// Specifically create a new map.
for k, v := range tags {
tags[k] = v
point.AddTag(k, v)
}
points = append(points, point)
})
return points
}
*/

1135
vendor/github.com/influxdata/influxdb/tsdb/points.go generated vendored Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large

1218
vendor/github.com/influxdata/influxdb/tsdb/shard.go generated vendored Normal file

File diff suppressed because it is too large


@@ -0,0 +1,124 @@
package tsdb
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"time"
"github.com/boltdb/bolt"
"github.com/influxdb/influxdb/snapshot"
)
// NewSnapshotWriter returns a new snapshot.Writer that will write
// metadata and the store's shards to an archive.
func NewSnapshotWriter(meta []byte, store *Store) (*snapshot.Writer, error) {
// Create snapshot writer.
sw := snapshot.NewWriter()
if err := func() error {
// Create meta file.
f := &snapshot.File{
Name: "meta",
Size: int64(len(meta)),
ModTime: time.Now(),
}
sw.Manifest.Files = append(sw.Manifest.Files, *f)
sw.FileWriters[f.Name] = NopWriteToCloser(bytes.NewReader(meta))
// Create files for each shard.
if err := appendShardSnapshotFiles(sw, store); err != nil {
return fmt.Errorf("create shard snapshot files: %s", err)
}
return nil
}(); err != nil {
_ = sw.Close()
return nil, err
}
return sw, nil
}
// appendShardSnapshotFiles adds snapshot files for each shard in the store.
func appendShardSnapshotFiles(sw *snapshot.Writer, store *Store) error {
// Calculate absolute path of store to use for relative shard paths.
storePath, err := filepath.Abs(store.Path())
if err != nil {
return fmt.Errorf("store abs path: %s", err)
}
// Create files for each shard.
for _, shardID := range store.ShardIDs() {
// Retrieve shard.
sh := store.Shard(shardID)
if sh == nil {
return fmt.Errorf("shard not found: %d", shardID)
}
// Calculate relative path from store.
shardPath, err := filepath.Abs(sh.Path())
if err != nil {
return fmt.Errorf("shard abs path: %s", err)
}
name, err := filepath.Rel(storePath, shardPath)
if err != nil {
return fmt.Errorf("shard rel path: %s", err)
}
if err := appendShardSnapshotFile(sw, sh, name); err != nil {
return fmt.Errorf("append shard: name=%s, err=%s", name, err)
}
}
return nil
}
func appendShardSnapshotFile(sw *snapshot.Writer, sh *Shard, name string) error {
// Stat the underlying data file to retrieve last modified date.
fi, err := os.Stat(sh.Path())
if err != nil {
return fmt.Errorf("stat shard data file: %s", err)
}
// Begin transaction.
tx, err := sh.db.Begin(false)
if err != nil {
return fmt.Errorf("begin: %s", err)
}
// Create file.
f := snapshot.File{
Name: name,
Size: tx.Size(),
ModTime: fi.ModTime(),
}
// Append to snapshot writer.
sw.Manifest.Files = append(sw.Manifest.Files, f)
sw.FileWriters[f.Name] = &boltTxCloser{tx}
return nil
}
// boltTxCloser wraps a Bolt transaction to implement io.Closer.
type boltTxCloser struct {
*bolt.Tx
}
// Close rolls back the transaction.
func (tx *boltTxCloser) Close() error { return tx.Rollback() }
// NopWriteToCloser wraps an io.WriterTo so that it also implements io.Closer; Close is a no-op.
func NopWriteToCloser(w io.WriterTo) interface {
io.WriterTo
io.Closer
} {
return &nopWriteToCloser{w}
}
type nopWriteToCloser struct {
io.WriterTo
}
func (w *nopWriteToCloser) Close() error { return nil }
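
A small usage sketch: bytes.Reader already implements io.WriterTo, and the wrapper adds the no-op Close so the value can sit in FileWriters alongside real closers such as boltTxCloser:

```go
package main

import (
	"bytes"
	"log"
	"os"

	"github.com/influxdb/influxdb/tsdb"
)

func main() {
	w := tsdb.NopWriteToCloser(bytes.NewReader([]byte("meta")))
	defer w.Close() // no-op
	if _, err := w.WriteTo(os.Stdout); err != nil {
		log.Fatal(err)
	}
}
```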

343
vendor/github.com/influxdata/influxdb/tsdb/store.go generated vendored Normal file

@@ -0,0 +1,343 @@
package tsdb
import (
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/influxdb/influxdb/influxql"
)
func NewStore(path string) *Store {
return &Store{
path: path,
MaxWALSize: DefaultMaxWALSize,
WALFlushInterval: DefaultWALFlushInterval,
WALPartitionFlushDelay: DefaultWALPartitionFlushDelay,
Logger: log.New(os.Stderr, "[store] ", log.LstdFlags),
}
}
var (
ErrShardNotFound = fmt.Errorf("shard not found")
)
type Store struct {
mu sync.RWMutex
path string
databaseIndexes map[string]*DatabaseIndex
shards map[uint64]*Shard
MaxWALSize int
WALFlushInterval time.Duration
WALPartitionFlushDelay time.Duration
Logger *log.Logger
}
// Path returns the store's root path.
func (s *Store) Path() string { return s.path }
func (s *Store) CreateShard(database, retentionPolicy string, shardID uint64) error {
s.mu.Lock()
defer s.mu.Unlock()
// shard already exists
if _, ok := s.shards[shardID]; ok {
return nil
}
// create the db and retention policy dirs if they don't exist
if err := os.MkdirAll(filepath.Join(s.path, database, retentionPolicy), 0700); err != nil {
return err
}
// create the database index if it does not exist
db, ok := s.databaseIndexes[database]
if !ok {
db = NewDatabaseIndex()
s.databaseIndexes[database] = db
}
shardPath := filepath.Join(s.path, database, retentionPolicy, strconv.FormatUint(shardID, 10))
shard := s.newShard(db, shardPath)
if err := shard.Open(); err != nil {
return err
}
s.shards[shardID] = shard
return nil
}
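
A minimal sketch of standing up a store and creating a shard; the data directory, database, retention policy and shard ID are all hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdb/influxdb/tsdb"
)

func main() {
	st := tsdb.NewStore("/tmp/influx-data") // hypothetical data directory
	if err := st.Open(); err != nil {
		log.Fatal(err)
	}
	if err := st.CreateShard("mydb", "default", 1); err != nil {
		log.Fatal(err)
	}
	fmt.Println(st.ShardIDs()) // [1]
}
```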
// DeleteShard removes a shard from disk.
func (s *Store) DeleteShard(shardID uint64) error {
s.mu.Lock()
defer s.mu.Unlock()
// ensure shard exists
sh, ok := s.shards[shardID]
if !ok {
return nil
}
if err := sh.Close(); err != nil {
return err
}
if err := os.Remove(sh.path); err != nil {
return err
}
delete(s.shards, shardID)
return nil
}
// newShard returns a shard and copies configuration settings from the store.
func (s *Store) newShard(index *DatabaseIndex, path string) *Shard {
sh := NewShard(index, path)
sh.MaxWALSize = s.MaxWALSize
sh.WALFlushInterval = s.WALFlushInterval
sh.WALPartitionFlushDelay = s.WALPartitionFlushDelay
return sh
}
// DeleteDatabase will close all shards associated with a database and remove the directory and files from disk.
func (s *Store) DeleteDatabase(name string, shardIDs []uint64) error {
s.mu.Lock()
defer s.mu.Unlock()
for _, id := range shardIDs {
if shard := s.shards[id]; shard != nil {
shard.Close()
}
// Remove the entry so later lookups don't return a closed shard.
delete(s.shards, id)
}
if err := os.RemoveAll(filepath.Join(s.path, name)); err != nil {
return err
}
delete(s.databaseIndexes, name)
return nil
}
func (s *Store) Shard(shardID uint64) *Shard {
s.mu.RLock()
defer s.mu.RUnlock()
return s.shards[shardID]
}
// ShardIDs returns a slice of all shard IDs under management.
func (s *Store) ShardIDs() []uint64 {
s.mu.RLock()
defer s.mu.RUnlock()
ids := make([]uint64, 0, len(s.shards))
for id := range s.shards {
ids = append(ids, id)
}
return ids
}
func (s *Store) ValidateAggregateFieldsInStatement(shardID uint64, measurementName string, stmt *influxql.SelectStatement) error {
s.mu.RLock()
shard := s.shards[shardID]
s.mu.RUnlock()
if shard == nil {
return ErrShardNotFound
}
return shard.ValidateAggregateFieldsInStatement(measurementName, stmt)
}
func (s *Store) DatabaseIndex(name string) *DatabaseIndex {
s.mu.RLock()
defer s.mu.RUnlock()
return s.databaseIndexes[name]
}
func (s *Store) Measurement(database, name string) *Measurement {
s.mu.RLock()
db := s.databaseIndexes[database]
s.mu.RUnlock()
if db == nil {
return nil
}
return db.Measurement(name)
}
// deleteSeries loops through the local shards and deletes the series data and metadata for the given series keys
func (s *Store) deleteSeries(keys []string) error {
s.mu.RLock()
defer s.mu.RUnlock()
for _, sh := range s.shards {
if err := sh.deleteSeries(keys); err != nil {
return err
}
}
return nil
}
// deleteMeasurement loops through the local shards and removes the measurement field encodings from each shard
func (s *Store) deleteMeasurement(name string, seriesKeys []string) error {
s.mu.RLock()
defer s.mu.RUnlock()
for _, sh := range s.shards {
if err := sh.deleteMeasurement(name, seriesKeys); err != nil {
return err
}
}
return nil
}
func (s *Store) loadIndexes() error {
dbs, err := ioutil.ReadDir(s.path)
if err != nil {
return err
}
for _, db := range dbs {
if !db.IsDir() {
s.Logger.Printf("Skipping database dir: %s. Not a directory", db.Name())
continue
}
s.databaseIndexes[db.Name()] = NewDatabaseIndex()
}
return nil
}
func (s *Store) loadShards() error {
// loop through the current database indexes
for db := range s.databaseIndexes {
rps, err := ioutil.ReadDir(filepath.Join(s.path, db))
if err != nil {
return err
}
for _, rp := range rps {
// retention policies should be directories. Skip anything that is not a dir.
if !rp.IsDir() {
s.Logger.Printf("Skipping retention policy dir: %s. Not a directory", rp.Name())
continue
}
shards, err := ioutil.ReadDir(filepath.Join(s.path, db, rp.Name()))
if err != nil {
return err
}
for _, sh := range shards {
path := filepath.Join(s.path, db, rp.Name(), sh.Name())
// Shard file names are numeric shardIDs
shardID, err := strconv.ParseUint(sh.Name(), 10, 64)
if err != nil {
s.Logger.Printf("Skipping shard: %s. Not a valid path", rp.Name())
continue
}
shard := s.newShard(s.databaseIndexes[db], path)
if err := shard.Open(); err != nil {
return fmt.Errorf("open shard %d: %s", shardID, err)
}
s.shards[shardID] = shard
}
}
}
return nil
}
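// Editor's note: loadIndexes and loadShards together expect the on-disk
// layout below; anything that does not match is skipped with a log message.
//
//	<store path>/
//		<database>/
//			<retention policy>/
//				<shard ID>   (numeric file name, one Bolt file per shard)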
func (s *Store) Open() error {
s.mu.Lock()
defer s.mu.Unlock()
s.shards = map[uint64]*Shard{}
s.databaseIndexes = map[string]*DatabaseIndex{}
// Create directory.
if err := os.MkdirAll(s.path, 0777); err != nil {
return err
}
// TODO: Start AE for Node
if err := s.loadIndexes(); err != nil {
return err
}
if err := s.loadShards(); err != nil {
return err
}
return nil
}
func (s *Store) WriteToShard(shardID uint64, points []Point) error {
s.mu.RLock()
defer s.mu.RUnlock()
sh, ok := s.shards[shardID]
if !ok {
return ErrShardNotFound
}
return sh.WritePoints(points)
}
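// Example (editor's sketch, not part of the original source): writing a
// single point to shard 1. NewPoint, Tags, and Fields are assumed to be the
// constructors defined elsewhere in this package; adjust if the signatures
// differ:
//
//	p := NewPoint("cpu", Tags{"host": "server01"}, Fields{"value": 0.64}, time.Now())
//	if err := store.WriteToShard(1, []Point{p}); err != nil {
//		log.Fatalf("write: %s", err)
//	}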
// Flush forces all shards to write their WAL data to the index.
func (s *Store) Flush() error {
s.mu.RLock()
defer s.mu.RUnlock()
for shardID, sh := range s.shards {
if err := sh.Flush(s.WALPartitionFlushDelay); err != nil {
return fmt.Errorf("flush: shard=%d, err=%s", shardID, err)
}
}
return nil
}
func (s *Store) CreateMapper(shardID uint64, query string, chunkSize int) (Mapper, error) {
q, err := influxql.NewParser(strings.NewReader(query)).ParseStatement()
if err != nil {
return nil, err
}
stmt, ok := q.(*influxql.SelectStatement)
if !ok {
return nil, fmt.Errorf("query is not a SELECT statement: %s", err.Error())
}
shard := s.Shard(shardID)
if shard == nil {
// This can happen if the shard has been assigned, but hasn't actually been created yet.
return nil, nil
}
if (stmt.IsRawQuery && !stmt.HasDistinct()) || stmt.IsSimpleDerivative() {
return NewRawMapper(shard, stmt, chunkSize), nil
}
return NewAggMapper(shard, stmt), nil
}
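// Example (editor's sketch, not part of the original source): building a
// mapper for a raw query against shard 1. A nil mapper with a nil error
// means the shard has been assigned but not yet created:
//
//	m, err := store.CreateMapper(1, `SELECT value FROM cpu`, 0)
//	if err != nil {
//		log.Fatalf("create mapper: %s", err)
//	}
//	if m == nil {
//		// shard not created yet; nothing to read
//	}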
func (s *Store) Close() error {
s.mu.Lock()
defer s.mu.Unlock()
for _, sh := range s.shards {
if err := sh.Close(); err != nil {
return err
}
}
s.shards = nil
s.databaseIndexes = nil
return nil
}
// IsRetryable returns true if this error is temporary and could be retried
func IsRetryable(err error) bool {
if err == nil {
return true
}
if strings.Contains(err.Error(), "field type conflict") {
return false
}
return true
}
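// Example (editor's sketch, not part of the original source): a simple retry
// loop built on IsRetryable. The attempt count, backoff, and the points slice
// are illustrative only:
//
//	var err error
//	for attempt := 0; attempt < 3; attempt++ {
//		if err = store.WriteToShard(1, points); err == nil || !IsRetryable(err) {
//			break
//		}
//		time.Sleep(100 * time.Millisecond)
//	}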