Add prom timer to stress
Signed-off-by: Michael Crosby <crosbymichael@gmail.com>
parent ca5f16c33e
commit 4d55298aab
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"net/http"
 	"os"
 	"os/signal"
 	"runtime"
@@ -15,6 +16,7 @@ import (
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/namespaces"
 	"github.com/containerd/containerd/oci"
+	metrics "github.com/docker/go-metrics"
 	"github.com/sirupsen/logrus"
 	"github.com/urfave/cli"
 )
@@ -97,6 +99,10 @@ func main() {
 			Name:  "json,j",
 			Usage: "output results in json format",
 		},
+		cli.StringFlag{
+			Name:  "metrics,m",
+			Usage: "address to serve the metrics API",
+		},
 	}
 	app.Before = func(context *cli.Context) error {
 		if context.GlobalBool("debug") {
@@ -113,7 +119,11 @@ func main() {
 			Duration:    context.GlobalDuration("duration"),
 			Concurrency: context.GlobalInt("concurrent"),
 			Exec:        context.GlobalBool("exec"),
-			Json:        context.GlobalBool("json"),
+			JSON:        context.GlobalBool("json"),
+			Metrics:     context.GlobalString("metrics"),
 		}
+		if config.Metrics != "" {
+			return serve(config)
+		}
 		return test(config)
 	}
@@ -128,13 +138,23 @@ type config struct {
 	Duration    time.Duration
 	Address     string
 	Exec        bool
-	Json        bool
+	JSON        bool
+	Metrics     string
 }
 
 func (c config) newClient() (*containerd.Client, error) {
 	return containerd.New(c.Address)
 }
 
+func serve(c config) error {
+	go func() {
+		if err := http.ListenAndServe(c.Metrics, metrics.Handler()); err != nil {
+			logrus.WithError(err).Error("listen and serve")
+		}
+	}()
+	return test(c)
+}
+
 func test(c config) error {
 	var (
 		wg sync.WaitGroup
@@ -212,7 +232,7 @@ func test(c config) error {
 		results.ContainersPerSecond,
 		results.SecondsPerContainer,
 	)
-	if c.Json {
+	if c.JSON {
 		if err := json.NewEncoder(os.Stdout).Encode(results); err != nil {
 			return err
 		}
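The hunks above are the command side of the change: a new metrics flag, and a serve wrapper that exposes metrics.Handler() as the root handler on that address before running the usual test. Because the handler is an ordinary http.Handler (it is passed straight to http.ListenAndServe above), it could just as well be mounted under an explicit path. A minimal sketch of that variant, where the /metrics path and the listen address are illustrative and not part of this commit:

package main

import (
	"net/http"

	metrics "github.com/docker/go-metrics"
	"github.com/sirupsen/logrus"
)

func main() {
	// Illustrative: mount the go-metrics handler under an explicit path instead of "/".
	mux := http.NewServeMux()
	mux.Handle("/metrics", metrics.Handler())

	// Illustrative address; the stress tool takes its address from the new metrics flag.
	if err := http.ListenAndServe("127.0.0.1:9091", mux); err != nil {
		logrus.WithError(err).Error("listen and serve")
	}
}

The remaining hunks below are the worker side, where the timer itself is registered and updated.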
@@ -8,14 +8,26 @@ import (
 	"strings"
 	"sync"
 	"syscall"
 	"time"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containerd/containerd"
 	"github.com/containerd/containerd/cio"
 	"github.com/containerd/containerd/oci"
+	metrics "github.com/docker/go-metrics"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/sirupsen/logrus"
 )
 
+var ct metrics.Timer
+
+func init() {
+	ns := metrics.NewNamespace("stress", "", nil)
+	// if you want more fine-grained metrics, you can drill down with the metrics that
+	// containerd itself is outputting to Prometheus
+	ct = ns.NewTimer("run", "Run time of a full container during the test")
+	metrics.Register(ns)
+}
+
 type worker struct {
 	id       int
 	wg       *sync.WaitGroup
@@ -43,6 +55,7 @@ func (w *worker) run(ctx, tctx context.Context) {
 		w.count++
 		id := w.getID()
 		logrus.Debugf("starting container %s", id)
+		start := time.Now()
 		if err := w.runContainer(ctx, id); err != nil {
 			if err != context.DeadlineExceeded ||
 				!strings.Contains(err.Error(), context.DeadlineExceeded.Error()) {
@@ -50,7 +63,10 @@ func (w *worker) run(ctx, tctx context.Context) {
 				logrus.WithError(err).Errorf("running container %s", id)
 
 			}
 			continue
 		}
+		// only log times on success so we don't skew the results with failures that go really fast
+		ct.UpdateSince(start)
 	}
 }
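Taken together, the worker half boils down to the go-metrics timer pattern: register a Timer in a namespace once, then call UpdateSince only on success so fast failures do not skew the distribution. A self-contained sketch of that pattern using the same calls as the diff (NewNamespace, NewTimer, Register, UpdateSince); the loop and the sleep stand in for a successful container run and are illustrative:

package main

import (
	"time"

	metrics "github.com/docker/go-metrics"
)

// ct mirrors the timer registered above: one observation per successful run.
var ct metrics.Timer

func init() {
	// Same registration as in the worker file: a "run" timer in the "stress" namespace.
	ns := metrics.NewNamespace("stress", "", nil)
	ct = ns.NewTimer("run", "Run time of a full container during the test")
	metrics.Register(ns)
}

func main() {
	for i := 0; i < 5; i++ {
		start := time.Now()
		// Stand-in for a successful container run; on failure the update is skipped
		// so that quick failures do not distort the timings.
		time.Sleep(50 * time.Millisecond)
		ct.UpdateSince(start)
	}
	// The recorded timings are exposed by metrics.Handler(), as wired up in serve() above.
}

go-metrics decides the exact Prometheus series names emitted for the timer; the diff only fixes the namespace ("stress"), the timer name ("run"), and the help text.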