Merge pull request #7631 from thaJeztah/strings_cut

replace strings.Split(N) with strings.Cut() or alternatives
Kazuyoshi Kato 2022-11-10 15:28:22 -08:00 committed by GitHub
commit 02484f5e05
20 changed files with 116 additions and 111 deletions
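
The change applied across these files is mechanical: strings.Cut, added in Go 1.18, replaces the "SplitN by 2 and check the length" idiom. A minimal sketch of the before/after pattern, assuming a label-style key=value input (parseLabel is an illustrative name, not taken from the diff):

package main

import (
	"fmt"
	"strings"
)

// parseLabel mirrors the LabelArgs hunk further down: a missing "=" defaults
// the value to "true" instead of being treated as an error.
func parseLabel(s string) (string, string) {
	// Old idiom:
	//   parts := strings.SplitN(s, "=", 2)
	//   key := parts[0]; value := "true"
	//   if len(parts) > 1 { value = parts[1] }
	key, value, ok := strings.Cut(s, "=")
	if !ok {
		value = "true"
	}
	return key, value
}

func main() {
	fmt.Println(parseLabel("foo=bar")) // foo bar
	fmt.Println(parseLabel("foo"))     // foo true
}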

View File

@ -206,13 +206,13 @@ func parseStat(data string) (stat Stat, err error) {
return stat, fmt.Errorf("invalid stat data: %q", data)
}
parts := strings.SplitN(data[:i], "(", 2)
if len(parts) != 2 {
val, name, ok := strings.Cut(data[:i], "(")
if !ok {
return stat, fmt.Errorf("invalid stat data: %q", data)
}
stat.Name = parts[1]
_, err = fmt.Sscanf(parts[0], "%d", &stat.PID)
stat.Name = name
_, err = fmt.Sscanf(val, "%d", &stat.PID)
if err != nil {
return stat, err
}
@ -220,7 +220,7 @@ func parseStat(data string) (stat Stat, err error) {
// parts indexes should be offset by 3 from the field number given
// proc(5), because parts is zero-indexed and we've removed fields
// one (PID) and two (Name) in the paren-split.
parts = strings.Split(data[i+2:], " ")
parts := strings.Split(data[i+2:], " ")
fmt.Sscanf(parts[22-3], "%d", &stat.StartTime)
fmt.Sscanf(parts[4-3], "%d", &stat.PPID)
return stat, nil
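
The offset-by-3 indexing described in the comment above can be checked with a short sketch. It assumes i is the position of the final ")" (how the surrounding function computes it is not shown in this hunk, presumably something like strings.LastIndex) and uses a fabricated, truncated stat line:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// /proc/<pid>/stat: "pid (comm) state ppid ..."; comm may itself contain
	// spaces, which is why the code splits around the closing paren first.
	data := "1234 (my prog) S 1 1234 1234 0 -1"
	i := strings.LastIndex(data, ")")
	parts := strings.Split(data[i+2:], " ")
	// proc(5) field 4 is ppid; fields 1 (pid) and 2 (comm) were stripped,
	// so it lives at parts[4-3].
	fmt.Println(parts[4-3]) // "1"
}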

View File

@ -230,13 +230,10 @@ func ObjectWithLabelArgs(clicontext *cli.Context) (string, map[string]string) {
func LabelArgs(labelStrings []string) map[string]string {
labels := make(map[string]string, len(labelStrings))
for _, label := range labelStrings {
parts := strings.SplitN(label, "=", 2)
key := parts[0]
value := "true"
if len(parts) > 1 {
value = parts[1]
key, value, ok := strings.Cut(label, "=")
if !ok {
value = "true"
}
labels[key] = value
}
@ -247,11 +244,11 @@ func LabelArgs(labelStrings []string) map[string]string {
func AnnotationArgs(annoStrings []string) (map[string]string, error) {
annotations := make(map[string]string, len(annoStrings))
for _, anno := range annoStrings {
parts := strings.SplitN(anno, "=", 2)
if len(parts) != 2 {
key, value, ok := strings.Cut(anno, "=")
if !ok {
return nil, fmt.Errorf("invalid key=value format annotation: %v", anno)
}
annotations[parts[0]] = parts[1]
annotations[key] = value
}
return annotations, nil
}

View File

@ -128,12 +128,8 @@ var createCommand = cli.Command{
if len(labelstr) > 0 {
labels := map[string]string{}
for _, lstr := range labelstr {
l := strings.SplitN(lstr, "=", 2)
if len(l) == 1 {
labels[l[0]] = ""
} else {
labels[l[0]] = l[1]
}
k, v, _ := strings.Cut(lstr, "=")
labels[k] = v
}
opts = append(opts, leases.WithLabels(labels))
}

View File

@ -62,13 +62,11 @@ func parseMountFlag(m string) (specs.Mount, error) {
}
for _, field := range fields {
v := strings.SplitN(field, "=", 2)
if len(v) < 2 {
key, val, ok := strings.Cut(field, "=")
if !ok {
return mount, fmt.Errorf("invalid mount specification: expected key=val")
}
key := v[0]
val := v[1]
switch key {
case "type":
mount.Type = val

View File

@ -310,16 +310,16 @@ func NewContainer(ctx gocontext.Context, client *containerd.Client, context *cli
joinNs := context.StringSlice("with-ns")
for _, ns := range joinNs {
parts := strings.Split(ns, ":")
if len(parts) != 2 {
nsType, nsPath, ok := strings.Cut(ns, ":")
if !ok {
return nil, errors.New("joining a Linux namespace using --with-ns requires the format 'nstype:path'")
}
if !validNamespace(parts[0]) {
return nil, errors.New("the Linux namespace type specified in --with-ns is not valid: " + parts[0])
if !validNamespace(nsType) {
return nil, errors.New("the Linux namespace type specified in --with-ns is not valid: " + nsType)
}
opts = append(opts, oci.WithLinuxNamespace(specs.LinuxNamespace{
Type: specs.LinuxNamespaceType(parts[0]),
Path: parts[1],
Type: specs.LinuxNamespaceType(nsType),
Path: nsPath,
}))
}
if context.IsSet("gpus") {
@ -457,7 +457,8 @@ func getNewTaskOpts(context *cli.Context) []containerd.NewTaskOpts {
}
func parseIDMapping(mapping string) (specs.LinuxIDMapping, error) {
parts := strings.Split(mapping, ":")
// We expect 3 parts, but limit to 4 to allow detection of invalid values.
parts := strings.SplitN(mapping, ":", 4)
if len(parts) != 3 {
return specs.LinuxIDMapping{}, errors.New("user namespace mappings require the format `container-id:host-id:size`")
}
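
A quick sketch of why the new code limits SplitN to 4 rather than 3: with a limit of 3, trailing garbage would be folded into the last element and slip past the len(parts) != 3 check.

package main

import (
	"fmt"
	"strings"
)

func main() {
	bad := "0:1000:65536:extra"
	fmt.Println(strings.SplitN(bad, ":", 3)) // [0 1000 65536:extra] -> 3 parts, wrongly accepted
	fmt.Println(strings.SplitN(bad, ":", 4)) // [0 1000 65536 extra] -> 4 parts, rejected
}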

View File

@ -152,14 +152,14 @@ func NewContainer(ctx gocontext.Context, client *containerd.Client, context *cli
opts = append(opts, oci.WithWindowsCPUMaximum(uint16(cmax)))
}
for _, dev := range context.StringSlice("device") {
parts := strings.Split(dev, "://")
if len(parts) != 2 {
idType, devID, ok := strings.Cut(dev, "://")
if !ok {
return nil, errors.New("devices must be in the format IDType://ID")
}
if parts[0] == "" {
if idType == "" {
return nil, errors.New("devices must have a non-empty IDType")
}
opts = append(opts, oci.WithWindowsDevice(parts[0], parts[1]))
opts = append(opts, oci.WithWindowsDevice(idType, devID))
}
}

View File

@ -43,11 +43,10 @@ func run() error {
}
dir := flag.Arg(1)
parts := strings.SplitN(flag.Arg(0), ".", 2)
if len(parts) != 2 {
name, section, ok := strings.Cut(flag.Arg(0), ".")
if !ok {
return fmt.Errorf("invalid name '%s': name does not contain man page section", flag.Arg(0))
}
name, section := parts[0], parts[1]
appName, ok := apps[name]
if !ok {

View File

@ -100,9 +100,7 @@ type data struct {
func cleanProfileName(profile string) string {
// Normally profiles are suffixed by " (enforce)". AppArmor profiles cannot
// contain spaces so this doesn't restrict daemon profile names.
if parts := strings.SplitN(profile, " ", 2); len(parts) >= 1 {
profile = parts[0]
}
profile, _, _ = strings.Cut(profile, " ")
if profile == "" {
profile = "unconfined"
}
@ -184,17 +182,18 @@ func parseVersion(output string) (int, error) {
// Copyright (C) 1999-2008 Novell Inc.
// Copyright 2009-2012 Canonical Ltd.
lines := strings.SplitN(output, "\n", 2)
words := strings.Split(lines[0], " ")
version := words[len(words)-1]
version, _, _ := strings.Cut(output, "\n")
if i := strings.LastIndex(version, " "); i >= 0 {
version = version[i+1:]
}
// trim "-beta1" suffix from version="3.0.0-beta1" if exists
version = strings.SplitN(version, "-", 2)[0]
version, _, _ = strings.Cut(version, "-")
// also trim tilde
version = strings.SplitN(version, "~", 2)[0]
version, _, _ = strings.Cut(version, "~")
// split by major minor version
v := strings.Split(version, ".")
v := strings.SplitN(version, ".", 4)
if len(v) == 0 || len(v) > 3 {
return -1, fmt.Errorf("parsing version failed for output: `%s`", output)
}
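
A walkthrough of the rewritten steps using a made-up apparmor_parser-style first line (the exact tool output is an assumption here):

package main

import (
	"fmt"
	"strings"
)

func main() {
	output := "AppArmor parser version 3.0.0-beta1\nCopyright (C) 1999-2008 Novell Inc.\n"
	version, _, _ := strings.Cut(output, "\n") // keep the first line only
	if i := strings.LastIndex(version, " "); i >= 0 {
		version = version[i+1:] // last word: "3.0.0-beta1"
	}
	version, _, _ = strings.Cut(version, "-") // drop "-beta1"
	version, _, _ = strings.Cut(version, "~") // drop "~..." if present
	fmt.Println(strings.SplitN(version, ".", 4)) // [3 0 0]
}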

View File

@ -38,7 +38,9 @@ const (
MediaTypeDockerSchema2Config = "application/vnd.docker.container.image.v1+json"
MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json"
MediaTypeDockerSchema2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json"
// Checkpoint/Restore Media Types
MediaTypeContainerd1Checkpoint = "application/vnd.containerd.container.criu.checkpoint.criu.tar"
MediaTypeContainerd1CheckpointPreDump = "application/vnd.containerd.container.criu.checkpoint.predump.tar"
MediaTypeContainerd1Resource = "application/vnd.containerd.container.resource.tar"
@ -47,9 +49,12 @@ const (
MediaTypeContainerd1CheckpointOptions = "application/vnd.containerd.container.checkpoint.options.v1+proto"
MediaTypeContainerd1CheckpointRuntimeName = "application/vnd.containerd.container.checkpoint.runtime.name"
MediaTypeContainerd1CheckpointRuntimeOptions = "application/vnd.containerd.container.checkpoint.runtime.options+proto"
// Legacy Docker schema1 manifest
// MediaTypeDockerSchema1Manifest is the legacy Docker schema1 manifest
MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
// Encypted media types
// Encrypted media types
MediaTypeImageLayerEncrypted = ocispec.MediaTypeImageLayer + "+encrypted"
MediaTypeImageLayerGzipEncrypted = ocispec.MediaTypeImageLayerGzip + "+encrypted"
)
@ -93,16 +98,23 @@ func DiffCompression(ctx context.Context, mediaType string) (string, error) {
// parseMediaTypes splits the media type into the base type and
// an array of sorted extensions
func parseMediaTypes(mt string) (string, []string) {
func parseMediaTypes(mt string) (mediaType string, suffixes []string) {
if mt == "" {
return "", []string{}
}
mediaType, ext, ok := strings.Cut(mt, "+")
if !ok {
return mediaType, []string{}
}
s := strings.Split(mt, "+")
ext := s[1:]
sort.Strings(ext)
return s[0], ext
// Splitting the extensions following the mediatype "(+)gzip+encrypted".
// We expect this to be a limited list, so add an arbitrary limit (50).
//
// Note that DiffCompression is only using the last element, so perhaps we
// should split on the last "+" only.
suffixes = strings.SplitN(ext, "+", 50)
sort.Strings(suffixes)
return mediaType, suffixes
}
// IsNonDistributable returns true if the media type is non-distributable.
@ -118,8 +130,7 @@ func IsLayerType(mt string) bool {
}
// Parse Docker media types, strip off any + suffixes first
base, _ := parseMediaTypes(mt)
switch base {
switch base, _ := parseMediaTypes(mt); base {
case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip,
MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip:
return true
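
A sketch of what the rewritten parseMediaTypes returns for a suffixed media type; the logic is reproduced inline here under that assumption rather than imported from the package:

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	mt := "application/vnd.oci.image.layer.v1.tar+gzip+encrypted"
	mediaType, ext, ok := strings.Cut(mt, "+")
	suffixes := []string{}
	if ok {
		suffixes = strings.SplitN(ext, "+", 50)
		sort.Strings(suffixes)
	}
	fmt.Println(mediaType, suffixes) // application/vnd.oci.image.layer.v1.tar [encrypted gzip]
}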

View File

@ -141,10 +141,10 @@ func checkContainerLog(t *testing.T, log string, messages []string) {
lines := strings.Split(strings.TrimSpace(log), "\n")
require.Len(t, lines, len(messages), "log line number should match")
for i, line := range lines {
parts := strings.SplitN(line, " ", 2)
require.Len(t, parts, 2)
_, err := time.Parse(time.RFC3339Nano, parts[0])
ts, msg, ok := strings.Cut(line, " ")
require.True(t, ok)
_, err := time.Parse(time.RFC3339Nano, ts)
assert.NoError(t, err, "timestamp should be in RFC3339Nano format")
assert.Equal(t, messages[i], parts[1], "log content should match")
assert.Equal(t, messages[i], msg, "log content should match")
}
}
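
The test now cuts each log line on the first space and parses the left half as an RFC3339Nano timestamp; a sketch with a fabricated CRI-style log line:

package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	line := "2022-11-10T15:28:22.000000000-08:00 hello world"
	ts, msg, ok := strings.Cut(line, " ")
	t, err := time.Parse(time.RFC3339Nano, ts)
	fmt.Println(ok, err == nil, t.Year(), msg) // true true 2022 hello world
}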

View File

@ -509,14 +509,14 @@ func PidEnvs(pid int) (map[string]string, error) {
res := make(map[string]string)
for _, value := range values {
value := strings.TrimSpace(string(value))
value = bytes.TrimSpace(value)
if len(value) == 0 {
continue
}
parts := strings.SplitN(value, "=", 2)
if len(parts) == 2 {
res[parts[0]] = parts[1]
k, v, ok := strings.Cut(string(value), "=")
if ok {
res[k] = v
}
}
return res, nil

View File

@ -349,12 +349,7 @@ func ensureCNIAddRunning(t *testing.T, sbName string) error {
}
for _, arg := range strings.Split(args, ";") {
kv := strings.SplitN(arg, "=", 2)
if len(kv) != 2 {
continue
}
if kv[0] == "K8S_POD_NAME" && kv[1] == sbName {
if arg == "K8S_POD_NAME="+sbName {
return true, nil
}
}

View File

@ -443,13 +443,10 @@ func (c *gcContext) references(ctx context.Context, tx *bolt.Tx, node gc.Node, f
return c.sendLabelRefs(node.Namespace, bkt, fn)
case ResourceSnapshot, resourceSnapshotFlat:
parts := strings.SplitN(node.Key, "/", 2)
if len(parts) != 2 {
ss, name, ok := strings.Cut(node.Key, "/")
if !ok {
return fmt.Errorf("invalid snapshot gc key %s", node.Key)
}
ss := parts[0]
name := parts[1]
bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectSnapshots, []byte(ss), []byte(name))
if bkt == nil {
// Node may be created from dead edge
@ -563,7 +560,7 @@ func (c *gcContext) scanAll(ctx context.Context, tx *bolt.Tx, fn func(ctx contex
}
c.all(func(n gc.Node) {
fn(ctx, n)
_ = fn(ctx, n)
})
return nil
@ -598,14 +595,14 @@ func (c *gcContext) remove(ctx context.Context, tx *bolt.Tx, node gc.Node) error
case ResourceSnapshot:
sbkt := nsbkt.Bucket(bucketKeyObjectSnapshots)
if sbkt != nil {
parts := strings.SplitN(node.Key, "/", 2)
if len(parts) != 2 {
ss, key, ok := strings.Cut(node.Key, "/")
if !ok {
return fmt.Errorf("invalid snapshot gc key %s", node.Key)
}
ssbkt := sbkt.Bucket([]byte(parts[0]))
ssbkt := sbkt.Bucket([]byte(ss))
if ssbkt != nil {
log.G(ctx).WithField("key", parts[1]).WithField("snapshotter", parts[0]).Debug("remove snapshot")
return ssbkt.DeleteBucket([]byte(parts[1]))
log.G(ctx).WithField("key", key).WithField("snapshotter", ss).Debug("remove snapshot")
return ssbkt.DeleteBucket([]byte(key))
}
}
case ResourceLease:

View File

@ -187,23 +187,23 @@ func replaceOrAppendEnvValues(defaults, overrides []string) []string {
cache := make(map[string]int, len(defaults))
results := make([]string, 0, len(defaults))
for i, e := range defaults {
parts := strings.SplitN(e, "=", 2)
k, _, _ := strings.Cut(e, "=")
results = append(results, e)
cache[parts[0]] = i
cache[k] = i
}
for _, value := range overrides {
// Values w/o = means they want this env to be removed/unset.
if !strings.Contains(value, "=") {
if i, exists := cache[value]; exists {
k, _, ok := strings.Cut(value, "=")
if !ok {
if i, exists := cache[k]; exists {
results[i] = "" // Used to indicate it should be removed
}
continue
}
// Just do a normal set/update
parts := strings.SplitN(value, "=", 2)
if i, exists := cache[parts[0]]; exists {
if i, exists := cache[k]; exists {
results[i] = value
} else {
results = append(results, value)
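
The override loop above leans on Cut's behaviour when the separator is absent: before is the whole string, after is empty, and found is false, so a bare name such as "TERM" becomes the key to unset. A tiny illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	k, v, ok := strings.Cut("TERM", "=")
	fmt.Printf("%q %q %v\n", k, v, ok) // "TERM" "" false
	k, v, ok = strings.Cut("LANG=C", "=")
	fmt.Printf("%q %q %v\n", k, v, ok) // "LANG" "C" true
}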

View File

@ -80,15 +80,14 @@ func ParseProcPIDStatus(r io.Reader) (map[Type]uint64, error) {
scanner := bufio.NewScanner(r)
for scanner.Scan() {
line := scanner.Text()
pair := strings.SplitN(line, ":", 2)
if len(pair) != 2 {
k, v, ok := strings.Cut(line, ":")
if !ok {
continue
}
k := strings.TrimSpace(pair[0])
v := strings.TrimSpace(pair[1])
k = strings.TrimSpace(k)
switch k {
case "CapInh", "CapPrm", "CapEff", "CapBnd", "CapAmb":
ui64, err := strconv.ParseUint(v, 16, 64)
ui64, err := strconv.ParseUint(strings.TrimSpace(v), 16, 64)
if err != nil {
return nil, fmt.Errorf("failed to parse line %q", line)
}
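
A sketch of the rewritten loop body applied to a single /proc/<pid>/status capability line (the value is made up):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	line := "CapEff:\t0000003fffffffff"
	k, v, ok := strings.Cut(line, ":")
	if !ok {
		return
	}
	k = strings.TrimSpace(k)
	ui64, err := strconv.ParseUint(strings.TrimSpace(v), 16, 64)
	fmt.Println(k, ui64, err) // CapEff 274877906943 <nil>
}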

View File

@ -243,15 +243,15 @@ func WithDevices(config *runtime.ContainerConfig) oci.SpecOpts {
hostPath := device.HostPath
if strings.HasPrefix(hostPath, "class/") {
hostPath = strings.Replace(hostPath, "class/", "class://", 1)
hostPath = "class://" + strings.TrimPrefix(hostPath, "class/")
}
splitParts := strings.SplitN(hostPath, "://", 2)
if len(splitParts) != 2 {
idType, id, ok := strings.Cut(hostPath, "://")
if !ok {
return fmt.Errorf("unrecognised HostPath format %v, must match IDType://ID", device.HostPath)
}
o := oci.WithWindowsDevice(splitParts[0], splitParts[1])
o := oci.WithWindowsDevice(idType, id)
if err := o(ctx, client, c, s); err != nil {
return fmt.Errorf("failed adding device with HostPath %v: %w", device.HostPath, err)
}
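
A sketch of the HostPath normalization above, using an illustrative device-interface class GUID:

package main

import (
	"fmt"
	"strings"
)

func main() {
	hostPath := "class/5B45201D-F2F2-4F3B-85BB-30FF1F953599"
	if strings.HasPrefix(hostPath, "class/") {
		hostPath = "class://" + strings.TrimPrefix(hostPath, "class/")
	}
	idType, id, ok := strings.Cut(hostPath, "://")
	fmt.Println(idType, id, ok) // class 5B45201D-F2F2-4F3B-85BB-30FF1F953599 true
}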

View File

@ -233,11 +233,10 @@ func ParseAuth(auth *runtime.AuthConfig, host string) (string, string, error) {
if err != nil {
return "", "", err
}
fields := strings.SplitN(string(decoded), ":", 2)
if len(fields) != 2 {
user, passwd, ok := strings.Cut(string(decoded), ":")
if !ok {
return "", "", fmt.Errorf("invalid decoded auth: %q", decoded)
}
user, passwd := fields[0], fields[1]
return user, strings.Trim(passwd, "\x00"), nil
}
// TODO(random-liu): Support RegistryToken.
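
The decoded auth blob is expected to be "user:password"; a sketch with fabricated credentials, including the trailing-NUL trim the function performs on the password:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// "user:password" in base64
	decoded, err := base64.StdEncoding.DecodeString("dXNlcjpwYXNzd29yZA==")
	if err != nil {
		panic(err)
	}
	user, passwd, ok := strings.Cut(string(decoded), ":")
	fmt.Println(user, strings.Trim(passwd, "\x00"), ok) // user password true
}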

View File

@ -232,11 +232,10 @@ func ParseAuth(auth *runtime.AuthConfig, host string) (string, string, error) {
if err != nil {
return "", "", err
}
fields := strings.SplitN(string(decoded), ":", 2)
if len(fields) != 2 {
user, passwd, ok := strings.Cut(string(decoded), ":")
if !ok {
return "", "", fmt.Errorf("invalid decoded auth: %q", decoded)
}
user, passwd := fields[0], fields[1]
return user, strings.Trim(passwd, "\x00"), nil
}
// TODO(random-liu): Support RegistryToken.

View File

@ -104,17 +104,17 @@ func (c *Config) ValidateV2() error {
return nil
}
for _, p := range c.DisabledPlugins {
if len(strings.Split(p, ".")) < 4 {
if !strings.HasPrefix(p, "io.containerd.") || len(strings.SplitN(p, ".", 4)) < 4 {
return fmt.Errorf("invalid disabled plugin URI %q expect io.containerd.x.vx", p)
}
}
for _, p := range c.RequiredPlugins {
if len(strings.Split(p, ".")) < 4 {
if !strings.HasPrefix(p, "io.containerd.") || len(strings.SplitN(p, ".", 4)) < 4 {
return fmt.Errorf("invalid required plugin URI %q expect io.containerd.x.vx", p)
}
}
for p := range c.Plugins {
if len(strings.Split(p, ".")) < 4 {
if !strings.HasPrefix(p, "io.containerd.") || len(strings.SplitN(p, ".", 4)) < 4 {
return fmt.Errorf("invalid plugin key URI %q expect io.containerd.x.vx", p)
}
}
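
A sketch of which plugin URIs the tightened check accepts (the plugin names are illustrative):

package main

import (
	"fmt"
	"strings"
)

func valid(p string) bool {
	return strings.HasPrefix(p, "io.containerd.") && len(strings.SplitN(p, ".", 4)) >= 4
}

func main() {
	fmt.Println(valid("io.containerd.grpc.v1.cri")) // true
	fmt.Println(valid("io.containerd.monitor.v1"))  // true
	fmt.Println(valid("old_plugin"))                // false (hence the test fixture rename below)
}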

View File

@ -31,8 +31,8 @@ func TestMergeConfigs(t *testing.T) {
a := &Config{
Version: 2,
Root: "old_root",
RequiredPlugins: []string{"old_plugin"},
DisabledPlugins: []string{"old_plugin"},
RequiredPlugins: []string{"io.containerd.old_plugin.v1"},
DisabledPlugins: []string{"io.containerd.old_plugin.v1"},
State: "old_state",
OOMScore: 1,
Timeouts: map[string]string{"a": "1"},
@ -40,8 +40,9 @@ func TestMergeConfigs(t *testing.T) {
}
b := &Config{
Version: 2,
Root: "new_root",
RequiredPlugins: []string{"new_plugin1", "new_plugin2"},
RequiredPlugins: []string{"io.containerd.new_plugin1.v1", "io.containerd.new_plugin2.v1"},
OOMScore: 2,
Timeouts: map[string]string{"b": "2"},
StreamProcessors: map[string]StreamProcessor{"1": {Path: "3"}},
@ -54,10 +55,24 @@ func TestMergeConfigs(t *testing.T) {
assert.Equal(t, "new_root", a.Root)
assert.Equal(t, "old_state", a.State)
assert.Equal(t, 2, a.OOMScore)
assert.Equal(t, []string{"old_plugin", "new_plugin1", "new_plugin2"}, a.RequiredPlugins)
assert.Equal(t, []string{"old_plugin"}, a.DisabledPlugins)
assert.Equal(t, []string{"io.containerd.old_plugin.v1", "io.containerd.new_plugin1.v1", "io.containerd.new_plugin2.v1"}, a.RequiredPlugins)
assert.Equal(t, []string{"io.containerd.old_plugin.v1"}, a.DisabledPlugins)
assert.Equal(t, map[string]string{"a": "1", "b": "2"}, a.Timeouts)
assert.Equal(t, map[string]StreamProcessor{"1": {Path: "3"}, "2": {Path: "5"}}, a.StreamProcessors)
// Verify overrides for integers
// https://github.com/containerd/containerd/blob/v1.6.0/services/server/config/config.go#L322-L323
a = &Config{Version: 2, OOMScore: 1}
b = &Config{Version: 2, OOMScore: 0} // OOMScore "not set / default"
err = mergeConfig(a, b)
assert.NoError(t, err)
assert.Equal(t, 1, a.OOMScore)
a = &Config{Version: 2, OOMScore: 1}
b = &Config{Version: 2, OOMScore: 0} // OOMScore "not set / default"
err = mergeConfig(a, b)
assert.NoError(t, err)
assert.Equal(t, 1, a.OOMScore)
}
func TestResolveImports(t *testing.T) {