remotes/docker: close connection if no more data
Close the connection if there is no more data. This fixes a false alert fired by the image pull progress reporter. ``` dst = OpenWriter (--> Content Store) src = Fetch Open (--> Registry) Mark it as active request Copy(dst, src) (--> Keep updating total received bytes) ^ | (Active Request > 0, but total received bytes won't be updated) v defer src.Close() content.Commit(dst) ``` Before migrating to the transfer service, the CRI plugin doesn't limit global concurrent downloads for ImagePulls. Each ImagePull request has 3 concurrent goroutines to download blobs and 1 goroutine to unpack blobs. On filesystems like ext4 [1][1], the fsync from content.Commit may sync unrelated dirty pages to disk. When the host is running under IO pressure, content.Commit can take a long time and block other goroutines. If httpReadSeeker doesn't close the connection after io.EOF, this connection will be considered active. The pull progress reporter then reports that no bytes have been transferred and cancels the ImagePull. The original 1-minute timeout [2][2] comes from the kubelet setting. Since the CRI plugin can't limit the total concurrent downloads, this patch updates the 1-minute timeout to 5 minutes to prevent unexpected cancellation. [1]: https://lwn.net/Articles/842385/ [2]: https://github.com/kubernetes/kubernetes/blob/release-1.23/pkg/kubelet/config/flags.go#L45-L48 Signed-off-by: Wei Fu <fuweid89@gmail.com>
This commit is contained in:
parent
7f410ae05a
commit
80dd779deb
@ -30,6 +30,30 @@ import (
|
||||
"github.com/containerd/containerd/v2/pkg/deprecation"
|
||||
)
|
||||
|
||||
const (
|
||||
// defaultImagePullProgressTimeoutDuration is the default value of imagePullProgressTimeout.
|
||||
//
|
||||
// NOTE:
|
||||
//
|
||||
// This ImagePullProgressTimeout feature is ported from kubelet/dockershim's
|
||||
// --image-pull-progress-deadline. The original value is 1m0. Unlike docker
|
||||
// daemon, the containerd doesn't have global concurrent download limitation
|
||||
// before migrating to Transfer Service. If kubelet runs with concurrent
|
||||
// image pull, the node will run under IO pressure. The ImagePull process
|
||||
// could be impacted by itself, if the target image is a large one with a
|
||||
// lot of layers. And also both container's writable layers and image's storage
|
||||
// share one disk. The ImagePull process commits blob to content store
|
||||
// with fsync, which might bring the unrelated files' dirty pages into
|
||||
// disk in one transaction [1]. The 1m0 value isn't good enough. Based
|
||||
// on #9347 case and kubernetes community's usage [2], the default value
|
||||
// is updated to 5m0. If the end-user still runs into unexpected cancellations,
|
||||
// they need to configure it based on their environment.
|
||||
//
|
||||
// [1]: Fast commits for ext4 - https://lwn.net/Articles/842385/
|
||||
// [2]: https://github.com/kubernetes/kubernetes/blob/1635c380b26a1d8cc25d36e9feace9797f4bae3c/cluster/gce/util.sh#L882
|
||||
defaultImagePullProgressTimeoutDuration = 5 * time.Minute
|
||||
)
|
||||
|
||||
type SandboxControllerMode string
|
||||
|
||||
const (
|
||||
|
@ -19,8 +19,6 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
containerd "github.com/containerd/containerd/v2/client"
|
||||
"github.com/pelletier/go-toml/v2"
|
||||
"k8s.io/kubelet/pkg/cri/streaming"
|
||||
@ -100,7 +98,7 @@ func DefaultConfig() PluginConfig {
|
||||
},
|
||||
EnableCDI: false,
|
||||
CDISpecDirs: []string{"/etc/cdi", "/var/run/cdi"},
|
||||
ImagePullProgressTimeout: time.Minute.String(),
|
||||
ImagePullProgressTimeout: defaultImagePullProgressTimeoutDuration.String(),
|
||||
DrainExecSyncIOTimeout: "0s",
|
||||
EnableUnprivilegedPorts: true,
|
||||
EnableUnprivilegedICMP: true,
|
||||
|
@ -19,7 +19,6 @@ package config
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
containerd "github.com/containerd/containerd/v2/client"
|
||||
"k8s.io/kubelet/pkg/cri/streaming"
|
||||
@ -84,7 +83,7 @@ func DefaultConfig() PluginConfig {
|
||||
ImageDecryption: ImageDecryption{
|
||||
KeyModel: KeyModelNode,
|
||||
},
|
||||
ImagePullProgressTimeout: time.Minute.String(),
|
||||
ImagePullProgressTimeout: defaultImagePullProgressTimeoutDuration.String(),
|
||||
DrainExecSyncIOTimeout: "0s",
|
||||
}
|
||||
}
|
||||
|
@ -616,9 +616,9 @@ func (reporter *pullProgressReporter) start(ctx context.Context) {
|
||||
WithField("activeReqs", activeReqs).
|
||||
WithField("totalBytesRead", bytesRead).
|
||||
WithField("lastSeenBytesRead", lastSeenBytesRead).
|
||||
WithField("lastSeenTimestamp", lastSeenTimestamp).
|
||||
WithField("lastSeenTimestamp", lastSeenTimestamp.Format(time.RFC3339)).
|
||||
WithField("reportInterval", reportInterval).
|
||||
Tracef("progress for image pull")
|
||||
Debugf("progress for image pull")
|
||||
|
||||
if activeReqs == 0 || bytesRead > lastSeenBytesRead {
|
||||
lastSeenBytesRead = bytesRead
|
||||
|
@ -76,6 +76,16 @@ func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
|
||||
if _, err2 := hrs.reader(); err2 == nil {
|
||||
return n, nil
|
||||
}
|
||||
} else if err == io.EOF {
|
||||
// The CRI's imagePullProgressTimeout relies on responseBody.Close to
|
||||
// update the process monitor's status. If the err is io.EOF, close
|
||||
// the connection since there is no more available data.
|
||||
if hrs.rc != nil {
|
||||
if clsErr := hrs.rc.Close(); clsErr != nil {
|
||||
log.L.WithError(clsErr).Error("httpReadSeeker: failed to close ReadCloser after io.EOF")
|
||||
}
|
||||
hrs.rc = nil
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user