const managedHostsHeader …
const managedHostsHeaderWithHostNetwork …
const PodInitializing …
const ContainerCreating …
const kubeletUser …

// parseGetSubIdsOutput parses the output of the `getsubids` tool, which is used to query subordinate user or group ID ranges for
// a given user or group. getsubids produces one line for each configured mapping.
// Here we expect that there is a single mapping, and that the same values are used for the subordinate user and group ID ranges.
// The output is something like:
// $ getsubids kubelet
// 0: kubelet 65536 2147483648
// $ getsubids -g kubelet
// 0: kubelet 65536 2147483648
// (An illustrative parsing sketch appears below, after makeMounts.)
func parseGetSubIdsOutput(input string) (uint32, uint32, error) { … }

// getKubeletMappings returns the range of IDs that can be used to configure user namespaces.
// If subordinate user or group ID ranges are specified for the kubelet user and the getsubids tool
// is installed, then the single mapping specified for both user and group IDs is used.
// If the tool is not installed, or there are no IDs configured, the default mapping is returned.
// The default mapping includes the entire ID range except IDs below 65536.
func (kl *Kubelet) getKubeletMappings() (uint32, uint32, error) { … }

// listPodsFromDisk gets a list of pods that have data directories.
func (kl *Kubelet) listPodsFromDisk() ([]types.UID, error) { … }

// GetActivePods returns pods that have been admitted to the kubelet that
// are not fully terminated. This is mapped to the "desired state" of the
// kubelet - what pods should be running.
//
// WARNING: Currently this list does not include pods that have been force
// deleted but may still be terminating, which means resources assigned to
// those pods during admission may still be in use. See
// https://github.com/kubernetes/kubernetes/issues/104824
func (kl *Kubelet) GetActivePods() []*v1.Pod { … }

// getAllocatedPods returns the active pods (see GetActivePods), but updates the pods to their
// allocated state.
func (kl *Kubelet) getAllocatedPods() []*v1.Pod { … }

// makeBlockVolumes maps the raw block devices specified in the path of the container.
// Experimental.
func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVolumes kubecontainer.VolumeMap, blkutil volumepathhandler.BlockVolumePathHandler) ([]kubecontainer.DeviceInfo, error) { … }

// shouldMountHostsFile checks whether the node's /etc/hosts should be mounted.
// Kubernetes mounts /etc/hosts only if:
// - the container is not an infrastructure (pause) container
// - the container is not already mounting /etc/hosts
// Kubernetes will not mount /etc/hosts if:
// - the Pod is on the pod network and PodIP has not yet been set (e.g., the Pod sandbox is being created).
// - the Pod is on Windows and contains a hostProcess container.
func shouldMountHostsFile(pod *v1.Pod, podIPs []string) bool { … }

// makeMounts determines the mount points for the given container.
func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain string, podIPs []string, podVolumes kubecontainer.VolumeMap, hu hostutil.HostUtils, subpather subpath.Interface, expandEnvs []kubecontainer.EnvVar, supportsRRO bool, imageVolumes kubecontainer.ImageVolumes) ([]kubecontainer.Mount, func(), error) { … }
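// The following is a minimal, illustrative sketch of parsing a getsubids line such as
// "0: kubelet 65536 2147483648" into a (firstID, length) pair, as described above. It is
// not the real parseGetSubIdsOutput: the helper name parseSubIDLine is hypothetical, and
// the actual implementation performs additional validation. Assumes "fmt", "strconv", and
// "strings" are imported.
func parseSubIDLine(line string) (uint32, uint32, error) {
	fields := strings.Fields(strings.TrimSpace(line))
	if len(fields) != 4 {
		return 0, 0, fmt.Errorf("unexpected getsubids output %q: expected 4 fields", line)
	}
	firstID, err := strconv.ParseUint(fields[2], 10, 32)
	if err != nil {
		return 0, 0, fmt.Errorf("parsing first subordinate ID from %q: %w", line, err)
	}
	length, err := strconv.ParseUint(fields[3], 10, 32)
	if err != nil {
		return 0, 0, fmt.Errorf("parsing subordinate ID count from %q: %w", line, err)
	}
	return uint32(firstID), uint32(length), nil
}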
// translateMountPropagation transforms v1.MountPropagationMode to
// runtimeapi.MountPropagation.
func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.MountPropagation, error) { … }

// getEtcHostsPath returns the full host-side path to a pod's generated /etc/hosts file.
func getEtcHostsPath(podDir string) string { … }

// makeHostsMount makes the mountpoint for the hosts file that the containers
// in a pod are injected with. podIPs is provided instead of podIP because podIPs
// are present even if the dual-stack feature flag is not enabled.
func makeHostsMount(podDir string, podIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) (*kubecontainer.Mount, error) { … }

// ensureHostsFile ensures that the given hosts file has an up-to-date IP, host
// name, and domain name.
func ensureHostsFile(fileName string, hostIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) error { … }

// nodeHostsFileContent reads the content of the node's hosts file.
func nodeHostsFileContent(hostsFilePath string, hostAliases []v1.HostAlias) ([]byte, error) { … }

// managedHostsFileContent generates the content of the managed /etc/hosts file based on Pod IPs and other
// information.
func managedHostsFileContent(hostIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias) []byte { … }

func hostsEntriesFromHostAliases(hostAliases []v1.HostAlias) []byte { … }

// truncatePodHostnameIfNeeded truncates the pod hostname if it is longer than 63 characters.
// (An illustrative truncation sketch appears below, after containerResourceRuntimeValue.)
func truncatePodHostnameIfNeeded(podName, hostname string) (string, error) { … }

// GetOrCreateUserNamespaceMappings returns the configuration for the sandbox user namespace.
func (kl *Kubelet) GetOrCreateUserNamespaceMappings(pod *v1.Pod, runtimeHandler string) (*runtimeapi.UserNamespace, error) { … }

// GeneratePodHostNameAndDomain creates a hostname and domain name for a pod,
// given that pod's spec and annotations, or returns an error.
func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, error) { … }

// GetPodCgroupParent gets the pod's cgroup parent from the container manager.
func (kl *Kubelet) GetPodCgroupParent(pod *v1.Pod) string { … }

// GenerateRunContainerOptions generates the RunContainerOptions, which can be used by
// the container runtime to set parameters for launching a container.
func (kl *Kubelet) GenerateRunContainerOptions(ctx context.Context, pod *v1.Pod, container *v1.Container, podIP string, podIPs []string, imageVolumes kubecontainer.ImageVolumes) (*kubecontainer.RunContainerOptions, func(), error) { … }

var masterServices …

// getServiceEnvVarMap makes a map[string]string of env vars for services a
// pod in namespace ns should see.
func (kl *Kubelet) getServiceEnvVarMap(ns string, enableServiceLinks bool) (map[string]string, error) { … }

// makeEnvironmentVariables makes the environment variables for a pod in the given namespace.
func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) ([]kubecontainer.EnvVar, error) { … }

// podFieldSelectorRuntimeValue returns the runtime value of the given
// selector for a pod.
func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP string, podIPs []string) (string, error) { … }

// containerResourceRuntimeValue returns the value of the provided container resource.
func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, container *v1.Container) (string, error) { … }
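// truncateHostname is an illustrative sketch (not the kubelet's truncatePodHostnameIfNeeded)
// of capping a pod hostname at the 63-character DNS label limit described above. Trimming
// trailing '-' and '.' after the cut is an assumption; the real implementation may differ in
// details. Assumes "fmt" and "strings" are imported.
func truncateHostname(podName, hostname string) (string, error) {
	const maxLen = 63 // DNS label length limit
	if len(hostname) <= maxLen {
		return hostname, nil
	}
	// Cut at the limit and avoid ending the hostname with an invalid trailing character.
	truncated := strings.TrimRight(hostname[:maxLen], "-.")
	if truncated == "" {
		return "", fmt.Errorf("hostname for pod %q was invalid after truncation: %q", podName, hostname)
	}
	return truncated, nil
}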
// killPod instructs the container runtime to kill the pod. This method requires that
// the pod status contains the result of the last syncPod, otherwise it may fail to
// terminate newly created containers and sandboxes.
func (kl *Kubelet) killPod(ctx context.Context, pod *v1.Pod, p kubecontainer.Pod, gracePeriodOverride *int64) error { … }

// makePodDataDirs creates the data directories for the pod.
func (kl *Kubelet) makePodDataDirs(pod *v1.Pod) error { … }

// getPullSecretsForPod inspects the Pod and retrieves the referenced pull
// secrets. (An illustrative sketch appears below, after HandlePodCleanups.)
func (kl *Kubelet) getPullSecretsForPod(pod *v1.Pod) []v1.Secret { … }

// PodCouldHaveRunningContainers returns true if the pod with the given UID could still have running
// containers. This returns false if the pod has not yet been started or the pod is unknown.
func (kl *Kubelet) PodCouldHaveRunningContainers(pod *v1.Pod) bool { … }

// PodIsFinished returns true if SyncTerminatedPod is finished, i.e.
// all required node-level resources that a pod was consuming have
// been reclaimed by the kubelet.
func (kl *Kubelet) PodIsFinished(pod *v1.Pod) bool { … }

// filterOutInactivePods returns pods that are not in a terminal phase
// and are not known to be fully terminated. This method should only be used
// when the set of pods being filtered is upstream of the pod worker, i.e.
// the pods the pod manager is aware of.
func (kl *Kubelet) filterOutInactivePods(pods []*v1.Pod) []*v1.Pod { … }

// isAdmittedPodTerminal returns true if the provided config source pod is in
// a terminal phase, or if the Kubelet has already indicated the pod has reached
// a terminal phase but the config source has not accepted it yet. This method
// should only be used within the pod configuration loops that notify the pod
// worker; other components should treat the pod worker as authoritative.
func (kl *Kubelet) isAdmittedPodTerminal(pod *v1.Pod) bool { … }

// removeOrphanedPodStatuses removes obsolete entries in podStatus where
// the pod is no longer considered bound to this node.
func (kl *Kubelet) removeOrphanedPodStatuses(pods []*v1.Pod, mirrorPods []*v1.Pod) { … }

// HandlePodCleanups performs a series of cleanup tasks, including terminating
// pod workers, killing unwanted pods, and removing orphaned volumes/pod
// directories. No config changes are sent to pod workers while this method
// is executing, which means no new pods can appear. After this method completes
// the desired state of the kubelet should be reconciled with the actual state
// in the pod worker and other pod-related components.
//
// This function is executed by the main sync loop, so it must execute quickly
// and all nested calls should be asynchronous. Any slow reconciliation actions
// should be performed by other components (like the volume manager). The duration
// of this call is the minimum latency for static pods to be restarted if they
// are updated with a fixed UID (most should use a dynamic UID), and no config
// updates are delivered to the pod workers while this method is running.
func (kl *Kubelet) HandlePodCleanups(ctx context.Context) error { … }
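// pullSecretsForPod is an illustrative sketch of how the image pull secrets referenced by a
// pod can be collected. The get parameter is a hypothetical accessor standing in for the
// kubelet's secret manager; the real getPullSecretsForPod also records an event when a
// referenced secret cannot be retrieved.
func pullSecretsForPod(pod *v1.Pod, get func(namespace, name string) (*v1.Secret, error)) []v1.Secret {
	pullSecrets := []v1.Secret{}
	for _, ref := range pod.Spec.ImagePullSecrets {
		secret, err := get(pod.Namespace, ref.Name)
		if err != nil {
			// Skip secrets that cannot be retrieved; pulls of public images may still succeed.
			continue
		}
		pullSecrets = append(pullSecrets, *secret)
	}
	return pullSecrets
}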
// filterTerminalPodsToDelete returns terminal pods which are ready to be
// deleted by the status manager, but are not in pod workers.
// First, the check for deletionTimestamp is a performance optimization, as we
// don't need to do anything with terminal pods without a deletionTimestamp.
// Second, the check for terminal pods is to avoid race conditions of triggering
// deletion on Pending pods which are not yet added to pod workers.
// Third, we skip pods already known to pod workers because the lifecycle of
// such pods is handled by the pod workers.
// Finally, we skip runtime pods, as their termination is handled separately in
// the HandlePodCleanups routine.
func (kl *Kubelet) filterTerminalPodsToDelete(allPods []*v1.Pod, runningRuntimePods []*kubecontainer.Pod, workingPods map[types.UID]PodWorkerSync) map[types.UID]*v1.Pod { … }

// splitPodsByStatic separates a list of desired pods from the pod manager into
// regular or static pods. Mirror pods are not valid config sources (a mirror pod
// being created cannot cause the Kubelet to start running a static pod) and are
// excluded.
func splitPodsByStatic(pods []*v1.Pod) (regular, static []*v1.Pod) { … }

// validateContainerLogStatus returns the container ID for the desired container to retrieve logs for, based on the state
// of the container. The previous flag returns only the logs of the last terminated container; otherwise, the current
// running container is preferred over a previous termination. If info about the container is not available, a specific
// error is returned to the end user.
func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *v1.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) { … }

// GetKubeletContainerLogs returns logs from the container.
// TODO: this method is returning logs of random container attempts, when it should be returning the most recent attempt
// or all of them.
func (kl *Kubelet) GetKubeletContainerLogs(ctx context.Context, podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error { … }

// getPhase returns the phase of a pod given its container info.
func getPhase(pod *v1.Pod, info []v1.ContainerStatus, podIsTerminal bool) v1.PodPhase { … }

func deleteCustomResourceFromResourceRequirements(target *v1.ResourceRequirements) { … }

func (kl *Kubelet) determinePodResizeStatus(pod *v1.Pod, podStatus *v1.PodStatus) v1.PodResizeStatus { … }

// generateAPIPodStatus creates the final API pod status for a pod, given the
// internal pod status. This method should only be called from within sync*Pod methods.
func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus, podIsTerminal bool) v1.PodStatus { … }

// sortPodIPs returns the PodIPs sorted and truncated by the cluster IP family preference.
// The runtime pod status may have an arbitrary number of IPs, in an arbitrary order.
// PodIPs are obtained by: func (m *kubeGenericRuntimeManager) determinePodSandboxIPs()
// Pick out the first returned IP of the same IP family as the node IP
// first, followed by the first IP of the opposite IP family (if any),
// and use them for the Pod.Status.PodIPs and the Downward API environment variables.
// (An illustrative ordering sketch appears below, after convertStatusToAPIStatus.)
func (kl *Kubelet) sortPodIPs(podIPs []string) []string { … }

// convertStatusToAPIStatus initializes an API PodStatus for the given pod from
// the given internal pod status and the previous state of the pod from the API.
// It is purely transformative and does not alter the kubelet state at all.
func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus, oldPodStatus v1.PodStatus) *v1.PodStatus { … }
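// orderPodIPs is an illustrative sketch of the family-ordering rule described for sortPodIPs:
// keep the first IP of the same family as the node IP, then the first IP of the opposite
// family, and drop the rest. The nodeIP parameter is a stand-in; the real sortPodIPs reads
// the kubelet's node IPs and uses netutils for the family checks. Assumes "net" is imported.
func orderPodIPs(podIPs []string, nodeIP string) []string {
	nodeParsed := net.ParseIP(nodeIP)
	nodeIsIPv6 := nodeParsed != nil && nodeParsed.To4() == nil
	var sameFamily, otherFamily []string
	for _, ip := range podIPs {
		parsed := net.ParseIP(ip)
		if parsed == nil {
			continue // ignore unparseable entries
		}
		if (parsed.To4() == nil) == nodeIsIPv6 {
			sameFamily = append(sameFamily, ip)
		} else {
			otherFamily = append(otherFamily, ip)
		}
	}
	ips := make([]string, 0, 2)
	if len(sameFamily) > 0 {
		ips = append(ips, sameFamily[0])
	}
	if len(otherFamily) > 0 {
		ips = append(ips, otherFamily[0])
	}
	return ips
}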
// convertToAPIContainerStatuses converts the given internal container
// statuses into API container statuses.
func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecontainer.PodStatus, previousStatus []v1.ContainerStatus, containers []v1.Container, hasInitContainers, isInitContainer bool) []v1.ContainerStatus { … }

// ServeLogs returns logs of the current machine.
func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) { … }

// findContainer finds and returns the container with the given pod ID, full name, and container name.
// It returns nil if not found.
func (kl *Kubelet) findContainer(ctx context.Context, podFullName string, podUID types.UID, containerName string) (*kubecontainer.Container, error) { … }

// RunInContainer runs a command in a container and returns the combined stdout and stderr as a byte slice.
func (kl *Kubelet) RunInContainer(ctx context.Context, podFullName string, podUID types.UID, containerName string, cmd []string) ([]byte, error) { … }

// GetExec gets the URL the exec will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetExec(ctx context.Context, podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) { … }

// GetAttach gets the URL the attach will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetAttach(ctx context.Context, podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) { … }

// GetPortForward gets the URL the port-forward will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) { … }

// cleanupOrphanedPodCgroups removes cgroups that should no longer exist.
// It reconciles the cached state of cgroupPods with the specified list of runningPods.
func (kl *Kubelet) cleanupOrphanedPodCgroups(pcm cm.PodContainerManager, cgroupPods map[types.UID]cm.CgroupName, possiblyRunningPods map[types.UID]sets.Empty) { … }

func (kl *Kubelet) runtimeClassSupportsRecursiveReadOnlyMounts(pod *v1.Pod) bool { … }

// runtimeHandlerSupportsRecursiveReadOnlyMounts checks whether the runtime handler supports recursive read-only mounts.
// The kubelet feature gate is not checked here.
// (An illustrative lookup sketch appears below, after resolveRecursiveReadOnly.)
func runtimeHandlerSupportsRecursiveReadOnlyMounts(runtimeHandlerName string, runtimeHandlers []kubecontainer.RuntimeHandler) bool { … }

// resolveRecursiveReadOnly resolves the recursive read-only mount mode.
func resolveRecursiveReadOnly(m v1.VolumeMount, runtimeSupportsRRO bool) (bool, error) { … }
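// handlerSupportsRRO is an illustrative sketch of the lookup performed by
// runtimeHandlerSupportsRecursiveReadOnlyMounts: find the handler reported by the runtime
// with the given name and return its recursive read-only capability. The field name
// SupportsRecursiveReadOnlyMounts on kubecontainer.RuntimeHandler is an assumption; the
// real check may differ.
func handlerSupportsRRO(runtimeHandlerName string, runtimeHandlers []kubecontainer.RuntimeHandler) bool {
	for _, handler := range runtimeHandlers {
		if handler.Name == runtimeHandlerName {
			return handler.SupportsRecursiveReadOnlyMounts
		}
	}
	// An unknown handler is treated as not supporting recursive read-only mounts.
	return false
}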