Kubernetes 调度器实现原理( 六 )


package plugins

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/scheduler/framework"

	"simple-scheduler/pkg/scheduler/apis/config"
	"simple-scheduler/pkg/scheduler/apis/config/validation"
)

// Name is the name under which this plugin is registered with the
// scheduler framework and referenced from KubeSchedulerConfiguration.
const Name = "sample-plugin"

// Sample is a demo scheduler plugin that implements the PreFilter,
// Filter and PreBind extension points. It only logs what it sees;
// it never rejects a pod or a node.
type Sample struct {
	args   *config.SampleArgs // plugin args decoded from pluginConfig
	handle framework.Handle   // gives access to the scheduler's shared snapshot
}

// Name returns the plugin name; required by framework.Plugin.
func (s *Sample) Name() string {
	return Name
}

// PreFilter runs once per scheduling cycle before the per-node Filter
// calls. This demo implementation logs the pod and imposes no
// constraints (nil result, nil status means "success, no pre-computed
// state").
func (s *Sample) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) {
	klog.V(3).Infof("prefilter pod: %v", pod.Name)
	return nil, nil
}

// Filter is called once per candidate node. This demo implementation
// logs the pod/node pair and accepts every node.
func (s *Sample) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
	klog.V(3).Infof("filter pod: %v, node: %v", pod.Name, nodeInfo.Node().Name)
	return framework.NewStatus(framework.Success, "")
}

// PreBind runs after a node has been chosen but before the bind call.
// It looks the node up in the scheduler's snapshot and logs it.
// Returning an Error status aborts the binding of this pod.
func (s *Sample) PreBind(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
	nodeInfo, err := s.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
	if err != nil {
		return framework.NewStatus(framework.Error, fmt.Sprintf("prebind get node: %s info error: %s", nodeName, err.Error()))
	}
	klog.V(3).Infof("prebind node info: %+v", nodeInfo.Node())
	return framework.NewStatus(framework.Success, "")
}

// New is the plugin factory registered with the framework. fpArgs is the
// runtime.Object decoded from the profile's pluginConfig entry; it must
// be a *config.SampleArgs and must pass validation.
func New(fpArgs runtime.Object, fh framework.Handle) (framework.Plugin, error) {
	args, ok := fpArgs.(*config.SampleArgs)
	if !ok {
		return nil, fmt.Errorf("got args of type %T, want *SampleArgs", fpArgs)
	}
	if err := validation.ValidateSamplePluginArgs(*args); err != nil {
		return nil, err
	}
	return &Sample{
		args:   args,
		handle: fh,
	}, nil
}

完整代码可以前往仓库 https://github.com/cnych/sample-scheduler-framework 获取。
这里还定义了一个调度器插件的参数:
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type SampleArgs struct {
	metav1.TypeMeta

	FavoriteColor  string `json:"favorite_color,omitempty"`
	FavoriteNumber int    `json:"favorite_number,omitempty"`
	ThanksTo       string `json:"thanks_to,omitempty"`
}

在旧版本中提供了 framework.DecodeInto 函数可以直接将我们传递进来的参数进行转换,但是新版本必须是一个 runtime.Object 对象,所以必须实现对应的深拷贝方法,所以我们在结构体上面增加了 +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 这个注解,然后通过 Kubernetes 源码中提供的 hack/update-gen.sh 脚本就可以自动生成对应的深拷贝方法。
同样在文件 register.go 中,我们需要在对 AddKnownTypes 函数的调用中添加 SampleArgs 。另外,请注意在 main.go 文件中我们导入了这里定义的 schema,它使用我们在 pkg/apis 中引入的所有配置初始化方案/配置文件 。
实现完成后,编译打包成镜像即可,然后我们就可以当成普通的应用用一个 Deployment 控制器来部署即可,由于我们需要去获取集群中的一些资源对象,所以当然需要申请 RBAC 权限,然后同样通过 --config 参数来配置我们的调度器,同样还是使用一个 KubeSchedulerConfiguration 资源对象配置,可以通过 plugins 来启用或者禁用我们实现的插件,也可以通过 pluginConfig 来传递一些参数值给插件:
# sample-scheduler.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: sample-scheduler-clusterrole
rules:
  - apiGroups:
      - ""
    resources:
      - endpoints
      - events
    verbs:
      - create
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - delete
      - get
      - list
      - watch
      - update
  - apiGroups:
      - ""
    resources:
      - bindings
      - pods/binding
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - pods/status
    verbs:
      - patch
      - update
  - apiGroups:
      - ""
    resources:
      - replicationcontrollers
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - apps
      - extensions
    resources:
      - replicasets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - apps
    resources:
      - statefulsets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - policy
    resources:
      - poddisruptionbudgets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - persistentvolumeclaims
      - persistentvolumes
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "storage.k8s.io"
    resources:
      - storageclasses
      - csinodes
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "coordination.k8s.io"
    resources:
      - leases
    verbs:
      - create
      - get
      - list
      - update
  - apiGroups:
      - "events.k8s.io"
    resources:
      - events
    verbs:
      - create
      - patch
      - update
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: sample-scheduler-sa
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: sample-scheduler-clusterrolebinding
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: sample-scheduler-clusterrole
subjects:
  - kind: ServiceAccount
    name: sample-scheduler-sa
    namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: scheduler-config
  namespace: kube-system
data:
  scheduler-config.yaml: |
    apiVersion: kubescheduler.config.k8s.io/v1
    kind: KubeSchedulerConfiguration
    leaderElection:
      leaderElect: true
      leaseDuration: 15s
      renewDeadline: 10s
      resourceLock: endpointsleases
      resourceName: sample-scheduler
      resourceNamespace: kube-system
      retryPeriod: 2s
    profiles:
      - schedulerName: sample-scheduler
        plugins:
          preFilter:
            enabled:
              - name: "sample-plugin"
          filter:
            enabled:
              - name: "sample-plugin"
        pluginConfig:
          - name: sample-plugin
            args:
              # runtime.Object
              favorColor: "#326CE5"
              favorNumber: 7
              thanksTo: "Kubernetes"
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: sample-scheduler
  namespace: kube-system
  labels:
    component: sample-scheduler
spec:
  selector:
    matchLabels:
      component: sample-scheduler
  template:
    metadata:
      labels:
        component: sample-scheduler
    spec:
      serviceAccountName: sample-scheduler-sa
      priorityClassName: system-cluster-critical
      volumes:
        - name: scheduler-config
          configMap:
            name: scheduler-config
      containers:
        - name: scheduler
          image: cnych/sample-scheduler:v0.26.4
          imagePullPolicy: IfNotPresent
          command:
            - sample-scheduler
            - --config=/etc/kubernetes/scheduler-config.yaml
            - --v=3
          volumeMounts:
            - name: scheduler-config
              mountPath: /etc/kubernetes


推荐阅读