11
tests/README.md
Normal file
11
tests/README.md
Normal file
@@ -0,0 +1,11 @@
|
||||
|
||||
```bash
|
||||
|
||||
docker build --network host -t devkitpro/devkita64 - << EOF
|
||||
FROM devkitpro/devkita64:20231108
|
||||
RUN sed -i 's/deb.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list \
|
||||
&& apt-get update && apt-get install -y --no-install-recommends nodejs \
|
||||
&& rm -rf /var/lib/apt/lists/* /usr/share/man/*
|
||||
EOF
|
||||
|
||||
```
|
||||
62
tests/ctrl/main.go
Normal file
62
tests/ctrl/main.go
Normal file
@@ -0,0 +1,62 @@
|
||||
package tests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/config"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
)
|
||||
|
||||
// podReconcile reconciles ReplicaSet objects; Pod events are mapped onto
// it via the Watches registration in main. The embedded client.Client is
// populated by the manager through InjectClient.
type podReconcile struct {
	client.Client
}
|
||||
|
||||
func (r *podReconcile) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
rs := &appsv1.ReplicaSet{}
|
||||
err := r.Get(ctx, req.NamespacedName, rs)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
// DoSometing
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// InjectClient implements the controller-runtime inject.Client interface:
// the manager calls it while building the controller, handing the
// reconciler the manager's cache-backed client.
//
// NOTE(review): the inject mechanism was removed in controller-runtime
// v0.15; on newer versions pass mgr.GetClient() to the reconciler instead.
func (r *podReconcile) InjectClient(c client.Client) error {
	r.Client = c
	return nil
}
|
||||
|
||||
func main() {
|
||||
log := ctrl.Log.WithName("entrypoint")
|
||||
|
||||
// 初始化 manager,同时生成一个默认配置的 Cache
|
||||
mgr, err := ctrl.NewManager(config.GetConfigOrDie(), ctrl.Options{})
|
||||
if err != nil {
|
||||
log.Error(err, "unable to set up overall controller manager")
|
||||
}
|
||||
// 注册 scheme
|
||||
if err = corev1.AddToScheme(mgr.GetScheme()); err != nil {
|
||||
log.Error(err, "unable to add scheme")
|
||||
}
|
||||
|
||||
// 创建新的 controller
|
||||
err = ctrl.NewControllerManagedBy(mgr).For(&appsv1.ReplicaSet{}).
|
||||
// 当 Pod 变更时,也触发 ReplicaSet
|
||||
Watches(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForObject{}).
|
||||
Complete(&podReconcile{})
|
||||
if err != nil {
|
||||
log.Error(err, "unable to add controller")
|
||||
}
|
||||
|
||||
// 启动 manager
|
||||
if err = mgr.Start(ctrl.SetupSignalHandler()); err != nil {
|
||||
log.Error(err, "unable to run manager")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
52
tests/informer/informer.go
Normal file
52
tests/informer/informer.go
Normal file
@@ -0,0 +1,52 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"log"
|
||||
"path/filepath"
|
||||
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/util/homedir"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var kubeconfig string
|
||||
|
||||
flag.StringVar(&kubeconfig, "kubeconfig", filepath.Join(homedir.HomeDir(), ".kube", "config"), "(optional) absolute path to the kubeconfig file")
|
||||
flag.Parse()
|
||||
// use the current context in kubeconfig
|
||||
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
AddPod := func(obj interface{}) {
|
||||
|
||||
}
|
||||
DeletePod := func(obj interface{}) {
|
||||
|
||||
}
|
||||
UpdatePod := func(oldObj, newObj interface{}) {
|
||||
|
||||
}
|
||||
|
||||
clientset := kubernetes.NewForConfigOrDie(config)
|
||||
// 基于GVK 操作资源,假设需要操作数十种不同资源时,我们需要为每一种资源实现各自的函数
|
||||
podInformer := informers.NewSharedInformerFactory(clientset, 0).
|
||||
Core().V1().Pods().Informer()
|
||||
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: AddPod,
|
||||
DeleteFunc: DeletePod,
|
||||
UpdateFunc: UpdatePod,
|
||||
})
|
||||
// 启动informer
|
||||
podInformer.Run(ctx.Done())
|
||||
cache.WaitForCacheSync(ctx.Done(), podInformer.HasSynced)
|
||||
// 此处没有使用workqueue,但一般都是会用workqueue 增强处理逻辑的
|
||||
}
|
||||
55
tests/watch/watch.go
Normal file
55
tests/watch/watch.go
Normal file
@@ -0,0 +1,55 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"path/filepath"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/util/homedir"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var kubeconfig string
|
||||
|
||||
flag.StringVar(&kubeconfig, "kubeconfig", filepath.Join(homedir.HomeDir(), ".kube", "config"), "(optional) absolute path to the kubeconfig file")
|
||||
flag.Parse()
|
||||
// use the current context in kubeconfig
|
||||
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
// 创建 clientset
|
||||
clientset, err := kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// 使用 clientsent 监听 kube-system 下 Pod 对象
|
||||
list, err := clientset.CoreV1().Pods("kube-system").
|
||||
Watch(ctx, metav1.ListOptions{Watch: true})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
for event := range list.ResultChan() {
|
||||
pod := event.Object.(*corev1.Pod)
|
||||
switch event.Type {
|
||||
case watch.Added:
|
||||
fmt.Printf("Pod %s added\n", pod.Name)
|
||||
// todo: reconcile logic goes here
|
||||
case watch.Modified:
|
||||
fmt.Printf("Pod %s modified\n", pod.Name)
|
||||
// todo: reconcile logic goes here
|
||||
case watch.Deleted:
|
||||
fmt.Printf("Pod %s deleted\n", pod.Name)
|
||||
// todo: reconcile logic goes here
|
||||
}
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user