Network Control in a k8s Cluster with Canal

1 Introduction
  
Let's get straight to the practical content.
  
  
Canal combines Calico and Flannel. Calico not only delivers good network performance but also supports network policy enforcement; Flannel, which many clusters run, only provides network connectivity and has no policy control. Canal was created to bring network policy control to Flannel-based networks.
  
Network policies: egress rules control outbound traffic, ingress rules control inbound traffic. Rules can be defined against IP blocks (CIDRs), against namespaces, or against individual pods.
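The examples below exercise the IP-block and pod-label forms. As a minimal sketch of the namespace-based form (the env: prod label here is hypothetical, not something used later in this article), a policy admitting ingress only from pods in matching namespaces would look roughly like this:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-from-prod-namespaces
  namespace: shengchan
spec:
  podSelector: {}          # applies to every pod in this namespace
  ingress:
  - from:
    - namespaceSelector:   # admit traffic from namespaces labeled env=prod
        matchLabels:
          env: prod
  policyTypes:
  - Ingress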
  
2 Deploying Canal
  
curl https://docs.projectcalico.org/v3.6/getting-started/kubernetes/installation/hosted/canal/canal.yaml -O
  
  kubectl apply -f canal.yaml
  
1) By default Canal manages the 10.244.0.0/16 network. If the pod network you chose when installing the k8s cluster is a different subnet, you need to edit the downloaded manifest (see the snippet after this list).
  
2) We deploy Canal sharing the etcd that the k8s cluster itself uses.
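For reference, the pod CIDR sits in the flannel network config embedded in the manifest's ConfigMap; in the v3.6 canal.yaml it looks roughly like this (verify against the copy you actually downloaded):

net-conf.json: |
  {
    "Network": "10.244.0.0/16",
    "Backend": {
      "Type": "vxlan"
    }
  }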
  
Check the pods that were created:
  
  # kubectl get pods -n kube-system
  
  NAME READY STATUS RESTARTS AGE
  
  canal-9lmw8 2/2 Running 0 4m50s
  
  canal-ln6kg 2/2 Running 0 4m50s
  
  canal-mx6tn 2/2 Running 0 4m50s
  
  coredns-fb8b8dccf-lfbkh 1/1 Running 2 10d
  
  coredns-fb8b8dccf-t2kdz 1/1 Running 2 10d
  
  etcd-master-1 1/1 Running 2 10d
  
  kube-apiserver-master-1 1/1 Running 2 10d
  
  kube-controller-manager-master-1 1/1 Running 2 10d
  
  kube-flannel-ds-amd64-df7gk 1/1 Running 3 9d
  
  kube-flannel-ds-amd64-dzxfd 1/1 Running 3 9d
  
  kube-flannel-ds-amd64-mgw2m 1/1 Running 2 9d
  
  kube-proxy-47d6q 1/1 Running 2 10d
  
  kube-proxy-jztrs 1/1 Running 3 10d
  
  kube-proxy-rt4xx 1/1 Running 3 10d
  
  kube-scheduler-master-1 1/1 Running 2 10d
  
  kubernetes-dashboard-5f7b999d65-8h79h 1/1 Running 0 21h
  
3 Create two namespaces for the experiments
  
  kubectl create namespace shengchan
  
  kubectl create namespace test
  
4 Create and test network policies
  
Ingress and egress rules can be used together; when both kinds are present, policyTypes determines which of them the policy actually enforces.
  
For example, create a policy that denies all inbound traffic in the shengchan namespace by default:
  
# cat ingress-deny.yaml

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all-ingress
  namespace: shengchan
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  
kubectl apply -f ingress-deny.yaml
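To confirm the object was created, list the NetworkPolicy resources in the namespace:

# kubectl get networkpolicy -n shengchan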
  
Create a pod in the shengchan namespace:
  
# cat pod-networkpolicy1.yaml

apiVersion: v1
kind: Pod
metadata:
  name: pod-test
spec:
  containers:
  - name: pod-test-policy
    image: nginx:1.10
  
  kubectl apply -f pod-networkpolicy1.yaml -n shengchan
  
Create a pod in the test namespace:
  
  kubectl apply -f pod-networkpolicy1.yaml -n test
  
Check the IP addresses of the two pods:
  
  # kubectl get pods -o wide -n shengchan
  
  NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
  
  pod-test 1/1 Running 0 16s 10.244.1.2 node2 <none> <none>
  
# kubectl get pods -o wide -n test
  
  NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
  
  pod-test 1/1 Running 0 9s 10.244.2.2 node1 <none> <none>
  
Ping each of the two IPs from the host:
  
  # ping 10.244.1.2 -c 1
  
  PING 10.244.1.2 (10.244.1.2) 56(84) bytes of data.
  
  ^C
  
  --- 10.244.1.2 ping statistics ---
  
  1 packets transmitted, 0 received, 100% packet loss, time 0ms
  
  # ping 10.244.2.2 -c 1
  
  PING 10.244.2.2 (10.244.2.2) 56(84) bytes of data.
  
  64 bytes from 10.244.2.2: icmp_seq=1 ttl=63 time=0.660 ms
  
  --- 10.244.2.2 ping statistics ---
  
  1 packets transmitted, 1 received, 0% packet loss, time 0ms
  
  rtt min/avg/max/mdev = 0.660/0.660/0.660/0.000 ms
  
The pod in the shengchan namespace is now denied access, while the pod in the test namespace is reachable as usual.
  
What we have at this point is an inbound access policy scoped to an entire namespace.
  
5 Open up the default-deny policy above
  
Modify the manifest:
  
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all-ingress
  namespace: shengchan
spec:
  podSelector: {}
  ingress:
  - {}
  policyTypes:
  - Ingress
  
  kubectl apply -f ingress-deny.yaml
  
Test 10.244.1.2 again:
  
# curl -I 10.244.1.2
  
  HTTP/1.1 200 OK
  
  Server: nginx/1.10.3
  
  Date: Sat, 27 Apr 2019 16:58:04 GMT
  
  Content-Type: text/html
  
  Content-Length: 612
  
  Last-Modified: Tue, 31 Jan 2017 15:01:11 GMT
  
  Connection: keep-alive
  
  ETag: "5890a6b7-264"
  
  Accept-Ranges: bytes
  
# curl -I 10.244.2.2
  
  HTTP/1.1 200 OK
  
  Server: nginx/1.10.3
  
  Date: Sat, 27 Apr 2019 16:58:07 GMT
  
Content-Type: text/html
  
  Content-Length: 612
  
  Last-Modified: Tue, 31 Jan 2017 15:01:11 GMT
  
  Connection: keep-alive
  
  ETag: "5890a6b7-264"
  
  Accept-Ranges: bytes
  
This shows the policy now lets traffic through: the empty ingress rule (- {}) matches every source, so all inbound traffic is allowed.
  
6 Allow specific inbound traffic
  
Pod labels determine which pods' inbound traffic is allowed.
  
1) Restore the default deny-all policy
  
# cat ingress-deny.yaml

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all-ingress
  namespace: shengchan
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  
kubectl apply -f ingress-deny.yaml
  
2) Label the pod in shengchan
  
kubectl label pods pod-test app=hello -n shengchan
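You can verify the label was applied:

# kubectl get pods -n shengchan --show-labels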
  
3) Create a new policy
  
# vim ingress-allow.yml

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-hello-ingress
  namespace: shengchan
spec:
  podSelector:
    matchLabels:
      app: hello
  ingress:
  - from:
    - ipBlock:
        cidr: 10.244.0.0/16
        except:
        - 10.244.2.2/32
    ports:
    - protocol: TCP
      port: 80
  
Explanation: we define a new policy that matches our existing pod by its label (that pod is currently under the deny-all rule) and add an ingress rule. Without a from clause, every source would be able to reach pods carrying this label. For the test, the from clause admits the 10.244.0.0/16 block while excluding 10.244.2.2/32; since we already have a pod with exactly that IP, the exception can be verified. ports specifies the particular ports to open (here TCP 80).
  
4) Test access before creating the policy
  
  # kubectl get pods -n shengchan -o wide
  
  NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
  
  pod-test 1/1 Running 0 62m 10.244.1.2 node2 <none> <none>
  
  # curl 10.244.1.2
  
No response comes back, which shows the pod is still unreachable.
  
5) Test after creating it
  
  kubectl apply -f ingress-allow.yml
  
# curl -I 10.244.1.2
  
  HTTP/1.1 200 OK
  
  Server: nginx/1.10.3
  
  Date: Sat, 27 Apr 2019 17:32:15 GMT
  
  Content-Type: text/html
  
  Content-Length: 612
  
  Last-Modified: Tue, 31 Jan 2017 15:01:11 GMT
  
  Connection: keep-alive
  
  ETag: "5890a6b7-264"
  
  Accept-Ranges: bytes
  
The request succeeds, showing traffic is now allowed.
  
If you are interested, you can test the except restriction inside the 10.244.0.0/16 range yourself; a sketch follows.
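A minimal sketch of such a test, assuming the nginx:1.10 image in the test pod provides curl (it may not, in which case any client in a pod whose IP falls inside the excluded block serves the same purpose):

# run from the excluded address 10.244.2.2; the request should time out
kubectl exec -it pod-test -n test -- curl -I --max-time 5 10.244.1.2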
  
7 Egress rules
  
1) Create a policy that denies all outbound traffic
  
# cat egress.yml

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all-egress
  namespace: test
spec:
  podSelector: {}
  policyTypes:
  - Egress
  
  kubectl apply -f egress.yml
  
2) Create a pod in the test namespace
  
  kubectl apply -f pod-networkpolicy1.yaml -n test
  
3) Look up the pod IP, then exec into the container and ping a known-good pod IP
  
# kubectl get pods -o wide
  
  NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
  
  net-test-5764c456cb-2c9df 1/1 Running 24 24h 10.244.2.22 node1 <none> <none>
  
  net-test-5764c456cb-ng6vh 1/1 Running 46 6d5h 10.244.1.21 node2 <none> <none>
  
# kubectl get pods -o wide -n test
  
  NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
  
  pod-test 1/1 Running 0 86m 10.244.2.2 node1 <none> <none>
  
  kubectl exec -it pod-test -n test -- /bin/bash
  
root@pod-test:/# ping 10.244.1.21
  
  PING 10.244.1.21 (10.244.1.21): 56 data bytes
  
  ^C--- 10.244.1.21 ping statistics ---
  
  3 packets transmitted, 0 packets received, 100% packet loss
  
This shows the egress policy has taken effect.
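Worth noting: a deny-all-egress policy also blocks DNS lookups from the pods it selects. A common companion policy, sketched here under the assumption that your cluster DNS listens on the conventional port 53, re-opens DNS while leaving everything else blocked:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-dns-egress
  namespace: test
spec:
  podSelector: {}
  egress:
  - ports:              # no "to" clause: any destination, but only port 53
    - protocol: UDP
      port: 53
    - protocol: TCP
      port: 53
  policyTypes:
  - Egress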
  
4) Modify the file to allow all outbound traffic
  
# cat egress.yml

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all-egress
  namespace: test
spec:
  podSelector: {}
  egress:
  - {}
  policyTypes:
  - Egress
  
  kubectl apply -f egress.yml
  
Test:
  
root@pod-test:/# ping 10.244.1.21 -c 1
  
  PING 10.244.1.21 (10.244.1.21): 56 data bytes
  
  64 bytes from 10.244.1.21: icmp_seq=0 ttl=62 time=0.939 ms
  
  --- 10.244.1.21 ping statistics ---
  
  1 packets transmitted, 1 packets received, 0% packet loss
  
  round-trip min/avg/max/stddev = 0.939/0.939/0.939/0.000 ms
  
This shows outbound traffic is allowed again.
  
8 More complex policy rules deserve deeper study based on your own environment's design; everything a policy enforces is expressed in its manifest. A combined sketch follows.
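As a closing sketch of how the pieces combine (every label below is hypothetical), a single policy can mix namespace, pod, and IP-block selectors in both directions; a from entry that pairs namespaceSelector with podSelector matches pods with that label inside namespaces with that label:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: combined-example
  namespace: shengchan
spec:
  podSelector:
    matchLabels:
      app: hello
  ingress:
  - from:
    - namespaceSelector:     # pods labeled role=client ...
        matchLabels:
          team: frontend
      podSelector:           # ... in namespaces labeled team=frontend
        matchLabels:
          role: client
    ports:
    - protocol: TCP
      port: 80
  egress:
  - to:
    - ipBlock:               # outbound only to the pod network
        cidr: 10.244.0.0/16
  policyTypes:
  - Ingress
  - Egress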

Original article: https://www.cnblogs.com/qwangxiao/p/10781572.html

