kuboard-v2.0.0-beta.3

This commit is contained in:
huanqing.shao
2020-05-31 12:46:56 +08:00
parent 2196e16b72
commit d936393bd3
50 changed files with 12564 additions and 2808 deletions

View File

@ -32,7 +32,7 @@ export default {
},
components: { },
mounted () {
axios.get('https://addons.kuboard.cn/kuboard-latest-version.json').then(resp => {
axios.get('https://addons.kuboard.cn/kuboard-latest-version.json?' + Math.random()).then(resp => {
console.log(resp)
this.latestVersion = resp.data
}).catch(e => {

View File

@ -552,7 +552,7 @@ module.exports = {
children: [
'example/busybox',
'example/import',
'example/monitor',
'example/monitor-v2',
]
},
{

View File

@ -88,7 +88,7 @@ export default ({
});
}, 200)
}
console.log('sharing', sharing)
// console.log('sharing', sharing)
Vue.prototype.$isSharing = sharing
}

View File

@ -0,0 +1,359 @@
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
namespace: example
name: cloud-eureka
annotations:
k8s.kuboard.cn/workload: cloud-eureka
k8s.kuboard.cn/displayName: 服务注册
k8s.kuboard.cn/service: ClusterIP
k8s.kuboard.cn/ingress: 'true'
labels:
k8s.kuboard.cn/layer: cloud
k8s.kuboard.cn/name: cloud-eureka
spec:
selector:
matchLabels:
k8s.kuboard.cn/layer: cloud
k8s.kuboard.cn/name: cloud-eureka
template:
metadata:
labels:
k8s.kuboard.cn/layer: cloud
k8s.kuboard.cn/name: cloud-eureka
spec:
imagePullSecrets:
- {}
initContainers: []
containers:
- image: 'eipsample/example-cloud-eureka:v1.0.0-alpha.1'
imagePullPolicy: Always
name: cloud-eureka
volumeMounts: []
resources: {}
env:
- name: CLOUD_EUREKA_DEFAULT_ZONE
value: 'http://cloud-eureka-0:9200/eureka'
volumes: []
replicas: 1
volumeClaimTemplates: []
serviceName: cloud-eureka
---
apiVersion: v1
kind: Service
metadata:
namespace: example
name: cloud-eureka
annotations:
k8s.kuboard.cn/workload: cloud-eureka
k8s.kuboard.cn/displayName: 服务注册
labels:
k8s.kuboard.cn/layer: cloud
k8s.kuboard.cn/name: cloud-eureka
spec:
selector:
k8s.kuboard.cn/layer: cloud
k8s.kuboard.cn/name: cloud-eureka
type: ClusterIP
ports:
- port: 9200
targetPort: 9200
protocol: TCP
name: mtfsyi
nodePort: 0
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
namespace: example
name: cloud-eureka
annotations:
k8s.kuboard.cn/workload: cloud-eureka
k8s.kuboard.cn/displayName: 服务注册
labels:
k8s.kuboard.cn/layer: cloud
k8s.kuboard.cn/name: cloud-eureka
spec:
rules:
- host: cloud-eureka.example.demo.kuboard.cn
http:
paths:
- path: /
backend:
serviceName: cloud-eureka
servicePort: mtfsyi
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: example
name: db-example
annotations:
k8s.kuboard.cn/workload: db-example
k8s.kuboard.cn/displayName: db-example
k8s.kuboard.cn/service: ClusterIP
k8s.kuboard.cn/ingress: 'false'
labels:
k8s.kuboard.cn/layer: db
k8s.kuboard.cn/name: db-example
spec:
selector:
matchLabels:
k8s.kuboard.cn/layer: db
k8s.kuboard.cn/name: db-example
template:
metadata:
labels:
k8s.kuboard.cn/layer: db
k8s.kuboard.cn/name: db-example
spec:
imagePullSecrets:
- {}
initContainers: []
containers:
- image: 'eipsample/example-db-example:v1.0.0-alpha.1'
imagePullPolicy: Always
name: db-example
volumeMounts:
- name: db-example-storage
mountPath: /var/lib/mysql
subPath: mysql
resources: {}
env:
- name: MYSQL_ROOT_PASSWORD
value: 'soqjdke4#es'
volumes:
- name: db-example-storage
persistentVolumeClaim:
claimName: db-example-storage
replicas: 1
---
apiVersion: v1
kind: Service
metadata:
namespace: example
name: db-example
annotations:
k8s.kuboard.cn/workload: db-example
k8s.kuboard.cn/displayName: db-example
labels:
k8s.kuboard.cn/layer: db
k8s.kuboard.cn/name: db-example
spec:
selector:
k8s.kuboard.cn/layer: db
k8s.kuboard.cn/name: db-example
type: ClusterIP
ports:
- port: 3306
targetPort: 3306
protocol: TCP
name: fp6ksw
nodePort: 0
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: example
name: gateway-example
annotations:
k8s.kuboard.cn/workload: gateway-example
k8s.kuboard.cn/displayName: gateway-example
k8s.kuboard.cn/service: ClusterIP
k8s.kuboard.cn/ingress: 'false'
labels:
k8s.kuboard.cn/layer: gateway
k8s.kuboard.cn/name: gateway-example
spec:
selector:
matchLabels:
k8s.kuboard.cn/layer: gateway
k8s.kuboard.cn/name: gateway-example
template:
metadata:
labels:
k8s.kuboard.cn/layer: gateway
k8s.kuboard.cn/name: gateway-example
spec:
imagePullSecrets:
- {}
initContainers: []
containers:
- image: 'eipsample/example-gateway-example:v1.0.0-alpha.1'
imagePullPolicy: Always
name: gateway-example
volumeMounts: []
resources: {}
env:
- name: CLOUD_EUREKA_DEFAULT_ZONE
value: 'http://cloud-eureka:9200/eureka'
- name: SPRING_PROFILES_ACTIVE
value: example
volumes: []
replicas: 1
---
apiVersion: v1
kind: Service
metadata:
namespace: example
name: gateway-example
annotations:
k8s.kuboard.cn/workload: gateway-example
k8s.kuboard.cn/displayName: gateway-example
labels:
k8s.kuboard.cn/layer: gateway
k8s.kuboard.cn/name: gateway-example
spec:
selector:
k8s.kuboard.cn/layer: gateway
k8s.kuboard.cn/name: gateway-example
type: ClusterIP
ports:
- port: 9201
targetPort: 9201
protocol: TCP
name: pdmd3y
nodePort: 0
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: example
name: svc-example
annotations:
k8s.kuboard.cn/workload: svc-example
k8s.kuboard.cn/displayName: svc-example
k8s.kuboard.cn/service: none
k8s.kuboard.cn/ingress: 'false'
labels:
k8s.kuboard.cn/layer: svc
k8s.kuboard.cn/name: svc-example
spec:
selector:
matchLabels:
k8s.kuboard.cn/layer: svc
k8s.kuboard.cn/name: svc-example
template:
metadata:
labels:
k8s.kuboard.cn/layer: svc
k8s.kuboard.cn/name: svc-example
spec:
imagePullSecrets:
- {}
initContainers: []
containers:
- image: 'eipsample/example-svc-example:v1.0.0-alpha.1'
imagePullPolicy: Always
name: svc-example
volumeMounts: []
resources: {}
env:
- name: CLOUD_EUREKA_DEFAULT_ZONE
value: 'http://cloud-eureka:9200/eureka'
- name: DB_EXAMPLE_URL
value: >-
jdbc:mysql://db-example:3306/eip_db_example?characterEncoding=utf8&useSSL=false
- name: DB_EXAMPLE_USERNAME
value: eip_user
- name: DB_EXAMPLE_PASSWORD
value: 1qaz2wsx
- name: snowflake.dataCenterId
value: '1'
- name: csp.sentinel.dashboard.server
value: monitor-sentinel
volumes: []
replicas: 1
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: example
name: web-example
annotations:
k8s.kuboard.cn/workload: web-example
k8s.kuboard.cn/displayName: web-example
k8s.kuboard.cn/service: ClusterIP
k8s.kuboard.cn/ingress: 'true'
labels:
k8s.kuboard.cn/layer: web
k8s.kuboard.cn/name: web-example
spec:
selector:
matchLabels:
k8s.kuboard.cn/layer: web
k8s.kuboard.cn/name: web-example
template:
metadata:
labels:
k8s.kuboard.cn/layer: web
k8s.kuboard.cn/name: web-example
spec:
imagePullSecrets:
- {}
initContainers: []
containers:
- image: 'eipsample/example-web-example:v1.0.0-alpha.1'
imagePullPolicy: Always
name: web-example
volumeMounts: []
resources: {}
env: []
volumes: []
replicas: 1
---
apiVersion: v1
kind: Service
metadata:
namespace: example
name: web-example
annotations:
k8s.kuboard.cn/workload: web-example
k8s.kuboard.cn/displayName: web-example
labels:
k8s.kuboard.cn/layer: web
k8s.kuboard.cn/name: web-example
spec:
selector:
k8s.kuboard.cn/layer: web
k8s.kuboard.cn/name: web-example
type: ClusterIP
ports:
- port: 80
targetPort: 80
protocol: TCP
name: mawfrp
nodePort: 0
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
namespace: example
name: web-example
annotations:
k8s.kuboard.cn/workload: web-example
k8s.kuboard.cn/displayName: we-example
labels:
k8s.kuboard.cn/layer: web
k8s.kuboard.cn/name: web-example
spec:
rules:
- host: web-example.example.demo.kuboard.cn
http:
paths:
- path: /
backend:
serviceName: web-example
servicePort: mawfrp

View File

@ -1,16 +1,16 @@
version: "3"
services:
kuboard:
image: eipwork/kuboard:arm
image: eipwork/kuboard:latest
ports:
- "8083:80"
environment:
KUBERNETES_SERVICE_HOST: 127.0.0.1
KUBERNETES_SERVICE_PORT_HTTPS: 6443
volumes:
- /Users/shaohuanqing/Kuboard/kuboard-workspace/nginx-lua/docker/nginx.80.conf:/etc/nginx/conf.d/nginx.80.conf
# - /Users/shaohuanqing/Kuboard/kuboard-workspace/nginx-lua/docker/nginx.80.conf:/etc/nginx/conf.d/nginx.80.conf
- /Users/shaohuanqing/Kuboard/kuboard-workspace/kuboard-press/docs:/usr/share/nginx/html
- /Users/shaohuanqing/Kuboard/kuboard-workspace/nginx-lua/docker/nginx-jwt.lua:/usr/local/openresty/nginx/jwt-lua/resty/nginx-jwt.lua
# - /Users/shaohuanqing/Kuboard/kuboard-workspace/nginx-lua/docker/nginx-jwt.lua:/usr/local/openresty/nginx/jwt-lua/resty/nginx-jwt.lua
networks:
- webnet
# web:

10
guide-v2/README.md Normal file
View File

@ -0,0 +1,10 @@
---
vssueId: 70
description: 搭建一个基于Kubernetes的私有化云平台
---
# Kubernetes JumpStart
<AdSenseTitle/>
本文目标读者:

View File

View File

@ -0,0 +1,29 @@
---
vssueId: 76
description: 通过Kuboard将一个预先定义好的SpringCloud微服务样例程序导入到Kubernetes中。
---
# 导入 example 微服务
<AdSenseTitle/>
## 前提
必须具备如下条件:
* Kubernetes 集群,版本不低于 v1.13.0
* 如果您还没有 Kubernetes 集群,请参考 [安装Kubernetes单Master节点集群](/install/install-k8s.html)
* Kuboard 微服务管理界面,版本不低于 v2.0.0-beta.3
* 请参考 [安装 Kuboard](/install/install-dashboard.html)
## 创建名称空间
创建新的名称空间,用来导入 example。
假设您已经进入了 Kuboard 名称空间界面,如下图所示:
## 导入 example
* 下载 <a :href="$withBase('/kuboard_example_v2.yaml')" download="kuboard_example.yaml">kuboard_example.yaml</a> 文件

View File

@ -0,0 +1,16 @@
---
vssueId: 77
description: 使用Kuboard在Kubernetes上安装监控套件并对example微服务实现资源层监控、中间件层监控、链路追踪和APM监控
---
# 监控 example
<AdSenseTitle/>
## 前提
必须具备如下条件:
* 已完成 [导入 example 微服务](/guide/example/import.html)
* 已配置了 NFS [StorageClass](/learning/k8s-intermediate/persistent/storage-class.html)
* 使用 kuboard-user 这个 ServiceAccount 登录 Kuboard 界面,[kuboard-user](/install/install-dashboard.html#获取token)

View File

@ -11,7 +11,7 @@ description: 通过Kuboard将一个预先定义好的SpringCloud微服务样例
必须具备如下条件:
* Kubernetes 集群 (安装在阿里云,本文将使用阿里云的 NFS 服务作为存储资源Kuboard 也可以运行在其他云环境或者私有环境中。)
* Kubernetes 集群
* 导入 example 微服务时,要求 Kubernetes 集群版本不低于 1.14.0 <Badge type="error">重要</Badge>
* 已在集群中安装 Kuboard

Binary file not shown.

After

Width:  |  Height:  |  Size: 63 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 70 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 92 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 92 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 110 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 123 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 109 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 131 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 127 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 144 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 106 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 114 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 118 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 147 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 174 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 181 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 124 KiB

129
guide/example/monitor-v2.md Normal file
View File

@ -0,0 +1,129 @@
---
vssueId: 77
description: 使用Kuboard在Kubernetes上安装监控套件并对example微服务实现资源层监控、中间件层监控、链路追踪和APM监控
---
# 监控 example
<AdSenseTitle/>
## 前提
必须具备如下条件:
* 已完成 [导入 example 微服务](/guide/example/import.html)
* 已配置了 NFS [StorageClass](/learning/k8s-intermediate/persistent/storage-class.html)
* 使用 kuboard-user 这个 ServiceAccount 登录 Kuboard 界面,[kuboard-user](/install/install-dashboard.html#获取token)
## 安装监控套件
* 进入 Kuboard 界面后,点击右上角的 **设置** 按钮(圆形图标),然后在左侧菜单中导航到 **Kuboard** --> **套件**,如下图所示:
![image-20200531101355671](./monitor-v2.assets/image-20200531101355671.png)
* 点击图中的 **查找并安装** 按钮;
可以查询到当前套件仓库中的可选套件,如下图所示:
> 当前只提供了全局资源层监控套件,后续将逐渐提供更丰富的套件功能。
![image-20200531101454954](./monitor-v2.assets/image-20200531101454954.png)
* 点击所选择套件对应的 **安装** 按钮,可将套件所需资源从仓库中加载到您自己的 Kubernetes 集群,并进入该套件的详情页面,如下图所示:
* 点击下图中的 **开始安装之前,请点我,阅读此套件的文档** 可以了解该套件提供的文档描述;
![image-20200531101804604](./monitor-v2.assets/image-20200531101804604.png)
* 点击 **套件参数** Tab 页
* 在 **套件参数** Tab 页中,调整参数;
* 按照 **额外步骤** 的提示,执行安装前的必须动作,并勾选 **确认已完成** 按钮
* 点击 **保存** 按钮
如下图所示:
![image-20200531102323613](./monitor-v2.assets/image-20200531102323613.png)
* **套件参数** 页完成保存后,将自动进入 **安装脚本** 页,如下图所示:
![image-20200531102446321](./monitor-v2.assets/image-20200531102446321.png)
* 点击 **安装** 按钮,进入 **工作负载导入** 界面,将套件所需的 Deployment/Service/ClusterRole 等对象导入到您的 Kubernetes 集群中,如下图所示:
![image-20200531102647083](./monitor-v2.assets/image-20200531102647083.png)
* 请按向导提示,完成套件所依赖对象的导入;
> * 本文不详细描述此导入过程,请在向导的引导下完成。
> * 关于第五步,调整存储卷参数,特别说明如下:
> * 建议为 monitor-grafana / monitor-prometheus 分别创建新的存储卷声明,需要您事先已经创建好了 [StorageClass](/guide/cluster/storage.html)
> * 如果您当前没有合适的存储类,为了测试套件的功能,也可以先选择 **emptyDir**,之后再设置合适的存储卷(此时,容器组被删除后,所有的监控数据将丢失);
![image-20200531103006378](./monitor-v2.assets/image-20200531103006378.png)
* 完成工作负载导入后,Kuboard 将自动进入套件的 **初始化** 页面,如下图所示:
* **初始化** 页面将检测所依赖工作负载的就绪情况,如下图所示:
![image-20200531114349178](./monitor-v2.assets/image-20200531114349178.png)
* 所有被依赖的工作负载就绪之后,将展示初始化时所引用的资源,如下图所示:
![image-20200531114628628](./monitor-v2.assets/image-20200531114628628.png)
* 点击 **初始化** 页面中的 **执行初始化** 按钮,Kuboard 将完成对 Grafana 的初始化动作,如下图所示:
![image-20200531114818385](./monitor-v2.assets/image-20200531114818385.png)
* 点击 **确定** 后,套件详情页将展示该套件支持的 **扩展** 信息,如下图所示:
![image-20200531115010561](./monitor-v2.assets/image-20200531115010561.png)
## 查看监控信息
完成此套件安装后,将在 Kuboard 的节点详情页、工作负载详情页显示对应的监控界面入口;
- 节点详情页
节点详情页增加如下两个监控界面入口:
- 节点资源监控
- 节点监控(含节点上的容器组)
![image-20200531115257331](./monitor-v2.assets/image-20200531115257331.png)
- 工作负载详情页
工作负载详情页增加如下三个监控界面入口:
- 容器组监控
- 所在节点监控
- 所在节点监控(含节点上的容器组)
![image-20200531115308199](./monitor-v2.assets/image-20200531115308199.png)
## 直接访问 Grafana 界面
您可能想要自己定义 Grafana 的规则,比如告警通知、授权管理等,如果需要以管理员用户登录到 Grafana 界面,请以 kuboard-user 身份登录 Kuboard,并导航到名称空间 kube-system 下的 deployment / monitor-grafana 页面,点击其中的 **代理** 按钮,就可以用管理员身份登录 Grafana 界面,如下图所示:
![image-20200531115904561](./monitor-v2.assets/image-20200531115904561.png)
关于如何通过 KuboardProxy 访问 Grafana 界面,以及如何实现 Grafana 与 Kuboard 的单点认证,请参考 [KuboardProxy - Auth Proxy](/guide/proxy/auth-proxy.html)
## 开发者模式
在 Kuboard 套件的详情页面,可以点击 **开发者模式** 按钮,此时,您可以修改 Kuboard 套件安装过程的各种脚本,如:
* 增加套件参数
* 修改 YAML 安装脚本
* 修改初始化脚本、添加初始化所需资源
* 添加扩展,修改扩展脚本,实现自定义逻辑
如下图所示:
![image-20200531120441813](./monitor-v2.assets/image-20200531120441813.png)
您还可以将自己的套件提交到套件仓库。

View File

@ -2,32 +2,16 @@
description: Kuboard中的Kubernetes监控套件
---
# 监控套件 <Badge text="alpha" type="warn"/>
# 监控套件
<AdSenseTitle/>
作者已经在自己的多个项目中使用了监控套件,但是由于在使用时,需要针对具体的项目做少量的定制,因此,监控套件目前还处于 alpha 状态。
监控套件分全局监控套件和局部监控套件两种类型:
* 全局监控套件
* Prometheus + Grafana,可以对节点、节点上的容器组进行资源层监控(CPU、内存、磁盘、网络等)
* Prometheus + Grafana,可以对节点、节点上的容器组进行资源层监控(CPU、内存、磁盘、网络等),请参考 [监控 Example](/guide/example/monitor.html)
* 局部监控套件
* Prometheus监控套件mysql/nginx/jvm
* Prometheus监控套件mysql/nginx/jvm<Badge type="warn">后续提供</Badge>
* 监控 mysql/nginx/jvm 等
* 使用 eureka 做服务发现(需要优化,改成使用 kubernetes api object 进行服务发现)
* 代码地址: [eip-monitor-prometheus](https://github.com/eip-work/eip-monitor-prometheus.git)
* 熔断及限流 Sentinel
* 用于 Spring Cloud 的熔断及限流
* 代码地址: [eip-monitor-sentinel](https://github.com/eip-work/eip-monitor-sentinel.git)
* Pinpoint 监控套件
* Pinpoint 监控套件 <Badge type="warn">后续提供</Badge>
* 用于链路追踪及APM
* 代码地址: [eip-monitor-pinpoint](https://github.com/eip-work/eip-monitor-pinpoint.git)
监控套件都使用开源监控软件,作者做监控套件的出发点主要有两个:
* 简化监控套件的安装和配置
* 将监控套件的入口嵌入到 Node、Pod、Container 的上下文当中,以便快速的定位到监控信息
如果有同学对这两个设想感兴趣,请加群 808894550 并联系群主。
> * 请参考 [监控 example](/guide/example/monitor.html) <Badge text="alpha" type="warn"/> 体验 Kuboard 在监控套件方面的设想
> * 监控套件以插件的形式整合到 Kuboard在不使用监控套件的情况下Kuboard 的所有功能都可正常工作

View File

@ -0,0 +1 @@
# 端口转发

View File

@ -58,6 +58,10 @@ Kuboard 是 Kubernetes 的一款图形化管理界面。
> 如果您参考 https://kuboard.cn 网站上提供的 Kubernetes 安装文档,可在 master 节点上执行以下命令。
> 关于版本选择:
> * 当前,Kuboard 正在从 v1.0.x 向 Kuboard v2.0.0 过渡,由于 v2.0.0 仍然处于 beta 状态,latest 版本仍然保持为 v1.0.9.7。新用户可以从 beta 版开始,以获得更好的体验。
> * 参考此文,[了解如何从 v1.0.x 升级到 Kuboard v2.0.x](/support/change-log/upgrade-1-2.html)
<b-tabs content-class="mt-3">
<b-tab title="稳定版">

17
install/install-k8s-ha.md Normal file
View File

@ -0,0 +1,17 @@
---
vssueId: 15
# layout: StepLayout
sharingTitle: K8S入门第一步---安装装不好还有人免费远程协助更有K8S免费教程提供你还在等什么
description: Kubernete安装文档_Kubernetes最新稳定版v1.18.x的快速安装文档_该文档由众多网友验证并在线提出修改意见_持续不断地更新和完善_并且通过QQ群提供免费在线答疑的服务
meta:
- name: keywords
content: Kubernetes安装,K8S安装,kubeadm,Kubernetes 安装,K8S 安装,k8s搭建
---
# 将已经安装的单Master节点集群升级为高可用集群
<AdSenseTitle/>
## 前提
本文假设您已经参考 [使用kubeadm安装kubernetes_v1.18.x](./install-k8s.html) 文档中的描述完成了 单 Master 节点 Kubernetes 集群的安装。

View File

@ -1,37 +0,0 @@
{
"scripts": {
"docs:dev": "vuepress dev .",
"docs:build": "vuepress build ."
},
"devDependencies": {
"@vssue/api-github-v4": "^1.2.1",
"@vssue/vuepress-plugin-vssue": "^1.2.0",
"@vuepress/plugin-google-analytics": "^1.2.0",
"@vuepress/plugin-medium-zoom": "^1.2.0",
"@vuepress/plugin-nprogress": "^1.2.0",
"babel-plugin-component": "^1.1.1",
"date-fns": "^1.30.1",
"vuepress-plugin-baidu-autopush": "^1.0.1",
"vuepress-plugin-code-copy": "^1.0.4",
"vuepress-plugin-code-switcher": "^1.0.0",
"vuepress-plugin-reading-progress": "^1.0.7",
"vuepress-plugin-seo": "^0.1.2",
"vuepress-plugin-sitemap": "^2.1.2"
},
"dependencies": {
"@vuepress/plugin-active-header-links": "^1.3.0",
"@vuepress/plugin-back-to-top": "^1.3.0",
"@vuepress/plugin-pwa": "^1.2.0",
"animated-number-vue": "^1.0.0",
"aos": "^3.0.0-beta.6",
"axios": "^0.19.0",
"bootstrap": "^4.3.1",
"bootstrap-vue": "^2.0.4",
"esm": "^3.2.25",
"npm": "^6.11.3",
"reduce-css-calc": "^2.1.6",
"vue2-animate": "^2.1.2",
"vuepress": "1.2.0",
"vuepress-plugin-named-chunks": "^1.0.2"
}
}

View File

@ -1,34 +1,36 @@
{
"scripts": {
"docs:dev": "vuepress dev .",
"dev": "vuepress dev .",
"start": "nodemon --ext md,vue --watch .vuepress --watch . --exec vuepress dev",
"docs:build": "vuepress build ."
},
"dependencies": {
"@vssue/api-github-v4": "^1.4.0",
"@vssue/vuepress-plugin-vssue": "^1.4.2",
"@vuepress/plugin-active-header-links": "^1.3.1",
"@vuepress/plugin-back-to-top": "^1.3.1",
"@vuepress/plugin-google-analytics": "^1.3.1",
"@vuepress/plugin-medium-zoom": "^1.3.1",
"@vuepress/plugin-nprogress": "^1.3.1",
"@vuepress/plugin-pwa": "^1.3.1",
"@vssue/vuepress-plugin-vssue": "^1.4.6",
"@vuepress/plugin-active-header-links": "^1.5.0",
"@vuepress/plugin-back-to-top": "^1.5.0",
"@vuepress/plugin-google-analytics": "^1.5.0",
"@vuepress/plugin-medium-zoom": "^1.5.0",
"@vuepress/plugin-nprogress": "^1.5.0",
"@vuepress/plugin-pwa": "^1.5.0",
"animated-number-vue": "^1.0.0",
"aos": "^2.3.4",
"axios": "^0.19.2",
"babel-plugin-component": "^1.1.1",
"bootstrap": "^4.4.1",
"bootstrap-vue": "^2.7.0",
"bootstrap": "^4.5.0",
"bootstrap-vue": "^2.15.0",
"date-fns": "^1.30.1",
"esm": "^3.2.25",
"reduce-css-calc": "^2.1.7",
"vue2-animate": "^2.1.3",
"vuepress": "^1.3.1",
"vuepress": "^1.5.0",
"vuepress-plugin-baidu-autopush": "^1.0.1",
"vuepress-plugin-code-copy": "^1.0.6",
"vuepress-plugin-code-switcher": "^1.0.3",
"vuepress-plugin-named-chunks": "^1.1.2",
"vuepress-plugin-reading-progress": "^1.0.8",
"vuepress-plugin-seo": "^0.1.2",
"vuepress-plugin-sitemap": "2.1.2"
"vuepress-plugin-named-chunks": "^1.1.3",
"vuepress-plugin-reading-progress": "^1.0.9",
"vuepress-plugin-seo": "^0.1.3",
"vuepress-plugin-sitemap": "2.1.2",
"nodemon": "2.0.4"
}
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 618 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 248 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 155 KiB

View File

@ -0,0 +1,12 @@
* 安装授权文件
* Port-forward
* Addons
* Service:
* SessionAffinity
* 节点管理操作,驱逐、封禁等
增加个按内存排序使用量排序的功能, 在那个TOP的地方

View File

@ -1,37 +1,69 @@
Kuboard v1.0.x 的更新说明
v1.0.7 已经支持了 kubectl proxy 的功能、v1.0.8 做 Deployment 的滚动更新、v1.0.9 做 kubectl port-forward v1.0.10 做 kubectl cp、v1.0.11 做 Job 和 CronJob。
这几样做完以后,就 v1.1.0
kubectl port-forward
kubectl cp
Job / CronJob
**优化**
* 事件通知
* 可以关闭事件通知;
* 设置菜单中可以重新开启事件通知;
* 切换名称空间
* 部分情况下,切换名称空间时,应该直接进入名称空间首页;
* 高亮当前所在的名称空间;
* 日志/终端界面
* 可以调整字体大小;
**BUG修正**
* 部分情况下,切换名称空间时,内容未刷新;
调整控制台字体大小
删除 NFS StorageClass 出错
BUG:
arm 环境下,应该使用镜像: https://hub.docker.com/r/vbouchaud/nfs-client-provisioner/tags
Calico 指定网卡的方式:
- name: IP_AUTODETECTION_METHOD
value: "interface=em1"
补充文档,描述如何授权一个 ServiceAccount 访问多个名称空间
Deployment 页面,可能存在请求线程过多导致页面部分内容显示为加载状态的情况;
安装 kube-prometheus 后,与 kuboard 安装的 metrics-server 有冲突:
按照 https://github.com/coreos/kube-prometheus clone 下来后执行:
kubectl create -f manifests/setup
until kubectl get servicemonitors --all-namespaces ; do date; sleep 1; echo ""; done
kubectl create -f manifests/
**优化**
* 提示用户怎么填写 Command 命令
* 通过 YAML 文件创建对象时,如果名称空间和当前不一致,应该给出提示
* 将 secret 绑定到环境变量
* 默认 StorageClass
* 图形化实例 Service 的 NodePort/Port/targetPort
**BUG修正**
* Kubernetes 版本过低时Ingress列表页加载失败
* 部分情况下,终端界面打不开时,未弹出错误提示对话框
删除 PV 时,出现 /notsupported 错误
Start by reading through this tutorial on windows services:
https://github.com/iswix-llc/iswix-tutorials
Now read this article to understand how any old script/EXE can be made a service using srvany.exe:
https://support.microsoft.com/en-us/help/137890/how-to-create-a-user-defined-service
Take a look at my answer to see how it all comes together:
Wix installer to replace INSTSRV and SRVANY for user defined service installation
If that still isn't enough, send me an email and I'll give you a complimentary 30-60 minute session to show you.
---
* 修改套件的信息之后,需要重新 apply
* 工作负载编辑器
* 保存前对比 YAML

139
support/change-log/env.yaml Normal file
View File

@ -0,0 +1,139 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/instance: tsp
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tsp
app.kubernetes.io/version: 1.16.0
helm.sh/chart: tsp-0.1.0
name: tsp
namespace: default
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/instance: tsp
app.kubernetes.io/name: tsp
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/instance: tsp
app.kubernetes.io/name: tsp
spec:
containers:
- env:
- name: MEM_TOTAL_MB
valueFrom:
resourceFieldRef:
divisor: "0"
resource: limits.memory
image: k8s.tf56.com:32571/tsp:latest
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: http
timeoutSeconds: 1
name: tsp
ports:
- containerPort: 8080
name: http
protocol: TCP
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: http
timeoutSeconds: 1
resources:
limits:
cpu: "2"
memory: 4000Mi
securityContext: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /httx/logs
name: tsp-logs
dnsPolicy: ClusterFirst
hostAliases:
- hostnames:
- web-tffcachecloud-vip
ip: 10.77.1.18
- hostnames:
- mt-party-vip
ip: 10.77.35.205
- hostnames:
- mt-apollo-1
ip: 10.77.0.163
- hostnames:
- mt-apollo-2
ip: 10.77.0.164
- hostnames:
- mt-eureka-1
- mt-eureka-2
ip: 10.77.1.29
- hostnames:
- mt-zookeeper-vip
- mt-tffjobzk-vip
ip: 10.77.1.29
- hostnames:
- web-dfssweb-vip
- web-sms-vip
- mt-doggy-vip
- web-ram2-vip
- web-creditinvestchannel-vip
- mt-creditinvestservice-vip
- mt-tffauthservice-vip
- mt-messagecenterservice-vip
- web-goldenleopard-vip
ip: 10.77.1.17
- hostnames:
- mt-uuidserver-vip
ip: 10.77.34.12
- hostnames:
- mt-tffzk-vip
ip: 10.77.32.3
- hostnames:
- web-openapitest-1
ip: 10.7.29.39
- hostnames:
- mt-tffxxljob-1
ip: 10.77.32.182
- hostnames:
- web-athena-vip
ip: 10.7.13.222
- hostnames:
- mt-payrocketmqdispatch-vip
- mt-payrocketmqdispatch-1
- mt-payrocketmqdispatch-2
ip: 10.77.1.29
- hostnames:
- ops-cachecloud-vip
ip: 10.77.0.5
imagePullSecrets:
- name: registry-key
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: tsp
serviceAccountName: tsp
terminationGracePeriodSeconds: 30
volumes:
- name: tsp-logs
persistentVolumeClaim:
claimName: tsp-logs

View File

@ -0,0 +1,33 @@
# cat sms-h5-backend.yaml
---
apiVersion: v1
kind: Service
metadata:
annotations:
k8s.eip.work/workload: sms-h5-backend
getambassador.io/config: |
---
apiVersion: ambassador/v1
kind: Mapping
name: sms-h5-backend_mapping
prefix: /sms-h5-backend/
service: sms-h5-backend:30003
labels:
app: sms-h5-backend
name: sms-h5-backend
namespace: default
spec:
ports:
- name: hx8s7a
nodePort: 30003
port: 30003
protocol: TCP
targetPort: 30003
selector:
app: sms-h5-backend
sessionAffinity: None
type: NodePort

Binary file not shown.

After

Width:  |  Height:  |  Size: 17 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 130 KiB

View File

@ -0,0 +1,122 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: '2'
k8s.kuboard.cn/ingress: 'false'
k8s.kuboard.cn/service: ClusterIP
k8s.kuboard.cn/workload: nginx-deployment
kubectl.kubernetes.io/last-applied-configuration: >
{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app":"nginx"},"name":"nginx-deployment","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"nginx"}},"template":{"metadata":{"labels":{"app":"nginx"}},"spec":{"containers":[{"image":"nginx:1.7.9","name":"nginx"}]}}}}
creationTimestamp: '2020-05-13T13:53:22Z'
generation: 2
labels:
app: nginx
name: nginx-deployment
namespace: default
resourceVersion: '165068'
selfLink: /apis/apps/v1/namespaces/default/deployments/nginx-deployment
uid: cf76662d-4b76-49b3-95c0-0912c0d175bd
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: nginx
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
labels:
app: nginx
spec:
containers:
- image: 'nginx:1.7.9'
imagePullPolicy: IfNotPresent
name: nginx
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
terminationGracePeriodSeconds: 30
status:
availableReplicas: 1
conditions:
- lastTransitionTime: '2020-05-18T11:46:29Z'
lastUpdateTime: '2020-05-18T11:46:29Z'
message: Deployment has minimum availability.
reason: MinimumReplicasAvailable
status: 'True'
type: Available
- lastTransitionTime: '2020-05-13T13:53:22Z'
lastUpdateTime: '2020-05-18T12:07:38Z'
message: ReplicaSet "nginx-deployment-564949df9d" has successfully progressed.
reason: NewReplicaSetAvailable
status: 'True'
type: Progressing
observedGeneration: 2
readyReplicas: 1
replicas: 1
updatedReplicas: 1
---
apiVersion: v1
kind: Service
metadata:
annotations:
k8s.kuboard.cn/workload: nginx-deployment
creationTimestamp: '2020-05-18T12:13:31Z'
labels:
app: nginx
name: nginx-deployment
namespace: default
resourceVersion: '167877'
selfLink: /api/v1/namespaces/default/services/nginx-deployment
uid: f00605fb-d534-4eb0-a72a-86a4128aa998
spec:
clusterIP: 10.99.104.165
ports:
- name: hcwdid
port: 80
protocol: TCP
targetPort: 80
selector:
app: nginx
sessionAffinity: None
type: ClusterIP
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: >
{"apiVersion":"networking.k8s.io/v1beta1","kind":"Ingress","metadata":{"annotations":{"kubernetes.io/ingress.class":"nginx"},"name":"nginx-deployment","namespace":"default"},"spec":{"rules":[{"host":"a.demo.kuboard.cn","http":{"paths":[{"backend":{"serviceName":"nginx-deployment","servicePort":80},"path":"/"}]}}]}}
kubernetes.io/ingress.class: nginx
creationTimestamp: '2020-05-18T12:36:23Z'
generation: 1
name: nginx-deployment
namespace: default
resourceVersion: '168334'
selfLink: >-
/apis/networking.k8s.io/v1beta1/namespaces/default/ingresses/nginx-deployment
uid: 67bb41ae-e95e-4adc-91dd-b3d4738c4bad
spec:
rules:
- host: a.demo.kuboard.cn
http:
paths:
- backend:
serviceName: nginx-deployment
servicePort: 80
path: /
status:
loadBalancer:
ingress:
- hostname: localhost

View File

@ -0,0 +1,176 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: '1'
kubectl.kubernetes.io/last-applied-configuration: >
{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/component":"controller","app.kubernetes.io/instance":"ingress-nginx","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/name":"ingress-nginx","app.kubernetes.io/version":"0.32.0","helm.sh/chart":"ingress-nginx-2.0.3"},"name":"ingress-nginx-controller","namespace":"ingress-nginx"},"spec":{"minReadySeconds":0,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app.kubernetes.io/component":"controller","app.kubernetes.io/instance":"ingress-nginx","app.kubernetes.io/name":"ingress-nginx"}},"template":{"metadata":{"labels":{"app.kubernetes.io/component":"controller","app.kubernetes.io/instance":"ingress-nginx","app.kubernetes.io/name":"ingress-nginx"}},"spec":{"containers":[{"args":["/nginx-ingress-controller","--publish-service=ingress-nginx/ingress-nginx-controller","--election-id=ingress-controller-leader","--ingress-class=nginx","--configmap=ingress-nginx/ingress-nginx-controller","--validating-webhook=:8443","--validating-webhook-certificate=/usr/local/certificates/cert","--validating-webhook-key=/usr/local/certificates/key"],"env":[{"name":"POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}],"image":"quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.32.0","imagePullPolicy":"IfNotPresent","lifecycle":{"preStop":{"exec":{"command":["/wait-shutdown"]}}},"livenessProbe":{"failureThreshold":3,"httpGet":{"path":"/healthz","port":10254,"scheme":"HTTP"},"initialDelaySeconds":10,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1},"name":"controller","ports":[{"containerPort":80,"name":"http","protocol":"TCP"},{"containerPort":443,"name":"https","protocol":"TCP"},{"containerPort":8443,"name":"webhook","protocol":"TCP"}],"readinessProbe":{"failureThreshold":3,"httpGet":{"path":"/healthz","port":10254,"scheme":"HTTP"},"initialDelaySeconds":10,"peri
odSeconds":10,"successThreshold":1,"timeoutSeconds":1},"resources":{"requests":{"cpu":"100m","memory":"90Mi"}},"securityContext":{"allowPrivilegeEscalation":true,"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"runAsUser":101},"volumeMounts":[{"mountPath":"/usr/local/certificates/","name":"webhook-cert","readOnly":true}]}],"dnsPolicy":"ClusterFirst","serviceAccountName":"ingress-nginx","terminationGracePeriodSeconds":300,"volumes":[{"name":"webhook-cert","secret":{"secretName":"ingress-nginx-admission"}}]}}}}
creationTimestamp: '2020-05-13T14:22:12Z'
generation: 1
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/version: 0.32.0
helm.sh/chart: ingress-nginx-2.0.3
name: ingress-nginx-controller
namespace: ingress-nginx
resourceVersion: '50463'
selfLink: /apis/apps/v1/namespaces/ingress-nginx/deployments/ingress-nginx-controller
uid: 8baad61e-27ea-4237-8aa5-f7ac289bbe65
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
spec:
containers:
- args:
- /nginx-ingress-controller
- '--publish-service=ingress-nginx/ingress-nginx-controller'
- '--election-id=ingress-controller-leader'
- '--ingress-class=nginx'
- '--configmap=ingress-nginx/ingress-nginx-controller'
- '--validating-webhook=:8443'
- '--validating-webhook-certificate=/usr/local/certificates/cert'
- '--validating-webhook-key=/usr/local/certificates/key'
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
image: >-
quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.32.0
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: controller
ports:
- containerPort: 80
name: http
protocol: TCP
- containerPort: 443
name: https
protocol: TCP
- containerPort: 8443
name: webhook
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
requests:
cpu: 100m
memory: 90Mi
securityContext:
allowPrivilegeEscalation: true
capabilities:
add:
- NET_BIND_SERVICE
drop:
- ALL
runAsUser: 101
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /usr/local/certificates/
name: webhook-cert
readOnly: true
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: ingress-nginx
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
defaultMode: 420
secretName: ingress-nginx-admission
---
# Service exposing the ingress-nginx controller (HTTP/HTTPS) via a
# LoadBalancer. Captured from a live cluster: server-populated fields are
# present (see notes below) and would be re-assigned on a fresh apply.
apiVersion: v1
kind: Service
metadata:
  annotations:
    # Maintained by `kubectl apply`; records the manifest as last applied.
    kubectl.kubernetes.io/last-applied-configuration: >
      {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"labels":{"app.kubernetes.io/component":"controller","app.kubernetes.io/instance":"ingress-nginx","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/name":"ingress-nginx","app.kubernetes.io/version":"0.32.0","helm.sh/chart":"ingress-nginx-2.0.3"},"name":"ingress-nginx-controller","namespace":"ingress-nginx"},"spec":{"externalTrafficPolicy":"Local","ports":[{"name":"http","port":80,"protocol":"TCP","targetPort":"http"},{"name":"https","port":443,"protocol":"TCP","targetPort":"https"}],"selector":{"app.kubernetes.io/component":"controller","app.kubernetes.io/instance":"ingress-nginx","app.kubernetes.io/name":"ingress-nginx"},"type":"LoadBalancer"}}
  # creationTimestamp, resourceVersion, selfLink and uid are assigned by the
  # API server; strip them before re-using this manifest in another cluster.
  creationTimestamp: '2020-05-13T14:22:12Z'
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    # Quoted so the version stays a plain string for all YAML parsers.
    app.kubernetes.io/version: '0.32.0'
    helm.sh/chart: ingress-nginx-2.0.3
  name: ingress-nginx-controller
  namespace: ingress-nginx
  resourceVersion: '50411'
  selfLink: /api/v1/namespaces/ingress-nginx/services/ingress-nginx-controller
  uid: 7ad2c0d7-6fbb-437e-a6e5-a60a6de27db6
spec:
  # clusterIP / nodePort / healthCheckNodePort are also cluster-assigned
  # values captured from the live object.
  clusterIP: 10.104.20.134
  # Route external traffic only to endpoints on the receiving node,
  # preserving the client source IP.
  externalTrafficPolicy: Local
  healthCheckNodePort: 32155
  ports:
    - name: http
      nodePort: 32412
      port: 80
      protocol: TCP
      # Targets the container port named "http" in the controller pods.
      targetPort: http
    - name: https
      nodePort: 31845
      port: 443
      protocol: TCP
      targetPort: https
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  sessionAffinity: None
  type: LoadBalancer

View File

@ -0,0 +1,42 @@
---
# StatefulSet running a single config-center instance in namespace "antucd".
# $SECRET_NAME and $IMAGE_NAME are deploy-time placeholders (presumably
# substituted by envsubst or the CI pipeline — confirm against the deploy
# script); they are quoted so the raw file stays a valid plain-string YAML.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: config-center
  namespace: antucd
spec:
  # Governing service for this StatefulSet; defined later in this file.
  serviceName: "config-center"
  selector:
    matchLabels:
      app: config-center
  replicas: 1
  template:
    metadata:
      labels:
        app: config-center
    spec:
      imagePullSecrets:
        - name: "$SECRET_NAME"
      containers:
        - image: "$IMAGE_NAME"
          name: config-center
          # Always re-pull so a re-pushed tag is picked up on restart.
          imagePullPolicy: Always
          ports:
            - containerPort: 3301
---
# NodePort Service exposing the config-center pods on port 3301.
apiVersion: v1
kind: Service
metadata:
  name: config-center
  namespace: antucd
  labels:
    app: config-center
spec:
  # NodePort so the service is reachable from outside the cluster; the node
  # port itself is left for the API server to allocate.
  type: NodePort
  ports:
    - port: 3301
      name: config-center
      targetPort: 3301
  selector:
    app: config-center

View File

@ -13,6 +13,47 @@ Kuboard v2.0.x 的更新说明
了解 [从Kuboard v1.0.x 升级到 Kuboard v2.0.x](./upgrade-1-2.html)
## v2.0.0-beta.3.b
**发布日期**
2020年5月31日
**新特性**
* 套件功能重构
* 使用 CRD 存储套件相关信息
* 优化套件安装过程的体验
* 将套件编辑器与套件安装过程整合,降低套件开发的难度(开发一个全新套件的门槛仍然比较高,但是基于已有套件改造和提升的门槛变得很低)
* 资源监控套件
* 基于 Prometheus/Grafana 提供资源监控套件
* i18n
* 可以在设置中修改语言偏好
* 提高 i18n 的完成度(工作负载查看页等)
* 日志/终端界面
* 修改前景色,以适应不同的光线环境
**优化**
* 节点详情页
* Pod 列表排序
* 完善加载出错时的错误提示信息
**BUG修正**
* Pod 详情页未显示面包屑
## v2.0.0-beta.2.c
**发布日期**
2020年5月24日
**BUG修正**
* 列表页中,删除按钮可能对应到错误的条目
## v2.0.0-beta.2
**发布日期**

8471
yarn-error.log Normal file

File diff suppressed because it is too large Load Diff

5594
yarn.lock

File diff suppressed because it is too large Load Diff