Compare commits

6 Commits — master...controller

Author | SHA1 | Date |
---|---|---|
Chris Lu | dcd6f8e056 | |
Chris Lu | b1706bd2ed | |
Chris Lu | 9859b72c74 | |
Chris Lu | b346842ea3 | |
Chris Lu | 15b319e331 | |
Chris Lu | 45d81b6928 | |
Deleted GitHub Actions workflow `Go`, 70 lines (`@@ -1,70 +0,0 @@`):

```yaml
name: Go

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:

  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: golangci-lint
        uses: golangci/golangci-lint-action@v2
        with:
          # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
          version: v1.31

          # Optional: working directory, useful for monorepos
          # working-directory: somedir

          # Optional: golangci-lint command line arguments.
          # TODO: remove disabled
          args: --timeout=10m -D errcheck -D deadcode -D unused

          # Optional: show only new issues if it's a pull request. The default value is `false`.
          # only-new-issues: true

  build:
    name: Build
    runs-on: ubuntu-latest
    steps:

      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: ^1.13

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - name: Get dependencies
        run: |
          go get -v -t -d ./...
          if [ -f Gopkg.toml ]; then
            curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
            dep ensure
          fi

      - name: Build
        run: go build -v .

  test:
    name: Test
    runs-on: ubuntu-latest
    steps:

      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: ^1.13

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - name: Test
        run: make test SHELL=/bin/bash
```
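A note on the lint job above: golangci-lint's `-D` flag disables the named linters, so `-D errcheck -D deadcode -D unused` switches those three checks off entirely; the adjacent `# TODO: remove disabled` comment suggests they were meant to be re-enabled later.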
Deleted GitHub Actions workflow `CI`, 74 lines (`@@ -1,74 +0,0 @@`):

```yaml
# This is a basic workflow to help you get started with Actions

name: CI

# Controls when the action will run.
on:
  # Triggers the workflow on push or pull request events but only for the master branch
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  # This workflow contains a single job called "build"
  build:
    # The type of runner that the job will run on
    runs-on: ubuntu-latest

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@v2

      # Runs a single command using the runners shell
      - name: Run a one-line script
        run: echo Hello, world!

      - name: Build and push Docker images
        # You may pin to the exact commit or the version.
        # uses: docker/build-push-action@e1b7f96249f2e4c8e4ac1519b9608c0d48944a1f
        uses: docker/build-push-action@v2.4.0
        with:
          # List of extra privileged entitlement (eg. network.host,security.insecure)
          allow: # optional
          # List of build-time variables
          build-args: # optional
          # Builder instance
          builder: # optional
          # Build's context is the set of files located in the specified PATH or URL
          context: # optional
          # Path to the Dockerfile
          file: # optional
          # List of metadata for an image
          labels: # optional
          # Load is a shorthand for --output=type=docker
          load: # optional, default is false
          # Set the networking mode for the RUN instructions during build
          network: # optional
          # Do not use cache when building the image
          no-cache: # optional, default is false
          # List of output destinations (format: type=local,dest=path)
          outputs: # optional
          # List of target platforms for build
          platforms: # optional
          # Always attempt to pull a newer version of the image
          pull: # optional, default is false
          # Push is a shorthand for --output=type=registry
          push: # optional, default is false
          # List of secrets to expose to the build (eg. key=string, GIT_AUTH_TOKEN=mytoken)
          secrets: # optional
          # List of secret files to expose to the build (eg. key=filename, MY_SECRET=./secret.txt)
          secret-files: # optional
          # List of SSH agent socket or keys to expose to the build
          ssh: # optional
          # List of tags
          tags: # optional
          # Sets the target stage to build
          target: # optional
          # GitHub Token used to authenticate against a repository for Git context
          github-token: # optional, default is ${{ github.token }}
```
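The removed workflow above pasted docker/build-push-action's full input template with every field left empty. In practice only the inputs actually set are listed; a trimmed, hypothetical version of that step might look like the sketch below (the image tag is illustrative, not taken from this repository):

```yaml
      - name: Build and push Docker images
        uses: docker/build-push-action@v2.4.0
        with:
          context: .                                # build from the repository root
          push: true                                # shorthand for --output=type=registry
          tags: example/seaweedfs-operator:latest   # illustrative tag only
```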
Deleted GitHub Actions workflow `Verify`, 35 lines (`@@ -1,35 +0,0 @@`):

```yaml
name: Verify

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  verify:
    name: Verify
    runs-on: ubuntu-latest
    steps:

      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: ^1.13

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - name: Get dependencies
        run: |
          go get -v -t -d ./...
          if [ -f Gopkg.toml ]; then
            curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
            dep ensure
          fi

      - name: Verify Codegen
        run: hack/verify-codegen.sh

      - name: Verify Manifests
        run: hack/verify-manifests.sh
```
Removed entries from an ignore file:

```diff
@@ -79,6 +79,3 @@ tags
 ### GoLand ###
 .idea
 bin/*
-
-## asdf
-.tool-versions
```
.travis.yml, 11 lines — file deleted (`@@ -1,11 +0,0 @@`):

```yaml
sudo: false
language: go
go:
  - 1.16.x

before_install:
  - export PATH=/home/travis/gopath/bin:$PATH

install:
  - export CGO_ENABLED="0"
  - go env
```
Builder Dockerfile change:

```diff
@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM golang:1.16 as builder
+FROM golang:1.13 as builder
 
 WORKDIR /workspace
 # Copy the Go Modules manifests
```
LICENSE, 201 lines — file deleted (`@@ -1,201 +0,0 @@`). The removed file was the standard Apache License, Version 2.0 text (http://www.apache.org/licenses/), ending with the boilerplate notice:

```
Copyright 2016 Chris Lu

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```
Makefile, 17 lines changed:

```diff
@@ -1,5 +1,5 @@
 # Current Operator version
-VERSION ?= v0.0.2
+VERSION ?= 0.0.1
 # Default bundle image tag
 BUNDLE_IMG ?= controller-bundle:$(VERSION)
 # Options for 'bundle-build'
@@ -12,9 +12,9 @@ endif
 BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
 
 # Image URL to use all building/pushing image targets
-IMG ?= gfxlabs/seaweedfs-operator:$(VERSION)
+IMG ?= seaweedfs/operator:latest
 # Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
-CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false"
+CRD_OPTIONS ?= "crd:trivialVersions=true"
 
 # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
 ifeq (,$(shell go env GOBIN))
@@ -40,10 +40,6 @@ manager: generate fmt vet
 run: generate fmt vet manifests
 	go run ./main.go
 
-debug: generate fmt vet manifests
-	go build -gcflags="all=-N -l" ./main.go
-	ENABLE_WEBHOOKS=false dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec main
-
 # Install CRDs into a cluster
 install: manifests kustomize
 	$(KUSTOMIZE) build config/crd | kubectl apply -f -
@@ -57,10 +53,6 @@ deploy: manifests kustomize
 	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
 	$(KUSTOMIZE) build config/default | kubectl apply -f -
 
-# clean up crd & controller in the configured Kubernetes cluster in ~/.kube/config
-delete: manifests kustomize
-	$(KUSTOMIZE) build config/default | kubectl delete -f -
-
 # Generate manifests e.g. CRD, RBAC etc.
 manifests: controller-gen
 	$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
@@ -78,8 +70,7 @@ generate: controller-gen
 	$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
 
 # Build the docker image
-docker-build: # test
-	echo ${IMG}
+docker-build: test
 	docker build . -t ${IMG}
 
 # Push the docker image
```
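On the removed `debug` target above: it builds the manager with optimizations disabled and starts Delve as a headless server on port 2345, so using it means attaching a client from another terminal, e.g. `dlv connect :2345`. The port comes from the Makefile line itself; the attach command is just the usual Delve workflow and is not something this diff specifies.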
PROJECT, 3 lines changed:

```diff
@@ -1,10 +1,9 @@
 domain: seaweedfs.com
 layout: go.kubebuilder.io/v2
 repo: github.com/seaweedfs/seaweedfs-operator
-projectName: seaweedfs-operator
 resources:
 - group: seaweed
-  kind: Seaweed
+  kind: Master
   version: v1
 version: 3-alpha
 plugins:
```
README.md, 146 lines changed:

`````diff
@@ -1,110 +1,7 @@
-[![Build Status](https://travis-ci.com/seaweedfs/seaweedfs-operator.svg?branch=master)](https://travis-ci.com/github/seaweedfs/seaweedfs-operator)
-
 # SeaweedFS Operator
 
-This [Kubernetes Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) is made to easily deploy SeaweedFS onto your Kubernetes cluster.
-
-The difference to [seaweedfs-csi-driver](https://github.com/seaweedfs/seaweedfs-csi-driver) is that the infrastructure (SeaweedFS) itself runs on Kubernetes as well (master, filer, volume servers) and can as such easily scale with it as you need. It is also far more resilient to failures than a simple systemd service when it comes to handling crashing services or accidental deletes.
-
-Using `make deploy` deploys a resource of type 'Seaweed' onto your current kubectl $KUBECONFIG target (the operator itself), which by default will do nothing unless you configure it (see examples in config/samples/).
-
-Goals:
-- [x] Automatically deploy and manage a SeaweedFS cluster.
-- [x] Ability to be managed by other Operators.
-- [ ] Compatibility with [seaweedfs-csi-driver](https://github.com/seaweedfs/seaweedfs-csi-driver)
-- [x] Auto rolling upgrade and restart.
-- [x] Ingress for volume server, filer and S3, to support HDFS, REST filer, S3 API and cross-cluster replication.
-- [ ] Support all major cloud Kubernetes: AWS, Google, Azure.
-- [ ] Scheduled backup to cloud storage: S3, Google Cloud Storage, Azure.
-- [ ] Put warm data to cloud storage tier: S3, Google Cloud Storage, Azure.
-- [ ] Grafana dashboard.
-
 ## Installation
 
-This operator uses `kustomize` to deploy. The installation process will install one for you if you do not have one.
-
-By default, the defaulting and validation webhooks are disabled. We strongly recommend that the webhooks be enabled.
-
-First clone the repository:
-
-```bash
-$ git clone https://github.com/seaweedfs/seaweedfs-operator --depth=1
-```
-
-To deploy the operator with webhooks enabled, make sure you have installed `cert-manager` (installation docs: https://cert-manager.io/docs/installation/) in your cluster, then follow the instructions in the `config/default/kustomization.yaml` file to uncomment the components you need.
-
-Lastly, change the value of `ENABLE_WEBHOOKS` to `"true"` in `config/manager/manager.yaml`.
-
-Afterwards fire up:
-```bash
-$ make install
-```
-
-Then run the command to deploy the operator into your cluster:
-
-```bash
-$ make deploy
-```
-
-Verify that it was correctly deployed with:
-```bash
-$ kubectl get pods --all-namespaces
-```
-
-Which may return:
-```bash
-NAMESPACE                   NAME                                                     READY   STATUS    RESTARTS   AGE
-kube-system                 coredns-f9fd979d6-68p4c                                  1/1     Running   0          34m
-kube-system                 coredns-f9fd979d6-x992t                                  1/1     Running   0          34m
-kube-system                 etcd-kind-control-plane                                  1/1     Running   0          34m
-kube-system                 kindnet-rp7wr                                            1/1     Running   0          34m
-kube-system                 kube-apiserver-kind-control-plane                        1/1     Running   0          34m
-kube-system                 kube-controller-manager-kind-control-plane               1/1     Running   0          34m
-kube-system                 kube-proxy-dqfg2                                         1/1     Running   0          34m
-kube-system                 kube-scheduler-kind-control-plane                        1/1     Running   0          34m
-local-path-storage          local-path-provisioner-78776bfc44-7zvxx                  1/1     Running   0          34m
-seaweedfs-operator-system   seaweedfs-operator-controller-manager-54cc768f4c-cwz2k   2/2     Running   0          34m
-```
-
-See the next section for example usage - **__at this point you have only deployed the operator itself!__**
-
-### You also need to deploy a configuration to get it running (see next section)!
-
-## Configuration Examples
-
-- Please send us your use cases / example configs ... this is currently empty (needs to be written)
-- For now see: https://github.com/seaweedfs/seaweedfs-operator/blob/master/config/samples/seaweed_v1_seaweed.yaml
-````
-apiVersion: seaweed.seaweedfs.com/v1
-kind: Seaweed
-metadata:
-  name: seaweed1
-  namespace: default
-spec:
-  # Add fields here
-  image: chrislusf/seaweedfs:2.96
-  volumeServerDiskCount: 1
-  hostSuffix: seaweed.abcdefg.com
-  master:
-    replicas: 3
-    volumeSizeLimitMB: 1024
-  volume:
-    replicas: 1
-    requests:
-      storage: 2Gi
-  filer:
-    replicas: 2
-    config: |
-      [leveldb2]
-      enabled = true
-      dir = "/data/filerldb2"
-````
-
-## Maintenance and Uninstallation
-- TBD
-
 ## Development
 
 Follow the instructions in https://sdk.operatorframework.io/docs/building-operators/golang/quickstart/
@@ -113,42 +10,6 @@ Follow the instructions in https://sdk.operatorframework.io/docs/building-operat
 $ git clone https://github.com/seaweedfs/seaweedfs-operator
 $ cd seaweedfs-operator
-
-# register the CRD with the Kubernetes
-$ make deploy
-
-# build the operator image
-$ make docker-build
-
-# load the image into Kind cluster
-$ kind load docker-image chrislusf/seaweedfs-operator:v0.0.1
-
-# From another terminal in the same directory
-$ kubectl apply -f config/samples/seaweed_v1_seaweed.yaml
-
-```
-
-### Update the operator
-```
-# delete the existing operator
-$ kubectl delete namespace seaweedfs-operator-system
-
-# rebuild the operator image
-$ make docker-build
-
-# load the image into Kind cluster
-$ kind load docker-image chrislusf/seaweedfs-operator:v0.0.1
-
-# register the CRD with the Kubernetes
-$ make deploy
-
-```
-
-### develop outside of k8s
-
-```
-$ git clone https://github.com/seaweedfs/seaweedfs-operator
-$ cd seaweedfs-operator
-
 # register the CRD with the Kubernetes
 $ make install
 
@@ -157,4 +18,11 @@ $ make run ENABLE_WEBHOOKS=false
 
 # From another terminal in the same directory
 $ kubectl apply -f config/samples/seaweed_v1_seaweed.yaml
 ```
+
+## Create API and Controller
+
+Here are the commands used to create the custom resource definition (CRD):
+```
+operator-sdk create api --group seaweed --version v1 --kind Master --resource=true --controller=true
+```
`````
Deleted Go source file (`package v1`, the ComponentAccessor helpers), 206 lines (`@@ -1,206 +0,0 @@`):

```go
package v1

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

// ComponentAccessor is the interface to access component details, which respects the cluster-level properties
// and component-level overrides
// +kubebuilder:object:root=false
// +kubebuilder:object:generate=false
type ComponentAccessor interface {
	ImagePullPolicy() corev1.PullPolicy
	ImagePullSecrets() []corev1.LocalObjectReference
	HostNetwork() bool
	Affinity() *corev1.Affinity
	PriorityClassName() *string
	NodeSelector() map[string]string
	Annotations() map[string]string
	Tolerations() []corev1.Toleration
	SchedulerName() string
	DNSPolicy() corev1.DNSPolicy
	BuildPodSpec() corev1.PodSpec
	Env() []corev1.EnvVar
	TerminationGracePeriodSeconds() *int64
	StatefulSetUpdateStrategy() appsv1.StatefulSetUpdateStrategyType
}

type componentAccessorImpl struct {
	imagePullPolicy           corev1.PullPolicy
	imagePullSecrets          []corev1.LocalObjectReference
	hostNetwork               *bool
	affinity                  *corev1.Affinity
	priorityClassName         *string
	schedulerName             string
	clusterNodeSelector       map[string]string
	clusterAnnotations        map[string]string
	tolerations               []corev1.Toleration
	statefulSetUpdateStrategy appsv1.StatefulSetUpdateStrategyType

	// ComponentSpec is the Component Spec
	ComponentSpec *ComponentSpec
}

func (a *componentAccessorImpl) StatefulSetUpdateStrategy() appsv1.StatefulSetUpdateStrategyType {
	strategy := a.ComponentSpec.StatefulSetUpdateStrategy
	if len(strategy) != 0 {
		return strategy
	}

	strategy = a.statefulSetUpdateStrategy
	if len(strategy) != 0 {
		return strategy
	}

	return appsv1.RollingUpdateStatefulSetStrategyType
}

func (a *componentAccessorImpl) ImagePullPolicy() corev1.PullPolicy {
	pp := a.ComponentSpec.ImagePullPolicy
	if pp == nil {
		return a.imagePullPolicy
	}
	return *pp
}

func (a *componentAccessorImpl) ImagePullSecrets() []corev1.LocalObjectReference {
	ips := a.ComponentSpec.ImagePullSecrets
	if ips == nil {
		return a.imagePullSecrets
	}
	return ips
}

func (a *componentAccessorImpl) HostNetwork() bool {
	hostNetwork := a.ComponentSpec.HostNetwork
	if hostNetwork == nil {
		hostNetwork = a.hostNetwork
	}
	if hostNetwork == nil {
		return false
	}
	return *hostNetwork
}

func (a *componentAccessorImpl) Affinity() *corev1.Affinity {
	affi := a.ComponentSpec.Affinity
	if affi == nil {
		affi = a.affinity
	}
	return affi
}

func (a *componentAccessorImpl) PriorityClassName() *string {
	pcn := a.ComponentSpec.PriorityClassName
	if pcn == nil {
		pcn = a.priorityClassName
	}
	return pcn
}

func (a *componentAccessorImpl) SchedulerName() string {
	pcn := a.ComponentSpec.SchedulerName
	if pcn == nil {
		pcn = &a.schedulerName
	}
	return *pcn
}

func (a *componentAccessorImpl) NodeSelector() map[string]string {
	sel := map[string]string{}
	for k, v := range a.clusterNodeSelector {
		sel[k] = v
	}
	for k, v := range a.ComponentSpec.NodeSelector {
		sel[k] = v
	}
	return sel
}

func (a *componentAccessorImpl) Annotations() map[string]string {
	anno := map[string]string{}
	for k, v := range a.clusterAnnotations {
		anno[k] = v
	}
	for k, v := range a.ComponentSpec.Annotations {
		anno[k] = v
	}
	return anno
}

func (a *componentAccessorImpl) Tolerations() []corev1.Toleration {
	tols := a.ComponentSpec.Tolerations
	if len(tols) == 0 {
		tols = a.tolerations
	}
	return tols
}

func (a *componentAccessorImpl) DNSPolicy() corev1.DNSPolicy {
	dnsPolicy := corev1.DNSClusterFirst // same as kubernetes default
	if a.HostNetwork() {
		dnsPolicy = corev1.DNSClusterFirstWithHostNet
	}
	return dnsPolicy
}

func (a *componentAccessorImpl) BuildPodSpec() corev1.PodSpec {
	spec := corev1.PodSpec{
		SchedulerName: a.SchedulerName(),
		Affinity:      a.Affinity(),
		NodeSelector:  a.NodeSelector(),
		HostNetwork:   a.HostNetwork(),
		RestartPolicy: corev1.RestartPolicyAlways,
		Tolerations:   a.Tolerations(),
	}
	if a.PriorityClassName() != nil {
		spec.PriorityClassName = *a.PriorityClassName()
	}
	if a.ImagePullSecrets() != nil {
		spec.ImagePullSecrets = a.ImagePullSecrets()
	}
	if a.TerminationGracePeriodSeconds() != nil {
		spec.TerminationGracePeriodSeconds = a.TerminationGracePeriodSeconds()
	}
	return spec
}

func (a *componentAccessorImpl) Env() []corev1.EnvVar {
	return a.ComponentSpec.Env
}

func (a *componentAccessorImpl) TerminationGracePeriodSeconds() *int64 {
	return a.ComponentSpec.TerminationGracePeriodSeconds
}

func buildSeaweedComponentAccessor(spec *SeaweedSpec, componentSpec *ComponentSpec) ComponentAccessor {
	return &componentAccessorImpl{
		imagePullPolicy:           spec.ImagePullPolicy,
		imagePullSecrets:          spec.ImagePullSecrets,
		hostNetwork:               spec.HostNetwork,
		affinity:                  spec.Affinity,
		schedulerName:             spec.SchedulerName,
		clusterNodeSelector:       spec.NodeSelector,
		clusterAnnotations:        spec.Annotations,
		tolerations:               spec.Tolerations,
		statefulSetUpdateStrategy: spec.StatefulSetUpdateStrategy,

		ComponentSpec: componentSpec,
	}
}

// BaseMasterSpec provides merged spec of masters
func (s *Seaweed) BaseMasterSpec() ComponentAccessor {
	return buildSeaweedComponentAccessor(&s.Spec, &s.Spec.Master.ComponentSpec)
}

// BaseFilerSpec provides merged spec of filers
func (s *Seaweed) BaseFilerSpec() ComponentAccessor {
	return buildSeaweedComponentAccessor(&s.Spec, &s.Spec.Filer.ComponentSpec)
}

// BaseVolumeSpec provides merged spec of volumes
func (s *Seaweed) BaseVolumeSpec() ComponentAccessor {
	return buildSeaweedComponentAccessor(&s.Spec, &s.Spec.Volume.ComponentSpec)
}
```
New Go source file (`package v1`, the Master CRD types), 65 lines added (`@@ -0,0 +1,65 @@`):

```go
/*

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// MasterSpec defines the desired state of Master
type MasterSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// +kubebuilder:validation:Minimum=0
	// Size is the size of the master deployment
	Size int32 `json:"size"`
}

// MasterStatus defines the observed state of Master
type MasterStatus struct {
	// Nodes are the names of the master pods
	Nodes []string `json:"nodes"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// Master is the Schema for the masters API
type Master struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   MasterSpec   `json:"spec,omitempty"`
	Status MasterStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// MasterList contains a list of Master
type MasterList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Master `json:"items"`
}

func init() {
	SchemeBuilder.Register(&Master{}, &MasterList{})
}
```
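Given the `MasterSpec` above (a single required `size` field) and the `seaweed`/`v1` group-version from the PROJECT file, a custom resource for this new kind would look roughly like the sketch below; the metadata name and the size value are illustrative, not taken from the repository:

```yaml
apiVersion: seaweed.seaweedfs.com/v1
kind: Master
metadata:
  name: master-sample   # hypothetical name, for illustration only
  namespace: default
spec:
  size: 3               # MasterSpec.Size: desired number of master pods
```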
Deleted Go source file (`package v1`, the Seaweed CRD types), 269 lines (`@@ -1,269 +0,0 @@`):

```go
/*

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// Constants
const (
	GRPCPortDelta = 10000

	MasterHTTPPort = 9333
	VolumeHTTPPort = 8444
	FilerHTTPPort  = 8888
	FilerS3Port    = 8333

	MasterGRPCPort = MasterHTTPPort + GRPCPortDelta
	VolumeGRPCPort = VolumeHTTPPort + GRPCPortDelta
	FilerGRPCPort  = FilerHTTPPort + GRPCPortDelta
)

// SeaweedSpec defines the desired state of Seaweed
type SeaweedSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// MetricsAddress is Prometheus gateway address
	MetricsAddress string `json:"metricsAddress,omitempty"`

	// Image
	Image string `json:"image,omitempty"`

	// Version
	Version string `json:"version,omitempty"`

	// Master
	Master *MasterSpec `json:"master,omitempty"`

	// Volume
	Volume *VolumeSpec `json:"volume,omitempty"`

	// Filer
	Filer *FilerSpec `json:"filer,omitempty"`

	// SchedulerName of pods
	SchedulerName string `json:"schedulerName,omitempty"`

	// Persistent volume reclaim policy
	PVReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"pvReclaimPolicy,omitempty"`

	// ImagePullPolicy of pods
	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`

	// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images.
	ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`

	// Whether enable PVC reclaim for orphan PVC left by statefulset scale-in
	EnablePVReclaim *bool `json:"enablePVReclaim,omitempty"`

	// Whether Hostnetwork is enabled for pods
	HostNetwork *bool `json:"hostNetwork,omitempty"`

	// Affinity of pods
	Affinity *corev1.Affinity `json:"affinity,omitempty"`

	// Base node selectors of Pods, components may add or override selectors upon this respectively
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`

	// Base annotations of Pods, components may add or override selectors upon this respectively
	Annotations map[string]string `json:"annotations,omitempty"`

	// Base tolerations of Pods, components may add more tolerations upon this respectively
	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`

	// StatefulSetUpdateStrategy indicates the StatefulSetUpdateStrategy that will be
	// employed to update Pods in the StatefulSet when a revision is made to
	// Template.
	StatefulSetUpdateStrategy appsv1.StatefulSetUpdateStrategyType `json:"statefulSetUpdateStrategy,omitempty"`

	VolumeServerDiskCount int32 `json:"volumeServerDiskCount,omitempty"`

	// Ingresses
	HostSuffix *string `json:"hostSuffix,omitempty"`
}

// SeaweedStatus defines the observed state of Seaweed
type SeaweedStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file
}

// MasterSpec is the spec for masters
type MasterSpec struct {
	ComponentSpec               `json:",inline"`
	corev1.ResourceRequirements `json:",inline"`

	// The desired ready replicas
	// +kubebuilder:validation:Minimum=1
	Replicas int32        `json:"replicas"`
	Service  *ServiceSpec `json:"service,omitempty"`

	// Config in raw toml string
	Config *string `json:"config,omitempty"`

	// Master-specific settings

	VolumePreallocate  *bool   `json:"volumePreallocate,omitempty"`
	VolumeSizeLimitMB  *int32  `json:"volumeSizeLimitMB,omitempty"`
	GarbageThreshold   *string `json:"garbageThreshold,omitempty"`
	PulseSeconds       *int32  `json:"pulseSeconds,omitempty"`
	DefaultReplication *string `json:"defaultReplication,omitempty"`
	// only for testing
	ConcurrentStart *bool `json:"concurrentStart,omitempty"`
}

// VolumeSpec is the spec for volume servers
type VolumeSpec struct {
	ComponentSpec               `json:",inline"`
	corev1.ResourceRequirements `json:",inline"`

	// The desired ready replicas
	// +kubebuilder:validation:Minimum=1
	Replicas int32        `json:"replicas"`
	Service  *ServiceSpec `json:"service,omitempty"`

	StorageClassName *string `json:"storageClassName,omitempty"`

	// Volume-specific settings

	CompactionMBps      *int32 `json:"compactionMBps,omitempty"`
	FileSizeLimitMB     *int32 `json:"fileSizeLimitMB,omitempty"`
	FixJpgOrientation   *bool  `json:"fixJpgOrientation,omitempty"`
	IdleTimeout         *int32 `json:"idleTimeout,omitempty"`
	MaxVolumeCounts     *int32 `json:"maxVolumeCounts,omitempty"`
	MinFreeSpacePercent *int32 `json:"minFreeSpacePercent,omitempty"`
}

// FilerSpec is the spec for filers
type FilerSpec struct {
	ComponentSpec               `json:",inline"`
	corev1.ResourceRequirements `json:",inline"`

	// The desired ready replicas
	// +kubebuilder:validation:Minimum=1
	Replicas int32        `json:"replicas"`
	Service  *ServiceSpec `json:"service,omitempty"`

	// Config in raw toml string
	Config *string `json:"config,omitempty"`

	// Filer-specific settings

	MaxMB *int32 `json:"maxMB,omitempty"`
}

// ComponentSpec is the base spec of each component, the fields should always accessed by the Basic<Component>Spec() method to respect the cluster-level properties
type ComponentSpec struct {
	// Version of the component. Override the cluster-level version if non-empty
	Version *string `json:"version,omitempty"`

	// ImagePullPolicy of the component. Override the cluster-level imagePullPolicy if present
	ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"`

	// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images.
	ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`

	// Whether Hostnetwork of the component is enabled. Override the cluster-level setting if present
	HostNetwork *bool `json:"hostNetwork,omitempty"`

	// Affinity of the component. Override the cluster-level one if present
	Affinity *corev1.Affinity `json:"affinity,omitempty"`

	// PriorityClassName of the component. Override the cluster-level one if present
	PriorityClassName *string `json:"priorityClassName,omitempty"`

	// SchedulerName of the component. Override the cluster-level one if present
	SchedulerName *string `json:"schedulerName,omitempty"`

	// NodeSelector of the component. Merged into the cluster-level nodeSelector if non-empty
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`

	// Annotations of the component. Merged into the cluster-level annotations if non-empty
	Annotations map[string]string `json:"annotations,omitempty"`

	// Tolerations of the component. Override the cluster-level tolerations if non-empty
	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`

	// List of environment variables to set in the container, like
	// v1.Container.Env.
	// Note that following env names cannot be used and may be overrided by operators
	// - NAMESPACE
	// - POD_IP
	// - POD_NAME
	Env []corev1.EnvVar `json:"env,omitempty"`

	// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
	// Value must be non-negative integer. The value zero indicates delete immediately.
	// If this value is nil, the default grace period will be used instead.
	// The grace period is the duration in seconds after the processes running in the pod are sent
	// a termination signal and the time when the processes are forcibly halted with a kill signal.
	// Set this value longer than the expected cleanup time for your process.
	// Defaults to 30 seconds.
	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`

	// StatefulSetUpdateStrategy indicates the StatefulSetUpdateStrategy that will be
	// employed to update Pods in the StatefulSet when a revision is made to
	// Template.
	StatefulSetUpdateStrategy appsv1.StatefulSetUpdateStrategyType `json:"statefulSetUpdateStrategy,omitempty"`
}

// ServiceSpec is a subset of the original k8s spec
type ServiceSpec struct {
	// Type of the real kubernetes service
	Type corev1.ServiceType `json:"type,omitempty"`

	// Additional annotations of the kubernetes service object
	Annotations map[string]string `json:"annotations,omitempty"`

	// LoadBalancerIP is the loadBalancerIP of service
	LoadBalancerIP *string `json:"loadBalancerIP,omitempty"`

	// ClusterIP is the clusterIP of service
	ClusterIP *string `json:"clusterIP,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// Seaweed is the Schema for the seaweeds API
type Seaweed struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   SeaweedSpec   `json:"spec,omitempty"`
	Status SeaweedStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// SeaweedList contains a list of Seaweed
type SeaweedList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Seaweed `json:"items"`
}

func init() {
	SchemeBuilder.Register(&Seaweed{}, &SeaweedList{})
}
```
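A quick reading of the constants in the deleted file above: each gRPC port is derived by adding GRPCPortDelta (10000) to the corresponding HTTP port, so MasterGRPCPort = 9333 + 10000 = 19333, VolumeGRPCPort = 8444 + 10000 = 18444, and FilerGRPCPort = 8888 + 10000 = 18888.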
@ -1,93 +0,0 @@
/*

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	"errors"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/runtime"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	ctrl "sigs.k8s.io/controller-runtime"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

// log is for logging in this package.
var seaweedlog = logf.Log.WithName("seaweed-resource")

func (r *Seaweed) SetupWebhookWithManager(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(r).
		Complete()
}

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!

// +kubebuilder:webhook:path=/mutate-seaweed-seaweedfs-com-v1-seaweed,mutating=true,failurePolicy=fail,groups=seaweed.seaweedfs.com,resources=seaweeds,verbs=create;update,versions=v1,name=mseaweed.kb.io

var _ webhook.Defaulter = &Seaweed{}

// Default implements webhook.Defaulter so a webhook will be registered for the type
func (r *Seaweed) Default() {
	seaweedlog.Info("default", "name", r.Name)

	// TODO(user): fill in your defaulting logic.
}

// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
// +kubebuilder:webhook:verbs=create;update,path=/validate-seaweed-seaweedfs-com-v1-seaweed,mutating=false,failurePolicy=fail,groups=seaweed.seaweedfs.com,resources=seaweeds,versions=v1,name=vseaweed.kb.io

var _ webhook.Validator = &Seaweed{}

// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (r *Seaweed) ValidateCreate() error {
	seaweedlog.Info("validate create", "name", r.Name)
	errs := []error{}

	// TODO(user): fill in your validation logic upon object creation.
	if r.Spec.Master == nil {
		errs = append(errs, errors.New("missing master spec"))
	}

	if r.Spec.Volume == nil {
		errs = append(errs, errors.New("missing volume spec"))
	} else {
		if r.Spec.Volume.Requests[corev1.ResourceStorage].Equal(resource.MustParse("0")) {
			errs = append(errs, errors.New("volume storage request cannot be zero"))
		}
	}

	return utilerrors.NewAggregate(errs)
}

// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
func (r *Seaweed) ValidateUpdate(old runtime.Object) error {
	seaweedlog.Info("validate update", "name", r.Name)

	// TODO(user): fill in your validation logic upon object update.
	return nil
}

// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (r *Seaweed) ValidateDelete() error {
	seaweedlog.Info("validate delete", "name", r.Name)

	// TODO(user): fill in your validation logic upon object deletion.
	return nil
}
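ValidateCreate above only rejects a Seaweed whose master or volume spec is missing, or whose volume storage request is zero. Below is a minimal test sketch of that behaviour; it is not part of the diff, and it assumes the test sits in the same api/v1 package and that VolumeSpec embeds corev1.ResourceRequirements (which the direct access to Spec.Volume.Requests implies).

package v1

import (
	"testing"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestSeaweedValidateCreate(t *testing.T) {
	// No master and no volume spec: ValidateCreate should aggregate both errors.
	sw := &Seaweed{ObjectMeta: metav1.ObjectMeta{Name: "seaweed-invalid"}}
	if err := sw.ValidateCreate(); err == nil {
		t.Fatal("expected an error for missing master and volume specs")
	}

	// A master spec plus a non-zero volume storage request should pass.
	sw.Spec.Master = &MasterSpec{}
	sw.Spec.Volume = &VolumeSpec{
		ResourceRequirements: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceStorage: resource.MustParse("2Gi"),
			},
		},
	}
	if err := sw.ValidateCreate(); err != nil {
		t.Fatalf("unexpected validation error: %v", err)
	}
}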
@ -1,4 +1,3 @@
|
||||||
//go:build !ignore_autogenerated
|
|
||||||
// +build !ignore_autogenerated
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -22,170 +21,71 @@ limitations under the License.
|
||||||
package v1
|
package v1
|
||||||
|
|
||||||
import (
|
import (
|
||||||
corev1 "k8s.io/api/core/v1"
|
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *ComponentSpec) DeepCopyInto(out *ComponentSpec) {
|
func (in *Master) DeepCopyInto(out *Master) {
|
||||||
*out = *in
|
*out = *in
|
||||||
if in.Version != nil {
|
out.TypeMeta = in.TypeMeta
|
||||||
in, out := &in.Version, &out.Version
|
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||||
*out = new(string)
|
out.Spec = in.Spec
|
||||||
**out = **in
|
in.Status.DeepCopyInto(&out.Status)
|
||||||
}
|
|
||||||
if in.ImagePullPolicy != nil {
|
|
||||||
in, out := &in.ImagePullPolicy, &out.ImagePullPolicy
|
|
||||||
*out = new(corev1.PullPolicy)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.ImagePullSecrets != nil {
|
|
||||||
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
|
|
||||||
*out = make([]corev1.LocalObjectReference, len(*in))
|
|
||||||
copy(*out, *in)
|
|
||||||
}
|
|
||||||
if in.HostNetwork != nil {
|
|
||||||
in, out := &in.HostNetwork, &out.HostNetwork
|
|
||||||
*out = new(bool)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.Affinity != nil {
|
|
||||||
in, out := &in.Affinity, &out.Affinity
|
|
||||||
*out = new(corev1.Affinity)
|
|
||||||
(*in).DeepCopyInto(*out)
|
|
||||||
}
|
|
||||||
if in.PriorityClassName != nil {
|
|
||||||
in, out := &in.PriorityClassName, &out.PriorityClassName
|
|
||||||
*out = new(string)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.SchedulerName != nil {
|
|
||||||
in, out := &in.SchedulerName, &out.SchedulerName
|
|
||||||
*out = new(string)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.NodeSelector != nil {
|
|
||||||
in, out := &in.NodeSelector, &out.NodeSelector
|
|
||||||
*out = make(map[string]string, len(*in))
|
|
||||||
for key, val := range *in {
|
|
||||||
(*out)[key] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.Annotations != nil {
|
|
||||||
in, out := &in.Annotations, &out.Annotations
|
|
||||||
*out = make(map[string]string, len(*in))
|
|
||||||
for key, val := range *in {
|
|
||||||
(*out)[key] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.Tolerations != nil {
|
|
||||||
in, out := &in.Tolerations, &out.Tolerations
|
|
||||||
*out = make([]corev1.Toleration, len(*in))
|
|
||||||
for i := range *in {
|
|
||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.Env != nil {
|
|
||||||
in, out := &in.Env, &out.Env
|
|
||||||
*out = make([]corev1.EnvVar, len(*in))
|
|
||||||
for i := range *in {
|
|
||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.TerminationGracePeriodSeconds != nil {
|
|
||||||
in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
|
|
||||||
*out = new(int64)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentSpec.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Master.
|
||||||
func (in *ComponentSpec) DeepCopy() *ComponentSpec {
|
func (in *Master) DeepCopy() *Master {
|
||||||
if in == nil {
|
if in == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
out := new(ComponentSpec)
|
out := new(Master)
|
||||||
in.DeepCopyInto(out)
|
in.DeepCopyInto(out)
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *Master) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *FilerSpec) DeepCopyInto(out *FilerSpec) {
|
func (in *MasterList) DeepCopyInto(out *MasterList) {
|
||||||
*out = *in
|
*out = *in
|
||||||
in.ComponentSpec.DeepCopyInto(&out.ComponentSpec)
|
out.TypeMeta = in.TypeMeta
|
||||||
in.ResourceRequirements.DeepCopyInto(&out.ResourceRequirements)
|
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||||
if in.Service != nil {
|
if in.Items != nil {
|
||||||
in, out := &in.Service, &out.Service
|
in, out := &in.Items, &out.Items
|
||||||
*out = new(ServiceSpec)
|
*out = make([]Master, len(*in))
|
||||||
(*in).DeepCopyInto(*out)
|
for i := range *in {
|
||||||
}
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
if in.Config != nil {
|
}
|
||||||
in, out := &in.Config, &out.Config
|
|
||||||
*out = new(string)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.MaxMB != nil {
|
|
||||||
in, out := &in.MaxMB, &out.MaxMB
|
|
||||||
*out = new(int32)
|
|
||||||
**out = **in
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilerSpec.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterList.
|
||||||
func (in *FilerSpec) DeepCopy() *FilerSpec {
|
func (in *MasterList) DeepCopy() *MasterList {
|
||||||
if in == nil {
|
if in == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
out := new(FilerSpec)
|
out := new(MasterList)
|
||||||
in.DeepCopyInto(out)
|
in.DeepCopyInto(out)
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *MasterList) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *MasterSpec) DeepCopyInto(out *MasterSpec) {
|
func (in *MasterSpec) DeepCopyInto(out *MasterSpec) {
|
||||||
*out = *in
|
*out = *in
|
||||||
in.ComponentSpec.DeepCopyInto(&out.ComponentSpec)
|
|
||||||
in.ResourceRequirements.DeepCopyInto(&out.ResourceRequirements)
|
|
||||||
if in.Service != nil {
|
|
||||||
in, out := &in.Service, &out.Service
|
|
||||||
*out = new(ServiceSpec)
|
|
||||||
(*in).DeepCopyInto(*out)
|
|
||||||
}
|
|
||||||
if in.Config != nil {
|
|
||||||
in, out := &in.Config, &out.Config
|
|
||||||
*out = new(string)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.VolumePreallocate != nil {
|
|
||||||
in, out := &in.VolumePreallocate, &out.VolumePreallocate
|
|
||||||
*out = new(bool)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.VolumeSizeLimitMB != nil {
|
|
||||||
in, out := &in.VolumeSizeLimitMB, &out.VolumeSizeLimitMB
|
|
||||||
*out = new(int32)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.GarbageThreshold != nil {
|
|
||||||
in, out := &in.GarbageThreshold, &out.GarbageThreshold
|
|
||||||
*out = new(string)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.PulseSeconds != nil {
|
|
||||||
in, out := &in.PulseSeconds, &out.PulseSeconds
|
|
||||||
*out = new(int32)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.DefaultReplication != nil {
|
|
||||||
in, out := &in.DefaultReplication, &out.DefaultReplication
|
|
||||||
*out = new(string)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.ConcurrentStart != nil {
|
|
||||||
in, out := &in.ConcurrentStart, &out.ConcurrentStart
|
|
||||||
*out = new(bool)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterSpec.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterSpec.
|
||||||
|
@ -199,245 +99,21 @@ func (in *MasterSpec) DeepCopy() *MasterSpec {
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *Seaweed) DeepCopyInto(out *Seaweed) {
|
func (in *MasterStatus) DeepCopyInto(out *MasterStatus) {
|
||||||
*out = *in
|
*out = *in
|
||||||
out.TypeMeta = in.TypeMeta
|
if in.Nodes != nil {
|
||||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
in, out := &in.Nodes, &out.Nodes
|
||||||
in.Spec.DeepCopyInto(&out.Spec)
|
*out = make([]string, len(*in))
|
||||||
out.Status = in.Status
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Seaweed.
|
|
||||||
func (in *Seaweed) DeepCopy() *Seaweed {
|
|
||||||
if in == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := new(Seaweed)
|
|
||||||
in.DeepCopyInto(out)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
|
||||||
func (in *Seaweed) DeepCopyObject() runtime.Object {
|
|
||||||
if c := in.DeepCopy(); c != nil {
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
|
||||||
func (in *SeaweedList) DeepCopyInto(out *SeaweedList) {
|
|
||||||
*out = *in
|
|
||||||
out.TypeMeta = in.TypeMeta
|
|
||||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
|
||||||
if in.Items != nil {
|
|
||||||
in, out := &in.Items, &out.Items
|
|
||||||
*out = make([]Seaweed, len(*in))
|
|
||||||
for i := range *in {
|
|
||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeaweedList.
|
|
||||||
func (in *SeaweedList) DeepCopy() *SeaweedList {
|
|
||||||
if in == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := new(SeaweedList)
|
|
||||||
in.DeepCopyInto(out)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
|
||||||
func (in *SeaweedList) DeepCopyObject() runtime.Object {
|
|
||||||
if c := in.DeepCopy(); c != nil {
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
|
||||||
func (in *SeaweedSpec) DeepCopyInto(out *SeaweedSpec) {
|
|
||||||
*out = *in
|
|
||||||
if in.Master != nil {
|
|
||||||
in, out := &in.Master, &out.Master
|
|
||||||
*out = new(MasterSpec)
|
|
||||||
(*in).DeepCopyInto(*out)
|
|
||||||
}
|
|
||||||
if in.Volume != nil {
|
|
||||||
in, out := &in.Volume, &out.Volume
|
|
||||||
*out = new(VolumeSpec)
|
|
||||||
(*in).DeepCopyInto(*out)
|
|
||||||
}
|
|
||||||
if in.Filer != nil {
|
|
||||||
in, out := &in.Filer, &out.Filer
|
|
||||||
*out = new(FilerSpec)
|
|
||||||
(*in).DeepCopyInto(*out)
|
|
||||||
}
|
|
||||||
if in.PVReclaimPolicy != nil {
|
|
||||||
in, out := &in.PVReclaimPolicy, &out.PVReclaimPolicy
|
|
||||||
*out = new(corev1.PersistentVolumeReclaimPolicy)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.ImagePullSecrets != nil {
|
|
||||||
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
|
|
||||||
*out = make([]corev1.LocalObjectReference, len(*in))
|
|
||||||
copy(*out, *in)
|
copy(*out, *in)
|
||||||
}
|
}
|
||||||
if in.EnablePVReclaim != nil {
|
|
||||||
in, out := &in.EnablePVReclaim, &out.EnablePVReclaim
|
|
||||||
*out = new(bool)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.HostNetwork != nil {
|
|
||||||
in, out := &in.HostNetwork, &out.HostNetwork
|
|
||||||
*out = new(bool)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.Affinity != nil {
|
|
||||||
in, out := &in.Affinity, &out.Affinity
|
|
||||||
*out = new(corev1.Affinity)
|
|
||||||
(*in).DeepCopyInto(*out)
|
|
||||||
}
|
|
||||||
if in.NodeSelector != nil {
|
|
||||||
in, out := &in.NodeSelector, &out.NodeSelector
|
|
||||||
*out = make(map[string]string, len(*in))
|
|
||||||
for key, val := range *in {
|
|
||||||
(*out)[key] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.Annotations != nil {
|
|
||||||
in, out := &in.Annotations, &out.Annotations
|
|
||||||
*out = make(map[string]string, len(*in))
|
|
||||||
for key, val := range *in {
|
|
||||||
(*out)[key] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.Tolerations != nil {
|
|
||||||
in, out := &in.Tolerations, &out.Tolerations
|
|
||||||
*out = make([]corev1.Toleration, len(*in))
|
|
||||||
for i := range *in {
|
|
||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.HostSuffix != nil {
|
|
||||||
in, out := &in.HostSuffix, &out.HostSuffix
|
|
||||||
*out = new(string)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeaweedSpec.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterStatus.
|
||||||
func (in *SeaweedSpec) DeepCopy() *SeaweedSpec {
|
func (in *MasterStatus) DeepCopy() *MasterStatus {
|
||||||
if in == nil {
|
if in == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
out := new(SeaweedSpec)
|
out := new(MasterStatus)
|
||||||
in.DeepCopyInto(out)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
|
||||||
func (in *SeaweedStatus) DeepCopyInto(out *SeaweedStatus) {
|
|
||||||
*out = *in
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeaweedStatus.
|
|
||||||
func (in *SeaweedStatus) DeepCopy() *SeaweedStatus {
|
|
||||||
if in == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := new(SeaweedStatus)
|
|
||||||
in.DeepCopyInto(out)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
|
||||||
func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
|
|
||||||
*out = *in
|
|
||||||
if in.Annotations != nil {
|
|
||||||
in, out := &in.Annotations, &out.Annotations
|
|
||||||
*out = make(map[string]string, len(*in))
|
|
||||||
for key, val := range *in {
|
|
||||||
(*out)[key] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.LoadBalancerIP != nil {
|
|
||||||
in, out := &in.LoadBalancerIP, &out.LoadBalancerIP
|
|
||||||
*out = new(string)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.ClusterIP != nil {
|
|
||||||
in, out := &in.ClusterIP, &out.ClusterIP
|
|
||||||
*out = new(string)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec.
|
|
||||||
func (in *ServiceSpec) DeepCopy() *ServiceSpec {
|
|
||||||
if in == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := new(ServiceSpec)
|
|
||||||
in.DeepCopyInto(out)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
|
||||||
func (in *VolumeSpec) DeepCopyInto(out *VolumeSpec) {
|
|
||||||
*out = *in
|
|
||||||
in.ComponentSpec.DeepCopyInto(&out.ComponentSpec)
|
|
||||||
in.ResourceRequirements.DeepCopyInto(&out.ResourceRequirements)
|
|
||||||
if in.Service != nil {
|
|
||||||
in, out := &in.Service, &out.Service
|
|
||||||
*out = new(ServiceSpec)
|
|
||||||
(*in).DeepCopyInto(*out)
|
|
||||||
}
|
|
||||||
if in.StorageClassName != nil {
|
|
||||||
in, out := &in.StorageClassName, &out.StorageClassName
|
|
||||||
*out = new(string)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.CompactionMBps != nil {
|
|
||||||
in, out := &in.CompactionMBps, &out.CompactionMBps
|
|
||||||
*out = new(int32)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.FileSizeLimitMB != nil {
|
|
||||||
in, out := &in.FileSizeLimitMB, &out.FileSizeLimitMB
|
|
||||||
*out = new(int32)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.FixJpgOrientation != nil {
|
|
||||||
in, out := &in.FixJpgOrientation, &out.FixJpgOrientation
|
|
||||||
*out = new(bool)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.IdleTimeout != nil {
|
|
||||||
in, out := &in.IdleTimeout, &out.IdleTimeout
|
|
||||||
*out = new(int32)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.MaxVolumeCounts != nil {
|
|
||||||
in, out := &in.MaxVolumeCounts, &out.MaxVolumeCounts
|
|
||||||
*out = new(int32)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.MinFreeSpacePercent != nil {
|
|
||||||
in, out := &in.MinFreeSpacePercent, &out.MinFreeSpacePercent
|
|
||||||
*out = new(int32)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSpec.
|
|
||||||
func (in *VolumeSpec) DeepCopy() *VolumeSpec {
|
|
||||||
if in == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := new(VolumeSpec)
|
|
||||||
in.DeepCopyInto(out)
|
in.DeepCopyInto(out)
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,7 +1,8 @@
 # The following manifests contain a self-signed issuer CR and a certificate CR.
 # More document can be found at https://docs.cert-manager.io
-# WARNING: Targets CertManager 1.7
-apiVersion: cert-manager.io/v1
+# WARNING: Targets CertManager 0.11 check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for
+# breaking changes
+apiVersion: cert-manager.io/v1alpha2
 kind: Issuer
 metadata:
   name: selfsigned-issuer
@ -9,7 +10,7 @@ metadata:
 spec:
   selfSigned: {}
 ---
-apiVersion: cert-manager.io/v1
+apiVersion: cert-manager.io/v1alpha2
 kind: Certificate
 metadata:
   name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml
@ -0,0 +1,69 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.3.0
  creationTimestamp: null
  name: masters.seaweed.seaweedfs.com
spec:
  group: seaweed.seaweedfs.com
  names:
    kind: Master
    listKind: MasterList
    plural: masters
    singular: master
  scope: Namespaced
  subresources:
    status: {}
  validation:
    openAPIV3Schema:
      description: Master is the Schema for the masters API
      properties:
        apiVersion:
          description: 'APIVersion defines the versioned schema of this representation
            of an object. Servers should convert recognized schemas to the latest
            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
          type: string
        kind:
          description: 'Kind is a string value representing the REST resource this
            object represents. Servers may infer this from the endpoint the client
            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
          type: string
        metadata:
          type: object
        spec:
          description: MasterSpec defines the desired state of Master
          properties:
            size:
              description: Size is the size of the master deployment
              format: int32
              minimum: 0
              type: integer
          required:
          - size
          type: object
        status:
          description: MasterStatus defines the observed state of Master
          properties:
            nodes:
              description: Nodes are the names of the master pods
              items:
                type: string
              type: array
          required:
          - nodes
          type: object
      type: object
  version: v1
  versions:
  - name: v1
    served: true
    storage: true
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: []
  storedVersions: []
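The Go type behind this CRD (Master, whose MasterSpec carries the required size field) can be created like any other custom resource through a controller-runtime client. The sketch below is not from the repository: it assumes the usual generated AddToScheme helper in api/v1 and a MasterSpec field named Size of type int32 matching the schema above.

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

func main() {
	scheme := runtime.NewScheme()
	_ = seaweedv1.AddToScheme(scheme) // assumed generated scaffold helper

	cfg := ctrl.GetConfigOrDie()
	c, err := client.New(cfg, client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	// spec.size is the only required field in the CRD schema above.
	master := &seaweedv1.Master{
		ObjectMeta: metav1.ObjectMeta{Name: "master-sample", Namespace: "default"},
		Spec:       seaweedv1.MasterSpec{Size: 3}, // field name assumed from the schema
	}
	if err := c.Create(context.Background(), master); err != nil {
		panic(err)
	}
}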
File diff suppressed because it is too large
@ -2,12 +2,19 @@
 # since it depends on service name and namespace that are out of this kustomize package.
 # It should be run by config/default
 resources:
-- bases/seaweed.seaweedfs.com_seaweeds.yaml
+- bases/seaweed.seaweedfs.com_masters.yaml
 # +kubebuilder:scaffold:crdkustomizeresource

 patchesStrategicMerge:
-- patches/webhook_in_seaweeds.yaml
-- patches/cainjection_in_seaweeds.yaml
+# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
+# patches here are for enabling the conversion webhook for each CRD
+#- patches/webhook_in_masters.yaml
+# +kubebuilder:scaffold:crdkustomizewebhookpatch
+
+# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix.
+# patches here are for enabling the CA injection for each CRD
+#- patches/cainjection_in_masters.yaml
+# +kubebuilder:scaffold:crdkustomizecainjectionpatch
+
 # the following config is for teaching kustomize how to do kustomization for CRDs.
 configurations:
@ -5,4 +5,4 @@ kind: CustomResourceDefinition
 metadata:
   annotations:
     cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
-  name: seaweeds.seaweed.seaweedfs.com
+  name: masters.seaweed.seaweedfs.com
@ -3,7 +3,7 @@
 apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
-  name: seaweeds.seaweed.seaweedfs.com
+  name: masters.seaweed.seaweedfs.com
 spec:
   conversion:
     strategy: Webhook
@ -16,42 +16,55 @@ bases:
 - ../crd
 - ../rbac
 - ../manager
-- ../webhook
-- ../certmanager
-- ../prometheus
+# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
+# crd/kustomization.yaml
+#- ../webhook
+# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
+#- ../certmanager
+# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
+#- ../prometheus

 patchesStrategicMerge:
 # Protect the /metrics endpoint by putting it behind auth.
 # If you want your controller-manager to expose the /metrics
 # endpoint w/o any authn/z, please comment the following line.
 - manager_auth_proxy_patch.yaml
-- manager_webhook_patch.yaml
-- webhookcainjection_patch.yaml
+
+# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
+# crd/kustomization.yaml
+#- manager_webhook_patch.yaml
+
+# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
+# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
+# 'CERTMANAGER' needs to be enabled to use ca injection
+#- webhookcainjection_patch.yaml

+# the following config is for teaching kustomize how to do var substitution
 vars:
-- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
-  objref:
-    kind: Certificate
-    group: cert-manager.io
-    version: v1alpha2
-    name: serving-cert # this name should match the one in certificate.yaml
-  fieldref:
-    fieldpath: metadata.namespace
-- name: CERTIFICATE_NAME
-  objref:
-    kind: Certificate
-    group: cert-manager.io
-    version: v1alpha2
-    name: serving-cert # this name should match the one in certificate.yaml
-- name: SERVICE_NAMESPACE # namespace of the service
-  objref:
-    kind: Service
-    version: v1
-    name: webhook-service
-  fieldref:
-    fieldpath: metadata.namespace
-- name: SERVICE_NAME
-  objref:
-    kind: Service
-    version: v1
-    name: webhook-service
+# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
+#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
+#  objref:
+#    kind: Certificate
+#    group: cert-manager.io
+#    version: v1alpha2
+#    name: serving-cert # this name should match the one in certificate.yaml
+#  fieldref:
+#    fieldpath: metadata.namespace
+#- name: CERTIFICATE_NAME
+#  objref:
+#    kind: Certificate
+#    group: cert-manager.io
+#    version: v1alpha2
+#    name: serving-cert # this name should match the one in certificate.yaml
+#- name: SERVICE_NAMESPACE # namespace of the service
+#  objref:
+#    kind: Service
+#    version: v1
+#    name: webhook-service
+#  fieldref:
+#    fieldpath: metadata.namespace
+#- name: SERVICE_NAME
+#  objref:
+#    kind: Service
+#    version: v1
+#    name: webhook-service
@ -1,8 +1,2 @@
 resources:
 - manager.yaml
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-images:
-- name: controller
-  newName: chrislusf/seaweedfs-operator
-  newTag: v0.0.1
@ -28,15 +28,12 @@ spec:
         args:
         - --enable-leader-election
         image: controller:latest
-        env:
-          - name: ENABLE_WEBHOOKS
-            value: "true"
         name: manager
         resources:
           limits:
             cpu: 100m
-            memory: 100Mi
+            memory: 30Mi
           requests:
             cpu: 100m
-            memory: 50Mi
+            memory: 20Mi
       terminationGracePeriodSeconds: 10
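The env block removed above sets ENABLE_WEBHOOKS=true on the manager container of the master branch. In the standard kubebuilder scaffold that variable gates webhook registration in main.go; the sketch below shows the usual pattern and is an assumption about the scaffold, not code taken from this diff.

package main

import (
	"os"

	ctrl "sigs.k8s.io/controller-runtime"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

// setupWebhooks mirrors the typical kubebuilder scaffold: webhooks are only
// registered when ENABLE_WEBHOOKS is not explicitly set to "false", which is
// why manager.yaml on the master branch sets it to "true".
func setupWebhooks(mgr ctrl.Manager) error {
	if os.Getenv("ENABLE_WEBHOOKS") == "false" {
		return nil
	}
	return (&seaweedv1.Seaweed{}).SetupWebhookWithManager(mgr)
}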
@ -1,13 +1,13 @@
-# permissions for end users to edit seaweeds.
+# permissions for end users to edit masters.
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
-  name: seaweed-editor-role
+  name: master-editor-role
 rules:
 - apiGroups:
   - seaweed.seaweedfs.com
   resources:
-  - seaweeds
+  - masters
   verbs:
   - create
   - delete
@ -19,6 +19,6 @@ rules:
 - apiGroups:
   - seaweed.seaweedfs.com
   resources:
-  - seaweeds/status
+  - masters/status
   verbs:
   - get
@ -1,13 +1,13 @@
-# permissions for end users to view seaweeds.
+# permissions for end users to view masters.
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
-  name: seaweed-viewer-role
+  name: master-viewer-role
 rules:
 - apiGroups:
   - seaweed.seaweedfs.com
   resources:
-  - seaweeds
+  - masters
   verbs:
   - get
   - list
@ -15,6 +15,6 @@ rules:
 - apiGroups:
   - seaweed.seaweedfs.com
   resources:
-  - seaweeds/status
+  - masters/status
   verbs:
   - get
@ -7,64 +7,9 @@ metadata:
   name: manager-role
 rules:
 - apiGroups:
-  - apps
+  - seaweed.seaweedfs.com
   resources:
-  - statefulsets
-  verbs:
-  - create
-  - delete
-  - get
-  - list
-  - patch
-  - update
-  - watch
-- apiGroups:
-  - ""
-  resources:
-  - configmaps
-  verbs:
-  - create
-  - delete
-  - get
-  - list
-  - patch
-  - update
-  - watch
-- apiGroups:
-  - ""
-  resources:
-  - pods
-  verbs:
-  - get
-  - list
-- apiGroups:
-  - ""
-  resources:
-  - services
-  verbs:
-  - create
-  - delete
-  - get
-  - list
-  - patch
-  - update
-  - watch
-- apiGroups:
-  - extensions
-  resources:
-  - ingresses
-  verbs:
-  - create
-  - delete
-  - get
-  - list
-  - patch
-  - update
-  - watch
-- apiGroups:
-  - networking.k8s.io
-  resources:
-  - ingresses
+  - masters
   verbs:
   - create
   - delete
@ -76,19 +21,7 @@ rules:
 - apiGroups:
   - seaweed.seaweedfs.com
   resources:
-  - seaweeds
-  verbs:
-  - create
-  - delete
-  - get
-  - list
-  - patch
-  - update
-  - watch
-- apiGroups:
-  - seaweed.seaweedfs.com
-  resources:
-  - seaweeds/status
+  - masters/status
   verbs:
   - get
   - patch
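role.yaml is generated by controller-gen from +kubebuilder:rbac markers kept next to the reconciler, so the rule sets above mirror the markers on each branch. The block below sketches markers that would yield the master-branch rules; the repository's actual marker lines are not shown in this diff, so treat them as illustrative.

package controllers

// Markers of this shape, placed above the Reconcile method, are what
// controller-gen turns into config/rbac/role.yaml. Illustrative only; the
// repository's real markers are not part of this diff.
//
// +kubebuilder:rbac:groups=seaweed.seaweedfs.com,resources=seaweeds,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=seaweed.seaweedfs.com,resources=seaweeds/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=configmaps;services,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=pods,verbs=get;list
// +kubebuilder:rbac:groups=extensions;networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete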
@ -1,3 +1,3 @@
 ## This file is auto-generated, do not modify ##
 resources:
-- seaweed_v1_seaweed.yaml
+- seaweed_v1_master.yaml
@ -0,0 +1,7 @@
apiVersion: seaweed.seaweedfs.com/v1
kind: Master
metadata:
  name: master-sample
spec:
  # Add fields here
  foo: bar
@ -1,23 +0,0 @@
apiVersion: seaweed.seaweedfs.com/v1
kind: Seaweed
metadata:
  name: seaweed1
  namespace: default
spec:
  # Add fields here
  image: chrislusf/seaweedfs:2.96
  volumeServerDiskCount: 1
  hostSuffix: seaweed.abcdefg.com
  master:
    replicas: 3
    volumeSizeLimitMB: 1024
  volume:
    replicas: 1
    requests:
      storage: 2Gi
  filer:
    replicas: 2
    config: |
      [leveldb2]
      enabled = true
      dir = "/data/filerldb2"
@ -1,54 +0,0 @@

---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
  creationTimestamp: null
  name: mutating-webhook-configuration
webhooks:
- clientConfig:
    caBundle: Cg==
    service:
      name: webhook-service
      namespace: system
      path: /mutate-seaweed-seaweedfs-com-v1-seaweed
  failurePolicy: Fail
  name: mseaweed.kb.io
  rules:
  - apiGroups:
    - seaweed.seaweedfs.com
    apiVersions:
    - v1
    operations:
    - CREATE
    - UPDATE
    resources:
    - seaweeds
  timeoutSeconds: 15

---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
  creationTimestamp: null
  name: validating-webhook-configuration
webhooks:
- clientConfig:
    caBundle: Cg==
    service:
      name: webhook-service
      namespace: system
      path: /validate-seaweed-seaweedfs-com-v1-seaweed
  failurePolicy: Fail
  name: vseaweed.kb.io
  rules:
  - apiGroups:
    - seaweed.seaweedfs.com
    apiVersions:
    - v1
    operations:
    - CREATE
    - UPDATE
    resources:
    - seaweeds
  timeoutSeconds: 15
@ -1,108 +0,0 @@
package controllers

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"

	appsv1 "k8s.io/api/apps/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
	label "github.com/seaweedfs/seaweedfs-operator/controllers/label"
)

func (r *SeaweedReconciler) ensureFilerServers(seaweedCR *seaweedv1.Seaweed) (done bool, result ctrl.Result, err error) {
	_ = context.Background()
	_ = r.Log.WithValues("seaweed", seaweedCR.Name)

	if done, result, err = r.ensureFilerPeerService(seaweedCR); done {
		return
	}

	if done, result, err = r.ensureFilerService(seaweedCR); done {
		return
	}

	if done, result, err = r.ensureFilerConfigMap(seaweedCR); done {
		return
	}

	if done, result, err = r.ensureFilerStatefulSet(seaweedCR); done {
		return
	}

	return
}

func (r *SeaweedReconciler) ensureFilerStatefulSet(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
	log := r.Log.WithValues("sw-filer-statefulset", seaweedCR.Name)

	filerStatefulSet := r.createFilerStatefulSet(seaweedCR)
	if err := controllerutil.SetControllerReference(seaweedCR, filerStatefulSet, r.Scheme); err != nil {
		return ReconcileResult(err)
	}
	_, err := r.CreateOrUpdate(filerStatefulSet, func(existing, desired runtime.Object) error {
		existingStatefulSet := existing.(*appsv1.StatefulSet)
		desiredStatefulSet := desired.(*appsv1.StatefulSet)

		existingStatefulSet.Spec.Replicas = desiredStatefulSet.Spec.Replicas
		existingStatefulSet.Spec.Template.Spec = desiredStatefulSet.Spec.Template.Spec
		return nil
	})
	log.Info("ensure filer stateful set " + filerStatefulSet.Name)
	return ReconcileResult(err)
}

func (r *SeaweedReconciler) ensureFilerPeerService(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {

	log := r.Log.WithValues("sw-filer-peer-service", seaweedCR.Name)

	filerPeerService := r.createFilerPeerService(seaweedCR)
	if err := controllerutil.SetControllerReference(seaweedCR, filerPeerService, r.Scheme); err != nil {
		return ReconcileResult(err)
	}

	_, err := r.CreateOrUpdateService(filerPeerService)
	log.Info("ensure filer peer service " + filerPeerService.Name)

	return ReconcileResult(err)
}

func (r *SeaweedReconciler) ensureFilerService(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {

	log := r.Log.WithValues("sw-filer-service", seaweedCR.Name)

	filerService := r.createFilerService(seaweedCR)
	if err := controllerutil.SetControllerReference(seaweedCR, filerService, r.Scheme); err != nil {
		return ReconcileResult(err)
	}
	_, err := r.CreateOrUpdateService(filerService)

	log.Info("ensure filer service " + filerService.Name)

	return ReconcileResult(err)
}

func (r *SeaweedReconciler) ensureFilerConfigMap(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
	log := r.Log.WithValues("sw-filer-configmap", seaweedCR.Name)

	filerConfigMap := r.createFilerConfigMap(seaweedCR)
	if err := controllerutil.SetControllerReference(seaweedCR, filerConfigMap, r.Scheme); err != nil {
		return ReconcileResult(err)
	}
	_, err := r.CreateOrUpdateConfigMap(filerConfigMap)

	log.Info("Get filer ConfigMap " + filerConfigMap.Name)
	return ReconcileResult(err)
}

func labelsForFiler(name string) map[string]string {
	return map[string]string{
		label.ManagedByLabelKey: "seaweedfs-operator",
		label.NameLabelKey:      "seaweedfs",
		label.ComponentLabelKey: "filer",
		label.InstanceLabelKey:  name,
	}
}
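Every ensure* helper above funnels its outcome through ReconcileResult, which is defined elsewhere in the controllers package and does not appear in this diff. A plausible minimal sketch of such a helper, to make the (done, result, err) convention easier to follow; the body is an assumption, not the repository's code.

package controllers

import (
	ctrl "sigs.k8s.io/controller-runtime"
)

// ReconcileResult converts an error into the (done, result, err) triple used by
// the ensure* helpers: a non-nil error stops the current pass so the manager can
// requeue, while a nil error lets the caller continue to the next step.
// Sketch only; the real helper lives elsewhere in the repository.
func ReconcileResult(err error) (bool, ctrl.Result, error) {
	if err != nil {
		return true, ctrl.Result{}, err
	}
	return false, ctrl.Result{}, nil
}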
@ -1,29 +0,0 @@
package controllers

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

func (r *SeaweedReconciler) createFilerConfigMap(m *seaweedv1.Seaweed) *corev1.ConfigMap {
	labels := labelsForFiler(m.Name)

	toml := ""
	if m.Spec.Filer.Config != nil {
		toml = *m.Spec.Filer.Config
	}

	dep := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      m.Name + "-filer",
			Namespace: m.Namespace,
			Labels:    labels,
		},
		Data: map[string]string{
			"filer.toml": toml,
		},
	}
	return dep
}
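A short test sketch for createFilerConfigMap above, checking that the optional Spec.Filer.Config string lands under the filer.toml key; it assumes a zero-value SeaweedReconciler is enough for this helper, since only the custom resource's fields are read.

package controllers

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

func TestCreateFilerConfigMap(t *testing.T) {
	cfg := "[leveldb2]\nenabled = true\ndir = \"/data/filerldb2\"\n"
	sw := &seaweedv1.Seaweed{
		ObjectMeta: metav1.ObjectMeta{Name: "seaweed1", Namespace: "default"},
		Spec: seaweedv1.SeaweedSpec{
			Filer: &seaweedv1.FilerSpec{Config: &cfg},
		},
	}

	r := &SeaweedReconciler{}
	cm := r.createFilerConfigMap(sw)

	if cm.Name != "seaweed1-filer" || cm.Namespace != "default" {
		t.Fatalf("unexpected ConfigMap metadata: %s/%s", cm.Namespace, cm.Name)
	}
	if cm.Data["filer.toml"] != cfg {
		t.Fatalf("filer.toml not propagated: %q", cm.Data["filer.toml"])
	}
}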
@ -1,85 +0,0 @@
package controllers

import (
	"fmt"

	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	ctrl "sigs.k8s.io/controller-runtime"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

func (r *SeaweedReconciler) createAllIngress(m *seaweedv1.Seaweed) *extensionsv1beta1.Ingress {
	labels := labelsForIngress(m.Name)

	dep := &extensionsv1beta1.Ingress{
		ObjectMeta: metav1.ObjectMeta{
			Name:      m.Name + "-ingress",
			Namespace: m.Namespace,
			Labels:    labels,
		},
		Spec: extensionsv1beta1.IngressSpec{
			// TLS: ingressSpec.TLS,
			Rules: []extensionsv1beta1.IngressRule{
				{
					Host: "filer." + *m.Spec.HostSuffix,
					IngressRuleValue: extensionsv1beta1.IngressRuleValue{
						HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
							Paths: []extensionsv1beta1.HTTPIngressPath{
								{
									Path: "/",
									Backend: extensionsv1beta1.IngressBackend{
										ServiceName: m.Name + "-filer",
										ServicePort: intstr.FromInt(seaweedv1.FilerHTTPPort),
									},
								},
							},
						},
					},
				},
				{
					Host: "s3." + *m.Spec.HostSuffix,
					IngressRuleValue: extensionsv1beta1.IngressRuleValue{
						HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
							Paths: []extensionsv1beta1.HTTPIngressPath{
								{
									Path: "/",
									Backend: extensionsv1beta1.IngressBackend{
										ServiceName: m.Name + "-filer",
										ServicePort: intstr.FromInt(seaweedv1.FilerS3Port),
									},
								},
							},
						},
					},
				},
			},
		},
	}

	// add ingress for volume servers
	for i := 0; i < int(m.Spec.Volume.Replicas); i++ {
		dep.Spec.Rules = append(dep.Spec.Rules, extensionsv1beta1.IngressRule{
			Host: fmt.Sprintf("%s-volume-%d.%s", m.Name, i, *m.Spec.HostSuffix),
			IngressRuleValue: extensionsv1beta1.IngressRuleValue{
				HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
					Paths: []extensionsv1beta1.HTTPIngressPath{
						{
							Path: "/",
							Backend: extensionsv1beta1.IngressBackend{
								ServiceName: fmt.Sprintf("%s-volume-%d", m.Name, i),
								ServicePort: intstr.FromInt(seaweedv1.VolumeHTTPPort),
							},
						},
					},
				},
			},
		})
	}

	// Set master instance as the owner and controller
	ctrl.SetControllerReference(m, dep, r.Scheme)
	return dep
}
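createAllIngress registers one rule per public endpoint: the filer HTTP endpoint, the S3 gateway, and one host per volume server. The standalone sketch below only demonstrates that naming scheme with illustrative values; it is not taken from the repository.

package main

import "fmt"

// Prints the ingress hosts that the function above would register for a given
// cluster name, host suffix and volume replica count (naming scheme only;
// values are illustrative).
func main() {
	name, suffix, volumeReplicas := "seaweed1", "seaweed.abcdefg.com", 2

	hosts := []string{
		"filer." + suffix, // filer HTTP endpoint
		"s3." + suffix,    // filer S3 gateway
	}
	for i := 0; i < volumeReplicas; i++ {
		hosts = append(hosts, fmt.Sprintf("%s-volume-%d.%s", name, i, suffix))
	}

	for _, h := range hosts {
		fmt.Println(h) // e.g. seaweed1-volume-0.seaweed.abcdefg.com
	}
}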
@ -1,108 +0,0 @@
|
||||||
package controllers
|
|
||||||
|
|
||||||
import (
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
|
||||||
|
|
||||||
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (r *SeaweedReconciler) createFilerPeerService(m *seaweedv1.Seaweed) *corev1.Service {
|
|
||||||
labels := labelsForFiler(m.Name)
|
|
||||||
|
|
||||||
dep := &corev1.Service{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: m.Name + "-filer-peer",
|
|
||||||
Namespace: m.Namespace,
|
|
||||||
Labels: labels,
|
|
||||||
Annotations: map[string]string{
|
|
||||||
"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Spec: corev1.ServiceSpec{
|
|
||||||
ClusterIP: "None",
|
|
||||||
PublishNotReadyAddresses: true,
|
|
||||||
Ports: []corev1.ServicePort{
|
|
||||||
{
|
|
||||||
Name: "filer-http",
|
|
||||||
Protocol: corev1.Protocol("TCP"),
|
|
||||||
Port: seaweedv1.FilerHTTPPort,
|
|
||||||
TargetPort: intstr.FromInt(seaweedv1.FilerHTTPPort),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "filer-grpc",
|
|
||||||
Protocol: corev1.Protocol("TCP"),
|
|
||||||
Port: seaweedv1.FilerGRPCPort,
|
|
||||||
TargetPort: intstr.FromInt(seaweedv1.FilerGRPCPort),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "filer-s3",
|
|
||||||
Protocol: corev1.Protocol("TCP"),
|
|
||||||
Port: seaweedv1.FilerS3Port,
|
|
||||||
TargetPort: intstr.FromInt(seaweedv1.FilerS3Port),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Selector: labels,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
return dep
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *SeaweedReconciler) createFilerService(m *seaweedv1.Seaweed) *corev1.Service {
|
|
||||||
labels := labelsForFiler(m.Name)
|
|
||||||
|
|
||||||
dep := &corev1.Service{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: m.Name + "-filer",
|
|
||||||
Namespace: m.Namespace,
|
|
||||||
Labels: labels,
|
|
||||||
Annotations: map[string]string{
|
|
||||||
"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Spec: corev1.ServiceSpec{
|
|
||||||
Type: corev1.ServiceTypeClusterIP,
|
|
||||||
PublishNotReadyAddresses: true,
|
|
||||||
Ports: []corev1.ServicePort{
|
|
||||||
{
|
|
||||||
Name: "filer-http",
|
|
||||||
Protocol: corev1.Protocol("TCP"),
|
|
||||||
Port: seaweedv1.FilerHTTPPort,
|
|
||||||
TargetPort: intstr.FromInt(seaweedv1.FilerHTTPPort),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "filer-grpc",
|
|
||||||
Protocol: corev1.Protocol("TCP"),
|
|
||||||
Port: seaweedv1.FilerGRPCPort,
|
|
||||||
TargetPort: intstr.FromInt(seaweedv1.FilerGRPCPort),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "filer-s3",
|
|
||||||
Protocol: corev1.Protocol("TCP"),
|
|
||||||
Port: seaweedv1.FilerS3Port,
|
|
||||||
TargetPort: intstr.FromInt(seaweedv1.FilerS3Port),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Selector: labels,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
if m.Spec.Filer.Service != nil {
|
|
||||||
svcSpec := m.Spec.Filer.Service
|
|
||||||
dep.Annotations = copyAnnotations(svcSpec.Annotations)
|
|
||||||
|
|
||||||
if svcSpec.Type != "" {
|
|
||||||
dep.Spec.Type = svcSpec.Type
|
|
||||||
}
|
|
||||||
|
|
||||||
if svcSpec.ClusterIP != nil {
|
|
||||||
dep.Spec.ClusterIP = *svcSpec.ClusterIP
|
|
||||||
}
|
|
||||||
|
|
||||||
if svcSpec.LoadBalancerIP != nil {
|
|
||||||
dep.Spec.LoadBalancerIP = *svcSpec.LoadBalancerIP
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return dep
|
|
||||||
}
|
|
|
@ -1,133 +0,0 @@
|
||||||
package controllers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
appsv1 "k8s.io/api/apps/v1"
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
|
||||||
|
|
||||||
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
func buildFilerStartupScript(m *seaweedv1.Seaweed) string {
|
|
||||||
commands := []string{"weed", "-logtostderr=true", "filer"}
|
|
||||||
commands = append(commands, fmt.Sprintf("-port=%d", seaweedv1.FilerHTTPPort))
|
|
||||||
commands = append(commands, fmt.Sprintf("-ip=$(POD_NAME).%s-filer-peer.%s", m.Name, m.Namespace))
|
|
||||||
commands = append(commands, fmt.Sprintf("-master=%s", getMasterPeersString(m)))
|
|
||||||
commands = append(commands, "-s3")
|
|
||||||
|
|
||||||
return strings.Join(commands, " ")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *SeaweedReconciler) createFilerStatefulSet(m *seaweedv1.Seaweed) *appsv1.StatefulSet {
|
|
||||||
labels := labelsForFiler(m.Name)
|
|
||||||
replicas := int32(m.Spec.Filer.Replicas)
|
|
||||||
rollingUpdatePartition := int32(0)
|
|
||||||
enableServiceLinks := false
|
|
||||||
|
|
||||||
filerPodSpec := m.BaseFilerSpec().BuildPodSpec()
|
|
||||||
filerPodSpec.Volumes = []corev1.Volume{
|
|
||||||
{
|
|
||||||
Name: "filer-config",
|
|
||||||
VolumeSource: corev1.VolumeSource{
|
|
||||||
ConfigMap: &corev1.ConfigMapVolumeSource{
|
|
||||||
LocalObjectReference: corev1.LocalObjectReference{
|
|
||||||
Name: m.Name + "-filer",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
filerPodSpec.EnableServiceLinks = &enableServiceLinks
|
|
||||||
filerPodSpec.Containers = []corev1.Container{{
|
|
||||||
Name: "filer",
|
|
||||||
Image: m.Spec.Image,
|
|
||||||
ImagePullPolicy: m.BaseFilerSpec().ImagePullPolicy(),
|
|
||||||
Env: append(m.BaseFilerSpec().Env(), kubernetesEnvVars...),
|
|
||||||
VolumeMounts: []corev1.VolumeMount{
|
|
||||||
{
|
|
||||||
Name: "filer-config",
|
|
||||||
ReadOnly: true,
|
|
||||||
MountPath: "/etc/seaweedfs",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Command: []string{
|
|
||||||
"/bin/sh",
|
|
||||||
"-ec",
|
|
||||||
buildFilerStartupScript(m),
|
|
||||||
},
|
|
||||||
Ports: []corev1.ContainerPort{
|
|
||||||
{
|
|
||||||
ContainerPort: seaweedv1.FilerHTTPPort,
|
|
||||||
Name: "filer-http",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ContainerPort: seaweedv1.FilerGRPCPort,
|
|
||||||
Name: "filer-grpc",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ContainerPort: seaweedv1.FilerS3Port,
|
|
||||||
Name: "filer-s3",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
ReadinessProbe: &corev1.Probe{
|
|
||||||
Handler: corev1.Handler{
|
|
||||||
HTTPGet: &corev1.HTTPGetAction{
|
|
||||||
Path: "/",
|
|
||||||
Port: intstr.FromInt(seaweedv1.FilerHTTPPort),
|
|
||||||
Scheme: corev1.URISchemeHTTP,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
InitialDelaySeconds: 10,
|
|
||||||
TimeoutSeconds: 3,
|
|
||||||
PeriodSeconds: 15,
|
|
||||||
SuccessThreshold: 1,
|
|
||||||
FailureThreshold: 100,
|
|
||||||
},
|
|
||||||
LivenessProbe: &corev1.Probe{
|
|
||||||
Handler: corev1.Handler{
|
|
||||||
HTTPGet: &corev1.HTTPGetAction{
|
|
||||||
Path: "/",
|
|
||||||
Port: intstr.FromInt(seaweedv1.FilerHTTPPort),
|
|
||||||
Scheme: corev1.URISchemeHTTP,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
InitialDelaySeconds: 20,
|
|
||||||
TimeoutSeconds: 3,
|
|
||||||
PeriodSeconds: 30,
|
|
||||||
SuccessThreshold: 1,
|
|
||||||
FailureThreshold: 6,
|
|
||||||
},
|
|
||||||
}}
|
|
||||||
|
|
||||||
dep := &appsv1.StatefulSet{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: m.Name + "-filer",
|
|
||||||
Namespace: m.Namespace,
|
|
||||||
},
|
|
||||||
Spec: appsv1.StatefulSetSpec{
|
|
||||||
ServiceName: m.Name + "-filer-peer",
|
|
||||||
PodManagementPolicy: appsv1.ParallelPodManagement,
|
|
||||||
Replicas: &replicas,
|
|
||||||
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
|
|
||||||
Type: appsv1.RollingUpdateStatefulSetStrategyType,
|
|
||||||
RollingUpdate: &appsv1.RollingUpdateStatefulSetStrategy{
|
|
||||||
Partition: &rollingUpdatePartition,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Selector: &metav1.LabelSelector{
|
|
||||||
MatchLabels: labels,
|
|
||||||
},
|
|
||||||
Template: corev1.PodTemplateSpec{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Labels: labels,
|
|
||||||
},
|
|
||||||
Spec: filerPodSpec,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
return dep
|
|
||||||
}
|
|
|
@ -1,42 +0,0 @@
package controllers

import (
	"github.com/seaweedfs/seaweedfs-operator/controllers/label"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

func (r *SeaweedReconciler) ensureSeaweedIngress(seaweedCR *seaweedv1.Seaweed) (done bool, result ctrl.Result, err error) {

	if seaweedCR.Spec.HostSuffix != nil && len(*seaweedCR.Spec.HostSuffix) != 0 {
		if done, result, err = r.ensureAllIngress(seaweedCR); done {
			return
		}
	}

	return
}

func (r *SeaweedReconciler) ensureAllIngress(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
	log := r.Log.WithValues("sw-ingress", seaweedCR.Name)

	ingressService := r.createAllIngress(seaweedCR)
	if err := controllerutil.SetControllerReference(seaweedCR, ingressService, r.Scheme); err != nil {
		return ReconcileResult(err)
	}
	_, err := r.CreateOrUpdateIngress(ingressService)

	log.Info("ensure ingress " + ingressService.Name)
	return ReconcileResult(err)
}

func labelsForIngress(name string) map[string]string {
	return map[string]string{
		label.ManagedByLabelKey: "seaweedfs-operator",
		label.NameLabelKey:      "seaweedfs",
		label.ComponentLabelKey: "ingress",
		label.InstanceLabelKey:  name,
	}
}
@@ -1,151 +0,0 @@
package controllers

import (
	"context"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
	"github.com/seaweedfs/seaweedfs-operator/controllers/label"
)

func (r *SeaweedReconciler) ensureMaster(seaweedCR *seaweedv1.Seaweed) (done bool, result ctrl.Result, err error) {
	_ = context.Background()
	_ = r.Log.WithValues("seaweed", seaweedCR.Name)

	if done, result, err = r.ensureMasterPeerService(seaweedCR); done {
		return
	}

	if done, result, err = r.ensureMasterService(seaweedCR); done {
		return
	}

	if done, result, err = r.ensureMasterConfigMap(seaweedCR); done {
		return
	}

	if done, result, err = r.ensureMasterStatefulSet(seaweedCR); done {
		return
	}

	if seaweedCR.Spec.Master.ConcurrentStart == nil || !*seaweedCR.Spec.Master.ConcurrentStart {
		if done, result, err = r.waitForMasterStatefulSet(seaweedCR); done {
			return
		}
	}

	return
}

func (r *SeaweedReconciler) waitForMasterStatefulSet(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
	log := r.Log.WithValues("sw-master-statefulset", seaweedCR.Name)

	podList := &corev1.PodList{}
	listOpts := []client.ListOption{
		client.InNamespace(seaweedCR.Namespace),
		client.MatchingLabels(labelsForMaster(seaweedCR.Name)),
	}
	if err := r.List(context.Background(), podList, listOpts...); err != nil {
		log.Error(err, "Failed to list master pods", "namespace", seaweedCR.Namespace, "name", seaweedCR.Name)
		return true, ctrl.Result{RequeueAfter: 3 * time.Second}, nil
	}

	log.Info("pods", "count", len(podList.Items))
	runningCounter := 0
	for _, pod := range podList.Items {
		if pod.Status.Phase == corev1.PodRunning {
			for _, containerStatus := range pod.Status.ContainerStatuses {
				if containerStatus.Ready {
					runningCounter++
				}
				log.Info("pod", "name", pod.Name, "containerStatus", containerStatus)
			}
		} else {
			log.Info("pod", "name", pod.Name, "status", pod.Status)
		}
	}

	if runningCounter < int(seaweedCR.Spec.Master.Replicas)/2+1 {
		log.Info("some masters are not ready", "missing", int(seaweedCR.Spec.Master.Replicas)-runningCounter)
		return true, ctrl.Result{RequeueAfter: 3 * time.Second}, nil
	}

	log.Info("masters are ready")
	return ReconcileResult(nil)

}

func (r *SeaweedReconciler) ensureMasterStatefulSet(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
	log := r.Log.WithValues("sw-master-statefulset", seaweedCR.Name)

	masterStatefulSet := r.createMasterStatefulSet(seaweedCR)
	if err := controllerutil.SetControllerReference(seaweedCR, masterStatefulSet, r.Scheme); err != nil {
		return ReconcileResult(err)
	}
	_, err := r.CreateOrUpdate(masterStatefulSet, func(existing, desired runtime.Object) error {
		existingStatefulSet := existing.(*appsv1.StatefulSet)
		desiredStatefulSet := desired.(*appsv1.StatefulSet)

		existingStatefulSet.Spec.Replicas = desiredStatefulSet.Spec.Replicas
		existingStatefulSet.Spec.Template.Spec = desiredStatefulSet.Spec.Template.Spec
		return nil
	})
	log.Info("ensure master stateful set " + masterStatefulSet.Name)
	return ReconcileResult(err)
}

func (r *SeaweedReconciler) ensureMasterConfigMap(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
	log := r.Log.WithValues("sw-master-configmap", seaweedCR.Name)

	masterConfigMap := r.createMasterConfigMap(seaweedCR)
	if err := controllerutil.SetControllerReference(seaweedCR, masterConfigMap, r.Scheme); err != nil {
		return ReconcileResult(err)
	}
	_, err := r.CreateOrUpdateConfigMap(masterConfigMap)

	log.Info("Get master ConfigMap " + masterConfigMap.Name)
	return ReconcileResult(err)
}

func (r *SeaweedReconciler) ensureMasterService(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
	log := r.Log.WithValues("sw-master-service", seaweedCR.Name)

	masterService := r.createMasterService(seaweedCR)
	if err := controllerutil.SetControllerReference(seaweedCR, masterService, r.Scheme); err != nil {
		return ReconcileResult(err)
	}
	_, err := r.CreateOrUpdateService(masterService)

	log.Info("Get master service " + masterService.Name)
	return ReconcileResult(err)
}

func (r *SeaweedReconciler) ensureMasterPeerService(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
	log := r.Log.WithValues("sw-master-peer-service", seaweedCR.Name)

	masterPeerService := r.createMasterPeerService(seaweedCR)
	if err := controllerutil.SetControllerReference(seaweedCR, masterPeerService, r.Scheme); err != nil {
		return ReconcileResult(err)
	}
	_, err := r.CreateOrUpdateService(masterPeerService)

	log.Info("Get master peer service " + masterPeerService.Name)
	return ReconcileResult(err)

}

func labelsForMaster(name string) map[string]string {
	return map[string]string{
		label.ManagedByLabelKey: "seaweedfs-operator",
		label.NameLabelKey:      "seaweedfs",
		label.ComponentLabelKey: "master",
		label.InstanceLabelKey:  name,
	}
}
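The readiness gate in waitForMasterStatefulSet requires a Raft-style majority of masters to be running before reconciliation proceeds: int(replicas)/2 + 1. A tiny self-contained sketch of that arithmetic:

package main

import "fmt"

// quorum mirrors the check in waitForMasterStatefulSet: a majority of
// int(replicas)/2 + 1 ready masters is needed before continuing.
func quorum(replicas int) int { return replicas/2 + 1 }

func main() {
	for _, r := range []int{1, 3, 5} {
		fmt.Printf("replicas=%d quorum=%d\n", r, quorum(r))
	}
}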
@@ -1,31 +0,0 @@
package controllers

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

func (r *SeaweedReconciler) createMasterConfigMap(m *seaweedv1.Seaweed) *corev1.ConfigMap {
	labels := labelsForMaster(m.Name)

	toml := ""
	if m.Spec.Master.Config != nil {
		toml = *m.Spec.Master.Config
	}

	dep := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      m.Name + "-master",
			Namespace: m.Namespace,
			Labels:    labels,
		},
		Data: map[string]string{
			"master.toml": toml,
		},
	}
	// Set master instance as the owner and controller
	// ctrl.SetControllerReference(m, dep, r.Scheme)
	return dep
}
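For illustration, a hedged sketch of the object shape createMasterConfigMap produces: the CR name "seaweed1", namespace "default", and the placeholder TOML content are all made up here; only the "<name>-master" naming and the "master.toml" data key come from the code above.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cm := corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "seaweed1-master", Namespace: "default"},
		Data:       map[string]string{"master.toml": "# contents of Spec.Master.Config\n"},
	}
	fmt.Println(cm.Name, cm.Data["master.toml"])
}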
@@ -1,97 +0,0 @@
package controllers

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

func (r *SeaweedReconciler) createMasterPeerService(m *seaweedv1.Seaweed) *corev1.Service {
	labels := labelsForMaster(m.Name)

	dep := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      m.Name + "-master-peer",
			Namespace: m.Namespace,
			Labels:    labels,
			Annotations: map[string]string{
				"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
			},
		},
		Spec: corev1.ServiceSpec{
			ClusterIP:                "None",
			PublishNotReadyAddresses: true,
			Ports: []corev1.ServicePort{
				{
					Name:       "master-http",
					Protocol:   corev1.Protocol("TCP"),
					Port:       seaweedv1.MasterHTTPPort,
					TargetPort: intstr.FromInt(seaweedv1.MasterHTTPPort),
				},
				{
					Name:       "master-grpc",
					Protocol:   corev1.Protocol("TCP"),
					Port:       seaweedv1.MasterGRPCPort,
					TargetPort: intstr.FromInt(seaweedv1.MasterGRPCPort),
				},
			},
			Selector: labels,
		},
	}
	// Set master instance as the owner and controller
	// ctrl.SetControllerReference(m, dep, r.Scheme)
	return dep
}

func (r *SeaweedReconciler) createMasterService(m *seaweedv1.Seaweed) *corev1.Service {
	labels := labelsForMaster(m.Name)

	dep := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      m.Name + "-master",
			Namespace: m.Namespace,
			Labels:    labels,
			Annotations: map[string]string{
				"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
			},
		},
		Spec: corev1.ServiceSpec{
			PublishNotReadyAddresses: true,
			Ports: []corev1.ServicePort{
				{
					Name:       "master-http",
					Protocol:   corev1.Protocol("TCP"),
					Port:       seaweedv1.MasterHTTPPort,
					TargetPort: intstr.FromInt(seaweedv1.MasterHTTPPort),
				},
				{
					Name:       "master-grpc",
					Protocol:   corev1.Protocol("TCP"),
					Port:       seaweedv1.MasterGRPCPort,
					TargetPort: intstr.FromInt(seaweedv1.MasterGRPCPort),
				},
			},
			Selector: labels,
		},
	}

	if m.Spec.Master.Service != nil {
		svcSpec := m.Spec.Master.Service
		dep.Annotations = copyAnnotations(svcSpec.Annotations)

		if svcSpec.Type != "" {
			dep.Spec.Type = svcSpec.Type
		}

		if svcSpec.ClusterIP != nil {
			dep.Spec.ClusterIP = *svcSpec.ClusterIP
		}

		if svcSpec.LoadBalancerIP != nil {
			dep.Spec.LoadBalancerIP = *svcSpec.LoadBalancerIP
		}
	}
	return dep
}
@@ -1,149 +0,0 @@
package controllers

import (
	"fmt"
	"strings"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

func buildMasterStartupScript(m *seaweedv1.Seaweed) string {
	command := []string{"weed", "-logtostderr=true", "master"}
	spec := m.Spec.Master
	if spec.VolumePreallocate != nil && *spec.VolumePreallocate {
		command = append(command, "-volumePreallocate")
	}

	if spec.VolumeSizeLimitMB != nil {
		command = append(command, fmt.Sprintf("-volumeSizeLimitMB=%d", *spec.VolumeSizeLimitMB))
	}

	if spec.GarbageThreshold != nil {
		command = append(command, fmt.Sprintf("-garbageThreshold=%s", *spec.GarbageThreshold))
	}

	if spec.PulseSeconds != nil {
		command = append(command, fmt.Sprintf("-pulseSeconds=%d", *spec.PulseSeconds))
	}

	if spec.DefaultReplication != nil {
		command = append(command, fmt.Sprintf("-defaultReplication=%s", *spec.DefaultReplication))
	}

	command = append(command, fmt.Sprintf("-ip=$(POD_NAME).%s-master-peer.%s", m.Name, m.Namespace))
	command = append(command, fmt.Sprintf("-peers=%s", getMasterPeersString(m)))
	return strings.Join(command, " ")
}

func (r *SeaweedReconciler) createMasterStatefulSet(m *seaweedv1.Seaweed) *appsv1.StatefulSet {
	labels := labelsForMaster(m.Name)
	replicas := m.Spec.Master.Replicas
	rollingUpdatePartition := int32(0)
	enableServiceLinks := false

	masterPodSpec := m.BaseMasterSpec().BuildPodSpec()
	masterPodSpec.Volumes = []corev1.Volume{
		{
			Name: "master-config",
			VolumeSource: corev1.VolumeSource{
				ConfigMap: &corev1.ConfigMapVolumeSource{
					LocalObjectReference: corev1.LocalObjectReference{
						Name: m.Name + "-master",
					},
				},
			},
		},
	}
	masterPodSpec.EnableServiceLinks = &enableServiceLinks
	masterPodSpec.Containers = []corev1.Container{{
		Name:            "master",
		Image:           m.Spec.Image,
		ImagePullPolicy: m.BaseMasterSpec().ImagePullPolicy(),
		Env:             append(m.BaseMasterSpec().Env(), kubernetesEnvVars...),
		VolumeMounts: []corev1.VolumeMount{
			{
				Name:      "master-config",
				ReadOnly:  true,
				MountPath: "/etc/seaweedfs",
			},
		},
		Command: []string{
			"/bin/sh",
			"-ec",
			buildMasterStartupScript(m),
		},
		Ports: []corev1.ContainerPort{
			{
				ContainerPort: seaweedv1.MasterHTTPPort,
				Name:          "master-http",
			},
			{
				ContainerPort: seaweedv1.MasterGRPCPort,
				Name:          "master-grpc",
			},
		},
		ReadinessProbe: &corev1.Probe{
			Handler: corev1.Handler{
				HTTPGet: &corev1.HTTPGetAction{
					Path:   "/cluster/status",
					Port:   intstr.FromInt(seaweedv1.MasterHTTPPort),
					Scheme: corev1.URISchemeHTTP,
				},
			},
			InitialDelaySeconds: 5,
			TimeoutSeconds:      15,
			PeriodSeconds:       15,
			SuccessThreshold:    2,
			FailureThreshold:    100,
		},
		LivenessProbe: &corev1.Probe{
			Handler: corev1.Handler{
				HTTPGet: &corev1.HTTPGetAction{
					Path:   "/cluster/status",
					Port:   intstr.FromInt(seaweedv1.MasterHTTPPort),
					Scheme: corev1.URISchemeHTTP,
				},
			},
			InitialDelaySeconds: 15,
			TimeoutSeconds:      15,
			PeriodSeconds:       15,
			SuccessThreshold:    1,
			FailureThreshold:    6,
		},
	}}

	dep := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      m.Name + "-master",
			Namespace: m.Namespace,
		},
		Spec: appsv1.StatefulSetSpec{
			ServiceName:         m.Name + "-master-peer",
			PodManagementPolicy: appsv1.ParallelPodManagement,
			Replicas:            &replicas,
			UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
				Type: appsv1.RollingUpdateStatefulSetStrategyType,
				RollingUpdate: &appsv1.RollingUpdateStatefulSetStrategy{
					Partition: &rollingUpdatePartition,
				},
			},
			Selector: &metav1.LabelSelector{
				MatchLabels: labels,
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labels,
				},
				Spec: masterPodSpec,
			},
		},
	}
	// Set master instance as the owner and controller
	// ctrl.SetControllerReference(m, dep, r.Scheme)
	return dep
}
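To make the startup script concrete, here is a hedged, standalone sketch that reproduces what buildMasterStartupScript and getMasterAddresses (defined further below) assemble for a hypothetical CR named "seaweed1" in namespace "default" with 3 master replicas and no optional flags set; the CR name and namespace are illustrative only.

package main

import (
	"fmt"
	"strings"
)

// masterPeers mirrors the "%s-master-%d.%s-master-peer.%s:9333" pattern used by the operator.
func masterPeers(namespace, name string, replicas int) []string {
	peers := make([]string, 0, replicas)
	for i := 0; i < replicas; i++ {
		peers = append(peers, fmt.Sprintf("%s-master-%d.%s-master-peer.%s:9333", name, i, name, namespace))
	}
	return peers
}

func main() {
	name, namespace := "seaweed1", "default"
	command := []string{"weed", "-logtostderr=true", "master"}
	command = append(command, fmt.Sprintf("-ip=$(POD_NAME).%s-master-peer.%s", name, namespace))
	command = append(command, "-peers="+strings.Join(masterPeers(namespace, name, 3), ","))
	// Prints the single command line that each master container runs via /bin/sh -ec.
	fmt.Println(strings.Join(command, " "))
}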
@@ -1,328 +0,0 @@
package controllers

import (
	"context"
	"encoding/json"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/klog"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// the following is adapted from tidb-operator/pkg/controller/generic_control.go

const (
	// LastAppliedPodTemplate is the annotation key of the last applied pod template
	LastAppliedPodTemplate = "seaweedfs.com/last-applied-podtemplate"

	// LastAppliedConfigAnnotation is the annotation key of the last applied configuration
	LastAppliedConfigAnnotation = "seaweedfs.com/last-applied-configuration"
)

// MergeFn resolves conflicts between an existing object and the desired object
type MergeFn func(existing, desired runtime.Object) error

// CreateOrUpdate creates an object in the Kubernetes cluster for the controller; if the object
// already exists, it calls mergeFn to merge the desired changes into the existing object and then
// updates it. The object will also be adopted by the given controller.
func (r *SeaweedReconciler) CreateOrUpdate(obj runtime.Object, mergeFn MergeFn) (runtime.Object, error) {

	// controller-runtime/client will mutate the object pointer in-place,
	// to be consistent with other methods in our controller, we copy the object
	// to avoid the in-place mutation here and hereafter.
	desired := obj.DeepCopyObject()

	// 1. try to create and see if there is any conflict
	err := r.Create(context.TODO(), desired)
	if errors.IsAlreadyExists(err) {

		// 2. the object already exists, merge our desired changes into it
		existing, err := EmptyClone(obj)
		if err != nil {
			return nil, err
		}
		key, err := client.ObjectKeyFromObject(existing)
		if err != nil {
			return nil, err
		}
		err = r.Get(context.TODO(), key, existing)
		if err != nil {
			return nil, err
		}

		mutated := existing.DeepCopyObject()
		// 3. invoke mergeFn to mutate a copy of the existing object
		if err := mergeFn(mutated, desired); err != nil {
			return nil, err
		}

		// 4. check whether the copy was actually mutated
		if !apiequality.Semantic.DeepEqual(existing, mutated) {
			err := r.Update(context.TODO(), mutated)
			return mutated, err
		}

		return mutated, nil
	}

	return desired, err
}

func (r *SeaweedReconciler) addSpecToAnnotation(d *appsv1.Deployment) error {
	b, err := json.Marshal(d.Spec.Template.Spec)
	if err != nil {
		return err
	}
	if d.Annotations == nil {
		d.Annotations = map[string]string{}
	}
	d.Annotations[LastAppliedPodTemplate] = string(b)
	return nil
}

func (r *SeaweedReconciler) CreateOrUpdateDeployment(deploy *appsv1.Deployment) (*appsv1.Deployment, error) {
	r.addSpecToAnnotation(deploy)
	result, err := r.CreateOrUpdate(deploy, func(existing, desired runtime.Object) error {
		existingDep := existing.(*appsv1.Deployment)
		desiredDep := desired.(*appsv1.Deployment)

		existingDep.Spec.Replicas = desiredDep.Spec.Replicas
		existingDep.Labels = desiredDep.Labels

		if existingDep.Annotations == nil {
			existingDep.Annotations = map[string]string{}
		}
		for k, v := range desiredDep.Annotations {
			existingDep.Annotations[k] = v
		}
		// only override the default strategy if it is explicitly set in the desiredDep
		if string(desiredDep.Spec.Strategy.Type) != "" {
			existingDep.Spec.Strategy.Type = desiredDep.Spec.Strategy.Type
			if existingDep.Spec.Strategy.RollingUpdate != nil {
				existingDep.Spec.Strategy.RollingUpdate = desiredDep.Spec.Strategy.RollingUpdate
			}
		}
		// pod selector of deployment is immutable, so we don't mutate the labels of pod
		for k, v := range desiredDep.Spec.Template.Annotations {
			existingDep.Spec.Template.Annotations[k] = v
		}
		// podSpec of deployment is hard to merge, use an annotation to assist
		if DeploymentPodSpecChanged(desiredDep, existingDep) {
			// Record last applied spec in favor of future equality check
			b, err := json.Marshal(desiredDep.Spec.Template.Spec)
			if err != nil {
				return err
			}
			existingDep.Annotations[LastAppliedConfigAnnotation] = string(b)
			existingDep.Spec.Template.Spec = desiredDep.Spec.Template.Spec
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return result.(*appsv1.Deployment), err
}

func (r *SeaweedReconciler) CreateOrUpdateService(svc *corev1.Service) (*corev1.Service, error) {
	result, err := r.CreateOrUpdate(svc, func(existing, desired runtime.Object) error {
		existingSvc := existing.(*corev1.Service)
		desiredSvc := desired.(*corev1.Service)

		if existingSvc.Annotations == nil {
			existingSvc.Annotations = map[string]string{}
		}
		for k, v := range desiredSvc.Annotations {
			existingSvc.Annotations[k] = v
		}
		existingSvc.Labels = desiredSvc.Labels
		equal, err := ServiceEqual(desiredSvc, existingSvc)
		if err != nil {
			return err
		}
		if !equal {
			// record desiredSvc Spec in annotations in favor of future equality checks
			b, err := json.Marshal(desiredSvc.Spec)
			if err != nil {
				return err
			}
			existingSvc.Annotations[LastAppliedConfigAnnotation] = string(b)
			clusterIp := existingSvc.Spec.ClusterIP
			ports := existingSvc.Spec.Ports
			serviceType := existingSvc.Spec.Type

			existingSvc.Spec = desiredSvc.Spec
			existingSvc.Spec.ClusterIP = clusterIp

			// If both the existing service and the desired service are of type NodePort or LoadBalancer, keep the nodePort unchanged.
			if (serviceType == corev1.ServiceTypeNodePort || serviceType == corev1.ServiceTypeLoadBalancer) &&
				(desiredSvc.Spec.Type == corev1.ServiceTypeNodePort || desiredSvc.Spec.Type == corev1.ServiceTypeLoadBalancer) {
				for i, dport := range existingSvc.Spec.Ports {
					for _, eport := range ports {
						// Because the portName could be edited,
						// we use the Port number to link the desired Service port and the existing Service port in the nested loop
						if dport.Port == eport.Port && dport.Protocol == eport.Protocol {
							dport.NodePort = eport.NodePort
							existingSvc.Spec.Ports[i] = dport
							break
						}
					}
				}
			}
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return result.(*corev1.Service), nil
}

func (r *SeaweedReconciler) CreateOrUpdateIngress(ingress *extensionsv1beta1.Ingress) (*extensionsv1beta1.Ingress, error) {
	result, err := r.CreateOrUpdate(ingress, func(existing, desired runtime.Object) error {
		existingIngress := existing.(*extensionsv1beta1.Ingress)
		desiredIngress := desired.(*extensionsv1beta1.Ingress)

		if existingIngress.Annotations == nil {
			existingIngress.Annotations = map[string]string{}
		}
		for k, v := range desiredIngress.Annotations {
			existingIngress.Annotations[k] = v
		}
		existingIngress.Labels = desiredIngress.Labels
		equal, err := IngressEqual(desiredIngress, existingIngress)
		if err != nil {
			return err
		}
		if !equal {
			// record desiredIngress Spec in annotations in favor of future equality checks
			b, err := json.Marshal(desiredIngress.Spec)
			if err != nil {
				return err
			}
			existingIngress.Annotations[LastAppliedConfigAnnotation] = string(b)
			existingIngress.Spec = desiredIngress.Spec
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return result.(*extensionsv1beta1.Ingress), nil
}

func (r *SeaweedReconciler) CreateOrUpdateConfigMap(configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
	result, err := r.CreateOrUpdate(configMap, func(existing, desired runtime.Object) error {
		existingConfigMap := existing.(*corev1.ConfigMap)
		desiredConfigMap := desired.(*corev1.ConfigMap)

		if existingConfigMap.Annotations == nil {
			existingConfigMap.Annotations = map[string]string{}
		}
		for k, v := range desiredConfigMap.Annotations {
			existingConfigMap.Annotations[k] = v
		}
		existingConfigMap.Labels = desiredConfigMap.Labels
		existingConfigMap.Data = desiredConfigMap.Data
		return nil
	})
	if err != nil {
		return nil, err
	}
	return result.(*corev1.ConfigMap), nil
}

// EmptyClone creates a clone of the resource with the same name and namespace (if namespace-scoped), with other fields unset
func EmptyClone(obj runtime.Object) (runtime.Object, error) {
	meta, ok := obj.(metav1.Object)
	if !ok {
		return nil, fmt.Errorf("Obj %v is not a metav1.Object, cannot call EmptyClone", obj)
	}
	gvk, err := InferObjectKind(obj)
	if err != nil {
		return nil, err
	}
	inst, err := scheme.Scheme.New(gvk)
	if err != nil {
		return nil, err
	}
	instMeta, ok := inst.(metav1.Object)
	if !ok {
		return nil, fmt.Errorf("New instance %v created from scheme is not a metav1.Object, EmptyClone failed", inst)
	}
	instMeta.SetName(meta.GetName())
	instMeta.SetNamespace(meta.GetNamespace())
	return inst, nil
}

// InferObjectKind infers the object kind
func InferObjectKind(obj runtime.Object) (schema.GroupVersionKind, error) {
	gvks, _, err := scheme.Scheme.ObjectKinds(obj)
	if err != nil {
		return schema.GroupVersionKind{}, err
	}
	if len(gvks) != 1 {
		return schema.GroupVersionKind{}, fmt.Errorf("Object %v has ambiguous GVK", obj)
	}
	return gvks[0], nil
}

// GetDeploymentLastAppliedPodTemplate gets the last applied pod template from the Deployment's annotation
func GetDeploymentLastAppliedPodTemplate(dep *appsv1.Deployment) (*corev1.PodSpec, error) {
	applied, ok := dep.Annotations[LastAppliedPodTemplate]
	if !ok {
		return nil, fmt.Errorf("deployment:[%s/%s] not found spec's apply config", dep.GetNamespace(), dep.GetName())
	}
	podSpec := &corev1.PodSpec{}
	err := json.Unmarshal([]byte(applied), podSpec)
	if err != nil {
		return nil, err
	}
	return podSpec, nil
}

// DeploymentPodSpecChanged checks whether the new deployment differs from the old one's last-applied-config
func DeploymentPodSpecChanged(newDep *appsv1.Deployment, oldDep *appsv1.Deployment) bool {
	lastAppliedPodTemplate, err := GetDeploymentLastAppliedPodTemplate(oldDep)
	if err != nil {
		klog.Warningf("error get last-applied-config of deployment %s/%s: %v", oldDep.Namespace, oldDep.Name, err)
		return true
	}
	return !apiequality.Semantic.DeepEqual(newDep.Spec.Template.Spec, lastAppliedPodTemplate)
}

// ServiceEqual compares the new Service's spec with the old Service's last applied config
func ServiceEqual(newSvc, oldSvc *corev1.Service) (bool, error) {
	oldSpec := corev1.ServiceSpec{}
	if lastAppliedConfig, ok := oldSvc.Annotations[LastAppliedConfigAnnotation]; ok {
		err := json.Unmarshal([]byte(lastAppliedConfig), &oldSpec)
		if err != nil {
			klog.Errorf("unmarshal ServiceSpec: [%s/%s]'s applied config failed, error: %v", oldSvc.GetNamespace(), oldSvc.GetName(), err)
			return false, err
		}
		return apiequality.Semantic.DeepEqual(oldSpec, newSvc.Spec), nil
	}
	return false, nil
}

func IngressEqual(newIngress, oldIngres *extensionsv1beta1.Ingress) (bool, error) {
	oldIngressSpec := extensionsv1beta1.IngressSpec{}
	if lastAppliedConfig, ok := oldIngres.Annotations[LastAppliedConfigAnnotation]; ok {
		err := json.Unmarshal([]byte(lastAppliedConfig), &oldIngressSpec)
		if err != nil {
			klog.Errorf("unmarshal IngressSpec: [%s/%s]'s applied config failed, error: %v", oldIngres.GetNamespace(), oldIngres.GetName(), err)
			return false, err
		}
		return apiequality.Semantic.DeepEqual(oldIngressSpec, newIngress.Spec), nil
	}
	return false, nil
}
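A hedged sketch of the calling convention for the generic CreateOrUpdate above: hand it the desired object plus a MergeFn that copies only the fields the controller owns onto the existing object. The helper name ensureConfigMapSketch is made up for illustration; it assumes it lives in the same controllers package.

package controllers

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// ensureConfigMapSketch illustrates the CreateOrUpdate + MergeFn pattern on a ConfigMap.
func (r *SeaweedReconciler) ensureConfigMapSketch(desired *corev1.ConfigMap) error {
	_, err := r.CreateOrUpdate(desired, func(existing, desiredObj runtime.Object) error {
		e := existing.(*corev1.ConfigMap)
		d := desiredObj.(*corev1.ConfigMap)
		e.Labels = d.Labels
		e.Data = d.Data // overwrite data; leave server-populated metadata alone
		return nil
	})
	return err
}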
@@ -1,102 +0,0 @@
package controllers

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"

	appsv1 "k8s.io/api/apps/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
	label "github.com/seaweedfs/seaweedfs-operator/controllers/label"
)

func (r *SeaweedReconciler) ensureVolumeServers(seaweedCR *seaweedv1.Seaweed) (done bool, result ctrl.Result, err error) {
	_ = context.Background()
	_ = r.Log.WithValues("seaweed", seaweedCR.Name)

	if done, result, err = r.ensureVolumeServerPeerService(seaweedCR); done {
		return
	}

	if done, result, err = r.ensureVolumeServerServices(seaweedCR); done {
		return
	}

	if done, result, err = r.ensureVolumeServerStatefulSet(seaweedCR); done {
		return
	}

	return
}

func (r *SeaweedReconciler) ensureVolumeServerStatefulSet(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
	log := r.Log.WithValues("sw-volume-statefulset", seaweedCR.Name)

	volumeServerStatefulSet := r.createVolumeServerStatefulSet(seaweedCR)
	if err := controllerutil.SetControllerReference(seaweedCR, volumeServerStatefulSet, r.Scheme); err != nil {
		return ReconcileResult(err)
	}
	_, err := r.CreateOrUpdate(volumeServerStatefulSet, func(existing, desired runtime.Object) error {
		existingStatefulSet := existing.(*appsv1.StatefulSet)
		desiredStatefulSet := desired.(*appsv1.StatefulSet)

		existingStatefulSet.Spec.Replicas = desiredStatefulSet.Spec.Replicas
		existingStatefulSet.Spec.Template.Spec = desiredStatefulSet.Spec.Template.Spec
		return nil
	})

	log.Info("ensure volume stateful set " + volumeServerStatefulSet.Name)
	return ReconcileResult(err)
}

func (r *SeaweedReconciler) ensureVolumeServerPeerService(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {

	log := r.Log.WithValues("sw-volume-peer-service", seaweedCR.Name)

	volumeServerPeerService := r.createVolumeServerPeerService(seaweedCR)
	if err := controllerutil.SetControllerReference(seaweedCR, volumeServerPeerService, r.Scheme); err != nil {
		return ReconcileResult(err)
	}
	_, err := r.CreateOrUpdateService(volumeServerPeerService)

	log.Info("ensure volume peer service " + volumeServerPeerService.Name)
	return ReconcileResult(err)
}

func (r *SeaweedReconciler) ensureVolumeServerServices(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {

	for i := 0; i < int(seaweedCR.Spec.Volume.Replicas); i++ {
		done, result, err := r.ensureVolumeServerService(seaweedCR, i)
		if done {
			return done, result, err
		}
	}

	return ReconcileResult(nil)
}

func (r *SeaweedReconciler) ensureVolumeServerService(seaweedCR *seaweedv1.Seaweed, i int) (bool, ctrl.Result, error) {

	log := r.Log.WithValues("sw-volume-service", seaweedCR.Name, "index", i)

	volumeServerService := r.createVolumeServerService(seaweedCR, i)
	if err := controllerutil.SetControllerReference(seaweedCR, volumeServerService, r.Scheme); err != nil {
		return ReconcileResult(err)
	}
	_, err := r.CreateOrUpdateService(volumeServerService)

	log.Info("ensure volume service "+volumeServerService.Name, "index", i)
	return ReconcileResult(err)
}

func labelsForVolumeServer(name string) map[string]string {
	return map[string]string{
		label.ManagedByLabelKey: "seaweedfs-operator",
		label.NameLabelKey:      "seaweedfs",
		label.ComponentLabelKey: "volume",
		label.InstanceLabelKey:  name,
	}
}
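The per-replica loop above creates one Service per volume pod, each named <cr>-volume-<i> and selected by the statefulset.kubernetes.io/pod-name label (see createVolumeServerService below). A tiny sketch of the resulting names, with a made-up CR name and replica count:

package main

import "fmt"

func main() {
	name, replicas := "seaweed1", 3 // hypothetical CR name and Spec.Volume.Replicas
	for i := 0; i < replicas; i++ {
		fmt.Printf("%s-volume-%d\n", name, i)
	}
}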
@@ -1,100 +0,0 @@
package controllers

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs-operator/controllers/label"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

func (r *SeaweedReconciler) createVolumeServerPeerService(m *seaweedv1.Seaweed) *corev1.Service {
	labels := labelsForVolumeServer(m.Name)

	dep := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      m.Name + "-volume-peer",
			Namespace: m.Namespace,
			Labels:    labels,
			Annotations: map[string]string{
				"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
			},
		},
		Spec: corev1.ServiceSpec{
			ClusterIP:                "None",
			PublishNotReadyAddresses: true,
			Ports: []corev1.ServicePort{
				{
					Name:       "volume-http",
					Protocol:   corev1.Protocol("TCP"),
					Port:       seaweedv1.VolumeHTTPPort,
					TargetPort: intstr.FromInt(seaweedv1.VolumeHTTPPort),
				},
				{
					Name:       "volume-grpc",
					Protocol:   corev1.Protocol("TCP"),
					Port:       seaweedv1.VolumeGRPCPort,
					TargetPort: intstr.FromInt(seaweedv1.VolumeGRPCPort),
				},
			},
			Selector: labels,
		},
	}
	return dep
}

func (r *SeaweedReconciler) createVolumeServerService(m *seaweedv1.Seaweed, i int) *corev1.Service {
	labels := labelsForVolumeServer(m.Name)
	serviceName := fmt.Sprintf("%s-volume-%d", m.Name, i)
	labels[label.PodName] = serviceName

	dep := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      serviceName,
			Namespace: m.Namespace,
			Labels:    labels,
			Annotations: map[string]string{
				"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
			},
		},
		Spec: corev1.ServiceSpec{
			PublishNotReadyAddresses: true,
			Ports: []corev1.ServicePort{
				{
					Name:       "volume-http",
					Protocol:   corev1.Protocol("TCP"),
					Port:       seaweedv1.VolumeHTTPPort,
					TargetPort: intstr.FromInt(seaweedv1.VolumeHTTPPort),
				},
				{
					Name:       "volume-grpc",
					Protocol:   corev1.Protocol("TCP"),
					Port:       seaweedv1.VolumeGRPCPort,
					TargetPort: intstr.FromInt(seaweedv1.VolumeGRPCPort),
				},
			},
			Selector: labels,
		},
	}

	if m.Spec.Volume.Service != nil {
		svcSpec := m.Spec.Volume.Service
		dep.Annotations = copyAnnotations(svcSpec.Annotations)

		if svcSpec.Type != "" {
			dep.Spec.Type = svcSpec.Type
		}

		if svcSpec.ClusterIP != nil {
			dep.Spec.ClusterIP = *svcSpec.ClusterIP
		}

		if svcSpec.LoadBalancerIP != nil {
			dep.Spec.LoadBalancerIP = *svcSpec.LoadBalancerIP
		}
	}

	return dep
}
@@ -1,159 +0,0 @@
package controllers

import (
	"fmt"
	"strings"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

func buildVolumeServerStartupScript(m *seaweedv1.Seaweed, dirs []string) string {
	commands := []string{"weed", "-logtostderr=true", "volume"}
	commands = append(commands, fmt.Sprintf("-port=%d", seaweedv1.VolumeHTTPPort))
	commands = append(commands, "-max=0")
	commands = append(commands, fmt.Sprintf("-ip=$(POD_NAME).%s-volume-peer.%s", m.Name, m.Namespace))
	if m.Spec.HostSuffix != nil && *m.Spec.HostSuffix != "" {
		commands = append(commands, fmt.Sprintf("-publicUrl=$(POD_NAME).%s", *m.Spec.HostSuffix))
	}
	commands = append(commands, fmt.Sprintf("-mserver=%s", getMasterPeersString(m)))
	commands = append(commands, fmt.Sprintf("-dir=%s", strings.Join(dirs, ",")))

	return strings.Join(commands, " ")
}

func (r *SeaweedReconciler) createVolumeServerStatefulSet(m *seaweedv1.Seaweed) *appsv1.StatefulSet {
	labels := labelsForVolumeServer(m.Name)
	replicas := int32(m.Spec.Volume.Replicas)
	rollingUpdatePartition := int32(0)
	enableServiceLinks := false

	volumeCount := int(m.Spec.VolumeServerDiskCount)
	volumeRequests := corev1.ResourceList{
		corev1.ResourceStorage: m.Spec.Volume.Requests[corev1.ResourceStorage],
	}

	// connect all the disks
	var volumeMounts []corev1.VolumeMount
	var volumes []corev1.Volume
	var persistentVolumeClaims []corev1.PersistentVolumeClaim
	var dirs []string
	for i := 0; i < volumeCount; i++ {
		volumeMounts = append(volumeMounts, corev1.VolumeMount{
			Name:      fmt.Sprintf("mount%d", i),
			ReadOnly:  false,
			MountPath: fmt.Sprintf("/data%d/", i),
		})
		volumes = append(volumes, corev1.Volume{
			Name: fmt.Sprintf("mount%d", i),
			VolumeSource: corev1.VolumeSource{
				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
					ClaimName: fmt.Sprintf("mount%d", i),
					ReadOnly:  false,
				},
			},
		})
		persistentVolumeClaims = append(persistentVolumeClaims, corev1.PersistentVolumeClaim{
			ObjectMeta: metav1.ObjectMeta{
				Name: fmt.Sprintf("mount%d", i),
			},
			Spec: corev1.PersistentVolumeClaimSpec{
				StorageClassName: m.Spec.Volume.StorageClassName,
				AccessModes: []corev1.PersistentVolumeAccessMode{
					corev1.ReadWriteOnce,
				},
				Resources: corev1.ResourceRequirements{
					Requests: volumeRequests,
				},
			},
		})
		dirs = append(dirs, fmt.Sprintf("/data%d", i))
	}

	volumePodSpec := m.BaseVolumeSpec().BuildPodSpec()
	volumePodSpec.EnableServiceLinks = &enableServiceLinks
	volumePodSpec.Containers = []corev1.Container{{
		Name:            "volume",
		Image:           m.Spec.Image,
		ImagePullPolicy: m.BaseVolumeSpec().ImagePullPolicy(),
		Env:             append(m.BaseVolumeSpec().Env(), kubernetesEnvVars...),
		Command: []string{
			"/bin/sh",
			"-ec",
			buildVolumeServerStartupScript(m, dirs),
		},
		Ports: []corev1.ContainerPort{
			{
				ContainerPort: seaweedv1.VolumeHTTPPort,
				Name:          "volume-http",
			},
			{
				ContainerPort: seaweedv1.VolumeGRPCPort,
				Name:          "volume-grpc",
			},
		},
		ReadinessProbe: &corev1.Probe{
			Handler: corev1.Handler{
				HTTPGet: &corev1.HTTPGetAction{
					Path:   "/status",
					Port:   intstr.FromInt(seaweedv1.VolumeHTTPPort),
					Scheme: corev1.URISchemeHTTP,
				},
			},
			InitialDelaySeconds: 15,
			TimeoutSeconds:      5,
			PeriodSeconds:       90,
			SuccessThreshold:    1,
			FailureThreshold:    100,
		},
		LivenessProbe: &corev1.Probe{
			Handler: corev1.Handler{
				HTTPGet: &corev1.HTTPGetAction{
					Path:   "/status",
					Port:   intstr.FromInt(seaweedv1.VolumeHTTPPort),
					Scheme: corev1.URISchemeHTTP,
				},
			},
			InitialDelaySeconds: 20,
			TimeoutSeconds:      5,
			PeriodSeconds:       90,
			SuccessThreshold:    1,
			FailureThreshold:    6,
		},
		VolumeMounts: volumeMounts,
	}}
	volumePodSpec.Volumes = volumes

	dep := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      m.Name + "-volume",
			Namespace: m.Namespace,
		},
		Spec: appsv1.StatefulSetSpec{
			ServiceName:         m.Name + "-volume-peer",
			PodManagementPolicy: appsv1.ParallelPodManagement,
			Replicas:            &replicas,
			UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
				Type: appsv1.RollingUpdateStatefulSetStrategyType,
				RollingUpdate: &appsv1.RollingUpdateStatefulSetStrategy{
					Partition: &rollingUpdatePartition,
				},
			},
			Selector: &metav1.LabelSelector{
				MatchLabels: labels,
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labels,
				},
				Spec: volumePodSpec,
			},
			VolumeClaimTemplates: persistentVolumeClaims,
		},
	}
	return dep
}
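A small sketch of the per-disk wiring in createVolumeServerStatefulSet: for each disk the operator adds a PVC template and a mount named "mount<i>" at /data<i>/, and the same loop feeds the -dir flag of the volume server. The disk count here is a made-up example value.

package main

import (
	"fmt"
	"strings"
)

func main() {
	diskCount := 2 // hypothetical Spec.VolumeServerDiskCount
	var dirs []string
	for i := 0; i < diskCount; i++ {
		fmt.Printf("claim=mount%d mountPath=/data%d/\n", i, i)
		dirs = append(dirs, fmt.Sprintf("/data%d", i))
	}
	fmt.Println("-dir=" + strings.Join(dirs, ","))
}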
@@ -1,73 +0,0 @@
package controllers

import (
	"fmt"
	"strings"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
)

const (
	masterPeerAddressPattern = "%s-master-%d.%s-master-peer.%s:9333"
)

var (
	kubernetesEnvVars = []corev1.EnvVar{
		{
			Name: "POD_IP",
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{
					FieldPath: "status.podIP",
				},
			},
		},
		{
			Name: "POD_NAME",
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{
					FieldPath: "metadata.name",
				},
			},
		},
		{
			Name: "NAMESPACE",
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{
					FieldPath: "metadata.namespace",
				},
			},
		},
	}
)

func ReconcileResult(err error) (bool, ctrl.Result, error) {
	if err != nil {
		return true, ctrl.Result{}, err
	}
	return false, ctrl.Result{}, nil
}

func getMasterAddresses(namespace string, name string, replicas int32) []string {
	peersAddresses := make([]string, 0, replicas)
	for i := int32(0); i < replicas; i++ {
		peersAddresses = append(peersAddresses, fmt.Sprintf(masterPeerAddressPattern, name, i, name, namespace))
	}
	return peersAddresses
}

func getMasterPeersString(m *seaweedv1.Seaweed) string {
	return strings.Join(getMasterAddresses(m.Namespace, m.Name, m.Spec.Master.Replicas), ",")
}

func copyAnnotations(src map[string]string) map[string]string {
	if src == nil {
		return nil
	}
	dst := map[string]string{}
	for k, v := range src {
		dst[k] = v
	}
	return dst
}
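The kubernetesEnvVars above inject the pod's own identity through the downward API; this is what lets Kubernetes expand the $(POD_NAME) references in the container commands built by the startup-script helpers into a stable per-pod DNS name. A trivial sketch of reading the same variables from inside a container (outside a pod they are simply empty):

package main

import (
	"fmt"
	"os"
)

func main() {
	fmt.Printf("POD_NAME=%q NAMESPACE=%q POD_IP=%q\n",
		os.Getenv("POD_NAME"), os.Getenv("NAMESPACE"), os.Getenv("POD_IP"))
}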
@@ -1,22 +0,0 @@
package label

const (
	// The following labels are recommended by kubernetes https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/

	// ManagedByLabelKey is a Kubernetes recommended label key; it represents the tool being used to manage the operation of an application.
	// For resources managed by SeaweedFS Operator, its value is always seaweedfs-operator
	ManagedByLabelKey string = "app.kubernetes.io/managed-by"
	// ComponentLabelKey is a Kubernetes recommended label key; it represents the component within the architecture
	ComponentLabelKey string = "app.kubernetes.io/component"
	// NameLabelKey is a Kubernetes recommended label key; it represents the name of the application
	NameLabelKey string = "app.kubernetes.io/name"
	// InstanceLabelKey is a Kubernetes recommended label key; it represents a unique name identifying the instance of an application.
	// It's set by helm when installing a release
	InstanceLabelKey string = "app.kubernetes.io/instance"
	// VersionLabelKey is a Kubernetes recommended label key; it represents the version of the app
	VersionLabelKey string = "app.kubernetes.io/version"

	// PodName is to select a pod by name
	// https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-selector
	PodName string = "statefulset.kubernetes.io/pod-name"
)
@@ -0,0 +1,181 @@
/*

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"

	"context"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"reflect"

	"github.com/go-logr/logr"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

// MasterReconciler reconciles a Master object
type MasterReconciler struct {
	client.Client
	Log    logr.Logger
	Scheme *runtime.Scheme
}

// +kubebuilder:rbac:groups=seaweed.seaweedfs.com,resources=masters,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=seaweed.seaweedfs.com,resources=masters/status,verbs=get;update;patch

func (r *MasterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	ctx := context.Background()
	log := r.Log.WithValues("master", req.NamespacedName)

	// Fetch the Master instance
	master := &seaweedv1.Master{}
	err := r.Get(ctx, req.NamespacedName, master)
	if err != nil {
		if errors.IsNotFound(err) {
			// Request object not found, could have been deleted after reconcile request.
			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
			// Return and don't requeue
			log.Info("Master resource not found. Ignoring since object must be deleted")
			return ctrl.Result{}, nil
		}
		// Error reading the object - requeue the request.
		log.Error(err, "Failed to get Master")
		return ctrl.Result{}, err
	}

	// Check if the deployment already exists, if not create a new one
	found := &appsv1.Deployment{}
	err = r.Get(ctx, types.NamespacedName{Name: master.Name, Namespace: master.Namespace}, found)
	if err != nil && errors.IsNotFound(err) {
		// Define a new deployment
		dep := r.deploymentForMaster(master)
		log.Info("Creating a new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name)
		err = r.Create(ctx, dep)
		if err != nil {
			log.Error(err, "Failed to create new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name)
			return ctrl.Result{}, err
		}
		// Deployment created successfully - return and requeue
		return ctrl.Result{Requeue: true}, nil
	} else if err != nil {
		log.Error(err, "Failed to get Deployment")
		return ctrl.Result{}, err
	}

	// Ensure the deployment size is the same as the spec
	size := master.Spec.Size
	if *found.Spec.Replicas != size {
		found.Spec.Replicas = &size
		err = r.Update(ctx, found)
		if err != nil {
			log.Error(err, "Failed to update Deployment", "Deployment.Namespace", found.Namespace, "Deployment.Name", found.Name)
			return ctrl.Result{}, err
		}
		// Spec updated - return and requeue
		return ctrl.Result{Requeue: true}, nil
	}

	// Update the Master status with the pod names
	// List the pods for this master's deployment
	podList := &corev1.PodList{}
	listOpts := []client.ListOption{
		client.InNamespace(master.Namespace),
		client.MatchingLabels(labelsForMaster(master.Name)),
	}
	if err = r.List(ctx, podList, listOpts...); err != nil {
		log.Error(err, "Failed to list pods", "Master.Namespace", master.Namespace, "Master.Name", master.Name)
		return ctrl.Result{}, err
	}
	podNames := getPodNames(podList.Items)

	// Update status.Nodes if needed
	if !reflect.DeepEqual(podNames, master.Status.Nodes) {
		master.Status.Nodes = podNames
		err := r.Status().Update(ctx, master)
		if err != nil {
			log.Error(err, "Failed to update Master status")
			return ctrl.Result{}, err
		}
	}

	return ctrl.Result{}, nil
}

// deploymentForMaster returns a master Deployment object
func (r *MasterReconciler) deploymentForMaster(m *seaweedv1.Master) *appsv1.Deployment {
	ls := labelsForMaster(m.Name)
	replicas := m.Spec.Size

	dep := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      m.Name,
			Namespace: m.Namespace,
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: ls,
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: ls,
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{
						Image:   "Master:1.4.36-alpine",
						Name:    "Master",
						Command: []string{"Master", "-m=64", "-o", "modern", "-v"},
						Ports: []corev1.ContainerPort{{
							ContainerPort: 11211,
							Name:          "Master",
						}},
					}},
				},
			},
		},
	}
	// Set Master instance as the owner and controller
	ctrl.SetControllerReference(m, dep, r.Scheme)
	return dep
}

// labelsForMaster returns the labels for selecting the resources
// belonging to the given Master CR name.
func labelsForMaster(name string) map[string]string {
	return map[string]string{"app": "Master", "Master_cr": name}
}

// getPodNames returns the pod names of the array of pods passed in
func getPodNames(pods []corev1.Pod) []string {
	var podNames []string
	for _, pod := range pods {
		podNames = append(podNames, pod.Name)
	}
	return podNames
}

func (r *MasterReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&seaweedv1.Master{}).
		Complete(r)
}
@ -1,109 +0,0 @@
/*

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
	"context"
	"time"

	"github.com/go-logr/logr"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

// SeaweedReconciler reconciles a Seaweed object
type SeaweedReconciler struct {
	client.Client
	Log    logr.Logger
	Scheme *runtime.Scheme
}

// +kubebuilder:rbac:groups=seaweed.seaweedfs.com,resources=seaweeds,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=seaweed.seaweedfs.com,resources=seaweeds/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=extensions,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;

// Reconcile implements the reconciliation logic
func (r *SeaweedReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	ctx := context.Background()
	log := r.Log.WithValues("seaweed", req.NamespacedName)

	log.Info("start Reconcile ...")

	seaweedCR, done, result, err := r.findSeaweedCustomResourceInstance(ctx, log, req)
	if done {
		return result, err
	}

	if done, result, err = r.ensureMaster(seaweedCR); done {
		return result, err
	}

	if done, result, err = r.ensureVolumeServers(seaweedCR); done {
		return result, err
	}

	if done, result, err = r.ensureFilerServers(seaweedCR); done {
		return result, err
	}

	if done, result, err = r.ensureSeaweedIngress(seaweedCR); done {
		return result, err
	}

	if false {
		if done, result, err = r.maintenance(seaweedCR); done {
			return result, err
		}
	}

	return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
}

func (r *SeaweedReconciler) findSeaweedCustomResourceInstance(ctx context.Context, log logr.Logger, req ctrl.Request) (*seaweedv1.Seaweed, bool, ctrl.Result, error) {
	// fetch the Seaweed CR instance
	seaweedCR := &seaweedv1.Seaweed{}
	err := r.Get(ctx, req.NamespacedName, seaweedCR)
	if err != nil {
		if errors.IsNotFound(err) {
			// Request object not found, could have been deleted after reconcile request.
			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
			// Return and don't requeue.
			log.Info("Seaweed CR not found. Ignoring since object must be deleted")
			return nil, true, ctrl.Result{RequeueAfter: time.Second * 5}, nil
		}
		// Error reading the object - requeue the request.
		log.Error(err, "Failed to get SeaweedCR")
		return nil, true, ctrl.Result{}, err
	}
	log.Info("Get master " + seaweedCR.Name)
	return seaweedCR, false, ctrl.Result{}, nil
}

func (r *SeaweedReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&seaweedv1.Seaweed{}).
		Complete(r)
}

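Note: the ensureMaster / ensureVolumeServers / ensureFilerServers / ensureSeaweedIngress helpers called above live in other files of this branch and are not part of this diff. As a rough illustration only, a phase in that (done, result, err) style could look like the sketch below; the helper name ensureExampleConfigMap, the "-example" name suffix, and the ConfigMap contents are invented for illustration and do not exist in the repository.

// Minimal sketch (assumption, not from this diff) of one reconcile phase in the
// (done, result, err) style used by the Reconcile function above.
package controllers

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

// ensureExampleConfigMap creates a ConfigMap when it is missing and reports whether
// Reconcile should return now (done=true) with the accompanying result and error.
func (r *SeaweedReconciler) ensureExampleConfigMap(ctx context.Context, seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
	key := types.NamespacedName{Namespace: seaweedCR.Namespace, Name: seaweedCR.Name + "-example"} // hypothetical name

	existing := &corev1.ConfigMap{}
	err := r.Get(ctx, key, existing)
	if err == nil {
		// Already present: let Reconcile continue with the next phase.
		return false, ctrl.Result{}, nil
	}
	if !errors.IsNotFound(err) {
		// Unexpected read error: stop this pass and surface the error.
		return true, ctrl.Result{}, err
	}

	desired := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: key.Namespace, Name: key.Name},
		Data:       map[string]string{"example": "value"},
	}
	if err := r.Create(ctx, desired); err != nil {
		return true, ctrl.Result{}, err
	}
	// Created: requeue so the next pass observes the new object.
	return true, ctrl.Result{Requeue: true}, nil
}

In this pattern, Reconcile returns at the first phase that reports done=true and otherwise falls through to the periodic 5-second requeue shown above.
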
@ -1,94 +0,0 @@
package controllers

import (
	"context"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

var (
	TrueValue   = true
	FalseVallue = false
)

var _ = Describe("Seaweed Controller", func() {
	Context("Basic Functionality", func() {
		It("Should create StatefulSets", func() {
			By("By creating a new Seaweed", func() {
				const (
					namespace = "default"
					name      = "test-seaweed"

					timeout  = time.Second * 30
					interval = time.Millisecond * 250
				)

				ctx := context.Background()
				seaweed := &seaweedv1.Seaweed{
					ObjectMeta: metav1.ObjectMeta{
						Namespace: namespace,
						Name:      name,
					},
					Spec: seaweedv1.SeaweedSpec{
						Image:                 "chrislusf/seaweedfs:2.96",
						VolumeServerDiskCount: 1,
						Master: &seaweedv1.MasterSpec{
							Replicas:        3,
							ConcurrentStart: &TrueValue,
						},
						Volume: &seaweedv1.VolumeSpec{
							Replicas: 1,
							ResourceRequirements: corev1.ResourceRequirements{
								Requests: corev1.ResourceList{
									corev1.ResourceStorage: resource.MustParse("1Gi"),
								},
							},
						},
						Filer: &seaweedv1.FilerSpec{
							Replicas: 2,
						},
					},
				}
				Expect(k8sClient.Create(ctx, seaweed)).Should(Succeed())

				masterKey := types.NamespacedName{Name: name + "-master", Namespace: namespace}
				volumeKey := types.NamespacedName{Name: name + "-volume", Namespace: namespace}
				filerKey := types.NamespacedName{Name: name + "-filer", Namespace: namespace}

				masterSts := &appsv1.StatefulSet{}
				volumeSts := &appsv1.StatefulSet{}
				filerSts := &appsv1.StatefulSet{}

				Eventually(func() bool {
					err := k8sClient.Get(ctx, masterKey, masterSts)
					return err == nil
				}, timeout, interval).Should(BeTrue())
				Expect(masterSts.Spec.Replicas).ShouldNot(BeNil())
				Expect(*masterSts.Spec.Replicas).Should(Equal(seaweed.Spec.Master.Replicas))

				Eventually(func() bool {
					err := k8sClient.Get(ctx, volumeKey, volumeSts)
					return err == nil
				}, timeout, interval).Should(BeTrue())
				Expect(volumeSts.Spec.Replicas).ShouldNot(BeNil())
				Expect(*volumeSts.Spec.Replicas).Should(Equal(seaweed.Spec.Volume.Replicas))

				Eventually(func() bool {
					err := k8sClient.Get(ctx, filerKey, filerSts)
					return err == nil
				}, timeout, interval).Should(BeTrue())
				Expect(filerSts.Spec.Replicas).ShouldNot(BeNil())
				Expect(*filerSts.Spec.Replicas).Should(Equal(seaweed.Spec.Filer.Replicas))
			})
		})
	})
})

@ -1,37 +0,0 @@
package controllers

import (
	"io/ioutil"
	"os"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
	"github.com/seaweedfs/seaweedfs-operator/controllers/swadmin"
	ctrl "sigs.k8s.io/controller-runtime"
)

func (r *SeaweedReconciler) maintenance(m *seaweedv1.Seaweed) (done bool, result ctrl.Result, err error) {

	masters := getMasterPeersString(m)

	r.Log.V(0).Info("wait to connect to masters", "masters", masters)

	// this step blocks, since the operator cannot access the masters when running from outside of the k8s cluster
	sa := swadmin.NewSeaweedAdmin(masters, ioutil.Discard)

	// For now this is an example of the admin commands;
	// the master by default has some maintenance commands already.
	r.Log.V(0).Info("volume.list")
	sa.Output = os.Stdout
	if err := sa.ProcessCommand("volume.list"); err != nil {
		r.Log.V(0).Info("volume.list", "error", err)
	}

	sa.ProcessCommand("lock")
	if err := sa.ProcessCommand("volume.balance -force"); err != nil {
		r.Log.V(0).Info("volume.balance", "error", err)
	}
	sa.ProcessCommand("unlock")

	return ReconcileResult(nil)
}

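Note: the ReconcileResult helper used above is defined elsewhere in the repository and is not shown in this diff. Judging only from this call site and from how the other reconcile phases return (done, result, err), a plausible shape is sketched below; treat it as an assumption, not the actual implementation.

// Assumed sketch of the ReconcileResult helper referenced above; the real
// implementation lives elsewhere in the repository and may differ.
package controllers

import ctrl "sigs.k8s.io/controller-runtime"

// ReconcileResult converts a plain error into the (done, result, err) triple
// used by the reconcile phases: a non-nil error ends the current pass.
func ReconcileResult(err error) (bool, ctrl.Result, error) {
	if err != nil {
		return true, ctrl.Result{}, err
	}
	return false, ctrl.Result{}, nil
}
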
@ -24,7 +24,6 @@ import (
 	. "github.com/onsi/gomega"
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
-	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/envtest"
 	"sigs.k8s.io/controller-runtime/pkg/envtest/printer"

@ -69,23 +68,6 @@ var _ = BeforeSuite(func(done Done) {
 	// +kubebuilder:scaffold:scheme

 	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
-	k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{
-		Scheme: scheme.Scheme,
-	})
-	Expect(err).ToNot(HaveOccurred())
-
-	err = (&SeaweedReconciler{
-		Client: k8sManager.GetClient(),
-		Log:    ctrl.Log.WithName("controllers").WithName("Seaweed"),
-		Scheme: k8sManager.GetScheme(),
-	}).SetupWithManager(k8sManager)
-	Expect(err).ToNot(HaveOccurred())
-
-	go func() {
-		err = k8sManager.Start(ctrl.SetupSignalHandler())
-		Expect(err).ToNot(HaveOccurred())
-	}()
-
 	Expect(err).ToNot(HaveOccurred())
 	Expect(k8sClient).ToNot(BeNil())

@ -1,67 +0,0 @@
package swadmin

import (
	"fmt"
	"io"
	"regexp"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/shell"
	"google.golang.org/grpc"
)

type SeaweedAdmin struct {
	commandReg *regexp.Regexp
	commandEnv *shell.CommandEnv
	Output     io.Writer
}

func NewSeaweedAdmin(masters string, output io.Writer) *SeaweedAdmin {
	var shellOptions shell.ShellOptions
	shellOptions.GrpcDialOption = grpc.WithInsecure()
	shellOptions.Masters = &masters

	commandEnv := shell.NewCommandEnv(shellOptions)
	reg, _ := regexp.Compile(`'.*?'|".*?"|\S+`)

	go commandEnv.MasterClient.LoopConnectToMaster()

	return &SeaweedAdmin{
		commandEnv: commandEnv,
		commandReg: reg,
		Output:     output,
	}
}

// ProcessCommands runs one or more commands; cmds can be semicolon-separated.
func (sa *SeaweedAdmin) ProcessCommands(cmds string) error {
	for _, c := range strings.Split(cmds, ";") {
		if err := sa.ProcessCommand(c); err != nil {
			return err
		}
	}
	return nil
}

func (sa *SeaweedAdmin) ProcessCommand(cmd string) error {
	sa.commandEnv.MasterClient.WaitUntilConnected()
	cmds := sa.commandReg.FindAllString(cmd, -1)
	if len(cmds) == 0 {
		return nil
	}

	args := make([]string, len(cmds[1:]))
	for i := range args {
		args[i] = strings.Trim(cmds[1+i], "\"'")
	}

	for _, c := range shell.Commands {
		if c.Name() == cmds[0] || c.Name() == "fs."+cmds[0] {
			return c.Do(args, sa.commandEnv, sa.Output)
		}
	}

	return fmt.Errorf("unknown command: %v", cmd)
}

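Note: outside the operator, the same wrapper can be exercised directly. The sketch below uses only NewSeaweedAdmin and ProcessCommands as declared above; the standalone main package and the master address are illustrative assumptions. As the maintenance code warns, ProcessCommand blocks until a master is reachable, so this only works where the masters are routable.

// Hypothetical usage of the SeaweedAdmin wrapper defined above; the master
// address "seaweed-master-0.seaweed-master:9333" is an assumption for illustration.
package main

import (
	"log"
	"os"

	"github.com/seaweedfs/seaweedfs-operator/controllers/swadmin"
)

func main() {
	// Connect to the master(s); NewSeaweedAdmin starts a background loop that
	// keeps the master client connected.
	sa := swadmin.NewSeaweedAdmin("seaweed-master-0.seaweed-master:9333", os.Stdout)

	// Semicolon-separated commands are split and executed one by one.
	if err := sa.ProcessCommands("volume.list; volume.balance -force"); err != nil {
		log.Println("admin command failed:", err)
	}
}
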
95  go.mod
@ -1,101 +1,12 @@
 module github.com/seaweedfs/seaweedfs-operator

-go 1.17
+go 1.13

 require (
-	github.com/chrislusf/seaweedfs v0.0.0-20211103083639-3c245c69d369
 	github.com/go-logr/logr v0.1.0
-	github.com/onsi/ginkgo v1.14.2
+	github.com/onsi/ginkgo v1.11.0
-	github.com/onsi/gomega v1.10.4
+	github.com/onsi/gomega v1.8.1
-	google.golang.org/grpc v1.40.0
-	k8s.io/api v0.18.2
 	k8s.io/apimachinery v0.18.2
 	k8s.io/client-go v0.18.2
-	k8s.io/klog v1.0.0
 	sigs.k8s.io/controller-runtime v0.6.0
 )
-
-require (
-	cloud.google.com/go v0.94.1 // indirect
-	github.com/aws/aws-sdk-go v1.35.3 // indirect
-	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cespare/xxhash/v2 v2.1.1 // indirect
-	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/disintegration/imaging v1.6.2 // indirect
-	github.com/evanphx/json-patch v4.5.0+incompatible // indirect
-	github.com/fsnotify/fsnotify v1.4.9 // indirect
-	github.com/go-errors/errors v1.1.1 // indirect
-	github.com/go-logr/zapr v0.1.0 // indirect
-	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang-jwt/jwt v3.2.1+incompatible // indirect
-	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/golang/snappy v0.0.4 // indirect
-	github.com/google/btree v1.0.0 // indirect
-	github.com/google/go-cmp v0.5.6 // indirect
-	github.com/google/gofuzz v1.1.0 // indirect
-	github.com/google/uuid v1.3.0 // indirect
-	github.com/googleapis/gax-go/v2 v2.1.0 // indirect
-	github.com/googleapis/gnostic v0.3.1 // indirect
-	github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 // indirect
-	github.com/hashicorp/golang-lru v0.5.1 // indirect
-	github.com/hashicorp/hcl v1.0.0 // indirect
-	github.com/imdario/mergo v0.3.6 // indirect
-	github.com/jmespath/go-jmespath v0.4.0 // indirect
-	github.com/json-iterator/go v1.1.11 // indirect
-	github.com/karlseguin/ccache/v2 v2.0.7 // indirect
-	github.com/klauspost/cpuid v1.2.1 // indirect
-	github.com/klauspost/reedsolomon v1.9.2 // indirect
-	github.com/magiconair/properties v1.8.1 // indirect
-	github.com/mattn/go-runewidth v0.0.7 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
-	github.com/mitchellh/mapstructure v1.1.2 // indirect
-	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v1.0.1 // indirect
-	github.com/nxadm/tail v1.4.4 // indirect
-	github.com/pelletier/go-toml v1.7.0 // indirect
-	github.com/peterh/liner v1.1.0 // indirect
-	github.com/pkg/errors v0.9.1 // indirect
-	github.com/prometheus/client_golang v1.11.0 // indirect
-	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.26.0 // indirect
-	github.com/prometheus/procfs v0.6.0 // indirect
-	github.com/seaweedfs/goexif v1.0.2 // indirect
-	github.com/spf13/afero v1.6.0 // indirect
-	github.com/spf13/cast v1.3.0 // indirect
-	github.com/spf13/jwalterweatherman v1.1.0 // indirect
-	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/spf13/viper v1.4.0 // indirect
-	github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 // indirect
-	github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43 // indirect
-	github.com/valyala/bytebufferpool v1.0.0 // indirect
-	github.com/viant/ptrie v0.3.0 // indirect
-	github.com/viant/toolbox v0.33.2 // indirect
-	go.uber.org/atomic v1.7.0 // indirect
-	go.uber.org/multierr v1.7.0 // indirect
-	go.uber.org/zap v1.17.0 // indirect
-	golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f // indirect
-	golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
-	golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect
-	golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
-	golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365 // indirect
-	golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
-	golang.org/x/text v0.3.6 // indirect
-	golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
-	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
-	gomodules.xyz/jsonpatch/v2 v2.0.1 // indirect
-	google.golang.org/api v0.57.0 // indirect
-	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6 // indirect
-	google.golang.org/protobuf v1.27.1 // indirect
-	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
-	gopkg.in/fsnotify.v1 v1.4.7 // indirect
-	gopkg.in/inf.v0 v0.9.1 // indirect
-	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
-	gopkg.in/yaml.v2 v2.4.0 // indirect
-	k8s.io/apiextensions-apiserver v0.18.2 // indirect
-	k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c // indirect
-	k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 // indirect
-	sigs.k8s.io/structured-merge-diff/v3 v3.0.0 // indirect
-	sigs.k8s.io/yaml v1.2.0 // indirect
-)

@ -1,32 +0,0 @@
#!/usr/bin/env bash

set -o errexit
set -o nounset
set -o pipefail
ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd)

DIFFROOT="${ROOT}/api"
TMP_DIFFROOT="${ROOT}/_tmp/api"
_tmp="${ROOT}/_tmp"

cleanup() {
  rm -rf "${_tmp}"
}
trap "cleanup" EXIT SIGINT

cleanup

mkdir -p "${TMP_DIFFROOT}"
cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}"

make generate
echo "diffing ${DIFFROOT} against freshly generated codegen"
ret=0
diff -Naupr "${DIFFROOT}" "${TMP_DIFFROOT}" || ret=$?
cp -a "${TMP_DIFFROOT}"/* "${DIFFROOT}"
if [[ $ret -eq 0 ]]; then
  echo "${DIFFROOT} up to date."
else
  echo "${DIFFROOT} is out of date. Please run make generate"
  exit 1
fi

@ -1,32 +0,0 @@
#!/usr/bin/env bash

set -o errexit
set -o nounset
set -o pipefail
ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd)

DIFFROOT="${ROOT}/config"
TMP_DIFFROOT="${ROOT}/_tmp/config"
_tmp="${ROOT}/_tmp"

cleanup() {
  rm -rf "${_tmp}"
}
trap "cleanup" EXIT SIGINT

cleanup

mkdir -p "${TMP_DIFFROOT}"
cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}"

make manifests
echo "diffing ${DIFFROOT} against freshly generated manifests"
ret=0
diff -Naupr "${DIFFROOT}" "${TMP_DIFFROOT}" || ret=$?
cp -a "${TMP_DIFFROOT}"/* "${DIFFROOT}"
if [[ $ret -eq 0 ]]; then
  echo "${DIFFROOT} up to date."
else
  echo "${DIFFROOT} is out of date. Please run make manifests"
  exit 1
fi

13  main.go
@ -67,21 +67,14 @@ func main() {
 		os.Exit(1)
 	}

-	if err = (&controllers.SeaweedReconciler{
+	if err = (&controllers.MasterReconciler{
 		Client: mgr.GetClient(),
-		Log:    ctrl.Log.WithName("controllers").WithName("Seaweed"),
+		Log:    ctrl.Log.WithName("controllers").WithName("Master"),
 		Scheme: mgr.GetScheme(),
 	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", "Seaweed")
+		setupLog.Error(err, "unable to create controller", "controller", "Master")
 		os.Exit(1)
 	}

-	if os.Getenv("ENABLE_WEBHOOKS") != "false" {
-		if err = (&seaweedv1.Seaweed{}).SetupWebhookWithManager(mgr); err != nil {
-			setupLog.Error(err, "unable to create webhook", "webhook", "Seaweed")
-			os.Exit(1)
-		}
-	}
-
 	// +kubebuilder:scaffold:builder
 	setupLog.Info("starting manager")

@ -4,7 +4,6 @@
   <component name="NewModuleRootManager" inherit-compiler-output="true">
     <exclude-output />
     <content url="file://$MODULE_DIR$" />
-    <orderEntry type="inheritedJdk" />
     <orderEntry type="sourceFolder" forTests="false" />
   </component>
 </module>