Compare commits


233 Commits

Author SHA1 Message Date
a 1628f83514 Merge pull request '2.96' (#2) from github-mirrors/seaweedfs-operator:master into master
Reviewed-on: https://git.tuxpa.in/a/seaweedfs-operator/pulls/2
2022-03-30 03:24:54 +00:00
chrislu fbd37dd3eb 2.96 2022-03-27 18:58:34 -07:00
elee c6eaadf4af added timeout for do 2022-03-20 17:55:56 -05:00
elee 4d68bf590f more small changes, modify gitignore 2022-03-20 02:22:31 -05:00
elee 9c3d010b99 change certificate target 2022-03-20 02:11:05 -05:00
elee 675cbc0d03 remoting 2022-03-20 02:06:33 -05:00
a fb93d78b80 Merge pull request 'master' (#1) from github-mirrors/seaweedfs-operator:master into master
Reviewed-on: https://git.tuxpa.in/a/seaweedfs-operator/pulls/1
2022-03-20 07:06:19 +00:00
chrislu 7240de7226 2.93 2022-03-06 19:54:46 -08:00
chrislu 28cb170c5f 2.92 2022-02-28 15:55:13 -08:00
chrislu 63a2db701f 2.91 2022-02-27 12:50:25 -08:00
elee 774b444461 emable webhooks by default 2022-02-27 06:58:14 -06:00
chrislu ddd5521de2 avoid logs in tmp folder 2022-02-27 02:43:43 -08:00
chrislu dc13e7ca90 2.90 2022-02-20 23:00:30 -08:00
chrislu 0ccce292e0 2.89 2022-02-14 03:36:37 -08:00
chrislu 20b6cd7411 Merge branch 'master' of https://github.com/seaweedfs/seaweedfs-operator 2022-01-30 22:10:54 -08:00
chrislu 2b2737e627 2.88 2022-01-30 22:10:51 -08:00
Chris Lu 6e550b8d87
Merge pull request #58 from Kryptonite-RU/add-defaultreplication-cmdline 2022-01-26 10:10:12 -08:00
Grigoriy Narizhnyy aed8db644d Add defaultReplication command line parameter from CR value 2022-01-26 17:15:48 +03:00
chrislu 35eb4debb5 2.86 2022-01-18 09:47:40 -08:00
chrislu c329457293 2.84 2022-01-02 17:25:06 -08:00
chrislu 242fe939ce fix test 2021-12-25 12:44:41 -08:00
Chris Lu c23330d8af
Merge pull request #56 from robinbraemer/patch-1
update master StatefulSets replicas
2021-12-25 12:18:43 -08:00
Robin Brämer ec732cecaf
update master StatefulSets replicas 2021-12-25 20:34:28 +01:00
chrislu 061d8c0784 2.83 2021-12-25 01:27:48 -08:00
chrislu 0fbed44878 2.81 2021-12-05 18:21:34 -08:00
Chris Lu 654403bd17 2.80 2021-11-29 01:13:48 -08:00
Chris Lu cbb981aad9 2.77 2021-11-07 21:28:26 -08:00
Chris Lu d53367b5d3 fix compilation 2021-11-03 02:21:51 -07:00
Chris Lu f67be3d76c update module 2021-11-03 02:19:37 -07:00
Chris Lu 91b6927e12 go mod download cloud.google.com/go/storage 2021-11-03 02:03:53 -07:00
Chris Lu 624ef883e5 update go mod 2021-11-03 01:59:34 -07:00
Chris Lu 6c1647a778 sync with main repo 2021-11-03 01:44:06 -07:00
Chris Lu 3e43cc27b7 update to use latest grpc API 2021-11-03 01:38:38 -07:00
Chris Lu 179810df8e 2.76 2021-10-31 20:32:38 -07:00
Chris Lu 1384065a91 added apache 2.0 license
fix https://github.com/seaweedfs/seaweedfs-operator/issues/51
2021-10-20 11:33:21 -07:00
Chris Lu 1a5f79e36b 2.74 2021-10-18 21:17:56 -07:00
Chris Lu 39610ab6f6 2.72 2021-10-17 18:42:02 -07:00
Chris Lu 7b5c2aa103 2.71 2021-10-13 21:42:18 -07:00
Chris Lu aa5393d4c4 2.70 2021-09-26 17:38:25 -07:00
Chris Lu 23ec53e93d 2.69 2021-09-26 17:17:12 -07:00
Chris Lu 0c5179c19e
Merge pull request #50 from IxDay/preserve_unknown 2021-09-15 09:11:03 -07:00
Maxime Vidori 85c6271843 Add preserveUnknownFields to false 2021-09-15 12:49:33 +02:00
Chris Lu fddd24dfb6 2.68 2021-09-13 23:09:30 -07:00
Chris Lu de4505f59e 2.66 2021-09-06 10:53:02 -07:00
Chris Lu 2a72636987
Merge pull request #47 from arthurlogilab/patch-1
[manager] Bump up the limits and requests
2021-07-20 01:10:53 -07:00
Arthur Lutz cb1fb247c7
[manager] Bump up the limits and requests
Fixes #46
2021-07-20 10:08:54 +02:00
Chris Lu d1aeaa434c 2.59 2021-07-15 15:59:49 -07:00
Chris Lu 07abcff15a sync version to 2.58 2021-07-15 15:08:11 -07:00
Chris Lu 49c2775a5b skip test which is failing on etcd 2021-07-15 15:08:11 -07:00
Chris Lu 13f8916807
Create README.md 2021-07-15 14:01:24 -07:00
Chris Lu 5f492cba87
Update README.md 2021-07-15 14:01:14 -07:00
Chris Lu 86df57f471
Update README.md 2021-07-15 13:58:16 -07:00
Chris Lu 736b629071 2.56 2021-06-27 23:34:16 -07:00
thiscantbeserious 0909c8a62d
Update README.md 2021-06-25 11:12:19 +02:00
thiscantbeserious f3434d3873
Update README.md 2021-06-25 11:07:58 +02:00
thiscantbeserious 94b66c57d6
Update README.md 2021-06-25 11:06:51 +02:00
thiscantbeserious b891044c66
Update README.md 2021-06-25 11:06:25 +02:00
Chris Lu 682dcbae7c
Merge pull request #43 from seaweedfs/readme_adjustments
Readme adjustments
2021-06-25 01:55:10 -07:00
thiscantbeserious 45599dc10f
Update README.md 2021-06-25 10:17:25 +02:00
Chris Lu 936045790e 2.54 2021-06-19 03:57:23 -07:00
Chris Lu 8c7f7ea97b 2.53 2021-06-13 17:23:04 -07:00
thiscantbeserious 9623cd352e
Update README.md 2021-06-13 12:52:05 +02:00
thiscantbeserious e4e09b32b1
Update README.md 2021-06-10 19:06:00 +02:00
thiscantbeserious 9684d464be
Update README.md 2021-06-10 19:03:55 +02:00
Chris Lu 8d981305be
Update README.md 2021-06-09 13:11:02 -07:00
Chris Lu 297594054e
Update README.md 2021-06-09 13:10:46 -07:00
Chris Lu ca033321ad
Update README.md 2021-06-09 13:10:31 -07:00
Chris Lu feb26f3391
Update README.md 2021-06-09 13:07:56 -07:00
Chris Lu f2248552e5 simple change to test travis build 2021-06-09 12:32:27 -07:00
Chris Lu 9a83337903 Create .travis.yml 2021-06-09 12:22:10 -07:00
Chris Lu ce0ed06111 2.52 2021-06-07 14:06:26 -07:00
Chris Lu 3dd57e8048 2.51 2021-06-06 21:53:44 -07:00
Chris Lu 66f839a186 2.46 2021-05-10 22:36:08 -07:00
Chris Lu 81620f8522 2.44 2021-05-09 23:29:02 -07:00
Chris Lu 98713bbadc
Create main.yml 2021-05-06 11:04:02 -07:00
Chris Lu d9c33ee455 fix s3 ingress 2021-05-02 20:58:36 -07:00
Chris Lu b08d5ce9d1 2.43 2021-05-01 01:08:10 -07:00
Chris Lu 69935bd804 2.42 2021-04-30 11:56:51 -07:00
Chris Lu 1053ae7d62 fix typo
fix https://github.com/seaweedfs/seaweedfs-operator/issues/37
2021-04-25 21:54:38 -07:00
Chris Lu 42358f80d8
Merge pull request #36 from q8s-io/master 2021-04-25 12:00:00 -07:00
70data 2e1ad274a4 remove redis from go.mod 2021-04-26 00:25:46 +08:00
70data d82865f160 fix containerStatus.Ready 2021-04-26 00:14:41 +08:00
Chris Lu 2d063b6ad5
Merge pull request #34 from q8s-io/master
add make delete
2021-04-24 23:42:59 -07:00
70data 089913ba09 update 2021-04-25 13:34:01 +08:00
千夜 65a0950ef9
Merge pull request #1 from seaweedfs/master
2.41
2021-04-25 10:17:41 +08:00
Chris Lu 4233a8b38d 2.41 2021-04-24 16:54:51 -07:00
Chris Lu 6bde7140d5
Merge pull request #33 from kvaster/peers-namespace
Always use dns names with namespace
2021-04-23 17:31:39 -07:00
Viktor Kuzmin 10a3ab6de9 We should use names with namespace to allow direct fuse connections to volume servers for pods from other namespaces 2021-04-23 23:55:56 +03:00
Chris Lu 2284b6c617 2.40 2021-04-18 13:58:09 -07:00
Chris Lu 44d1ebfad1 2.39 2021-04-11 19:47:33 -07:00
Chris Lu 6dc9f6cb10 2.38 2021-04-05 19:42:06 -07:00
Chris Lu 9acefe82e6 2.37 2021-04-04 18:46:06 -07:00
Chris Lu 1cc1fe0407 2.36 2021-03-28 19:09:53 -07:00
Chris Lu 98492a3716 2.35 2021-03-22 00:05:32 -07:00
Chris Lu 51df653787 2.34 2021-03-16 03:02:06 -07:00
Chris Lu 3caaadad4c 2.33 2021-03-16 00:35:49 -07:00
Chris Lu 64a19b4f01 2.32 2021-03-14 21:31:40 -07:00
Chris Lu 00b81cfb2c 2.31 2021-03-09 12:52:25 -08:00
Chris Lu b8059368c2 2.30 2021-03-07 15:01:33 -08:00
Chris Lu 4d7b49a062 2.29 2021-02-28 18:08:43 -08:00
Chris Lu 33545b39d5 Update seaweedfs-operator.iml 2021-02-25 08:25:10 -08:00
Chris Lu a48007e6d9 2.28 2021-02-22 22:59:23 -08:00
Chris Lu 3679d1b118 2.27 2021-02-21 19:30:38 -08:00
Chris Lu 78bc8ad6d8 2.26 2021-02-15 13:39:30 -08:00
Chris Lu bf998078fc 2.25 2021-02-14 22:24:58 -08:00
Chris Lu 96d8d65530 2.24 2021-02-08 00:16:14 -08:00
Chris Lu a0e3c1a7a7 add group extensions for ingress 2021-02-07 03:10:42 -08:00
Chris Lu c081a57cf5 adjust docker image name 2021-02-06 00:28:05 -08:00
Chris Lu a3c2c3203c 2.23 2021-01-31 20:18:42 -08:00
Chris Lu 4599bd7866 2.21 2021-01-18 01:25:53 -08:00
Chris Lu 30058883f6 sync with seaweedfs 2.20 2021-01-09 23:19:40 -08:00
Chris Lu f493b8b0f2 2.20 2021-01-09 23:02:33 -08:00
Chris Lu de183328a5 2.13 2020-11-29 17:00:54 -08:00
Howard Lau 58df469ae5
Fix path 2020-11-28 09:31:41 +08:00
Howard Lau 9b3de719c3
Update README.md 2020-11-28 09:31:24 +08:00
Howard Lau b0f384e3e9
Disable webhooks by default 2020-11-28 09:28:07 +08:00
Howard Lau d86dd68fdd
Update README.md 2020-11-28 09:26:47 +08:00
Chris Lu fa393ffc32 2.12 2020-11-22 17:18:14 -08:00
Chris Lu 014ac322ce 2.11 2020-11-22 17:18:14 -08:00
Howard Lau 8f40aee634
Add instructions for end-user deployment 2020-11-15 11:18:19 +08:00
Chris Lu 62be67dfda Update Makefile 2020-11-14 13:31:54 -08:00
Chris Lu 0e602afa62 2.10 2020-11-10 23:17:34 -08:00
Chris Lu 2573216733 add ingress for volume servers
fix https://github.com/seaweedfs/seaweedfs-operator/issues/16
2020-11-10 00:11:17 -08:00
Chris Lu e853e2870f add service for each volume server
related to https://github.com/seaweedfs/seaweedfs-operator/issues/16
2020-11-09 23:48:22 -08:00
Chris Lu 15fc33c506 add back steps to develop outside of k8s 2020-11-09 23:28:13 -08:00
Chris Lu 98017e9cb0 fix seaweed client command 2020-11-08 23:59:02 -08:00
Chris Lu 831adc091f fix image comparison 2020-11-08 23:58:48 -08:00
Chris Lu ce9dc2bee8 Update README.md 2020-11-08 23:29:57 -08:00
Chris Lu 5a4f61591d adjust for tests 2020-11-08 23:02:09 -08:00
Chris Lu fab237e7ef fix expected master count 2020-11-08 22:34:04 -08:00
Chris Lu 6bb6a1bae6 for the quorum 2020-11-08 18:22:26 -08:00
Chris Lu cb86e4a770 ensure the image version matches expected version
fix https://github.com/seaweedfs/seaweedfs-operator/issues/25
2020-11-08 18:20:33 -08:00
Chris Lu 8342be9b1c proceed only when masters are all ready
fix https://github.com/seaweedfs/seaweedfs-operator/issues/21
2020-11-08 17:41:12 -08:00
Chris Lu 8585daff05 go fmt 2020-11-08 17:40:30 -08:00
Chris Lu 411cb7bc7d fix odd number of masters 2020-11-08 17:40:16 -08:00
Chris Lu b9377c0da1 adjust version 2020-11-08 17:40:00 -08:00
Chris Lu 9ba4b60d94 re-run reconcile every 5 seconds, to check node status changes 2020-11-08 17:00:41 -08:00
Chris Lu eccad4af1e unused functions 2020-11-08 16:46:16 -08:00
Chris Lu 771312ddfb add steps to develop inside k8s 2020-11-08 16:23:17 -08:00
Chris Lu b0f01a5e1a add namespace to master peers list 2020-11-08 16:12:31 -08:00
Chris Lu 3ecd205e8e add a seaweed admin
related to https://github.com/seaweedfs/seaweedfs-operator/issues/23
2020-11-08 00:55:56 -08:00
Chris Lu 291b479395 filer, volume stateful set detect changes 2020-11-04 19:01:19 -08:00
Chris Lu 95dc4a247d master statefulset ensure consistent style
fix https://github.com/seaweedfs/seaweedfs-operator/issues/19
2020-11-04 19:00:34 -08:00
Chris Lu 46806156b6 add filer and s3 ingress 2020-11-04 14:30:15 -08:00
Chris Lu 31c843cae4 set volumeSizeLimitMB to 1024 2020-11-03 23:15:57 -08:00
Chris Lu 9b29d3a377 add sample filer stor setting 2020-11-03 21:29:14 -08:00
Chris Lu 255a6487c7 fix filer configmap 2020-11-03 20:47:45 -08:00
Chris Lu db16a4b2cb minor 2020-11-03 20:26:15 -08:00
Chris Lu ac68889024 detect ENABLE_WEBHOOKS!=false 2020-11-03 20:24:19 -08:00
Chris Lu 561e7fcc3a add filer.toml to /etc/seaweedfs 2020-11-03 20:01:19 -08:00
Chris Lu 7399cc6e30 fix filer replica 2020-11-03 18:26:28 -08:00
Chris Lu eef877dacb 2.08 2020-11-01 13:04:12 -08:00
Chris Lu 10f6541376
Merge pull request #15 from howardlau1999/test
Add some basic tests
2020-11-01 02:29:38 -08:00
Howard Lau 647b74f909
fix wrong if
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 09:34:52 +00:00
Howard Lau 152049b5cf
fix lint
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 09:32:43 +00:00
Howard Lau 08430619b9
fix lint
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 09:28:08 +00:00
Howard Lau 89bf4e4e0e
Add tests
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 09:24:13 +00:00
Chris Lu 0a09f85cbd
Merge pull request #14 from howardlau1999/webhook
Add defaulting/validating webhook
2020-11-01 01:01:59 -08:00
Howard Lau 6437abce16
less verbose
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 08:50:31 +00:00
Howard Lau 9d134da582
Aggregate errors
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 08:21:01 +00:00
Howard Lau f0d0622b15 Add validation webhook
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 08:16:22 +00:00
Howard Lau 2d2148e96c
Merge workflows
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 07:02:22 +00:00
Howard Lau a1771c26cb
Fix workflow
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 07:00:39 +00:00
Howard Lau 275515dffc
Split test steps
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 06:59:41 +00:00
Howard Lau 7eed56f5eb Use Makefile test
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 06:55:19 +00:00
Howard Lau f9b565e70a
Merge pull request #13 from howardlau1999/fix
Use Kubernetes recommended labels
2020-11-01 14:49:30 +08:00
Howard Lau 753e8e8c00
Add managed-by
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 06:46:34 +00:00
Howard Lau eb051a3202
Use Kubernetes recommended labels
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-11-01 06:43:45 +00:00
Howard Lau af2f704fa8
Merge pull request #12 from howardlau1999/fix
Fix make install
2020-11-01 13:02:43 +08:00
Howard Lau 7ccf5efcf1 Merge branch 'master' into fix 2020-11-01 04:40:41 +00:00
Howard Lau 6ae2bba475
Increase lint timeout
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 15:59:29 +00:00
Howard Lau 034a36489d
Add golangci-lint
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 15:55:35 +00:00
Chris Lu 740ac81a57
Merge pull request #11 from howardlau1999/filerpeer
Support Filer Peers
2020-10-30 02:55:13 -07:00
Howard Lau a9ea14b399
fix port
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 09:23:54 +00:00
Howard Lau e8ba79aae6
add filer peers on filer startup
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 09:15:16 +00:00
Howard Lau cc2c614d43
fix compatibility problem
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 09:10:05 +00:00
Chris Lu ec5ccbf5c5
Merge pull request #10 from howardlau1999/peersvc
Add headless peer services for StatefulSet
2020-10-30 01:05:37 -07:00
Howard Lau cbd151aa2a
fix service
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 07:14:24 +00:00
Howard Lau ee1d99b4ab
tidy go.mod go.sum
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 07:11:27 +00:00
Howard Lau 2b486417bb
split comments
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 07:09:39 +00:00
Howard Lau 223d464e45
do not sleep
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 06:57:51 +00:00
Howard Lau 53536d56db
fix name
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 06:57:02 +00:00
Howard Lau 7b11a10862
fix master name
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 06:44:51 +00:00
Howard Lau a8c8fd65bc
add config
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 06:42:00 +00:00
Howard Lau 5bc96e22b1 Merge branch 'master' into peersvc 2020-10-30 05:15:56 +00:00
Howard Lau 0bcbcd1d0b
Add verify codegen and manifests
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 05:15:18 +00:00
Howard Lau 8c390ce083
fix float issues
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 03:09:04 +00:00
Chris Lu 7f453bdf05
Create go.yml 2020-10-29 20:00:50 -07:00
Howard Lau 95b9af71fa
ownerReference for cm
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-30 02:42:34 +00:00
Howard Lau e7b287bb6f Merge branch 'master' into peersvc 2020-10-30 02:40:34 +00:00
Chris Lu 4d2cd6ce12
Merge pull request #7 from howardlau1999/config
Support raw TOML config for master and filer
2020-10-29 08:52:56 -07:00
Chris Lu 9e229fc647
Merge pull request #9 from howardlau1999/owner
Set ownerReference for GC
2020-10-29 08:51:33 -07:00
Chris Lu 5341512045
Merge pull request #8 from howardlau1999/rbac
Fix RBAC problem and restore metrics
2020-10-29 08:48:58 -07:00
Howard Lau 0d53ed3402
Set ownerReference for GC
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-29 14:31:06 +00:00
Howard Lau a762e9c866 Merge branch 'rbac' into peersvc 2020-10-29 12:00:40 +00:00
Howard Lau c5fbce3700
Make sure the operator is runnable
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-29 11:56:59 +00:00
Howard Lau e8baea6ae4
fix rbac
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-29 11:37:49 +00:00
Howard Lau 92172f43a5
restore metrics
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-29 11:30:45 +00:00
Howard Lau 6c601549f9
rbac
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-29 11:24:38 +00:00
Howard Lau 2c0d8fac02
fix
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-29 09:03:30 +00:00
Howard Lau f8e325e446
create peer svc for sts
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-29 08:57:16 +00:00
Howard Lau 3722dcdc0e
fix mountpath
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-28 11:33:16 +00:00
Howard Lau a4b872fbbc
format
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-28 11:31:02 +00:00
Howard Lau 6ee14a018f
Reconcile ConfigMap
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-28 11:29:11 +00:00
Howard Lau 09d121bcb5
Support raw TOML config for master and filer
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-28 11:19:17 +00:00
Chris Lu c06204b5b6
Merge pull request #5 from howardlau1999/refactor
Use accessor to merge config and extract magic numbers
2020-10-28 00:40:24 -07:00
Howard Lau d6e3cf7be9
use accessor to merge config and extract magic numbers
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-28 07:06:37 +00:00
Chris Lu 1d54f9b9e3
Merge pull request #4 from howardlau1999/refactor
Refactor the CRD to allow users to configure each component separately
2020-10-27 22:16:19 -07:00
Howard Lau ef177093c7
remove unused field
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-28 05:09:01 +00:00
Howard Lau 4f59aa4ada
refactor to multiple specs
Signed-off-by: Howard Lau <howardlau1999@hotmail.com>
2020-10-28 05:06:25 +00:00
Chris Lu 7e57102a24
Update README.md 2020-10-27 00:58:58 -07:00
Chris Lu 32b17068ca
Update README.md 2020-10-27 00:56:59 -07:00
Chris Lu 38c4d034d3
Update README.md 2020-10-27 00:54:25 -07:00
Chris Lu e557a193f5
Update README.md 2020-10-27 00:37:07 -07:00
Chris Lu 9df02c313d 2.07 2020-10-25 22:39:05 -07:00
Chris Lu 937ea45c04 add volume server disks 2020-10-20 00:17:14 -07:00
Chris Lu 80ba5abbae probe volume, filer. 2020-10-18 22:56:12 -07:00
Chris Lu 1a443616b6 master probe 2020-10-18 22:49:11 -07:00
Chris Lu 4b67dd3791 simplify 2020-10-18 01:59:38 -07:00
Chris Lu fb816896d7 disable metrics 2020-10-18 00:58:15 -07:00
Chris Lu 8ad7ee2d50 create ingress, simplify ensure filer statefulset 2020-10-18 00:12:33 -07:00
Chris Lu 9a4df148b8 skip metrics 2020-10-18 00:11:31 -07:00
Chris Lu f734443207 add debug 2020-10-18 00:06:49 -07:00
Chris Lu a5c5e85f2a simplify 2020-10-18 00:06:40 -07:00
Chris Lu 58aa518ba7 simplify 2020-10-17 20:52:13 -07:00
Chris Lu 4df48f08de adjust logs 2020-10-17 14:07:06 -07:00
Chris Lu 95164dc830 add filer nodeport service 2020-10-17 13:51:07 -07:00
Chris Lu b2274f19c1 2.05 2020-10-17 02:42:48 -07:00
Chris Lu 4bb6f99cd8 support upgrade 2020-10-17 02:25:23 -07:00
Chris Lu 4ea680c4c9 fix filer starting 2020-10-17 00:59:38 -07:00
Chris Lu a55feda96d support adjustable volume count and filer count 2020-10-16 23:25:46 -07:00
Chris Lu 30e457b814 merge s3 with filer 2020-10-14 21:53:09 -07:00
Chris Lu c7e0392c1c refactor 2020-10-14 21:45:53 -07:00
59 changed files with 8322 additions and 495 deletions

70
.github/workflows/go.yml vendored Normal file

@@ -0,0 +1,70 @@
name: Go
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: golangci-lint
uses: golangci/golangci-lint-action@v2
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.31
# Optional: working directory, useful for monorepos
# working-directory: somedir
# Optional: golangci-lint command line arguments.
# TODO: remove disabled
args: --timeout=10m -D errcheck -D deadcode -D unused
# Optional: show only new issues if it's a pull request. The default value is `false`.
# only-new-issues: true
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ^1.13
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Get dependencies
run: |
go get -v -t -d ./...
if [ -f Gopkg.toml ]; then
curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
dep ensure
fi
- name: Build
run: go build -v .
test:
name: Test
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ^1.13
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Test
run: make test SHELL=/bin/bash

74
.github/workflows/main.yml vendored Normal file

@@ -0,0 +1,74 @@
# This is a basic workflow to help you get started with Actions
name: CI
# Controls when the action will run.
on:
# Triggers the workflow on push or pull request events but only for the master branch
push:
branches: [ master ]
pull_request:
branches: [ master ]
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
# This workflow contains a single job called "build"
build:
# The type of runner that the job will run on
runs-on: ubuntu-latest
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v2
# Runs a single command using the runners shell
- name: Run a one-line script
run: echo Hello, world!
- name: Build and push Docker images
# You may pin to the exact commit or the version.
# uses: docker/build-push-action@e1b7f96249f2e4c8e4ac1519b9608c0d48944a1f
uses: docker/build-push-action@v2.4.0
with:
# List of extra privileged entitlement (eg. network.host,security.insecure)
allow: # optional
# List of build-time variables
build-args: # optional
# Builder instance
builder: # optional
# Build's context is the set of files located in the specified PATH or URL
context: # optional
# Path to the Dockerfile
file: # optional
# List of metadata for an image
labels: # optional
# Load is a shorthand for --output=type=docker
load: # optional, default is false
# Set the networking mode for the RUN instructions during build
network: # optional
# Do not use cache when building the image
no-cache: # optional, default is false
# List of output destinations (format: type=local,dest=path)
outputs: # optional
# List of target platforms for build
platforms: # optional
# Always attempt to pull a newer version of the image
pull: # optional, default is false
# Push is a shorthand for --output=type=registry
push: # optional, default is false
# List of secrets to expose to the build (eg. key=string, GIT_AUTH_TOKEN=mytoken)
secrets: # optional
# List of secret files to expose to the build (eg. key=filename, MY_SECRET=./secret.txt)
secret-files: # optional
# List of SSH agent socket or keys to expose to the build
ssh: # optional
# List of tags
tags: # optional
# Sets the target stage to build
target: # optional
# GitHub Token used to authenticate against a repository for Git context
github-token: # optional, default is ${{ github.token }}

35
.github/workflows/verify.yml vendored Normal file

@@ -0,0 +1,35 @@
name: Verify
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
verify:
name: Verify
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ^1.13
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Get dependencies
run: |
go get -v -t -d ./...
if [ -f Gopkg.toml ]; then
curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
dep ensure
fi
- name: Verify Codegen
run: hack/verify-codegen.sh
- name: Verify Manifests
run: hack/verify-manifests.sh

3
.gitignore vendored

@@ -79,3 +79,6 @@ tags
### GoLand ###
.idea
bin/*
## asdf
.tool-versions

11
.travis.yml Normal file

@@ -0,0 +1,11 @@
sudo: false
language: go
go:
- 1.16.x
before_install:
- export PATH=/home/travis/gopath/bin:$PATH
install:
- export CGO_ENABLED="0"
- go env

Dockerfile

@@ -1,5 +1,5 @@
# Build the manager binary
FROM golang:1.13 as builder
FROM golang:1.16 as builder
WORKDIR /workspace
# Copy the Go Modules manifests

201
LICENSE Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 Chris Lu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Makefile

@@ -1,5 +1,5 @@
# Current Operator version
VERSION ?= 0.0.1
VERSION ?= v0.0.2
# Default bundle image tag
BUNDLE_IMG ?= controller-bundle:$(VERSION)
# Options for 'bundle-build'
@@ -12,9 +12,9 @@ endif
BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
# Image URL to use all building/pushing image targets
IMG ?= seaweedfs/operator:latest
IMG ?= gfxlabs/seaweedfs-operator:$(VERSION)
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd:trivialVersions=true"
CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false"
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
@@ -40,6 +40,10 @@ manager: generate fmt vet
run: generate fmt vet manifests
go run ./main.go
debug: generate fmt vet manifests
go build -gcflags="all=-N -l" ./main.go
ENABLE_WEBHOOKS=false dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec main
# Install CRDs into a cluster
install: manifests kustomize
$(KUSTOMIZE) build config/crd | kubectl apply -f -
@@ -53,6 +57,10 @@ deploy: manifests kustomize
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default | kubectl apply -f -
# clean up crd & controller in the configured Kubernetes cluster in ~/.kube/config
delete: manifests kustomize
$(KUSTOMIZE) build config/default | kubectl delete -f -
# Generate manifests e.g. CRD, RBAC etc.
manifests: controller-gen
$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
@@ -70,7 +78,8 @@ generate: controller-gen
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
# Build the docker image
docker-build: test
docker-build: # test
echo ${IMG}
docker build . -t ${IMG}
# Push the docker image

PROJECT

@@ -1,9 +1,10 @@
domain: seaweedfs.com
layout: go.kubebuilder.io/v2
repo: github.com/seaweedfs/seaweedfs-operator
projectName: seaweedfs-operator
resources:
- group: seaweed
kind: Master
kind: Seaweed
version: v1
version: 3-alpha
plugins:

146
README.md

@@ -1,7 +1,110 @@
[![Build Status](https://travis-ci.com/seaweedfs/seaweedfs-operator.svg?branch=master)](https://travis-ci.com/github/seaweedfs/seaweedfs-operator)
# SeaweedFS Operator
This [Kubernetes Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) is made to easily deploy SeaweedFS onto your Kubernetes cluster.
The difference to [seaweedfs-csi-driver](https://github.com/seaweedfs/seaweedfs-csi-driver) is that the infrastructure (SeaweedFS) itself also runs on Kubernetes (Master, Filer, Volume Servers) and can therefore scale with it as needed. It is also far more resilient to failures than a plain systemd service when it comes to handling crashing services or accidental deletes.
Running `make deploy` deploys the operator itself onto your current kubectl $KUBECONFIG target; by default it will do nothing until you configure a resource of type 'Seaweed' (see the examples in config/samples/).
Goals:
- [x] Automatically deploy and manage a SeaweedFS cluster.
- [x] Ability to be managed by other Operators.
- [ ] Compatibility with [seaweedfs-csi-driver](https://github.com/seaweedfs/seaweedfs-csi-driver)
- [x] Auto rolling upgrade and restart.
- [x] Ingress for volume server, filer and S3, to support HDFS, REST filer, S3 API and cross-cluster replication.
- [ ] Support all major cloud Kubernetes: AWS, Google, Azure.
- [ ] Scheduled backup to cloud storage: S3, Google Cloud Storage, Azure.
- [ ] Put warm data to cloud storage tier: S3, Google Cloud Storage, Azure.
- [ ] Grafana dashboard.
## Installation
This operator uses `kustomize` to deploy. The installation process will install one for you if you do not have one.
By default, the defaulting and validation webhooks are disabled. We strongly recommend that the webhooks be enabled.
First clone the repository:
```bash
$ git clone https://github.com/seaweedfs/seaweedfs-operator --depth=1
```
To deploy the operator with webhooks enabled, make sure you have installed `cert-manager` (installation docs: https://cert-manager.io/docs/installation/) in your cluster, then follow the instructions in the `config/default/kustomization.yaml` file to uncomment the components you need.
Lastly, change the value of `ENABLE_WEBHOOKS` to `"true"` in `config/manager/manager.yaml`.
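For illustration only, the relevant part of `config/manager/manager.yaml` might then look roughly like the sketch below. Apart from the `ENABLE_WEBHOOKS` variable, the image and the `seaweedfs-operator-system` namespace mentioned elsewhere in this page, the surrounding Deployment fields are assumptions about the generated manifest, not quoted from it.
```yaml
# Sketch of the manager Deployment with webhooks enabled.
# Fields other than ENABLE_WEBHOOKS are illustrative; the operator
# enables webhooks whenever ENABLE_WEBHOOKS != "false".
apiVersion: apps/v1
kind: Deployment
metadata:
  name: seaweedfs-operator-controller-manager
  namespace: seaweedfs-operator-system
spec:
  replicas: 1
  selector:
    matchLabels:
      control-plane: controller-manager
  template:
    metadata:
      labels:
        control-plane: controller-manager
    spec:
      containers:
        - name: manager
          image: seaweedfs/operator:latest
          env:
            - name: ENABLE_WEBHOOKS
              value: "true"
```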
Afterwards fire up:
```bash
$ make install
```
Then run the command to deploy the operator into your cluster:
```bash
$ make deploy
```
Verify if it was correctly deployed with:
```bash
$ kubectl get pods --all-namespaces
```
Which may return:
```bash
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-f9fd979d6-68p4c 1/1 Running 0 34m
kube-system coredns-f9fd979d6-x992t 1/1 Running 0 34m
kube-system etcd-kind-control-plane 1/1 Running 0 34m
kube-system kindnet-rp7wr 1/1 Running 0 34m
kube-system kube-apiserver-kind-control-plane 1/1 Running 0 34m
kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 34m
kube-system kube-proxy-dqfg2 1/1 Running 0 34m
kube-system kube-scheduler-kind-control-plane 1/1 Running 0 34m
local-path-storage local-path-provisioner-78776bfc44-7zvxx 1/1 Running 0 34m
seaweedfs-operator-system seaweedfs-operator-controller-manager-54cc768f4c-cwz2k 2/2 Running 0 34m
```
See the next section for example usage - **__at this point you have only deployed the Operator itself!__**
### You also need to deploy a configuration to get it running (see next section)!
## Configuration Examples
- Please send us your use-cases / example configs ... this is currently empty (needs to be written)
- For now see: https://github.com/seaweedfs/seaweedfs-operator/blob/master/config/samples/seaweed_v1_seaweed.yaml
````
apiVersion: seaweed.seaweedfs.com/v1
kind: Seaweed
metadata:
name: seaweed1
namespace: default
spec:
# Add fields here
image: chrislusf/seaweedfs:2.96
volumeServerDiskCount: 1
hostSuffix: seaweed.abcdefg.com
master:
replicas: 3
volumeSizeLimitMB: 1024
volume:
replicas: 1
requests:
storage: 2Gi
filer:
replicas: 2
config: |
[leveldb2]
enabled = true
dir = "/data/filerldb2"
````
## Maintenance and Uninstallation
- TBD
## Development
Follow the instructions in https://sdk.operatorframework.io/docs/building-operators/golang/quickstart/
@@ -10,6 +113,42 @@ Follow the instructions in https://sdk.operatorframework.io/docs/building-operat
$ git clone https://github.com/seaweedfs/seaweedfs-operator
$ cd seaweedfs-operator
# register the CRD with the Kubernetes
$ make deploy
# build the operator image
$ make docker-build
# load the image into Kind cluster
$ kind load docker-image chrislusf/seaweedfs-operator:v0.0.1
# From another terminal in the same directory
$ kubectl apply -f config/samples/seaweed_v1_seaweed.yaml
```
### Update the operator
```
# delete the existing operator
$ kubectl delete namespace seaweedfs-operator-system
# rebuild the operator image
$ make docker-build
# load the image into Kind cluster
$ kind load docker-image chrislusf/seaweedfs-operator:v0.0.1
# register the CRD with the Kubernetes
$ make deploy
```
### develop outside of k8s
```
$ git clone https://github.com/seaweedfs/seaweedfs-operator
$ cd seaweedfs-operator
# register the CRD with the Kubernetes
$ make install
@@ -18,11 +157,4 @@ $ make run ENABLE_WEBHOOKS=false
# From another terminal in the same directory
$ kubectl apply -f config/samples/seaweed_v1_seaweed.yaml
```
## Create API and Controller
Here are the commands used to create the custom resource definition (CRD)
```
operator-sdk create api --group seaweed --version v1 --kind Master --resource=true --controller=true
```


@@ -0,0 +1,206 @@
package v1
import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
)
// ComponentAccessor is the interface to access component details, which respects the cluster-level properties
// and component-level overrides
// +kubebuilder:object:root=false
// +kubebuilder:object:generate=false
type ComponentAccessor interface {
ImagePullPolicy() corev1.PullPolicy
ImagePullSecrets() []corev1.LocalObjectReference
HostNetwork() bool
Affinity() *corev1.Affinity
PriorityClassName() *string
NodeSelector() map[string]string
Annotations() map[string]string
Tolerations() []corev1.Toleration
SchedulerName() string
DNSPolicy() corev1.DNSPolicy
BuildPodSpec() corev1.PodSpec
Env() []corev1.EnvVar
TerminationGracePeriodSeconds() *int64
StatefulSetUpdateStrategy() appsv1.StatefulSetUpdateStrategyType
}
type componentAccessorImpl struct {
imagePullPolicy corev1.PullPolicy
imagePullSecrets []corev1.LocalObjectReference
hostNetwork *bool
affinity *corev1.Affinity
priorityClassName *string
schedulerName string
clusterNodeSelector map[string]string
clusterAnnotations map[string]string
tolerations []corev1.Toleration
statefulSetUpdateStrategy appsv1.StatefulSetUpdateStrategyType
// ComponentSpec is the Component Spec
ComponentSpec *ComponentSpec
}
func (a *componentAccessorImpl) StatefulSetUpdateStrategy() appsv1.StatefulSetUpdateStrategyType {
strategy := a.ComponentSpec.StatefulSetUpdateStrategy
if len(strategy) != 0 {
return strategy
}
strategy = a.statefulSetUpdateStrategy
if len(strategy) != 0 {
return strategy
}
return appsv1.RollingUpdateStatefulSetStrategyType
}
func (a *componentAccessorImpl) ImagePullPolicy() corev1.PullPolicy {
pp := a.ComponentSpec.ImagePullPolicy
if pp == nil {
return a.imagePullPolicy
}
return *pp
}
func (a *componentAccessorImpl) ImagePullSecrets() []corev1.LocalObjectReference {
ips := a.ComponentSpec.ImagePullSecrets
if ips == nil {
return a.imagePullSecrets
}
return ips
}
func (a *componentAccessorImpl) HostNetwork() bool {
hostNetwork := a.ComponentSpec.HostNetwork
if hostNetwork == nil {
hostNetwork = a.hostNetwork
}
if hostNetwork == nil {
return false
}
return *hostNetwork
}
func (a *componentAccessorImpl) Affinity() *corev1.Affinity {
affi := a.ComponentSpec.Affinity
if affi == nil {
affi = a.affinity
}
return affi
}
func (a *componentAccessorImpl) PriorityClassName() *string {
pcn := a.ComponentSpec.PriorityClassName
if pcn == nil {
pcn = a.priorityClassName
}
return pcn
}
func (a *componentAccessorImpl) SchedulerName() string {
pcn := a.ComponentSpec.SchedulerName
if pcn == nil {
pcn = &a.schedulerName
}
return *pcn
}
func (a *componentAccessorImpl) NodeSelector() map[string]string {
sel := map[string]string{}
for k, v := range a.clusterNodeSelector {
sel[k] = v
}
for k, v := range a.ComponentSpec.NodeSelector {
sel[k] = v
}
return sel
}
func (a *componentAccessorImpl) Annotations() map[string]string {
anno := map[string]string{}
for k, v := range a.clusterAnnotations {
anno[k] = v
}
for k, v := range a.ComponentSpec.Annotations {
anno[k] = v
}
return anno
}
func (a *componentAccessorImpl) Tolerations() []corev1.Toleration {
tols := a.ComponentSpec.Tolerations
if len(tols) == 0 {
tols = a.tolerations
}
return tols
}
func (a *componentAccessorImpl) DNSPolicy() corev1.DNSPolicy {
dnsPolicy := corev1.DNSClusterFirst // same as kubernetes default
if a.HostNetwork() {
dnsPolicy = corev1.DNSClusterFirstWithHostNet
}
return dnsPolicy
}
func (a *componentAccessorImpl) BuildPodSpec() corev1.PodSpec {
spec := corev1.PodSpec{
SchedulerName: a.SchedulerName(),
Affinity: a.Affinity(),
NodeSelector: a.NodeSelector(),
HostNetwork: a.HostNetwork(),
RestartPolicy: corev1.RestartPolicyAlways,
Tolerations: a.Tolerations(),
}
if a.PriorityClassName() != nil {
spec.PriorityClassName = *a.PriorityClassName()
}
if a.ImagePullSecrets() != nil {
spec.ImagePullSecrets = a.ImagePullSecrets()
}
if a.TerminationGracePeriodSeconds() != nil {
spec.TerminationGracePeriodSeconds = a.TerminationGracePeriodSeconds()
}
return spec
}
func (a *componentAccessorImpl) Env() []corev1.EnvVar {
return a.ComponentSpec.Env
}
func (a *componentAccessorImpl) TerminationGracePeriodSeconds() *int64 {
return a.ComponentSpec.TerminationGracePeriodSeconds
}
func buildSeaweedComponentAccessor(spec *SeaweedSpec, componentSpec *ComponentSpec) ComponentAccessor {
return &componentAccessorImpl{
imagePullPolicy: spec.ImagePullPolicy,
imagePullSecrets: spec.ImagePullSecrets,
hostNetwork: spec.HostNetwork,
affinity: spec.Affinity,
schedulerName: spec.SchedulerName,
clusterNodeSelector: spec.NodeSelector,
clusterAnnotations: spec.Annotations,
tolerations: spec.Tolerations,
statefulSetUpdateStrategy: spec.StatefulSetUpdateStrategy,
ComponentSpec: componentSpec,
}
}
// BaseMasterSpec provides merged spec of masters
func (s *Seaweed) BaseMasterSpec() ComponentAccessor {
return buildSeaweedComponentAccessor(&s.Spec, &s.Spec.Master.ComponentSpec)
}
// BaseFilerSpec provides merged spec of filers
func (s *Seaweed) BaseFilerSpec() ComponentAccessor {
return buildSeaweedComponentAccessor(&s.Spec, &s.Spec.Filer.ComponentSpec)
}
// BaseVolumeSpec provides merged spec of volumes
func (s *Seaweed) BaseVolumeSpec() ComponentAccessor {
return buildSeaweedComponentAccessor(&s.Spec, &s.Spec.Volume.ComponentSpec)
}
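As a usage illustration (not part of the diff above): a reconciler could consume these accessors when building the master StatefulSet's pod template roughly as sketched below. Only `BaseMasterSpec()` and the `ComponentAccessor` methods come from the code above; the function name, labels and container layout are assumptions.
```go
// Sketch: consuming the merged accessor when building the master pod template.
// Only BaseMasterSpec() and the ComponentAccessor methods are taken from the
// accessor code above; everything else here is illustrative.
package controllers

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)

func buildMasterPodTemplate(sw *seaweedv1.Seaweed) corev1.PodTemplateSpec {
	spec := sw.BaseMasterSpec() // cluster-level defaults merged with master-level overrides

	podSpec := spec.BuildPodSpec() // scheduler name, affinity, node selector, tolerations, host network
	podSpec.DNSPolicy = spec.DNSPolicy()
	podSpec.Containers = []corev1.Container{{
		Name:            "master",
		Image:           sw.Spec.Image,
		ImagePullPolicy: spec.ImagePullPolicy(),
		Env:             spec.Env(),
	}}

	return corev1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{Annotations: spec.Annotations()},
		Spec:       podSpec,
	}
}
```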


@@ -1,65 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// MasterSpec defines the desired state of Master
type MasterSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make" to regenerate code after modifying this file
// +kubebuilder:validation:Minimum=0
// Size is the size of the master deployment
Size int32 `json:"size"`
}
// MasterStatus defines the observed state of Master
type MasterStatus struct {
// Nodes are the names of the master pods
Nodes []string `json:"nodes"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// Master is the Schema for the masters API
type Master struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec MasterSpec `json:"spec,omitempty"`
Status MasterStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// MasterList contains a list of Master
type MasterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Master `json:"items"`
}
func init() {
SchemeBuilder.Register(&Master{}, &MasterList{})
}

269
api/v1/seaweed_types.go Normal file

@@ -0,0 +1,269 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// Constants
const (
GRPCPortDelta = 10000
MasterHTTPPort = 9333
VolumeHTTPPort = 8444
FilerHTTPPort = 8888
FilerS3Port = 8333
MasterGRPCPort = MasterHTTPPort + GRPCPortDelta
VolumeGRPCPort = VolumeHTTPPort + GRPCPortDelta
FilerGRPCPort = FilerHTTPPort + GRPCPortDelta
)
// SeaweedSpec defines the desired state of Seaweed
type SeaweedSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make" to regenerate code after modifying this file
// MetricsAddress is Prometheus gateway address
MetricsAddress string `json:"metricsAddress,omitempty"`
// Image
Image string `json:"image,omitempty"`
// Version
Version string `json:"version,omitempty"`
// Master
Master *MasterSpec `json:"master,omitempty"`
// Volume
Volume *VolumeSpec `json:"volume,omitempty"`
// Filer
Filer *FilerSpec `json:"filer,omitempty"`
// SchedulerName of pods
SchedulerName string `json:"schedulerName,omitempty"`
// Persistent volume reclaim policy
PVReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"pvReclaimPolicy,omitempty"`
// ImagePullPolicy of pods
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images.
ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
// Whether enable PVC reclaim for orphan PVC left by statefulset scale-in
EnablePVReclaim *bool `json:"enablePVReclaim,omitempty"`
// Whether Hostnetwork is enabled for pods
HostNetwork *bool `json:"hostNetwork,omitempty"`
// Affinity of pods
Affinity *corev1.Affinity `json:"affinity,omitempty"`
// Base node selectors of Pods, components may add or override selectors upon this respectively
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Base annotations of Pods, components may add or override selectors upon this respectively
Annotations map[string]string `json:"annotations,omitempty"`
// Base tolerations of Pods, components may add more tolerations upon this respectively
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
// StatefulSetUpdateStrategy indicates the StatefulSetUpdateStrategy that will be
// employed to update Pods in the StatefulSet when a revision is made to
// Template.
StatefulSetUpdateStrategy appsv1.StatefulSetUpdateStrategyType `json:"statefulSetUpdateStrategy,omitempty"`
VolumeServerDiskCount int32 `json:"volumeServerDiskCount,omitempty"`
// Ingresses
HostSuffix *string `json:"hostSuffix,omitempty"`
}
// SeaweedStatus defines the observed state of Seaweed
type SeaweedStatus struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
}
// MasterSpec is the spec for masters
type MasterSpec struct {
ComponentSpec `json:",inline"`
corev1.ResourceRequirements `json:",inline"`
// The desired ready replicas
// +kubebuilder:validation:Minimum=1
Replicas int32 `json:"replicas"`
Service *ServiceSpec `json:"service,omitempty"`
// Config in raw toml string
Config *string `json:"config,omitempty"`
// Master-specific settings
VolumePreallocate *bool `json:"volumePreallocate,omitempty"`
VolumeSizeLimitMB *int32 `json:"volumeSizeLimitMB,omitempty"`
GarbageThreshold *string `json:"garbageThreshold,omitempty"`
PulseSeconds *int32 `json:"pulseSeconds,omitempty"`
DefaultReplication *string `json:"defaultReplication,omitempty"`
// only for testing
ConcurrentStart *bool `json:"concurrentStart,omitempty"`
}
// VolumeSpec is the spec for volume servers
type VolumeSpec struct {
ComponentSpec `json:",inline"`
corev1.ResourceRequirements `json:",inline"`
// The desired ready replicas
// +kubebuilder:validation:Minimum=1
Replicas int32 `json:"replicas"`
Service *ServiceSpec `json:"service,omitempty"`
StorageClassName *string `json:"storageClassName,omitempty"`
// Volume-specific settings
CompactionMBps *int32 `json:"compactionMBps,omitempty"`
FileSizeLimitMB *int32 `json:"fileSizeLimitMB,omitempty"`
FixJpgOrientation *bool `json:"fixJpgOrientation,omitempty"`
IdleTimeout *int32 `json:"idleTimeout,omitempty"`
MaxVolumeCounts *int32 `json:"maxVolumeCounts,omitempty"`
MinFreeSpacePercent *int32 `json:"minFreeSpacePercent,omitempty"`
}
// FilerSpec is the spec for filers
type FilerSpec struct {
ComponentSpec `json:",inline"`
corev1.ResourceRequirements `json:",inline"`
// The desired ready replicas
// +kubebuilder:validation:Minimum=1
Replicas int32 `json:"replicas"`
Service *ServiceSpec `json:"service,omitempty"`
// Config in raw toml string
Config *string `json:"config,omitempty"`
// Filer-specific settings
MaxMB *int32 `json:"maxMB,omitempty"`
}
// ComponentSpec is the base spec of each component; the fields should always be accessed through the Base<Component>Spec() methods to respect the cluster-level properties
type ComponentSpec struct {
// Version of the component. Override the cluster-level version if non-empty
Version *string `json:"version,omitempty"`
// ImagePullPolicy of the component. Override the cluster-level imagePullPolicy if present
ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images.
ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
// Whether Hostnetwork of the component is enabled. Override the cluster-level setting if present
HostNetwork *bool `json:"hostNetwork,omitempty"`
// Affinity of the component. Override the cluster-level one if present
Affinity *corev1.Affinity `json:"affinity,omitempty"`
// PriorityClassName of the component. Override the cluster-level one if present
PriorityClassName *string `json:"priorityClassName,omitempty"`
// SchedulerName of the component. Override the cluster-level one if present
SchedulerName *string `json:"schedulerName,omitempty"`
// NodeSelector of the component. Merged into the cluster-level nodeSelector if non-empty
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Annotations of the component. Merged into the cluster-level annotations if non-empty
Annotations map[string]string `json:"annotations,omitempty"`
// Tolerations of the component. Override the cluster-level tolerations if non-empty
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
// List of environment variables to set in the container, like
// v1.Container.Env.
// Note that the following env names cannot be used and may be overridden by the operator:
// - NAMESPACE
// - POD_IP
// - POD_NAME
Env []corev1.EnvVar `json:"env,omitempty"`
// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
// Value must be non-negative integer. The value zero indicates delete immediately.
// If this value is nil, the default grace period will be used instead.
// The grace period is the duration in seconds after the processes running in the pod are sent
// a termination signal and the time when the processes are forcibly halted with a kill signal.
// Set this value longer than the expected cleanup time for your process.
// Defaults to 30 seconds.
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
// StatefulSetUpdateStrategy indicates the StatefulSetUpdateStrategy that will be
// employed to update Pods in the StatefulSet when a revision is made to
// Template.
StatefulSetUpdateStrategy appsv1.StatefulSetUpdateStrategyType `json:"statefulSetUpdateStrategy,omitempty"`
}
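// Illustrative sketch only (not part of this change): the override/fallback behaviour the
// comment above describes. The accessor name below is hypothetical; the operator's real
// accessors (e.g. BaseMasterSpec / BaseFilerSpec, used by the controllers in this change)
// live elsewhere in the repository.
//
//  func resolveVersion(clusterVersion string, c *ComponentSpec) string {
//      if c != nil && c.Version != nil && *c.Version != "" {
//          return *c.Version // a component-level value overrides the cluster-level one
//      }
//      return clusterVersion // otherwise fall back to the cluster-level version
//  }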
// ServiceSpec is a subset of the original k8s spec
type ServiceSpec struct {
// Type of the real kubernetes service
Type corev1.ServiceType `json:"type,omitempty"`
// Additional annotations of the kubernetes service object
Annotations map[string]string `json:"annotations,omitempty"`
// LoadBalancerIP is the loadBalancerIP of service
LoadBalancerIP *string `json:"loadBalancerIP,omitempty"`
// ClusterIP is the clusterIP of service
ClusterIP *string `json:"clusterIP,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// Seaweed is the Schema for the seaweeds API
type Seaweed struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec SeaweedSpec `json:"spec,omitempty"`
Status SeaweedStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// SeaweedList contains a list of Seaweed
type SeaweedList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Seaweed `json:"items"`
}
func init() {
SchemeBuilder.Register(&Seaweed{}, &SeaweedList{})
}

api/v1/seaweed_webhook.go Normal file

@ -0,0 +1,93 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
// log is for logging in this package.
var seaweedlog = logf.Log.WithName("seaweed-resource")
func (r *Seaweed) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(r).
Complete()
}
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// +kubebuilder:webhook:path=/mutate-seaweed-seaweedfs-com-v1-seaweed,mutating=true,failurePolicy=fail,groups=seaweed.seaweedfs.com,resources=seaweeds,verbs=create;update,versions=v1,name=mseaweed.kb.io
var _ webhook.Defaulter = &Seaweed{}
// Default implements webhook.Defaulter so a webhook will be registered for the type
func (r *Seaweed) Default() {
seaweedlog.Info("default", "name", r.Name)
// TODO(user): fill in your defaulting logic.
}
// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
// +kubebuilder:webhook:verbs=create;update,path=/validate-seaweed-seaweedfs-com-v1-seaweed,mutating=false,failurePolicy=fail,groups=seaweed.seaweedfs.com,resources=seaweeds,versions=v1,name=vseaweed.kb.io
var _ webhook.Validator = &Seaweed{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (r *Seaweed) ValidateCreate() error {
seaweedlog.Info("validate create", "name", r.Name)
errs := []error{}
// TODO(user): fill in your validation logic upon object creation.
if r.Spec.Master == nil {
errs = append(errs, errors.New("missing master spec"))
}
if r.Spec.Volume == nil {
errs = append(errs, errors.New("missing volume spec"))
} else {
if r.Spec.Volume.Requests[corev1.ResourceStorage].Equal(resource.MustParse("0")) {
errs = append(errs, errors.New("volume storage request cannot be zero"))
}
}
return utilerrors.NewAggregate(errs)
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
func (r *Seaweed) ValidateUpdate(old runtime.Object) error {
seaweedlog.Info("validate update", "name", r.Name)
// TODO(user): fill in your validation logic upon object update.
return nil
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (r *Seaweed) ValidateDelete() error {
seaweedlog.Info("validate delete", "name", r.Name)
// TODO(user): fill in your validation logic upon object deletion.
return nil
}
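
A minimal sketch (not part of this change set) showing how the aggregated validation above could be exercised in a unit test; it relies only on the types defined in this package:

package v1

import (
    "testing"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
)

func TestValidateCreateRequiresVolumeSpec(t *testing.T) {
    sw := &Seaweed{
        Spec: SeaweedSpec{
            Master: &MasterSpec{Replicas: 3},
            Volume: &VolumeSpec{
                Replicas: 1,
                ResourceRequirements: corev1.ResourceRequirements{
                    Requests: corev1.ResourceList{
                        corev1.ResourceStorage: resource.MustParse("2Gi"),
                    },
                },
            },
        },
    }
    // a complete spec passes validation
    if err := sw.ValidateCreate(); err != nil {
        t.Fatalf("expected no error, got %v", err)
    }
    // dropping the volume spec is rejected with an aggregated error
    sw.Spec.Volume = nil
    if err := sw.ValidateCreate(); err == nil {
        t.Fatal("expected a validation error")
    }
}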


@ -1,3 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
@ -21,71 +22,170 @@ limitations under the License.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Master) DeepCopyInto(out *Master) {
func (in *ComponentSpec) DeepCopyInto(out *ComponentSpec) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Master.
func (in *Master) DeepCopy() *Master {
if in == nil {
return nil
if in.Version != nil {
in, out := &in.Version, &out.Version
*out = new(string)
**out = **in
}
out := new(Master)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Master) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
if in.ImagePullPolicy != nil {
in, out := &in.ImagePullPolicy, &out.ImagePullPolicy
*out = new(corev1.PullPolicy)
**out = **in
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MasterList) DeepCopyInto(out *MasterList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Master, len(*in))
if in.ImagePullSecrets != nil {
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
*out = make([]corev1.LocalObjectReference, len(*in))
copy(*out, *in)
}
if in.HostNetwork != nil {
in, out := &in.HostNetwork, &out.HostNetwork
*out = new(bool)
**out = **in
}
if in.Affinity != nil {
in, out := &in.Affinity, &out.Affinity
*out = new(corev1.Affinity)
(*in).DeepCopyInto(*out)
}
if in.PriorityClassName != nil {
in, out := &in.PriorityClassName, &out.PriorityClassName
*out = new(string)
**out = **in
}
if in.SchedulerName != nil {
in, out := &in.SchedulerName, &out.SchedulerName
*out = new(string)
**out = **in
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]corev1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]corev1.EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.TerminationGracePeriodSeconds != nil {
in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
*out = new(int64)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterList.
func (in *MasterList) DeepCopy() *MasterList {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentSpec.
func (in *ComponentSpec) DeepCopy() *ComponentSpec {
if in == nil {
return nil
}
out := new(MasterList)
out := new(ComponentSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MasterList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilerSpec) DeepCopyInto(out *FilerSpec) {
*out = *in
in.ComponentSpec.DeepCopyInto(&out.ComponentSpec)
in.ResourceRequirements.DeepCopyInto(&out.ResourceRequirements)
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = new(ServiceSpec)
(*in).DeepCopyInto(*out)
}
return nil
if in.Config != nil {
in, out := &in.Config, &out.Config
*out = new(string)
**out = **in
}
if in.MaxMB != nil {
in, out := &in.MaxMB, &out.MaxMB
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilerSpec.
func (in *FilerSpec) DeepCopy() *FilerSpec {
if in == nil {
return nil
}
out := new(FilerSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MasterSpec) DeepCopyInto(out *MasterSpec) {
*out = *in
in.ComponentSpec.DeepCopyInto(&out.ComponentSpec)
in.ResourceRequirements.DeepCopyInto(&out.ResourceRequirements)
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = new(ServiceSpec)
(*in).DeepCopyInto(*out)
}
if in.Config != nil {
in, out := &in.Config, &out.Config
*out = new(string)
**out = **in
}
if in.VolumePreallocate != nil {
in, out := &in.VolumePreallocate, &out.VolumePreallocate
*out = new(bool)
**out = **in
}
if in.VolumeSizeLimitMB != nil {
in, out := &in.VolumeSizeLimitMB, &out.VolumeSizeLimitMB
*out = new(int32)
**out = **in
}
if in.GarbageThreshold != nil {
in, out := &in.GarbageThreshold, &out.GarbageThreshold
*out = new(string)
**out = **in
}
if in.PulseSeconds != nil {
in, out := &in.PulseSeconds, &out.PulseSeconds
*out = new(int32)
**out = **in
}
if in.DefaultReplication != nil {
in, out := &in.DefaultReplication, &out.DefaultReplication
*out = new(string)
**out = **in
}
if in.ConcurrentStart != nil {
in, out := &in.ConcurrentStart, &out.ConcurrentStart
*out = new(bool)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterSpec.
@ -99,21 +199,245 @@ func (in *MasterSpec) DeepCopy() *MasterSpec {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MasterStatus) DeepCopyInto(out *MasterStatus) {
func (in *Seaweed) DeepCopyInto(out *Seaweed) {
*out = *in
if in.Nodes != nil {
in, out := &in.Nodes, &out.Nodes
*out = make([]string, len(*in))
copy(*out, *in)
}
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterStatus.
func (in *MasterStatus) DeepCopy() *MasterStatus {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Seaweed.
func (in *Seaweed) DeepCopy() *Seaweed {
if in == nil {
return nil
}
out := new(MasterStatus)
out := new(Seaweed)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Seaweed) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SeaweedList) DeepCopyInto(out *SeaweedList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Seaweed, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeaweedList.
func (in *SeaweedList) DeepCopy() *SeaweedList {
if in == nil {
return nil
}
out := new(SeaweedList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SeaweedList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SeaweedSpec) DeepCopyInto(out *SeaweedSpec) {
*out = *in
if in.Master != nil {
in, out := &in.Master, &out.Master
*out = new(MasterSpec)
(*in).DeepCopyInto(*out)
}
if in.Volume != nil {
in, out := &in.Volume, &out.Volume
*out = new(VolumeSpec)
(*in).DeepCopyInto(*out)
}
if in.Filer != nil {
in, out := &in.Filer, &out.Filer
*out = new(FilerSpec)
(*in).DeepCopyInto(*out)
}
if in.PVReclaimPolicy != nil {
in, out := &in.PVReclaimPolicy, &out.PVReclaimPolicy
*out = new(corev1.PersistentVolumeReclaimPolicy)
**out = **in
}
if in.ImagePullSecrets != nil {
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
*out = make([]corev1.LocalObjectReference, len(*in))
copy(*out, *in)
}
if in.EnablePVReclaim != nil {
in, out := &in.EnablePVReclaim, &out.EnablePVReclaim
*out = new(bool)
**out = **in
}
if in.HostNetwork != nil {
in, out := &in.HostNetwork, &out.HostNetwork
*out = new(bool)
**out = **in
}
if in.Affinity != nil {
in, out := &in.Affinity, &out.Affinity
*out = new(corev1.Affinity)
(*in).DeepCopyInto(*out)
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]corev1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.HostSuffix != nil {
in, out := &in.HostSuffix, &out.HostSuffix
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeaweedSpec.
func (in *SeaweedSpec) DeepCopy() *SeaweedSpec {
if in == nil {
return nil
}
out := new(SeaweedSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SeaweedStatus) DeepCopyInto(out *SeaweedStatus) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeaweedStatus.
func (in *SeaweedStatus) DeepCopy() *SeaweedStatus {
if in == nil {
return nil
}
out := new(SeaweedStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
*out = *in
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.LoadBalancerIP != nil {
in, out := &in.LoadBalancerIP, &out.LoadBalancerIP
*out = new(string)
**out = **in
}
if in.ClusterIP != nil {
in, out := &in.ClusterIP, &out.ClusterIP
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec.
func (in *ServiceSpec) DeepCopy() *ServiceSpec {
if in == nil {
return nil
}
out := new(ServiceSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeSpec) DeepCopyInto(out *VolumeSpec) {
*out = *in
in.ComponentSpec.DeepCopyInto(&out.ComponentSpec)
in.ResourceRequirements.DeepCopyInto(&out.ResourceRequirements)
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = new(ServiceSpec)
(*in).DeepCopyInto(*out)
}
if in.StorageClassName != nil {
in, out := &in.StorageClassName, &out.StorageClassName
*out = new(string)
**out = **in
}
if in.CompactionMBps != nil {
in, out := &in.CompactionMBps, &out.CompactionMBps
*out = new(int32)
**out = **in
}
if in.FileSizeLimitMB != nil {
in, out := &in.FileSizeLimitMB, &out.FileSizeLimitMB
*out = new(int32)
**out = **in
}
if in.FixJpgOrientation != nil {
in, out := &in.FixJpgOrientation, &out.FixJpgOrientation
*out = new(bool)
**out = **in
}
if in.IdleTimeout != nil {
in, out := &in.IdleTimeout, &out.IdleTimeout
*out = new(int32)
**out = **in
}
if in.MaxVolumeCounts != nil {
in, out := &in.MaxVolumeCounts, &out.MaxVolumeCounts
*out = new(int32)
**out = **in
}
if in.MinFreeSpacePercent != nil {
in, out := &in.MinFreeSpacePercent, &out.MinFreeSpacePercent
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSpec.
func (in *VolumeSpec) DeepCopy() *VolumeSpec {
if in == nil {
return nil
}
out := new(VolumeSpec)
in.DeepCopyInto(out)
return out
}


@ -1,8 +1,7 @@
# The following manifests contain a self-signed issuer CR and a certificate CR.
# More document can be found at https://docs.cert-manager.io
# WARNING: Targets CertManager 0.11 check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for
# breaking changes
apiVersion: cert-manager.io/v1alpha2
# WARNING: Targets CertManager 1.7
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: selfsigned-issuer
@ -10,7 +9,7 @@ metadata:
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1alpha2
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml


@ -1,69 +0,0 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.3.0
creationTimestamp: null
name: masters.seaweed.seaweedfs.com
spec:
group: seaweed.seaweedfs.com
names:
kind: Master
listKind: MasterList
plural: masters
singular: master
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
description: Master is the Schema for the masters API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: MasterSpec defines the desired state of Master
properties:
size:
description: Size is the size of the master deployment
format: int32
minimum: 0
type: integer
required:
- size
type: object
status:
description: MasterStatus defines the observed state of Master
properties:
nodes:
description: Nodes are the names of the master pods
items:
type: string
type: array
required:
- nodes
type: object
type: object
version: v1
versions:
- name: v1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

File diff suppressed because it is too large


@ -2,19 +2,12 @@
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default
resources:
- bases/seaweed.seaweedfs.com_masters.yaml
- bases/seaweed.seaweedfs.com_seaweeds.yaml
# +kubebuilder:scaffold:crdkustomizeresource
patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
#- patches/webhook_in_masters.yaml
# +kubebuilder:scaffold:crdkustomizewebhookpatch
# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
#- patches/cainjection_in_masters.yaml
# +kubebuilder:scaffold:crdkustomizecainjectionpatch
- patches/webhook_in_seaweeds.yaml
- patches/cainjection_in_seaweeds.yaml
# the following config is for teaching kustomize how to do kustomization for CRDs.
configurations:


@ -5,4 +5,4 @@ kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: masters.seaweed.seaweedfs.com
name: seaweeds.seaweed.seaweedfs.com


@ -3,7 +3,7 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: masters.seaweed.seaweedfs.com
name: seaweeds.seaweed.seaweedfs.com
spec:
conversion:
strategy: Webhook


@ -16,55 +16,42 @@ bases:
- ../crd
- ../rbac
- ../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus
- ../webhook
- ../certmanager
- ../prometheus
patchesStrategicMerge:
# Protect the /metrics endpoint by putting it behind auth.
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, please comment the following line.
- manager_auth_proxy_patch.yaml
- manager_webhook_patch.yaml
- webhookcainjection_patch.yaml
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- manager_webhook_patch.yaml
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
# 'CERTMANAGER' needs to be enabled to use ca injection
#- webhookcainjection_patch.yaml
# the following config is for teaching kustomize how to do var substitution
vars:
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1alpha2
# name: serving-cert # this name should match the one in certificate.yaml
# fieldref:
# fieldpath: metadata.namespace
#- name: CERTIFICATE_NAME
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1alpha2
# name: serving-cert # this name should match the one in certificate.yaml
#- name: SERVICE_NAMESPACE # namespace of the service
# objref:
# kind: Service
# version: v1
# name: webhook-service
# fieldref:
# fieldpath: metadata.namespace
#- name: SERVICE_NAME
# objref:
# kind: Service
# version: v1
# name: webhook-service
- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
objref:
kind: Certificate
group: cert-manager.io
version: v1alpha2
name: serving-cert # this name should match the one in certificate.yaml
fieldref:
fieldpath: metadata.namespace
- name: CERTIFICATE_NAME
objref:
kind: Certificate
group: cert-manager.io
version: v1alpha2
name: serving-cert # this name should match the one in certificate.yaml
- name: SERVICE_NAMESPACE # namespace of the service
objref:
kind: Service
version: v1
name: webhook-service
fieldref:
fieldpath: metadata.namespace
- name: SERVICE_NAME
objref:
kind: Service
version: v1
name: webhook-service


@ -1,2 +1,8 @@
resources:
- manager.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
newName: chrislusf/seaweedfs-operator
newTag: v0.0.1


@ -28,12 +28,15 @@ spec:
args:
- --enable-leader-election
image: controller:latest
env:
- name: ENABLE_WEBHOOKS
value: "true"
name: manager
resources:
limits:
cpu: 100m
memory: 30Mi
memory: 100Mi
requests:
cpu: 100m
memory: 20Mi
memory: 50Mi
terminationGracePeriodSeconds: 10


@ -7,9 +7,64 @@ metadata:
name: manager-role
rules:
- apiGroups:
- seaweed.seaweedfs.com
- apps
resources:
- masters
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- apiGroups:
- ""
resources:
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- create
- delete
@ -21,7 +76,19 @@ rules:
- apiGroups:
- seaweed.seaweedfs.com
resources:
- masters/status
- seaweeds
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- seaweed.seaweedfs.com
resources:
- seaweeds/status
verbs:
- get
- patch


@ -1,13 +1,13 @@
# permissions for end users to edit masters.
# permissions for end users to edit seaweeds.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: master-editor-role
name: seaweed-editor-role
rules:
- apiGroups:
- seaweed.seaweedfs.com
resources:
- masters
- seaweeds
verbs:
- create
- delete
@ -19,6 +19,6 @@ rules:
- apiGroups:
- seaweed.seaweedfs.com
resources:
- masters/status
- seaweeds/status
verbs:
- get


@ -1,13 +1,13 @@
# permissions for end users to view masters.
# permissions for end users to view seaweeds.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: master-viewer-role
name: seaweed-viewer-role
rules:
- apiGroups:
- seaweed.seaweedfs.com
resources:
- masters
- seaweeds
verbs:
- get
- list
@ -15,6 +15,6 @@ rules:
- apiGroups:
- seaweed.seaweedfs.com
resources:
- masters/status
- seaweeds/status
verbs:
- get


@ -1,3 +1,3 @@
## This file is auto-generated, do not modify ##
resources:
- seaweed_v1_master.yaml
- seaweed_v1_seaweed.yaml


@ -1,7 +0,0 @@
apiVersion: seaweed.seaweedfs.com/v1
kind: Master
metadata:
name: master-sample
spec:
# Add fields here
foo: bar


@ -0,0 +1,23 @@
apiVersion: seaweed.seaweedfs.com/v1
kind: Seaweed
metadata:
name: seaweed1
namespace: default
spec:
# Add fields here
image: chrislusf/seaweedfs:2.96
volumeServerDiskCount: 1
hostSuffix: seaweed.abcdefg.com
master:
replicas: 3
volumeSizeLimitMB: 1024
volume:
replicas: 1
requests:
storage: 2Gi
filer:
replicas: 2
config: |
[leveldb2]
enabled = true
dir = "/data/filerldb2"


@ -0,0 +1,54 @@
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
creationTimestamp: null
name: mutating-webhook-configuration
webhooks:
- clientConfig:
caBundle: Cg==
service:
name: webhook-service
namespace: system
path: /mutate-seaweed-seaweedfs-com-v1-seaweed
failurePolicy: Fail
name: mseaweed.kb.io
rules:
- apiGroups:
- seaweed.seaweedfs.com
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- seaweeds
timeoutSeconds: 15
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
creationTimestamp: null
name: validating-webhook-configuration
webhooks:
- clientConfig:
caBundle: Cg==
service:
name: webhook-service
namespace: system
path: /validate-seaweed-seaweedfs-com-v1-seaweed
failurePolicy: Fail
name: vseaweed.kb.io
rules:
- apiGroups:
- seaweed.seaweedfs.com
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- seaweeds
timeoutSeconds: 15


@ -0,0 +1,108 @@
package controllers
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
appsv1 "k8s.io/api/apps/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
label "github.com/seaweedfs/seaweedfs-operator/controllers/label"
)
func (r *SeaweedReconciler) ensureFilerServers(seaweedCR *seaweedv1.Seaweed) (done bool, result ctrl.Result, err error) {
_ = context.Background()
_ = r.Log.WithValues("seaweed", seaweedCR.Name)
if done, result, err = r.ensureFilerPeerService(seaweedCR); done {
return
}
if done, result, err = r.ensureFilerService(seaweedCR); done {
return
}
if done, result, err = r.ensureFilerConfigMap(seaweedCR); done {
return
}
if done, result, err = r.ensureFilerStatefulSet(seaweedCR); done {
return
}
return
}
func (r *SeaweedReconciler) ensureFilerStatefulSet(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-filer-statefulset", seaweedCR.Name)
filerStatefulSet := r.createFilerStatefulSet(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, filerStatefulSet, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdate(filerStatefulSet, func(existing, desired runtime.Object) error {
existingStatefulSet := existing.(*appsv1.StatefulSet)
desiredStatefulSet := desired.(*appsv1.StatefulSet)
existingStatefulSet.Spec.Replicas = desiredStatefulSet.Spec.Replicas
existingStatefulSet.Spec.Template.Spec = desiredStatefulSet.Spec.Template.Spec
return nil
})
log.Info("ensure filer stateful set " + filerStatefulSet.Name)
return ReconcileResult(err)
}
func (r *SeaweedReconciler) ensureFilerPeerService(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-filer-peer-service", seaweedCR.Name)
filerPeerService := r.createFilerPeerService(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, filerPeerService, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdateService(filerPeerService)
log.Info("ensure filer peer service " + filerPeerService.Name)
return ReconcileResult(err)
}
func (r *SeaweedReconciler) ensureFilerService(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-filer-service", seaweedCR.Name)
filerService := r.createFilerService(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, filerService, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdateService(filerService)
log.Info("ensure filer service " + filerService.Name)
return ReconcileResult(err)
}
func (r *SeaweedReconciler) ensureFilerConfigMap(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-filer-configmap", seaweedCR.Name)
filerConfigMap := r.createFilerConfigMap(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, filerConfigMap, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdateConfigMap(filerConfigMap)
log.Info("Get filer ConfigMap " + filerConfigMap.Name)
return ReconcileResult(err)
}
func labelsForFiler(name string) map[string]string {
return map[string]string{
label.ManagedByLabelKey: "seaweedfs-operator",
label.NameLabelKey: "seaweedfs",
label.ComponentLabelKey: "filer",
label.InstanceLabelKey: name,
}
}


@ -0,0 +1,29 @@
package controllers
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func (r *SeaweedReconciler) createFilerConfigMap(m *seaweedv1.Seaweed) *corev1.ConfigMap {
labels := labelsForFiler(m.Name)
toml := ""
if m.Spec.Filer.Config != nil {
toml = *m.Spec.Filer.Config
}
dep := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-filer",
Namespace: m.Namespace,
Labels: labels,
},
Data: map[string]string{
"filer.toml": toml,
},
}
return dep
}


@ -0,0 +1,85 @@
package controllers
import (
"fmt"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
ctrl "sigs.k8s.io/controller-runtime"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func (r *SeaweedReconciler) createAllIngress(m *seaweedv1.Seaweed) *extensionsv1beta1.Ingress {
labels := labelsForIngress(m.Name)
dep := &extensionsv1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-ingress",
Namespace: m.Namespace,
Labels: labels,
},
Spec: extensionsv1beta1.IngressSpec{
// TLS: ingressSpec.TLS,
Rules: []extensionsv1beta1.IngressRule{
{
Host: "filer." + *m.Spec.HostSuffix,
IngressRuleValue: extensionsv1beta1.IngressRuleValue{
HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
Paths: []extensionsv1beta1.HTTPIngressPath{
{
Path: "/",
Backend: extensionsv1beta1.IngressBackend{
ServiceName: m.Name + "-filer",
ServicePort: intstr.FromInt(seaweedv1.FilerHTTPPort),
},
},
},
},
},
},
{
Host: "s3." + *m.Spec.HostSuffix,
IngressRuleValue: extensionsv1beta1.IngressRuleValue{
HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
Paths: []extensionsv1beta1.HTTPIngressPath{
{
Path: "/",
Backend: extensionsv1beta1.IngressBackend{
ServiceName: m.Name + "-filer",
ServicePort: intstr.FromInt(seaweedv1.FilerS3Port),
},
},
},
},
},
},
},
},
}
// add ingress for volume servers
for i := 0; i < int(m.Spec.Volume.Replicas); i++ {
dep.Spec.Rules = append(dep.Spec.Rules, extensionsv1beta1.IngressRule{
Host: fmt.Sprintf("%s-volume-%d.%s", m.Name, i, *m.Spec.HostSuffix),
IngressRuleValue: extensionsv1beta1.IngressRuleValue{
HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
Paths: []extensionsv1beta1.HTTPIngressPath{
{
Path: "/",
Backend: extensionsv1beta1.IngressBackend{
ServiceName: fmt.Sprintf("%s-volume-%d", m.Name, i),
ServicePort: intstr.FromInt(seaweedv1.VolumeHTTPPort),
},
},
},
},
},
})
}
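// For illustration (not part of the original change): with the sample CR earlier in this
// change set (name "seaweed1", hostSuffix "seaweed.abcdefg.com", one volume replica),
// rules are generated for filer.seaweed.abcdefg.com, s3.seaweed.abcdefg.com and
// seaweed1-volume-0.seaweed.abcdefg.com, each backed by the matching filer or volume Service.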
// Set the Seaweed instance as the owner and controller
ctrl.SetControllerReference(m, dep, r.Scheme)
return dep
}


@ -0,0 +1,108 @@
package controllers
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func (r *SeaweedReconciler) createFilerPeerService(m *seaweedv1.Seaweed) *corev1.Service {
labels := labelsForFiler(m.Name)
dep := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-filer-peer",
Namespace: m.Namespace,
Labels: labels,
Annotations: map[string]string{
"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
},
},
Spec: corev1.ServiceSpec{
ClusterIP: "None",
PublishNotReadyAddresses: true,
Ports: []corev1.ServicePort{
{
Name: "filer-http",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.FilerHTTPPort,
TargetPort: intstr.FromInt(seaweedv1.FilerHTTPPort),
},
{
Name: "filer-grpc",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.FilerGRPCPort,
TargetPort: intstr.FromInt(seaweedv1.FilerGRPCPort),
},
{
Name: "filer-s3",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.FilerS3Port,
TargetPort: intstr.FromInt(seaweedv1.FilerS3Port),
},
},
Selector: labels,
},
}
return dep
}
func (r *SeaweedReconciler) createFilerService(m *seaweedv1.Seaweed) *corev1.Service {
labels := labelsForFiler(m.Name)
dep := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-filer",
Namespace: m.Namespace,
Labels: labels,
Annotations: map[string]string{
"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
},
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
PublishNotReadyAddresses: true,
Ports: []corev1.ServicePort{
{
Name: "filer-http",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.FilerHTTPPort,
TargetPort: intstr.FromInt(seaweedv1.FilerHTTPPort),
},
{
Name: "filer-grpc",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.FilerGRPCPort,
TargetPort: intstr.FromInt(seaweedv1.FilerGRPCPort),
},
{
Name: "filer-s3",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.FilerS3Port,
TargetPort: intstr.FromInt(seaweedv1.FilerS3Port),
},
},
Selector: labels,
},
}
if m.Spec.Filer.Service != nil {
svcSpec := m.Spec.Filer.Service
dep.Annotations = copyAnnotations(svcSpec.Annotations)
if svcSpec.Type != "" {
dep.Spec.Type = svcSpec.Type
}
if svcSpec.ClusterIP != nil {
dep.Spec.ClusterIP = *svcSpec.ClusterIP
}
if svcSpec.LoadBalancerIP != nil {
dep.Spec.LoadBalancerIP = *svcSpec.LoadBalancerIP
}
}
return dep
}


@ -0,0 +1,133 @@
package controllers
import (
"fmt"
"strings"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func buildFilerStartupScript(m *seaweedv1.Seaweed) string {
commands := []string{"weed", "-logtostderr=true", "filer"}
commands = append(commands, fmt.Sprintf("-port=%d", seaweedv1.FilerHTTPPort))
commands = append(commands, fmt.Sprintf("-ip=$(POD_NAME).%s-filer-peer.%s", m.Name, m.Namespace))
commands = append(commands, fmt.Sprintf("-master=%s", getMasterPeersString(m)))
commands = append(commands, "-s3")
return strings.Join(commands, " ")
}
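// Illustrative only (not part of the original change): for a Seaweed named "seaweed1" in
// namespace "default" this builds a command along the lines of
//   weed -logtostderr=true filer -port=<FilerHTTPPort> -ip=$(POD_NAME).seaweed1-filer-peer.default -master=<master peers> -s3
// where the port constant and the peer list (getMasterPeersString) are defined elsewhere in the operator.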
func (r *SeaweedReconciler) createFilerStatefulSet(m *seaweedv1.Seaweed) *appsv1.StatefulSet {
labels := labelsForFiler(m.Name)
replicas := int32(m.Spec.Filer.Replicas)
rollingUpdatePartition := int32(0)
enableServiceLinks := false
filerPodSpec := m.BaseFilerSpec().BuildPodSpec()
filerPodSpec.Volumes = []corev1.Volume{
{
Name: "filer-config",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: m.Name + "-filer",
},
},
},
},
}
filerPodSpec.EnableServiceLinks = &enableServiceLinks
filerPodSpec.Containers = []corev1.Container{{
Name: "filer",
Image: m.Spec.Image,
ImagePullPolicy: m.BaseFilerSpec().ImagePullPolicy(),
Env: append(m.BaseFilerSpec().Env(), kubernetesEnvVars...),
VolumeMounts: []corev1.VolumeMount{
{
Name: "filer-config",
ReadOnly: true,
MountPath: "/etc/seaweedfs",
},
},
Command: []string{
"/bin/sh",
"-ec",
buildFilerStartupScript(m),
},
Ports: []corev1.ContainerPort{
{
ContainerPort: seaweedv1.FilerHTTPPort,
Name: "filer-http",
},
{
ContainerPort: seaweedv1.FilerGRPCPort,
Name: "filer-grpc",
},
{
ContainerPort: seaweedv1.FilerS3Port,
Name: "filer-s3",
},
},
ReadinessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/",
Port: intstr.FromInt(seaweedv1.FilerHTTPPort),
Scheme: corev1.URISchemeHTTP,
},
},
InitialDelaySeconds: 10,
TimeoutSeconds: 3,
PeriodSeconds: 15,
SuccessThreshold: 1,
FailureThreshold: 100,
},
LivenessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/",
Port: intstr.FromInt(seaweedv1.FilerHTTPPort),
Scheme: corev1.URISchemeHTTP,
},
},
InitialDelaySeconds: 20,
TimeoutSeconds: 3,
PeriodSeconds: 30,
SuccessThreshold: 1,
FailureThreshold: 6,
},
}}
dep := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-filer",
Namespace: m.Namespace,
},
Spec: appsv1.StatefulSetSpec{
ServiceName: m.Name + "-filer-peer",
PodManagementPolicy: appsv1.ParallelPodManagement,
Replicas: &replicas,
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
RollingUpdate: &appsv1.RollingUpdateStatefulSetStrategy{
Partition: &rollingUpdatePartition,
},
},
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: filerPodSpec,
},
},
}
return dep
}


@ -0,0 +1,42 @@
package controllers
import (
"github.com/seaweedfs/seaweedfs-operator/controllers/label"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func (r *SeaweedReconciler) ensureSeaweedIngress(seaweedCR *seaweedv1.Seaweed) (done bool, result ctrl.Result, err error) {
if seaweedCR.Spec.HostSuffix != nil && len(*seaweedCR.Spec.HostSuffix) != 0 {
if done, result, err = r.ensureAllIngress(seaweedCR); done {
return
}
}
return
}
func (r *SeaweedReconciler) ensureAllIngress(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-ingress", seaweedCR.Name)
ingressService := r.createAllIngress(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, ingressService, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdateIngress(ingressService)
log.Info("ensure ingress " + ingressService.Name)
return ReconcileResult(err)
}
func labelsForIngress(name string) map[string]string {
return map[string]string{
label.ManagedByLabelKey: "seaweedfs-operator",
label.NameLabelKey: "seaweedfs",
label.ComponentLabelKey: "ingress",
label.InstanceLabelKey: name,
}
}


@ -0,0 +1,151 @@
package controllers
import (
"context"
"time"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
"github.com/seaweedfs/seaweedfs-operator/controllers/label"
)
func (r *SeaweedReconciler) ensureMaster(seaweedCR *seaweedv1.Seaweed) (done bool, result ctrl.Result, err error) {
_ = context.Background()
_ = r.Log.WithValues("seaweed", seaweedCR.Name)
if done, result, err = r.ensureMasterPeerService(seaweedCR); done {
return
}
if done, result, err = r.ensureMasterService(seaweedCR); done {
return
}
if done, result, err = r.ensureMasterConfigMap(seaweedCR); done {
return
}
if done, result, err = r.ensureMasterStatefulSet(seaweedCR); done {
return
}
if seaweedCR.Spec.Master.ConcurrentStart == nil || !*seaweedCR.Spec.Master.ConcurrentStart {
if done, result, err = r.waitForMasterStatefulSet(seaweedCR); done {
return
}
}
return
}
func (r *SeaweedReconciler) waitForMasterStatefulSet(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-master-statefulset", seaweedCR.Name)
podList := &corev1.PodList{}
listOpts := []client.ListOption{
client.InNamespace(seaweedCR.Namespace),
client.MatchingLabels(labelsForMaster(seaweedCR.Name)),
}
if err := r.List(context.Background(), podList, listOpts...); err != nil {
log.Error(err, "Failed to list master pods", "namespace", seaweedCR.Namespace, "name", seaweedCR.Name)
return true, ctrl.Result{RequeueAfter: 3 * time.Second}, nil
}
log.Info("pods", "count", len(podList.Items))
runningCounter := 0
for _, pod := range podList.Items {
if pod.Status.Phase == corev1.PodRunning {
for _, containerStatus := range pod.Status.ContainerStatuses {
if containerStatus.Ready {
runningCounter++
}
log.Info("pod", "name", pod.Name, "containerStatus", containerStatus)
}
} else {
log.Info("pod", "name", pod.Name, "status", pod.Status)
}
}
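// e.g. with Master.Replicas=3, at least 2 masters (a majority) must be running and ready
// before reconciliation proceeds; otherwise we requeue below.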
if runningCounter < int(seaweedCR.Spec.Master.Replicas)/2+1 {
log.Info("some masters are not ready", "missing", int(seaweedCR.Spec.Master.Replicas)-runningCounter)
return true, ctrl.Result{RequeueAfter: 3 * time.Second}, nil
}
log.Info("masters are ready")
return ReconcileResult(nil)
}
func (r *SeaweedReconciler) ensureMasterStatefulSet(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-master-statefulset", seaweedCR.Name)
masterStatefulSet := r.createMasterStatefulSet(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, masterStatefulSet, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdate(masterStatefulSet, func(existing, desired runtime.Object) error {
existingStatefulSet := existing.(*appsv1.StatefulSet)
desiredStatefulSet := desired.(*appsv1.StatefulSet)
existingStatefulSet.Spec.Replicas = desiredStatefulSet.Spec.Replicas
existingStatefulSet.Spec.Template.Spec = desiredStatefulSet.Spec.Template.Spec
return nil
})
log.Info("ensure master stateful set " + masterStatefulSet.Name)
return ReconcileResult(err)
}
func (r *SeaweedReconciler) ensureMasterConfigMap(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-master-configmap", seaweedCR.Name)
masterConfigMap := r.createMasterConfigMap(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, masterConfigMap, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdateConfigMap(masterConfigMap)
log.Info("Get master ConfigMap " + masterConfigMap.Name)
return ReconcileResult(err)
}
func (r *SeaweedReconciler) ensureMasterService(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-master-service", seaweedCR.Name)
masterService := r.createMasterService(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, masterService, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdateService(masterService)
log.Info("Get master service " + masterService.Name)
return ReconcileResult(err)
}
func (r *SeaweedReconciler) ensureMasterPeerService(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-master-peer-service", seaweedCR.Name)
masterPeerService := r.createMasterPeerService(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, masterPeerService, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdateService(masterPeerService)
log.Info("Get master peer service " + masterPeerService.Name)
return ReconcileResult(err)
}
func labelsForMaster(name string) map[string]string {
return map[string]string{
label.ManagedByLabelKey: "seaweedfs-operator",
label.NameLabelKey: "seaweedfs",
label.ComponentLabelKey: "master",
label.InstanceLabelKey: name,
}
}


@ -0,0 +1,31 @@
package controllers
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func (r *SeaweedReconciler) createMasterConfigMap(m *seaweedv1.Seaweed) *corev1.ConfigMap {
labels := labelsForMaster(m.Name)
toml := ""
if m.Spec.Master.Config != nil {
toml = *m.Spec.Master.Config
}
dep := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-master",
Namespace: m.Namespace,
Labels: labels,
},
Data: map[string]string{
"master.toml": toml,
},
}
// Set master instance as the owner and controller
// ctrl.SetControllerReference(m, dep, r.Scheme)
return dep
}


@ -0,0 +1,97 @@
package controllers
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func (r *SeaweedReconciler) createMasterPeerService(m *seaweedv1.Seaweed) *corev1.Service {
labels := labelsForMaster(m.Name)
dep := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-master-peer",
Namespace: m.Namespace,
Labels: labels,
Annotations: map[string]string{
"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
},
},
Spec: corev1.ServiceSpec{
ClusterIP: "None",
PublishNotReadyAddresses: true,
Ports: []corev1.ServicePort{
{
Name: "master-http",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.MasterHTTPPort,
TargetPort: intstr.FromInt(seaweedv1.MasterHTTPPort),
},
{
Name: "master-grpc",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.MasterGRPCPort,
TargetPort: intstr.FromInt(seaweedv1.MasterGRPCPort),
},
},
Selector: labels,
},
}
// Set master instance as the owner and controller
// ctrl.SetControllerReference(m, dep, r.Scheme)
return dep
}
func (r *SeaweedReconciler) createMasterService(m *seaweedv1.Seaweed) *corev1.Service {
labels := labelsForMaster(m.Name)
dep := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-master",
Namespace: m.Namespace,
Labels: labels,
Annotations: map[string]string{
"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
},
},
Spec: corev1.ServiceSpec{
PublishNotReadyAddresses: true,
Ports: []corev1.ServicePort{
{
Name: "master-http",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.MasterHTTPPort,
TargetPort: intstr.FromInt(seaweedv1.MasterHTTPPort),
},
{
Name: "master-grpc",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.MasterGRPCPort,
TargetPort: intstr.FromInt(seaweedv1.MasterGRPCPort),
},
},
Selector: labels,
},
}
if m.Spec.Master.Service != nil {
svcSpec := m.Spec.Master.Service
dep.Annotations = copyAnnotations(svcSpec.Annotations)
if svcSpec.Type != "" {
dep.Spec.Type = svcSpec.Type
}
if svcSpec.ClusterIP != nil {
dep.Spec.ClusterIP = *svcSpec.ClusterIP
}
if svcSpec.LoadBalancerIP != nil {
dep.Spec.LoadBalancerIP = *svcSpec.LoadBalancerIP
}
}
return dep
}


@ -0,0 +1,149 @@
package controllers
import (
"fmt"
"strings"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func buildMasterStartupScript(m *seaweedv1.Seaweed) string {
command := []string{"weed", "-logtostderr=true", "master"}
spec := m.Spec.Master
if spec.VolumePreallocate != nil && *spec.VolumePreallocate {
command = append(command, "-volumePreallocate")
}
if spec.VolumeSizeLimitMB != nil {
command = append(command, fmt.Sprintf("-volumeSizeLimitMB=%d", *spec.VolumeSizeLimitMB))
}
if spec.GarbageThreshold != nil {
command = append(command, fmt.Sprintf("-garbageThreshold=%s", *spec.GarbageThreshold))
}
if spec.PulseSeconds != nil {
command = append(command, fmt.Sprintf("-pulseSeconds=%d", *spec.PulseSeconds))
}
if spec.DefaultReplication != nil {
command = append(command, fmt.Sprintf("-defaultReplication=%s", *spec.DefaultReplication))
}
command = append(command, fmt.Sprintf("-ip=$(POD_NAME).%s-master-peer.%s", m.Name, m.Namespace))
command = append(command, fmt.Sprintf("-peers=%s", getMasterPeersString(m)))
return strings.Join(command, " ")
}
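// Illustrative only (not part of the original change): a MasterSpec with VolumeSizeLimitMB=1024
// and DefaultReplication="000" yields roughly
//   weed -logtostderr=true master -volumeSizeLimitMB=1024 -defaultReplication=000 -ip=$(POD_NAME).<name>-master-peer.<namespace> -peers=<peer list>
// with each optional flag emitted only when the corresponding spec field is set.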
func (r *SeaweedReconciler) createMasterStatefulSet(m *seaweedv1.Seaweed) *appsv1.StatefulSet {
labels := labelsForMaster(m.Name)
replicas := m.Spec.Master.Replicas
rollingUpdatePartition := int32(0)
enableServiceLinks := false
masterPodSpec := m.BaseMasterSpec().BuildPodSpec()
masterPodSpec.Volumes = []corev1.Volume{
{
Name: "master-config",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: m.Name + "-master",
},
},
},
},
}
masterPodSpec.EnableServiceLinks = &enableServiceLinks
masterPodSpec.Containers = []corev1.Container{{
Name: "master",
Image: m.Spec.Image,
ImagePullPolicy: m.BaseMasterSpec().ImagePullPolicy(),
Env: append(m.BaseMasterSpec().Env(), kubernetesEnvVars...),
VolumeMounts: []corev1.VolumeMount{
{
Name: "master-config",
ReadOnly: true,
MountPath: "/etc/seaweedfs",
},
},
Command: []string{
"/bin/sh",
"-ec",
buildMasterStartupScript(m),
},
Ports: []corev1.ContainerPort{
{
ContainerPort: seaweedv1.MasterHTTPPort,
Name: "master-http",
},
{
ContainerPort: seaweedv1.MasterGRPCPort,
Name: "master-grpc",
},
},
ReadinessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/cluster/status",
Port: intstr.FromInt(seaweedv1.MasterHTTPPort),
Scheme: corev1.URISchemeHTTP,
},
},
InitialDelaySeconds: 5,
TimeoutSeconds: 15,
PeriodSeconds: 15,
SuccessThreshold: 2,
FailureThreshold: 100,
},
LivenessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/cluster/status",
Port: intstr.FromInt(seaweedv1.MasterHTTPPort),
Scheme: corev1.URISchemeHTTP,
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 15,
PeriodSeconds: 15,
SuccessThreshold: 1,
FailureThreshold: 6,
},
}}
dep := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-master",
Namespace: m.Namespace,
},
Spec: appsv1.StatefulSetSpec{
ServiceName: m.Name + "-master-peer",
PodManagementPolicy: appsv1.ParallelPodManagement,
Replicas: &replicas,
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
RollingUpdate: &appsv1.RollingUpdateStatefulSetStrategy{
Partition: &rollingUpdatePartition,
},
},
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: masterPodSpec,
},
},
}
// Set master instance as the owner and controller
// ctrl.SetControllerReference(m, dep, r.Scheme)
return dep
}


@ -0,0 +1,328 @@
package controllers
import (
"context"
"encoding/json"
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// the following is adapted from tidb-operator/pkg/controller/generic_control.go
const (
// LastAppliedPodTemplate is annotation key of the last applied pod template
LastAppliedPodTemplate = "seaweedfs.com/last-applied-podtemplate"
// LastAppliedConfigAnnotation is annotation key of last applied configuration
LastAppliedConfigAnnotation = "seaweedfs.com/last-applied-configuration"
)
// MergeFn is to resolve conflicts
type MergeFn func(existing, desired runtime.Object) error
// CreateOrUpdate creates an object in the Kubernetes cluster for the controller. If the object already exists,
// mergeFn is called to merge the desired changes into the existing object, which is then updated.
// The object will also be adopted by the given controller.
func (r *SeaweedReconciler) CreateOrUpdate(obj runtime.Object, mergeFn MergeFn) (runtime.Object, error) {
// controller-runtime/client will mutate the object pointer in-place,
// to be consistent with other methods in our controller, we copy the object
// to avoid the in-place mutation here and hereafter.
desired := obj.DeepCopyObject()
// 1. try to create and see if there are any conflicts
err := r.Create(context.TODO(), desired)
if errors.IsAlreadyExists(err) {
// 2. the object already exists, merge our desired changes into it
existing, err := EmptyClone(obj)
if err != nil {
return nil, err
}
key, err := client.ObjectKeyFromObject(existing)
if err != nil {
return nil, err
}
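// 3. fetch the current state of the existing object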
err = r.Get(context.TODO(), key, existing)
if err != nil {
return nil, err
}
mutated := existing.DeepCopyObject()
// 4. invoke mergeFn to mutate a copy of the existing object
if err := mergeFn(mutated, desired); err != nil {
return nil, err
}
// 5. check if the copy is actually mutated
if !apiequality.Semantic.DeepEqual(existing, mutated) {
err := r.Update(context.TODO(), mutated)
return mutated, err
}
return mutated, nil
}
return desired, err
}
func (r *SeaweedReconciler) addSpecToAnnotation(d *appsv1.Deployment) error {
b, err := json.Marshal(d.Spec.Template.Spec)
if err != nil {
return err
}
if d.Annotations == nil {
d.Annotations = map[string]string{}
}
d.Annotations[LastAppliedPodTemplate] = string(b)
return nil
}
func (r *SeaweedReconciler) CreateOrUpdateDeployment(deploy *appsv1.Deployment) (*appsv1.Deployment, error) {
r.addSpecToAnnotation(deploy)
result, err := r.CreateOrUpdate(deploy, func(existing, desired runtime.Object) error {
existingDep := existing.(*appsv1.Deployment)
desiredDep := desired.(*appsv1.Deployment)
existingDep.Spec.Replicas = desiredDep.Spec.Replicas
existingDep.Labels = desiredDep.Labels
if existingDep.Annotations == nil {
existingDep.Annotations = map[string]string{}
}
for k, v := range desiredDep.Annotations {
existingDep.Annotations[k] = v
}
// only override the default strategy if it is explicitly set in the desiredDep
if string(desiredDep.Spec.Strategy.Type) != "" {
existingDep.Spec.Strategy.Type = desiredDep.Spec.Strategy.Type
if existingDep.Spec.Strategy.RollingUpdate != nil {
existingDep.Spec.Strategy.RollingUpdate = desiredDep.Spec.Strategy.RollingUpdate
}
}
// the pod selector of a deployment is immutable, so we don't mutate the labels of the pods
for k, v := range desiredDep.Spec.Template.Annotations {
existingDep.Spec.Template.Annotations[k] = v
}
// podSpec of deployment is hard to merge, use an annotation to assist
if DeploymentPodSpecChanged(desiredDep, existingDep) {
// Record last applied spec in favor of future equality check
b, err := json.Marshal(desiredDep.Spec.Template.Spec)
if err != nil {
return err
}
existingDep.Annotations[LastAppliedConfigAnnotation] = string(b)
existingDep.Spec.Template.Spec = desiredDep.Spec.Template.Spec
}
return nil
})
if err != nil {
return nil, err
}
return result.(*appsv1.Deployment), err
}
func (r *SeaweedReconciler) CreateOrUpdateService(svc *corev1.Service) (*corev1.Service, error) {
result, err := r.CreateOrUpdate(svc, func(existing, desired runtime.Object) error {
existingSvc := existing.(*corev1.Service)
desiredSvc := desired.(*corev1.Service)
if existingSvc.Annotations == nil {
existingSvc.Annotations = map[string]string{}
}
for k, v := range desiredSvc.Annotations {
existingSvc.Annotations[k] = v
}
existingSvc.Labels = desiredSvc.Labels
equal, err := ServiceEqual(desiredSvc, existingSvc)
if err != nil {
return err
}
if !equal {
// record the desired Service spec in annotations for future equality checks
b, err := json.Marshal(desiredSvc.Spec)
if err != nil {
return err
}
existingSvc.Annotations[LastAppliedConfigAnnotation] = string(b)
clusterIp := existingSvc.Spec.ClusterIP
ports := existingSvc.Spec.Ports
serviceType := existingSvc.Spec.Type
existingSvc.Spec = desiredSvc.Spec
existingSvc.Spec.ClusterIP = clusterIp
// If both the existing service and the desired service are of type NodePort or LoadBalancer, keep the nodePort unchanged.
if (serviceType == corev1.ServiceTypeNodePort || serviceType == corev1.ServiceTypeLoadBalancer) &&
(desiredSvc.Spec.Type == corev1.ServiceTypeNodePort || desiredSvc.Spec.Type == corev1.ServiceTypeLoadBalancer) {
for i, dport := range existingSvc.Spec.Ports {
for _, eport := range ports {
// Because the port name could be edited,
// match the desired and existing service ports by port number and protocol in the nested loop
if dport.Port == eport.Port && dport.Protocol == eport.Protocol {
dport.NodePort = eport.NodePort
existingSvc.Spec.Ports[i] = dport
break
}
}
}
}
}
return nil
})
if err != nil {
return nil, err
}
return result.(*corev1.Service), nil
}
func (r *SeaweedReconciler) CreateOrUpdateIngress(ingress *extensionsv1beta1.Ingress) (*extensionsv1beta1.Ingress, error) {
result, err := r.CreateOrUpdate(ingress, func(existing, desired runtime.Object) error {
existingIngress := existing.(*extensionsv1beta1.Ingress)
desiredIngress := desired.(*extensionsv1beta1.Ingress)
if existingIngress.Annotations == nil {
existingIngress.Annotations = map[string]string{}
}
for k, v := range desiredIngress.Annotations {
existingIngress.Annotations[k] = v
}
existingIngress.Labels = desiredIngress.Labels
equal, err := IngressEqual(desiredIngress, existingIngress)
if err != nil {
return err
}
if !equal {
// record the desired Ingress spec in annotations for future equality checks
b, err := json.Marshal(desiredIngress.Spec)
if err != nil {
return err
}
existingIngress.Annotations[LastAppliedConfigAnnotation] = string(b)
existingIngress.Spec = desiredIngress.Spec
}
return nil
})
if err != nil {
return nil, err
}
return result.(*extensionsv1beta1.Ingress), nil
}
func (r *SeaweedReconciler) CreateOrUpdateConfigMap(configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
result, err := r.CreateOrUpdate(configMap, func(existing, desired runtime.Object) error {
existingConfigMap := existing.(*corev1.ConfigMap)
desiredConfigMap := desired.(*corev1.ConfigMap)
if existingConfigMap.Annotations == nil {
existingConfigMap.Annotations = map[string]string{}
}
for k, v := range desiredConfigMap.Annotations {
existingConfigMap.Annotations[k] = v
}
existingConfigMap.Labels = desiredConfigMap.Labels
existingConfigMap.Data = desiredConfigMap.Data
return nil
})
if err != nil {
return nil, err
}
return result.(*corev1.ConfigMap), nil
}
// EmptyClone creates a clone of the resource with the same name and namespace (if namespace-scoped), with all other fields unset
func EmptyClone(obj runtime.Object) (runtime.Object, error) {
meta, ok := obj.(metav1.Object)
if !ok {
return nil, fmt.Errorf("Obj %v is not a metav1.Object, cannot call EmptyClone", obj)
}
gvk, err := InferObjectKind(obj)
if err != nil {
return nil, err
}
inst, err := scheme.Scheme.New(gvk)
if err != nil {
return nil, err
}
instMeta, ok := inst.(metav1.Object)
if !ok {
return nil, fmt.Errorf("New instatnce %v created from scheme is not a metav1.Object, EmptyClone failed", inst)
}
instMeta.SetName(meta.GetName())
instMeta.SetNamespace(meta.GetNamespace())
return inst, nil
}
// InferObjectKind infers the object kind
func InferObjectKind(obj runtime.Object) (schema.GroupVersionKind, error) {
gvks, _, err := scheme.Scheme.ObjectKinds(obj)
if err != nil {
return schema.GroupVersionKind{}, err
}
if len(gvks) != 1 {
return schema.GroupVersionKind{}, fmt.Errorf("Object %v has ambigious GVK", obj)
}
return gvks[0], nil
}
// GetDeploymentLastAppliedPodTemplate gets the last applied pod template from the Deployment's annotation
func GetDeploymentLastAppliedPodTemplate(dep *appsv1.Deployment) (*corev1.PodSpec, error) {
applied, ok := dep.Annotations[LastAppliedPodTemplate]
if !ok {
return nil, fmt.Errorf("deployment:[%s/%s] not found spec's apply config", dep.GetNamespace(), dep.GetName())
}
podSpec := &corev1.PodSpec{}
err := json.Unmarshal([]byte(applied), podSpec)
if err != nil {
return nil, err
}
return podSpec, nil
}
// DeploymentPodSpecChanged checks whether the new deployment's pod spec differs from the old one's last-applied-config
func DeploymentPodSpecChanged(newDep *appsv1.Deployment, oldDep *appsv1.Deployment) bool {
lastAppliedPodTemplate, err := GetDeploymentLastAppliedPodTemplate(oldDep)
if err != nil {
klog.Warningf("error get last-applied-config of deployment %s/%s: %v", oldDep.Namespace, oldDep.Name, err)
return true
}
return !apiequality.Semantic.DeepEqual(newDep.Spec.Template.Spec, lastAppliedPodTemplate)
}
// ServiceEqual compares the new Service's spec with the old Service's last applied config
func ServiceEqual(newSvc, oldSvc *corev1.Service) (bool, error) {
oldSpec := corev1.ServiceSpec{}
if lastAppliedConfig, ok := oldSvc.Annotations[LastAppliedConfigAnnotation]; ok {
err := json.Unmarshal([]byte(lastAppliedConfig), &oldSpec)
if err != nil {
klog.Errorf("unmarshal ServiceSpec: [%s/%s]'s applied config failed,error: %v", oldSvc.GetNamespace(), oldSvc.GetName(), err)
return false, err
}
return apiequality.Semantic.DeepEqual(oldSpec, newSvc.Spec), nil
}
return false, nil
}
// IngressEqual compares the new Ingress's spec with the old Ingress's last applied config
func IngressEqual(newIngress, oldIngress *extensionsv1beta1.Ingress) (bool, error) {
oldIngressSpec := extensionsv1beta1.IngressSpec{}
if lastAppliedConfig, ok := oldIngress.Annotations[LastAppliedConfigAnnotation]; ok {
err := json.Unmarshal([]byte(lastAppliedConfig), &oldIngressSpec)
if err != nil {
klog.Errorf("failed to unmarshal IngressSpec from [%s/%s]'s last applied config: %v", oldIngress.GetNamespace(), oldIngress.GetName(), err)
return false, err
}
return apiequality.Semantic.DeepEqual(oldIngressSpec, newIngress.Spec), nil
}
return false, nil
}
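// Illustrative usage sketch (not part of the original source): how a reconciler
// might use the CreateOrUpdate* helpers above. The method name and ConfigMap
// contents below are hypothetical.
func (r *SeaweedReconciler) exampleEnsureConfigMap(namespace, name string) error {
cm := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Data: map[string]string{
"filer.toml": "[leveldb2]\nenabled = true\n",
},
}
// Creates the ConfigMap if it does not exist; otherwise merges labels,
// annotations and Data into the existing object and updates it.
_, err := r.CreateOrUpdateConfigMap(cm)
return err
}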


@ -0,0 +1,102 @@
package controllers
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
appsv1 "k8s.io/api/apps/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
label "github.com/seaweedfs/seaweedfs-operator/controllers/label"
)
func (r *SeaweedReconciler) ensureVolumeServers(seaweedCR *seaweedv1.Seaweed) (done bool, result ctrl.Result, err error) {
_ = context.Background()
_ = r.Log.WithValues("seaweed", seaweedCR.Name)
if done, result, err = r.ensureVolumeServerPeerService(seaweedCR); done {
return
}
if done, result, err = r.ensureVolumeServerServices(seaweedCR); done {
return
}
if done, result, err = r.ensureVolumeServerStatefulSet(seaweedCR); done {
return
}
return
}
func (r *SeaweedReconciler) ensureVolumeServerStatefulSet(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-volume-statefulset", seaweedCR.Name)
volumeServerStatefulSet := r.createVolumeServerStatefulSet(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, volumeServerStatefulSet, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdate(volumeServerStatefulSet, func(existing, desired runtime.Object) error {
existingStatefulSet := existing.(*appsv1.StatefulSet)
desiredStatefulSet := desired.(*appsv1.StatefulSet)
existingStatefulSet.Spec.Replicas = desiredStatefulSet.Spec.Replicas
existingStatefulSet.Spec.Template.Spec = desiredStatefulSet.Spec.Template.Spec
return nil
})
log.Info("ensure volume stateful set " + volumeServerStatefulSet.Name)
return ReconcileResult(err)
}
func (r *SeaweedReconciler) ensureVolumeServerPeerService(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-volume-peer-service", seaweedCR.Name)
volumeServerPeerService := r.createVolumeServerPeerService(seaweedCR)
if err := controllerutil.SetControllerReference(seaweedCR, volumeServerPeerService, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdateService(volumeServerPeerService)
log.Info("ensure volume peer service " + volumeServerPeerService.Name)
return ReconcileResult(err)
}
func (r *SeaweedReconciler) ensureVolumeServerServices(seaweedCR *seaweedv1.Seaweed) (bool, ctrl.Result, error) {
for i := 0; i < int(seaweedCR.Spec.Volume.Replicas); i++ {
done, result, err := r.ensureVolumeServerService(seaweedCR, i)
if done {
return done, result, err
}
}
return ReconcileResult(nil)
}
func (r *SeaweedReconciler) ensureVolumeServerService(seaweedCR *seaweedv1.Seaweed, i int) (bool, ctrl.Result, error) {
log := r.Log.WithValues("sw-volume-service", seaweedCR.Name, "index", i)
volumeServerService := r.createVolumeServerService(seaweedCR, i)
if err := controllerutil.SetControllerReference(seaweedCR, volumeServerService, r.Scheme); err != nil {
return ReconcileResult(err)
}
_, err := r.CreateOrUpdateService(volumeServerService)
log.Info("ensure volume service "+volumeServerService.Name, "index", i)
return ReconcileResult(err)
}
func labelsForVolumeServer(name string) map[string]string {
return map[string]string{
label.ManagedByLabelKey: "seaweedfs-operator",
label.NameLabelKey: "seaweedfs",
label.ComponentLabelKey: "volume",
label.InstanceLabelKey: name,
}
}
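// Illustrative sketch (not part of the original source): for a Seaweed CR named
// "my-cluster", labelsForVolumeServer produces roughly:
//
//   app.kubernetes.io/managed-by: seaweedfs-operator
//   app.kubernetes.io/name:       seaweedfs
//   app.kubernetes.io/component:  volume
//   app.kubernetes.io/instance:   my-cluster
//
// The same label set is used as the volume StatefulSet's pod selector and as the
// selector of the volume Services created by this reconciler.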


@ -0,0 +1,100 @@
package controllers
import (
"fmt"
"github.com/seaweedfs/seaweedfs-operator/controllers/label"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func (r *SeaweedReconciler) createVolumeServerPeerService(m *seaweedv1.Seaweed) *corev1.Service {
labels := labelsForVolumeServer(m.Name)
dep := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-volume-peer",
Namespace: m.Namespace,
Labels: labels,
Annotations: map[string]string{
"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
},
},
Spec: corev1.ServiceSpec{
ClusterIP: "None",
PublishNotReadyAddresses: true,
Ports: []corev1.ServicePort{
{
Name: "volume-http",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.VolumeHTTPPort,
TargetPort: intstr.FromInt(seaweedv1.VolumeHTTPPort),
},
{
Name: "volume-grpc",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.VolumeGRPCPort,
TargetPort: intstr.FromInt(seaweedv1.VolumeGRPCPort),
},
},
Selector: labels,
},
}
return dep
}
func (r *SeaweedReconciler) createVolumeServerService(m *seaweedv1.Seaweed, i int) *corev1.Service {
labels := labelsForVolumeServer(m.Name)
serviceName := fmt.Sprintf("%s-volume-%d", m.Name, i)
labels[label.PodName] = serviceName
dep := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Namespace: m.Namespace,
Labels: labels,
Annotations: map[string]string{
"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
},
},
Spec: corev1.ServiceSpec{
PublishNotReadyAddresses: true,
Ports: []corev1.ServicePort{
{
Name: "volume-http",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.VolumeHTTPPort,
TargetPort: intstr.FromInt(seaweedv1.VolumeHTTPPort),
},
{
Name: "volume-grpc",
Protocol: corev1.Protocol("TCP"),
Port: seaweedv1.VolumeGRPCPort,
TargetPort: intstr.FromInt(seaweedv1.VolumeGRPCPort),
},
},
Selector: labels,
},
}
if m.Spec.Volume.Service != nil {
svcSpec := m.Spec.Volume.Service
dep.Annotations = copyAnnotations(svcSpec.Annotations)
if svcSpec.Type != "" {
dep.Spec.Type = svcSpec.Type
}
if svcSpec.ClusterIP != nil {
dep.Spec.ClusterIP = *svcSpec.ClusterIP
}
if svcSpec.LoadBalancerIP != nil {
dep.Spec.LoadBalancerIP = *svcSpec.LoadBalancerIP
}
}
return dep
}


@ -0,0 +1,159 @@
package controllers
import (
"fmt"
"strings"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
func buildVolumeServerStartupScript(m *seaweedv1.Seaweed, dirs []string) string {
commands := []string{"weed", "-logtostderr=true", "volume"}
commands = append(commands, fmt.Sprintf("-port=%d", seaweedv1.VolumeHTTPPort))
commands = append(commands, "-max=0")
commands = append(commands, fmt.Sprintf("-ip=$(POD_NAME).%s-volume-peer.%s", m.Name, m.Namespace))
if m.Spec.HostSuffix != nil && *m.Spec.HostSuffix != "" {
commands = append(commands, fmt.Sprintf("-publicUrl=$(POD_NAME).%s", *m.Spec.HostSuffix))
}
commands = append(commands, fmt.Sprintf("-mserver=%s", getMasterPeersString(m)))
commands = append(commands, fmt.Sprintf("-dir=%s", strings.Join(dirs, ",")))
return strings.Join(commands, " ")
}
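// Illustrative sketch (not part of the original source): for a hypothetical Seaweed
// CR named "seaweed1" in namespace "default" with one master replica and a single
// data directory, buildVolumeServerStartupScript would produce roughly:
//
//   weed -logtostderr=true volume -port=<seaweedv1.VolumeHTTPPort> -max=0 \
//     -ip=$(POD_NAME).seaweed1-volume-peer.default \
//     -mserver=seaweed1-master-0.seaweed1-master-peer.default:9333 \
//     -dir=/data0
//
// $(POD_NAME) is resolved at runtime from the POD_NAME environment variable
// injected via kubernetesEnvVars.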
func (r *SeaweedReconciler) createVolumeServerStatefulSet(m *seaweedv1.Seaweed) *appsv1.StatefulSet {
labels := labelsForVolumeServer(m.Name)
replicas := int32(m.Spec.Volume.Replicas)
rollingUpdatePartition := int32(0)
enableServiceLinks := false
volumeCount := int(m.Spec.VolumeServerDiskCount)
volumeRequests := corev1.ResourceList{
corev1.ResourceStorage: m.Spec.Volume.Requests[corev1.ResourceStorage],
}
// connect all the disks
var volumeMounts []corev1.VolumeMount
var volumes []corev1.Volume
var persistentVolumeClaims []corev1.PersistentVolumeClaim
var dirs []string
for i := 0; i < volumeCount; i++ {
volumeMounts = append(volumeMounts, corev1.VolumeMount{
Name: fmt.Sprintf("mount%d", i),
ReadOnly: false,
MountPath: fmt.Sprintf("/data%d/", i),
})
volumes = append(volumes, corev1.Volume{
Name: fmt.Sprintf("mount%d", i),
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: fmt.Sprintf("mount%d", i),
ReadOnly: false,
},
},
})
persistentVolumeClaims = append(persistentVolumeClaims, corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("mount%d", i),
},
Spec: corev1.PersistentVolumeClaimSpec{
StorageClassName: m.Spec.Volume.StorageClassName,
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadWriteOnce,
},
Resources: corev1.ResourceRequirements{
Requests: volumeRequests,
},
},
})
dirs = append(dirs, fmt.Sprintf("/data%d", i))
}
volumePodSpec := m.BaseVolumeSpec().BuildPodSpec()
volumePodSpec.EnableServiceLinks = &enableServiceLinks
volumePodSpec.Containers = []corev1.Container{{
Name: "volume",
Image: m.Spec.Image,
ImagePullPolicy: m.BaseVolumeSpec().ImagePullPolicy(),
Env: append(m.BaseVolumeSpec().Env(), kubernetesEnvVars...),
Command: []string{
"/bin/sh",
"-ec",
buildVolumeServerStartupScript(m, dirs),
},
Ports: []corev1.ContainerPort{
{
ContainerPort: seaweedv1.VolumeHTTPPort,
Name: "volume-http",
},
{
ContainerPort: seaweedv1.VolumeGRPCPort,
Name: "volume-grpc",
},
},
ReadinessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/status",
Port: intstr.FromInt(seaweedv1.VolumeHTTPPort),
Scheme: corev1.URISchemeHTTP,
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 5,
PeriodSeconds: 90,
SuccessThreshold: 1,
FailureThreshold: 100,
},
LivenessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/status",
Port: intstr.FromInt(seaweedv1.VolumeHTTPPort),
Scheme: corev1.URISchemeHTTP,
},
},
InitialDelaySeconds: 20,
TimeoutSeconds: 5,
PeriodSeconds: 90,
SuccessThreshold: 1,
FailureThreshold: 6,
},
VolumeMounts: volumeMounts,
}}
volumePodSpec.Volumes = volumes
dep := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name + "-volume",
Namespace: m.Namespace,
},
Spec: appsv1.StatefulSetSpec{
ServiceName: m.Name + "-volume-peer",
PodManagementPolicy: appsv1.ParallelPodManagement,
Replicas: &replicas,
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
RollingUpdate: &appsv1.RollingUpdateStatefulSetStrategy{
Partition: &rollingUpdatePartition,
},
},
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: volumePodSpec,
},
VolumeClaimTemplates: persistentVolumeClaims,
},
}
return dep
}

controllers/helper.go Normal file

@ -0,0 +1,73 @@
package controllers
import (
"fmt"
"strings"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
corev1 "k8s.io/api/core/v1"
ctrl "sigs.k8s.io/controller-runtime"
)
const (
masterPeerAddressPattern = "%s-master-%d.%s-master-peer.%s:9333"
)
var (
kubernetesEnvVars = []corev1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
{
Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
{
Name: "NAMESPACE",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
}
)
// ReconcileResult wraps an error into the (done, result, err) triple returned by the ensure* helpers
func ReconcileResult(err error) (bool, ctrl.Result, error) {
if err != nil {
return true, ctrl.Result{}, err
}
return false, ctrl.Result{}, nil
}
func getMasterAddresses(namespace string, name string, replicas int32) []string {
peersAddresses := make([]string, 0, replicas)
for i := int32(0); i < replicas; i++ {
peersAddresses = append(peersAddresses, fmt.Sprintf(masterPeerAddressPattern, name, i, name, namespace))
}
return peersAddresses
}
func getMasterPeersString(m *seaweedv1.Seaweed) string {
return strings.Join(getMasterAddresses(m.Namespace, m.Name, m.Spec.Master.Replicas), ",")
}
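// Illustrative sketch (not part of the original source): with name "seaweed1",
// namespace "default" and 3 master replicas, getMasterAddresses returns:
//
//   seaweed1-master-0.seaweed1-master-peer.default:9333
//   seaweed1-master-1.seaweed1-master-peer.default:9333
//   seaweed1-master-2.seaweed1-master-peer.default:9333
//
// and getMasterPeersString joins them with commas, e.g. for the volume server's
// -mserver flag.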
func copyAnnotations(src map[string]string) map[string]string {
if src == nil {
return nil
}
dst := map[string]string{}
for k, v := range src {
dst[k] = v
}
return dst
}


@ -0,0 +1,22 @@
package label
const (
// The following labels are recommended by kubernetes https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/
// ManagedByLabelKey is Kubernetes recommended label key, it represents the tool being used to manage the operation of an application
// For resources managed by SeaweedFS Operator, its value is always seaweedfs-operator
ManagedByLabelKey string = "app.kubernetes.io/managed-by"
// ComponentLabelKey is Kubernetes recommended label key, it represents the component within the architecture
ComponentLabelKey string = "app.kubernetes.io/component"
// NameLabelKey is Kubernetes recommended label key, it represents the name of the application
NameLabelKey string = "app.kubernetes.io/name"
// InstanceLabelKey is Kubernetes recommended label key, it represents a unique name identifying the instance of an application
// It's set by helm when installing a release
InstanceLabelKey string = "app.kubernetes.io/instance"
// VersionLabelKey is Kubernetes recommended label key, it represents the version of the app
VersionLabelKey string = "app.kubernetes.io/version"
// PodName is to select pod by name
// https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-selector
PodName string = "statefulset.kubernetes.io/pod-name"
)


@ -1,181 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"context"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"reflect"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
// MasterReconciler reconciles a Master object
type MasterReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
}
// +kubebuilder:rbac:groups=seaweed.seaweedfs.com,resources=masters,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=seaweed.seaweedfs.com,resources=masters/status,verbs=get;update;patch
func (r *MasterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ctx := context.Background()
log := r.Log.WithValues("master", req.NamespacedName)
// Fetch the Master instance
master := &seaweedv1.Master{}
err := r.Get(ctx, req.NamespacedName, master)
if err != nil {
if errors.IsNotFound(err) {
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
log.Info("Master resource not found. Ignoring since object must be deleted")
return ctrl.Result{}, nil
}
// Error reading the object - requeue the request.
log.Error(err, "Failed to get Master")
return ctrl.Result{}, err
}
// Check if the deployment already exists, if not create a new one
found := &appsv1.Deployment{}
err = r.Get(ctx, types.NamespacedName{Name: master.Name, Namespace: master.Namespace}, found)
if err != nil && errors.IsNotFound(err) {
// Define a new deployment
dep := r.deploymentForMaster(master)
log.Info("Creating a new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name)
err = r.Create(ctx, dep)
if err != nil {
log.Error(err, "Failed to create new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name)
return ctrl.Result{}, err
}
// Deployment created successfully - return and requeue
return ctrl.Result{Requeue: true}, nil
} else if err != nil {
log.Error(err, "Failed to get Deployment")
return ctrl.Result{}, err
}
// Ensure the deployment size is the same as the spec
size := master.Spec.Size
if *found.Spec.Replicas != size {
found.Spec.Replicas = &size
err = r.Update(ctx, found)
if err != nil {
log.Error(err, "Failed to update Deployment", "Deployment.Namespace", found.Namespace, "Deployment.Name", found.Name)
return ctrl.Result{}, err
}
// Spec updated - return and requeue
return ctrl.Result{Requeue: true}, nil
}
// Update the Master status with the pod names
// List the pods for this master's deployment
podList := &corev1.PodList{}
listOpts := []client.ListOption{
client.InNamespace(master.Namespace),
client.MatchingLabels(labelsForMaster(master.Name)),
}
if err = r.List(ctx, podList, listOpts...); err != nil {
log.Error(err, "Failed to list pods", "Master.Namespace", master.Namespace, "Master.Name", master.Name)
return ctrl.Result{}, err
}
podNames := getPodNames(podList.Items)
// Update status.Nodes if needed
if !reflect.DeepEqual(podNames, master.Status.Nodes) {
master.Status.Nodes = podNames
err := r.Status().Update(ctx, master)
if err != nil {
log.Error(err, "Failed to update Master status")
return ctrl.Result{}, err
}
}
return ctrl.Result{}, nil
}
// deploymentForMaster returns a master Deployment object
func (r *MasterReconciler) deploymentForMaster(m *seaweedv1.Master) *appsv1.Deployment {
ls := labelsForMaster(m.Name)
replicas := m.Spec.Size
dep := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: m.Name,
Namespace: m.Namespace,
},
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: ls,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: ls,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Image: "Master:1.4.36-alpine",
Name: "Master",
Command: []string{"Master", "-m=64", "-o", "modern", "-v"},
Ports: []corev1.ContainerPort{{
ContainerPort: 11211,
Name: "Master",
}},
}},
},
},
},
}
// Set Master instance as the owner and controller
ctrl.SetControllerReference(m, dep, r.Scheme)
return dep
}
// labelsForMaster returns the labels for selecting the resources
// belonging to the given Master CR name.
func labelsForMaster(name string) map[string]string {
return map[string]string{"app": "Master", "Master_cr": name}
}
// getPodNames returns the pod names of the array of pods passed in
func getPodNames(pods []corev1.Pod) []string {
var podNames []string
for _, pod := range pods {
podNames = append(podNames, pod.Name)
}
return podNames
}
func (r *MasterReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&seaweedv1.Master{}).
Complete(r)
}


@ -0,0 +1,109 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"time"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
// SeaweedReconciler reconciles a Seaweed object
type SeaweedReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
}
// +kubebuilder:rbac:groups=seaweed.seaweedfs.com,resources=seaweeds,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=seaweed.seaweedfs.com,resources=seaweeds/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=extensions,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;
// Reconcile implements the reconciliation logic
func (r *SeaweedReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ctx := context.Background()
log := r.Log.WithValues("seaweed", req.NamespacedName)
log.Info("start Reconcile ...")
seaweedCR, done, result, err := r.findSeaweedCustomResourceInstance(ctx, log, req)
if done {
return result, err
}
if done, result, err = r.ensureMaster(seaweedCR); done {
return result, err
}
if done, result, err = r.ensureVolumeServers(seaweedCR); done {
return result, err
}
if done, result, err = r.ensureFilerServers(seaweedCR); done {
return result, err
}
if done, result, err = r.ensureSeaweedIngress(seaweedCR); done {
return result, err
}
// maintenance is currently disabled
if false {
if done, result, err = r.maintenance(seaweedCR); done {
return result, err
}
}
return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
}
func (r *SeaweedReconciler) findSeaweedCustomResourceInstance(ctx context.Context, log logr.Logger, req ctrl.Request) (*seaweedv1.Seaweed, bool, ctrl.Result, error) {
// fetch the Seaweed CR instance
seaweedCR := &seaweedv1.Seaweed{}
err := r.Get(ctx, req.NamespacedName, seaweedCR)
if err != nil {
if errors.IsNotFound(err) {
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
log.Info("Seaweed CR not found. Ignoring since object must be deleted")
return nil, true, ctrl.Result{RequeueAfter: time.Second * 5}, nil
}
// Error reading the object - requeue the request.
log.Error(err, "Failed to get SeaweedCR")
return nil, true, ctrl.Result{}, err
}
log.Info("Get master " + seaweedCR.Name)
return seaweedCR, false, ctrl.Result{}, nil
}
func (r *SeaweedReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&seaweedv1.Seaweed{}).
Complete(r)
}


@ -0,0 +1,94 @@
package controllers
import (
"context"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
)
var (
TrueValue = true
FalseVallue = false
)
var _ = Describe("Seaweed Controller", func() {
Context("Basic Functionality", func() {
It("Should create StatefulSets", func() {
By("By creating a new Seaweed", func() {
const (
namespace = "default"
name = "test-seaweed"
timeout = time.Second * 30
interval = time.Millisecond * 250
)
ctx := context.Background()
seaweed := &seaweedv1.Seaweed{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Spec: seaweedv1.SeaweedSpec{
Image: "chrislusf/seaweedfs:2.96",
VolumeServerDiskCount: 1,
Master: &seaweedv1.MasterSpec{
Replicas: 3,
ConcurrentStart: &TrueValue,
},
Volume: &seaweedv1.VolumeSpec{
Replicas: 1,
ResourceRequirements: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
Filer: &seaweedv1.FilerSpec{
Replicas: 2,
},
},
}
Expect(k8sClient.Create(ctx, seaweed)).Should(Succeed())
masterKey := types.NamespacedName{Name: name + "-master", Namespace: namespace}
volumeKey := types.NamespacedName{Name: name + "-volume", Namespace: namespace}
filerKey := types.NamespacedName{Name: name + "-filer", Namespace: namespace}
masterSts := &appsv1.StatefulSet{}
volumeSts := &appsv1.StatefulSet{}
filerSts := &appsv1.StatefulSet{}
Eventually(func() bool {
err := k8sClient.Get(ctx, masterKey, masterSts)
return err == nil
}, timeout, interval).Should(BeTrue())
Expect(masterSts.Spec.Replicas).ShouldNot(BeNil())
Expect(*masterSts.Spec.Replicas).Should(Equal(seaweed.Spec.Master.Replicas))
Eventually(func() bool {
err := k8sClient.Get(ctx, volumeKey, volumeSts)
return err == nil
}, timeout, interval).Should(BeTrue())
Expect(volumeSts.Spec.Replicas).ShouldNot(BeNil())
Expect(*volumeSts.Spec.Replicas).Should(Equal(seaweed.Spec.Volume.Replicas))
Eventually(func() bool {
err := k8sClient.Get(ctx, filerKey, filerSts)
return err == nil
}, timeout, interval).Should(BeTrue())
Expect(filerSts.Spec.Replicas).ShouldNot(BeNil())
Expect(*filerSts.Spec.Replicas).Should(Equal(seaweed.Spec.Filer.Replicas))
})
})
})
})


@ -0,0 +1,37 @@
package controllers
import (
"io/ioutil"
"os"
seaweedv1 "github.com/seaweedfs/seaweedfs-operator/api/v1"
"github.com/seaweedfs/seaweedfs-operator/controllers/swadmin"
ctrl "sigs.k8s.io/controller-runtime"
)
func (r *SeaweedReconciler) maintenance(m *seaweedv1.Seaweed) (done bool, result ctrl.Result, err error) {
masters := getMasterPeersString(m)
r.Log.V(0).Info("wait to connect to masters", "masters", masters)
// this step blocks because the operator cannot reach the masters when running outside of the k8s cluster
sa := swadmin.NewSeaweedAdmin(masters, ioutil.Discard)
// For now this is just an example of the admin commands;
// the master already runs some maintenance commands by default.
r.Log.V(0).Info("volume.list")
sa.Output = os.Stdout
if err := sa.ProcessCommand("volume.list"); err != nil {
r.Log.V(0).Info("volume.list", "error", err)
}
sa.ProcessCommand("lock")
if err := sa.ProcessCommand("volume.balance -force"); err != nil {
r.Log.V(0).Info("volume.balance", "error", err)
}
sa.ProcessCommand("unlock")
return ReconcileResult(nil)
}


@ -24,6 +24,7 @@ import (
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
@ -68,6 +69,23 @@ var _ = BeforeSuite(func(done Done) {
// +kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{
Scheme: scheme.Scheme,
})
Expect(err).ToNot(HaveOccurred())
err = (&SeaweedReconciler{
Client: k8sManager.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("Seaweed"),
Scheme: k8sManager.GetScheme(),
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())
go func() {
err = k8sManager.Start(ctrl.SetupSignalHandler())
Expect(err).ToNot(HaveOccurred())
}()
Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil())


@ -0,0 +1,67 @@
package swadmin
import (
"fmt"
"io"
"regexp"
"strings"
"github.com/chrislusf/seaweedfs/weed/shell"
"google.golang.org/grpc"
)
type SeaweedAdmin struct {
commandReg *regexp.Regexp
commandEnv *shell.CommandEnv
Output io.Writer
}
func NewSeaweedAdmin(masters string, output io.Writer) *SeaweedAdmin {
var shellOptions shell.ShellOptions
shellOptions.GrpcDialOption = grpc.WithInsecure()
shellOptions.Masters = &masters
commandEnv := shell.NewCommandEnv(shellOptions)
reg, _ := regexp.Compile(`'.*?'|".*?"|\S+`)
go commandEnv.MasterClient.LoopConnectToMaster()
return &SeaweedAdmin{
commandEnv: commandEnv,
commandReg: reg,
Output: output,
}
}
// ProcessCommands runs cmds, which may be multiple semicolon-separated commands
func (sa *SeaweedAdmin) ProcessCommands(cmds string) error {
for _, c := range strings.Split(cmds, ";") {
if err := sa.ProcessCommand(c); err != nil {
return err
}
}
return nil
}
func (sa *SeaweedAdmin) ProcessCommand(cmd string) error {
sa.commandEnv.MasterClient.WaitUntilConnected()
cmds := sa.commandReg.FindAllString(cmd, -1)
if len(cmds) == 0 {
return nil
}
args := make([]string, len(cmds[1:]))
for i := range args {
args[i] = strings.Trim(string(cmds[1+i]), "\"'")
}
for _, c := range shell.Commands {
if c.Name() == cmds[0] || c.Name() == "fs."+cmds[0] {
return c.Do(args, sa.commandEnv, sa.Output)
}
}
return fmt.Errorf("unknown command: %v", cmd)
}
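// Illustrative usage sketch (not part of the original source): the master address
// and command sequence below are hypothetical.
func exampleAdminSession(out io.Writer) error {
sa := NewSeaweedAdmin("seaweed1-master-0.seaweed1-master-peer.default:9333", out)
// ProcessCommands splits on semicolons and runs each command in order.
return sa.ProcessCommands("lock; volume.list; unlock")
}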

go.mod

@ -1,12 +1,101 @@
module github.com/seaweedfs/seaweedfs-operator
go 1.13
go 1.17
require (
github.com/chrislusf/seaweedfs v0.0.0-20211103083639-3c245c69d369
github.com/go-logr/logr v0.1.0
github.com/onsi/ginkgo v1.11.0
github.com/onsi/gomega v1.8.1
github.com/onsi/ginkgo v1.14.2
github.com/onsi/gomega v1.10.4
google.golang.org/grpc v1.40.0
k8s.io/api v0.18.2
k8s.io/apimachinery v0.18.2
k8s.io/client-go v0.18.2
k8s.io/klog v1.0.0
sigs.k8s.io/controller-runtime v0.6.0
)
require (
cloud.google.com/go v0.94.1 // indirect
github.com/aws/aws-sdk-go v1.35.3 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/disintegration/imaging v1.6.2 // indirect
github.com/evanphx/json-patch v4.5.0+incompatible // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/go-errors/errors v1.1.1 // indirect
github.com/go-logr/zapr v0.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt v3.2.1+incompatible // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.0.0 // indirect
github.com/google/go-cmp v0.5.6 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/gax-go/v2 v2.1.0 // indirect
github.com/googleapis/gnostic v0.3.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 // indirect
github.com/hashicorp/golang-lru v0.5.1 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/json-iterator/go v1.1.11 // indirect
github.com/karlseguin/ccache/v2 v2.0.7 // indirect
github.com/klauspost/cpuid v1.2.1 // indirect
github.com/klauspost/reedsolomon v1.9.2 // indirect
github.com/magiconair/properties v1.8.1 // indirect
github.com/mattn/go-runewidth v0.0.7 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.1.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/nxadm/tail v1.4.4 // indirect
github.com/pelletier/go-toml v1.7.0 // indirect
github.com/peterh/liner v1.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.11.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.26.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/seaweedfs/goexif v1.0.2 // indirect
github.com/spf13/afero v1.6.0 // indirect
github.com/spf13/cast v1.3.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.4.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 // indirect
github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/viant/ptrie v0.3.0 // indirect
github.com/viant/toolbox v0.33.2 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.7.0 // indirect
go.uber.org/zap v1.17.0 // indirect
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f // indirect
golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365 // indirect
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
golang.org/x/text v0.3.6 // indirect
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
gomodules.xyz/jsonpatch/v2 v2.0.1 // indirect
google.golang.org/api v0.57.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/apiextensions-apiserver v0.18.2 // indirect
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c // indirect
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 // indirect
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
)

go.sum
File diff suppressed because it is too large.

hack/verify-codegen.sh Executable file

@ -0,0 +1,32 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd)
DIFFROOT="${ROOT}/api"
TMP_DIFFROOT="${ROOT}/_tmp/api"
_tmp="${ROOT}/_tmp"
cleanup() {
rm -rf "${_tmp}"
}
trap "cleanup" EXIT SIGINT
cleanup
mkdir -p "${TMP_DIFFROOT}"
cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}"
make generate
echo "diffing ${DIFFROOT} against freshly generated codegen"
ret=0
diff -Naupr "${DIFFROOT}" "${TMP_DIFFROOT}" || ret=$?
cp -a "${TMP_DIFFROOT}"/* "${DIFFROOT}"
if [[ $ret -eq 0 ]]; then
echo "${DIFFROOT} up to date."
else
echo "${DIFFROOT} is out of date. Please run make generate"
exit 1
fi

hack/verify-manifests.sh Executable file

@ -0,0 +1,32 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd)
DIFFROOT="${ROOT}/config"
TMP_DIFFROOT="${ROOT}/_tmp/config"
_tmp="${ROOT}/_tmp"
cleanup() {
rm -rf "${_tmp}"
}
trap "cleanup" EXIT SIGINT
cleanup
mkdir -p "${TMP_DIFFROOT}"
cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}"
make manifests
echo "diffing ${DIFFROOT} against freshly generated manifests"
ret=0
diff -Naupr "${DIFFROOT}" "${TMP_DIFFROOT}" || ret=$?
cp -a "${TMP_DIFFROOT}"/* "${DIFFROOT}"
if [[ $ret -eq 0 ]]; then
echo "${DIFFROOT} up to date."
else
echo "${DIFFROOT} is out of date. Please run make manifests"
exit 1
fi

main.go

@ -67,14 +67,21 @@ func main() {
os.Exit(1)
}
if err = (&controllers.MasterReconciler{
if err = (&controllers.SeaweedReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("Master"),
Log: ctrl.Log.WithName("controllers").WithName("Seaweed"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Master")
setupLog.Error(err, "unable to create controller", "controller", "Seaweed")
os.Exit(1)
}
if os.Getenv("ENABLE_WEBHOOKS") != "false" {
if err = (&seaweedv1.Seaweed{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "Seaweed")
os.Exit(1)
}
}
// +kubebuilder:scaffold:builder
setupLog.Info("starting manager")


@ -4,6 +4,7 @@
<component name="NewModuleRootManager" inherit-compiler-output="true">
<exclude-output />
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>