mirror of
https://github.com/rustfs/rustfs.git
synced 2026-01-17 17:40:38 +00:00
Compare commits
1188 Commits
1.0.0-alph
...
dev_object
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9948b1f709 | ||
|
|
4d67c1d0a6 | ||
|
|
ff264f3385 | ||
|
|
ff4da5cd47 | ||
|
|
4cc915fb45 | ||
|
|
751abeca2b | ||
|
|
5ab2ce3cfe | ||
|
|
4d26fa48d7 | ||
|
|
5c65368729 | ||
|
|
c16ce7b61b | ||
|
|
25388ba70c | ||
|
|
9cc34f9f01 | ||
|
|
02ad3d3832 | ||
|
|
1c5ba761ef | ||
|
|
7fec5b250a | ||
|
|
c861635332 | ||
|
|
f4d0a81e06 | ||
|
|
b6a5094382 | ||
|
|
9865b5aa3e | ||
|
|
5850c3e8a3 | ||
|
|
b52198d7f2 | ||
|
|
58c3a46138 | ||
|
|
bd4e7c23bb | ||
|
|
66c2a2fd93 | ||
|
|
cdd41752e0 | ||
|
|
bf407715d0 | ||
|
|
5e3c97b0e9 | ||
|
|
2f20fe9749 | ||
|
|
5e13fc3be2 | ||
|
|
1d45123968 | ||
|
|
03af4369bd | ||
|
|
88b2d893dc | ||
|
|
756e70cc93 | ||
|
|
d2a0c9fd62 | ||
|
|
d6de724517 | ||
|
|
8dfb3643ec | ||
|
|
d2857467e0 | ||
|
|
8887b7ea90 | ||
|
|
18ca25fbfd | ||
|
|
f3c1116935 | ||
|
|
2fd5ef75cf | ||
|
|
c673fa0e3d | ||
|
|
707406e4c5 | ||
|
|
8a7a6599c8 | ||
|
|
881afbfae6 | ||
|
|
e050fffcae | ||
|
|
41e378fc28 | ||
|
|
751be4bdf6 | ||
|
|
659869e8ad | ||
|
|
52da2569dd | ||
|
|
c9ff699030 | ||
|
|
3a32517f79 | ||
|
|
d5539bf22f | ||
|
|
1993a7036f | ||
|
|
0782f05388 | ||
|
|
2f697d0635 | ||
|
|
ad2c1a15b7 | ||
|
|
5ef7571211 | ||
|
|
b0174122c8 | ||
|
|
7f70ab3cd6 | ||
|
|
fefa8cb2c8 | ||
|
|
ae4c23e316 | ||
|
|
fffc79e981 | ||
|
|
4cd2c8e99c | ||
|
|
a8bd8809b0 | ||
|
|
6f7d279fce | ||
|
|
bcbd72d849 | ||
|
|
b73e4ac7e8 | ||
|
|
ea8dfa43a4 | ||
|
|
b50e88a5a0 | ||
|
|
b0447bb692 | ||
|
|
9ec22255e0 | ||
|
|
249a46bc8e | ||
|
|
920551ca92 | ||
|
|
f056b3fb44 | ||
|
|
b675e01707 | ||
|
|
1c93a5e4e0 | ||
|
|
d65c58d9de | ||
|
|
28a8977308 | ||
|
|
35cf7db9ed | ||
|
|
e1513aa00e | ||
|
|
670636b3aa | ||
|
|
2b62560f48 | ||
|
|
01a340d596 | ||
|
|
3a6e3d49b3 | ||
|
|
86a99d214c | ||
|
|
7fe325f47e | ||
|
|
671263e22c | ||
|
|
e4e923b4b2 | ||
|
|
87482e82f4 | ||
|
|
902993a133 | ||
|
|
609e55a5a2 | ||
|
|
1e3287f610 | ||
|
|
27c6030f09 | ||
|
|
1e00db816c | ||
|
|
366fd98aeb | ||
|
|
f1ef7149e3 | ||
|
|
ab88166990 | ||
|
|
34dadee8a6 | ||
|
|
021dc36f2f | ||
|
|
09b24d5d41 | ||
|
|
947ced1d92 | ||
|
|
8f01696dbe | ||
|
|
bfb4c6dfd4 | ||
|
|
992b0c2cb6 | ||
|
|
01220f88d9 | ||
|
|
42e020c0bb | ||
|
|
4e5d6ff772 | ||
|
|
ea4a225d70 | ||
|
|
d4bd8b66a9 | ||
|
|
defdcf528f | ||
|
|
69cb6ead25 | ||
|
|
0403c23492 | ||
|
|
413d581d7c | ||
|
|
1777994de7 | ||
|
|
e8efd8ef79 | ||
|
|
1380598a0c | ||
|
|
142281e96a | ||
|
|
af6bb3ae5b | ||
|
|
9734d4cfa5 | ||
|
|
9ad152c14f | ||
|
|
b40b6a17f4 | ||
|
|
eae1ccc629 | ||
|
|
295b502e2a | ||
|
|
a3214222af | ||
|
|
257e858017 | ||
|
|
9c90426032 | ||
|
|
5c7b105b4a | ||
|
|
a032c401d1 | ||
|
|
a608e0dd65 | ||
|
|
2ef98ee43a | ||
|
|
8d4e68fe5f | ||
|
|
4e5345d01f | ||
|
|
8a09a81a69 | ||
|
|
62d994c103 | ||
|
|
41373960bc | ||
|
|
bf543f9628 | ||
|
|
e24d74b1f0 | ||
|
|
f81bef28df | ||
|
|
7730e6cd3a | ||
|
|
a18f549c1f | ||
|
|
3592ffb791 | ||
|
|
ad5bb38e2b | ||
|
|
6434728aea | ||
|
|
df7e690c47 | ||
|
|
65be8145ff | ||
|
|
4ac4b35c5e | ||
|
|
ab54ff49eb | ||
|
|
a9de8e0d53 | ||
|
|
cebe84a896 | ||
|
|
96de156763 | ||
|
|
93f1b5dbf1 | ||
|
|
ff908f19e7 | ||
|
|
9b85384305 | ||
|
|
ca0bfcfc7e | ||
|
|
70a3c49b61 | ||
|
|
c43876ee46 | ||
|
|
3bd25b63c8 | ||
|
|
797d7218ee | ||
|
|
21289797c2 | ||
|
|
ff6d2fe84e | ||
|
|
f220b04074 | ||
|
|
e0c02fa5bc | ||
|
|
1a8574c96f | ||
|
|
f2c9464eb0 | ||
|
|
22d085a5c9 | ||
|
|
d0bc3dec23 | ||
|
|
0a0fb41037 | ||
|
|
845f503739 | ||
|
|
143017f925 | ||
|
|
38377f81ed | ||
|
|
f80595e048 | ||
|
|
d5ba9cdf28 | ||
|
|
a39f622402 | ||
|
|
26d4726181 | ||
|
|
0b4c050b21 | ||
|
|
cf56a0650e | ||
|
|
31adf1486d | ||
|
|
be5bdc0b20 | ||
|
|
bf1bb0823b | ||
|
|
45249ccf28 | ||
|
|
022304d046 | ||
|
|
3b2b4f08fc | ||
|
|
e9a31279bd | ||
|
|
6f50b69c5f | ||
|
|
e0bfedae75 | ||
|
|
c1355481cb | ||
|
|
7c119d6c92 | ||
|
|
eeabaa71fb | ||
|
|
2ec135c9c0 | ||
|
|
21bff0075f | ||
|
|
2a86608ee9 | ||
|
|
84ae72f71d | ||
|
|
6be4eb0322 | ||
|
|
df5e3dad27 | ||
|
|
f31d9c3f97 | ||
|
|
301a7a20c5 | ||
|
|
fc67fbfadb | ||
|
|
89709184c2 | ||
|
|
2949b7d203 | ||
|
|
4e68d72de9 | ||
|
|
02cef27230 | ||
|
|
ba5cbcb3f1 | ||
|
|
922330ded2 | ||
|
|
87c63193ae | ||
|
|
835b7fbac5 | ||
|
|
fe456be075 | ||
|
|
b492342e55 | ||
|
|
b80f64d9b4 | ||
|
|
dbbcd2a21b | ||
|
|
fc9d433038 | ||
|
|
d852f62757 | ||
|
|
b0f14e69f6 | ||
|
|
6584bf9607 | ||
|
|
84f9989752 | ||
|
|
a01897211e | ||
|
|
88159507e3 | ||
|
|
91eb469a7d | ||
|
|
e251ffe85d | ||
|
|
91cfc335ba | ||
|
|
3985330da3 | ||
|
|
d8f9161ae8 | ||
|
|
aef13a77a3 | ||
|
|
44a514246b | ||
|
|
5364779766 | ||
|
|
7eefafaad5 | ||
|
|
6e17a91919 | ||
|
|
6f2b4b34b9 | ||
|
|
dbb5625199 | ||
|
|
89785dc06b | ||
|
|
46c3134487 | ||
|
|
b1a6da8d73 | ||
|
|
dde551ef94 | ||
|
|
65f036442a | ||
|
|
2167f5e728 | ||
|
|
9fc284bef3 | ||
|
|
b57b92e382 | ||
|
|
0d28c1cbf8 | ||
|
|
e3af3d1b94 | ||
|
|
7411283fc8 | ||
|
|
c331365ebf | ||
|
|
0e9d1d63d3 | ||
|
|
4e9e63dff6 | ||
|
|
ad528935ad | ||
|
|
e634ffdd23 | ||
|
|
165660e106 | ||
|
|
14425be416 | ||
|
|
47afb1a651 | ||
|
|
95d59d7206 | ||
|
|
e2b7a9772c | ||
|
|
5ccb08e73f | ||
|
|
778ca76607 | ||
|
|
c78338df9f | ||
|
|
adf401e09a | ||
|
|
bb6324b4e5 | ||
|
|
9b6170e94f | ||
|
|
8d865eb048 | ||
|
|
8fefb8f7cb | ||
|
|
443fbeee38 | ||
|
|
31addf01dd | ||
|
|
2a6554a3c6 | ||
|
|
691a0bed5d | ||
|
|
5088f51236 | ||
|
|
aeb1beab17 | ||
|
|
d4b4ef1108 | ||
|
|
16bbe517b4 | ||
|
|
f1add9dc58 | ||
|
|
2363a3706f | ||
|
|
08e8c7258c | ||
|
|
5df2bdb063 | ||
|
|
115d9ea780 | ||
|
|
70b65343ab | ||
|
|
77c7146677 | ||
|
|
980ecb8bd3 | ||
|
|
0408f29301 | ||
|
|
1d7a97197a | ||
|
|
5152be44f0 | ||
|
|
a36730220e | ||
|
|
54069d7cc3 | ||
|
|
abf3c5a5b5 | ||
|
|
c2dd6e2a6a | ||
|
|
0cd10a9352 | ||
|
|
3cc4eb55b3 | ||
|
|
c1f781256f | ||
|
|
4fa63bec31 | ||
|
|
56ce9d2776 | ||
|
|
c16bd2fec1 | ||
|
|
e25d444cdf | ||
|
|
58b08ddbd1 | ||
|
|
015eeb0c9f | ||
|
|
5e36f7f107 | ||
|
|
662563c216 | ||
|
|
35065bc65a | ||
|
|
b9cf1765f8 | ||
|
|
15c815cae6 | ||
|
|
85368b38d3 | ||
|
|
61ea8fc988 | ||
|
|
928ccb1c80 | ||
|
|
48ec993b60 | ||
|
|
983f4f0316 | ||
|
|
b03d17bc97 | ||
|
|
cba5eb2cc3 | ||
|
|
b221458d4d | ||
|
|
de5fdb9299 | ||
|
|
fe7abc2611 | ||
|
|
344fc6fb91 | ||
|
|
5a34318947 | ||
|
|
6c363c8149 | ||
|
|
76948cae65 | ||
|
|
689d3ae03e | ||
|
|
8f0e239f3d | ||
|
|
a4a5170c1c | ||
|
|
0f87cb1e72 | ||
|
|
b7a7dcead8 | ||
|
|
1cfd459b83 | ||
|
|
8d3d11b07e | ||
|
|
e3a81df926 | ||
|
|
3cc2f25450 | ||
|
|
fd7e3cabca | ||
|
|
79be7370ed | ||
|
|
375012bd17 | ||
|
|
7c186a84c6 | ||
|
|
59a66bdbd6 | ||
|
|
4e05fab8e0 | ||
|
|
39950a4ca2 | ||
|
|
a770600fa7 | ||
|
|
b39b8838cc | ||
|
|
aca64b8c36 | ||
|
|
f50c320fda | ||
|
|
00de1d2c89 | ||
|
|
1b5a181c1c | ||
|
|
074c66fa00 | ||
|
|
a4aa197a37 | ||
|
|
25e70692ec | ||
|
|
ec2dd25f92 | ||
|
|
bded37e2be | ||
|
|
188875933e | ||
|
|
24559537ab | ||
|
|
7f4f40f3ba | ||
|
|
9c5abf6831 | ||
|
|
59c64d4458 | ||
|
|
c7394e1147 | ||
|
|
ed340c1345 | ||
|
|
2e4f444847 | ||
|
|
a4fda6d21e | ||
|
|
c615d35cd0 | ||
|
|
3a404c347c | ||
|
|
456a56cd80 | ||
|
|
587c7d41e9 | ||
|
|
6616a4862f | ||
|
|
f70b654c35 | ||
|
|
a0261b4c82 | ||
|
|
4e6f1e787b | ||
|
|
173bacdb23 | ||
|
|
27e5cafa22 | ||
|
|
1408913afe | ||
|
|
15f5962dc0 | ||
|
|
7ebbb91553 | ||
|
|
6d6a8a2d6e | ||
|
|
f4f764218d | ||
|
|
a7305de3ad | ||
|
|
38411c675d | ||
|
|
b3d43ce795 | ||
|
|
bcfcb97c8d | ||
|
|
8790e1e4db | ||
|
|
a2284af3ea | ||
|
|
b145dfd437 | ||
|
|
4d423b6510 | ||
|
|
cd238f9b5a | ||
|
|
9a6178beee | ||
|
|
5a636d3254 | ||
|
|
5a5e1f6bd1 | ||
|
|
811a9e3964 | ||
|
|
cfff691a58 | ||
|
|
b64e78246d | ||
|
|
8b7c33814f | ||
|
|
6321dd5c72 | ||
|
|
ff67a4382d | ||
|
|
bbee212b38 | ||
|
|
2f94beff93 | ||
|
|
9c062f18b6 | ||
|
|
49b68327bb | ||
|
|
8fb44cd89e | ||
|
|
07ffb83356 | ||
|
|
87790721c5 | ||
|
|
900722e800 | ||
|
|
c5d6bc20f8 | ||
|
|
6176f0b00e | ||
|
|
4dc2551e1d | ||
|
|
de4aec519b | ||
|
|
bcb9db7c59 | ||
|
|
93dcfc0cdd | ||
|
|
2f25c74b32 | ||
|
|
cebf5699a4 | ||
|
|
3d4760181d | ||
|
|
1ead472da2 | ||
|
|
4dee6d5c76 | ||
|
|
f0bf2bed74 | ||
|
|
4032e5459e | ||
|
|
1ad3a76c3c | ||
|
|
47b9d45ff8 | ||
|
|
f6b9964dcd | ||
|
|
1453a0fc5a | ||
|
|
6d344d2aff | ||
|
|
cc4a8a8e51 | ||
|
|
db1d0cf3df | ||
|
|
35a7bc310b | ||
|
|
8983db39ae | ||
|
|
ae06d87f10 | ||
|
|
d724c6132d | ||
|
|
283b2981e3 | ||
|
|
3b2df514a7 | ||
|
|
9b0f498add | ||
|
|
eedeb188f2 | ||
|
|
6727e15055 | ||
|
|
e758ef4022 | ||
|
|
4e09e0a11a | ||
|
|
693b06eefd | ||
|
|
2e4a63c4a0 | ||
|
|
1d58a07f29 | ||
|
|
53d4cb81de | ||
|
|
aaf39a4301 | ||
|
|
f50406c789 | ||
|
|
6356b3409a | ||
|
|
ec268dcb5a | ||
|
|
2ae714be31 | ||
|
|
666a3db95d | ||
|
|
895317ec80 | ||
|
|
47c4e2ac73 | ||
|
|
8413ac6be3 | ||
|
|
8aaa916b31 | ||
|
|
6a8a653f9d | ||
|
|
9866f959c1 | ||
|
|
c9f6da9732 | ||
|
|
381f5f0a53 | ||
|
|
1bed82420e | ||
|
|
f965662514 | ||
|
|
da6ec62be5 | ||
|
|
b73439bce1 | ||
|
|
23be31e1f2 | ||
|
|
eaa8ce4bf7 | ||
|
|
c0658a8223 | ||
|
|
373d97ce97 | ||
|
|
2568582271 | ||
|
|
2d285ca504 | ||
|
|
ea0393f4c0 | ||
|
|
ff18894d5d | ||
|
|
b4caff13cf | ||
|
|
c70994ee9d | ||
|
|
75fd8da685 | ||
|
|
54ca354d7e | ||
|
|
218e06a3fe | ||
|
|
a95c9b32d8 | ||
|
|
48f2fdc45d | ||
|
|
ace5857959 | ||
|
|
1022bbec1e | ||
|
|
9c19e1aa41 | ||
|
|
c338ea3c86 | ||
|
|
8cb4d72a7b | ||
|
|
2b1dcb77cc | ||
|
|
f64d6751a6 | ||
|
|
e25cd9b5f6 | ||
|
|
40f6185b24 | ||
|
|
6ee30b8bfc | ||
|
|
8284d7d92c | ||
|
|
67ae916968 | ||
|
|
990db0636a | ||
|
|
9ba6f89dc5 | ||
|
|
20e7dc919f | ||
|
|
79351da42a | ||
|
|
cb9ecd890c | ||
|
|
ca4a936c6e | ||
|
|
d7417d841f | ||
|
|
e65bee049a | ||
|
|
87d6083807 | ||
|
|
d8198cc01f | ||
|
|
1505429c5f | ||
|
|
24fe747b69 | ||
|
|
4c0fbd238b | ||
|
|
df4b285e46 | ||
|
|
1744d8f23a | ||
|
|
3695bce742 | ||
|
|
0295229e7e | ||
|
|
0122343075 | ||
|
|
475d758645 | ||
|
|
0dd21e9075 | ||
|
|
4a0bc24aa6 | ||
|
|
3d5cfab7c5 | ||
|
|
dddfb8cbc2 | ||
|
|
ec9b1262cb | ||
|
|
c844d16c2e | ||
|
|
b3890cd07d | ||
|
|
5aa8616268 | ||
|
|
784e3c82f8 | ||
|
|
984faa2e85 | ||
|
|
b562bd20bb | ||
|
|
ad6c7ca623 | ||
|
|
9e57981114 | ||
|
|
41e7dfea55 | ||
|
|
f2767239af | ||
|
|
bf5c2b6a9c | ||
|
|
7e7ab2cab3 | ||
|
|
66910bdfee | ||
|
|
c9f9edfcf5 | ||
|
|
cb9457ae09 | ||
|
|
3aee1598a9 | ||
|
|
a9dbd5f341 | ||
|
|
0ff795f67a | ||
|
|
93f7370157 | ||
|
|
cb4732fe2f | ||
|
|
e65b7975fb | ||
|
|
c0243ce329 | ||
|
|
12a1e667e2 | ||
|
|
6ec7748677 | ||
|
|
671f2b7473 | ||
|
|
1968bdc219 | ||
|
|
59c28b4f86 | ||
|
|
672f06eb80 | ||
|
|
b9712afd29 | ||
|
|
0531437c94 | ||
|
|
bb3ea541a2 | ||
|
|
e4e2fa23ce | ||
|
|
682f3009ba | ||
|
|
9bfd259b03 | ||
|
|
3266af774c | ||
|
|
36f8101c21 | ||
|
|
dd83f870b9 | ||
|
|
2c3a9a3bc8 | ||
|
|
10a4769115 | ||
|
|
dc292ca657 | ||
|
|
7fc3a500d3 | ||
|
|
ccb0f15655 | ||
|
|
426f4cf569 | ||
|
|
2585a09219 | ||
|
|
83edaef6d3 | ||
|
|
1551360dda | ||
|
|
977cef9d29 | ||
|
|
f7b63ebac1 | ||
|
|
c90a98e427 | ||
|
|
2cd2f99723 | ||
|
|
b5a1f6fcd6 | ||
|
|
b99a9b68dc | ||
|
|
3ec7f90d95 | ||
|
|
86b4cae95d | ||
|
|
c3dd28c510 | ||
|
|
bec2b2a5e0 | ||
|
|
d01d3f9c96 | ||
|
|
057b6b63b3 | ||
|
|
b522e15328 | ||
|
|
86247e59a0 | ||
|
|
7e0926febf | ||
|
|
141ad15a48 | ||
|
|
39483d90d1 | ||
|
|
c0f0823076 | ||
|
|
ebee564436 | ||
|
|
11d99845ce | ||
|
|
f0fe3e30af | ||
|
|
21713ebea7 | ||
|
|
f4f818277e | ||
|
|
3316a6e073 | ||
|
|
6246e8a675 | ||
|
|
861a714014 | ||
|
|
343aeda8ba | ||
|
|
53c1184c1f | ||
|
|
43b52b32f2 | ||
|
|
d1521c021d | ||
|
|
bd7f82ce45 | ||
|
|
3ad460c6e9 | ||
|
|
175a636755 | ||
|
|
9e482e9dd4 | ||
|
|
ce4842afd7 | ||
|
|
ffa49aecea | ||
|
|
a19c75f1ee | ||
|
|
55bf4d9597 | ||
|
|
f0149c3e04 | ||
|
|
2e1742c1f4 | ||
|
|
a0e956e61f | ||
|
|
907c606bbd | ||
|
|
baa8b62dc3 | ||
|
|
c5b8a20092 | ||
|
|
5afc578951 | ||
|
|
92a28b1639 | ||
|
|
5f6ee3f9f2 | ||
|
|
8033cc7d72 | ||
|
|
3138f23648 | ||
|
|
fffe24bd2c | ||
|
|
bc05217322 | ||
|
|
0dbc66af7a | ||
|
|
130a719465 | ||
|
|
bdf11fe3b3 | ||
|
|
1e8bea42c7 | ||
|
|
acb6f72ee7 | ||
|
|
74220acb2e | ||
|
|
a15bda1bce | ||
|
|
f8f6d7d9be | ||
|
|
0135ddcb36 | ||
|
|
cd306cd450 | ||
|
|
c4823c4c89 | ||
|
|
cf99ca2cf8 | ||
|
|
9c0364b0c3 | ||
|
|
39ffccbf45 | ||
|
|
dc42003969 | ||
|
|
55583294c9 | ||
|
|
73c3f202d0 | ||
|
|
b0b01c105f | ||
|
|
ba0b1c93d2 | ||
|
|
e5ba415ec5 | ||
|
|
74434317d3 | ||
|
|
558d077ef6 | ||
|
|
354fa5470c | ||
|
|
dcac6fae70 | ||
|
|
e9b00bb369 | ||
|
|
623869ce25 | ||
|
|
052a27ddaf | ||
|
|
7133e492b6 | ||
|
|
e7b94e9698 | ||
|
|
6af228d82b | ||
|
|
705d51c818 | ||
|
|
469d2cc321 | ||
|
|
056b611125 | ||
|
|
55827b0720 | ||
|
|
a8b8ea10d1 | ||
|
|
8f98bb6897 | ||
|
|
3c76ee9109 | ||
|
|
93f06e431e | ||
|
|
1e04c65f76 | ||
|
|
b4696f8c60 | ||
|
|
cced40b5cb | ||
|
|
fb24c4550b | ||
|
|
11fee7db63 | ||
|
|
c28dc2f441 | ||
|
|
fefd96a5d3 | ||
|
|
e7042f4429 | ||
|
|
ad51ab6273 | ||
|
|
2b05500ae3 | ||
|
|
5dafee2c0f | ||
|
|
7370973a79 | ||
|
|
e1703d287b | ||
|
|
8a0a25f0bf | ||
|
|
b350323440 | ||
|
|
420df51e47 | ||
|
|
7f74ae6e79 | ||
|
|
2e04dee223 | ||
|
|
20e434be5a | ||
|
|
caec2767ab | ||
|
|
73b003b0bc | ||
|
|
352416e309 | ||
|
|
47d3d4f845 | ||
|
|
b08878c75a | ||
|
|
1b99041e5f | ||
|
|
8eb6a6fc8a | ||
|
|
96e3b5c9e0 | ||
|
|
8f8d353eb2 | ||
|
|
86497d41e0 | ||
|
|
4129700cf3 | ||
|
|
5d1f9d5cd9 | ||
|
|
8465f10648 | ||
|
|
8b2450cdcf | ||
|
|
baf03dffd4 | ||
|
|
807be52e8f | ||
|
|
bd42ff1393 | ||
|
|
b167a7b384 | ||
|
|
9f2b3a27fa | ||
|
|
865f0aca80 | ||
|
|
c3546a1931 | ||
|
|
b0e65f71d4 | ||
|
|
f763218864 | ||
|
|
9c4fc0c55c | ||
|
|
893fa3131b | ||
|
|
0a49a986a4 | ||
|
|
0930ec2d3d | ||
|
|
e20f556f37 | ||
|
|
f78b0d82e9 | ||
|
|
e4a58abd7b | ||
|
|
ff28415863 | ||
|
|
975d15a8ef | ||
|
|
653d2753d4 | ||
|
|
d301765263 | ||
|
|
a03f9a0f3e | ||
|
|
a80f26ec36 | ||
|
|
c6088d97d3 | ||
|
|
e9339a16b1 | ||
|
|
4de6c838b4 | ||
|
|
3cf262afbf | ||
|
|
36f9592a10 | ||
|
|
cc57d2a073 | ||
|
|
9d7baa855e | ||
|
|
01d7bbe0bd | ||
|
|
f470b380b8 | ||
|
|
e33956d584 | ||
|
|
821af69bbe | ||
|
|
d5cac7acbf | ||
|
|
185a24ff12 | ||
|
|
f6a9df54d2 | ||
|
|
28b273c38e | ||
|
|
cdedd1b8bf | ||
|
|
abd875983f | ||
|
|
30c2f1d1f8 | ||
|
|
a65c2dbd8f | ||
|
|
72eb8d0d73 | ||
|
|
90b884fe64 | ||
|
|
29e9683c35 | ||
|
|
870ab67be4 | ||
|
|
ecc73efe2b | ||
|
|
345d4d2f95 | ||
|
|
97fd6ca57b | ||
|
|
e021b911d9 | ||
|
|
6bb9737237 | ||
|
|
f0594cec77 | ||
|
|
eef8271d07 | ||
|
|
5dab98ec49 | ||
|
|
b3b9f128e2 | ||
|
|
3a0efcdbc7 | ||
|
|
13a7220af9 | ||
|
|
83d3a7a8b1 | ||
|
|
de315e4b7e | ||
|
|
d053662cf6 | ||
|
|
390da76f68 | ||
|
|
04d480012c | ||
|
|
b6ebb66966 | ||
|
|
602b721c0e | ||
|
|
33f0221c9e | ||
|
|
51b47fc389 | ||
|
|
5e6d636f82 | ||
|
|
24a8be7848 | ||
|
|
7b2f775723 | ||
|
|
5a5990f201 | ||
|
|
572e912c22 | ||
|
|
087b01a243 | ||
|
|
15d9665ac2 | ||
|
|
cc694a4c2b | ||
|
|
2071df6f58 | ||
|
|
6dc0d4e301 | ||
|
|
fad9582eac | ||
|
|
52edbfc6dc | ||
|
|
e9999befa9 | ||
|
|
b622ff69e2 | ||
|
|
3e8189f092 | ||
|
|
99918b8c62 | ||
|
|
ac8600451f | ||
|
|
bf8fac7809 | ||
|
|
1ebd16e2ce | ||
|
|
210558b353 | ||
|
|
f73e85d7ec | ||
|
|
de16e0b35d | ||
|
|
fc8d2a3a3e | ||
|
|
7862430a18 | ||
|
|
1dd0860bef | ||
|
|
ea3b182e2f | ||
|
|
68799ad044 | ||
|
|
1f2cb78b9d | ||
|
|
a127fafc26 | ||
|
|
44f673317f | ||
|
|
a93f03df54 | ||
|
|
e8e74052c2 | ||
|
|
06a129de07 | ||
|
|
5931d1bbfd | ||
|
|
dc95a7bd9c | ||
|
|
708c4d7251 | ||
|
|
c1c44ba070 | ||
|
|
2c3e1facc1 | ||
|
|
472883ab03 | ||
|
|
f8958f9335 | ||
|
|
e508b6b38e | ||
|
|
e931b5a6ad | ||
|
|
de25c7af4b | ||
|
|
b53bb46bdf | ||
|
|
d0a06d0b92 | ||
|
|
9f498ecce1 | ||
|
|
e9bfa182b1 | ||
|
|
13c0353dae | ||
|
|
4241a702b6 | ||
|
|
34a05c9bde | ||
|
|
eb300839ca | ||
|
|
4aaeac88aa | ||
|
|
fac9640263 | ||
|
|
e1b02b270a | ||
|
|
d41c9c90a9 | ||
|
|
fc5a3715c7 | ||
|
|
269aeac18c | ||
|
|
bc76582c96 | ||
|
|
bb6a6a3172 | ||
|
|
192898995d | ||
|
|
b5c824ebbd | ||
|
|
1b966ecaf6 | ||
|
|
15e5333b09 | ||
|
|
51d3b8dfea | ||
|
|
883e6b17d2 | ||
|
|
c849fac54e | ||
|
|
61c1521fcb | ||
|
|
6c53ee503c | ||
|
|
46b1f80611 | ||
|
|
1201649474 | ||
|
|
9d7f8ee80a | ||
|
|
870ac2f371 | ||
|
|
4ff41d9416 | ||
|
|
52bb25cf2a | ||
|
|
2acfecfafc | ||
|
|
52aabcbdfc | ||
|
|
76cb6e7806 | ||
|
|
e5e6ac6064 | ||
|
|
a65cf09830 | ||
|
|
c77a0d1ef9 | ||
|
|
5cf1bcef28 | ||
|
|
4dd65ee9b2 | ||
|
|
967e7b65b1 | ||
|
|
bb84439dc6 | ||
|
|
aa66366ea2 | ||
|
|
4994dd026c | ||
|
|
0d43debd32 | ||
|
|
18821d3feb | ||
|
|
a65ad217a3 | ||
|
|
1b29a6ba3a | ||
|
|
77fdec48a9 | ||
|
|
b21846cf76 | ||
|
|
e4599113fa | ||
|
|
394667637c | ||
|
|
5a99a9a78a | ||
|
|
21c3c36d7c | ||
|
|
cea15032c9 | ||
|
|
e73a055bc3 | ||
|
|
ce93715287 | ||
|
|
09d3a0edc3 | ||
|
|
805d6c9de2 | ||
|
|
7f5ffab148 | ||
|
|
a551cbcedd | ||
|
|
d27b74a9ab | ||
|
|
a69ad3bbdb | ||
|
|
8d57a375ac | ||
|
|
84324e4340 | ||
|
|
3f58472ea7 | ||
|
|
a383d271e5 | ||
|
|
cc70b9766f | ||
|
|
c92a45a386 | ||
|
|
abda820c0d | ||
|
|
5320b8f260 | ||
|
|
dd2bf7085a | ||
|
|
f70365534a | ||
|
|
b744a29df6 | ||
|
|
77905adc56 | ||
|
|
192200a2cf | ||
|
|
5cd7441052 | ||
|
|
81bba2a7a0 | ||
|
|
447884250e | ||
|
|
0bf5e1be22 | ||
|
|
d7004cf045 | ||
|
|
c57db2454c | ||
|
|
9a0916aca0 | ||
|
|
b8d66ffe92 | ||
|
|
5838df0cde | ||
|
|
37bd73591f | ||
|
|
0dc8ce65b3 | ||
|
|
f977545374 | ||
|
|
719e1091b6 | ||
|
|
b3f951a5d7 | ||
|
|
c07817f0ec | ||
|
|
ef6262867f | ||
|
|
88572a28f6 | ||
|
|
965b7b7fa8 | ||
|
|
f825ef4d85 | ||
|
|
c943779387 | ||
|
|
c11e6314a3 | ||
|
|
f86d2127d1 | ||
|
|
b18c3e60ea | ||
|
|
271c709be6 | ||
|
|
173bd3f679 | ||
|
|
66cac921b9 | ||
|
|
3a1d0cffaf | ||
|
|
6a6e4c5dd4 | ||
|
|
5b5082fd1a | ||
|
|
4e5297e12b | ||
|
|
a29399fde4 | ||
|
|
6b1217d862 | ||
|
|
bfd7d80b9f | ||
|
|
3cb233f2de | ||
|
|
6e3a8bc8aa | ||
|
|
a6a582ec08 | ||
|
|
6d08904602 | ||
|
|
8aa9fbf47b | ||
|
|
7ebe59502a | ||
|
|
2b0d2a1ad0 | ||
|
|
139d107bad | ||
|
|
87534580d6 | ||
|
|
1619024818 | ||
|
|
cd22fda858 | ||
|
|
ee873711b7 | ||
|
|
a519a2f9a2 | ||
|
|
a834b5e926 | ||
|
|
5b133a8e53 | ||
|
|
251e1a06c5 | ||
|
|
7d59544b61 | ||
|
|
ba83b04da4 | ||
|
|
1cf1354abf | ||
|
|
9ecb045131 | ||
|
|
2fe9d75cb5 | ||
|
|
641497a88c | ||
|
|
2fcc4f0db1 | ||
|
|
96948e8b8d | ||
|
|
17cdf17473 | ||
|
|
766d33ec94 | ||
|
|
1484b696c5 | ||
|
|
3053cd9238 | ||
|
|
18dd8c8ee1 | ||
|
|
0d6b43e02e | ||
|
|
d5c02a28c2 | ||
|
|
77eab865a0 | ||
|
|
308e16afa0 | ||
|
|
39a4d6bc5b | ||
|
|
823da21de1 | ||
|
|
fc7c584719 | ||
|
|
11ccd098ed | ||
|
|
1ea83b8748 | ||
|
|
bd0c404396 | ||
|
|
f117dd453d | ||
|
|
26a6a004c1 | ||
|
|
8707518433 | ||
|
|
fdd12fc6b6 | ||
|
|
04989f3554 | ||
|
|
c4ed189e28 | ||
|
|
2cb425c9fc | ||
|
|
029964d886 | ||
|
|
526effa743 | ||
|
|
5dca424132 | ||
|
|
c76a633de3 | ||
|
|
b6db68b064 | ||
|
|
0b1cfb3b43 | ||
|
|
4c14710596 | ||
|
|
1006cd9b56 | ||
|
|
d55e369a7f | ||
|
|
3edcac4d3c | ||
|
|
70ff8e22f2 | ||
|
|
b7824075a3 | ||
|
|
96644d3430 | ||
|
|
6c8b88eb7e | ||
|
|
33aa966937 | ||
|
|
80776c2442 | ||
|
|
c9a52da723 | ||
|
|
a79b788b27 | ||
|
|
31981f473b | ||
|
|
7194bbe18f | ||
|
|
c600d85e7a | ||
|
|
de7d80f1a7 | ||
|
|
8d287f9106 | ||
|
|
24b90ae6f8 | ||
|
|
65d4ae8e65 | ||
|
|
f76cde683d | ||
|
|
8970fc912f | ||
|
|
144da0a212 | ||
|
|
ac96a99b9f | ||
|
|
4b3a5e9bcb | ||
|
|
8a8d33b437 | ||
|
|
e0eb3860fa | ||
|
|
67aeadff42 | ||
|
|
5140ce43d1 | ||
|
|
fe3cb81209 | ||
|
|
0c7aaeeee5 | ||
|
|
9afaada3fb | ||
|
|
a697ffe442 | ||
|
|
d720064919 | ||
|
|
b09e3ed8c7 | ||
|
|
556b650133 | ||
|
|
8da69d6ad1 | ||
|
|
47182c0544 | ||
|
|
5dcd1e9aca | ||
|
|
e60933819a | ||
|
|
18dd4db147 | ||
|
|
a601be3af0 | ||
|
|
ac60c9984c | ||
|
|
0c3f6b8ffa | ||
|
|
6b4b662d2c | ||
|
|
facd77dbf8 | ||
|
|
8b6aa1dedd | ||
|
|
11a87bab8c | ||
|
|
fa92edff2f | ||
|
|
3686afafc0 | ||
|
|
86f50f7262 | ||
|
|
c3d0e621a7 | ||
|
|
4e1d1bdc2f | ||
|
|
fc018cb5bb | ||
|
|
a2b661965a | ||
|
|
ab9bd0cc9b | ||
|
|
7e3aee4a1a | ||
|
|
2a0d7a047d | ||
|
|
59b38976fa | ||
|
|
a1bba52b49 | ||
|
|
f0d1488cf9 | ||
|
|
cc79d5590a | ||
|
|
3caeb73bb8 | ||
|
|
6b226847aa | ||
|
|
9d30d8c8d8 | ||
|
|
635cf9d568 | ||
|
|
ee517f09ec | ||
|
|
124d3f6a4f | ||
|
|
d6bd189a5b | ||
|
|
05746b346a | ||
|
|
c0ff822fc6 | ||
|
|
38846b36a0 | ||
|
|
28c357a6ad | ||
|
|
68c02f88ef | ||
|
|
01705a2e0d | ||
|
|
3a1f814ef2 | ||
|
|
2a3bc4c5ea | ||
|
|
2acc3e19ef | ||
|
|
6db989620a | ||
|
|
6111c43a65 | ||
|
|
bec61d8ad6 | ||
|
|
599491f578 | ||
|
|
dd18d57a9b | ||
|
|
b5daa5c51f | ||
|
|
dec3b45d70 | ||
|
|
88c1e76300 | ||
|
|
852cbd8b65 | ||
|
|
ccde55c4e3 | ||
|
|
6651dcd873 | ||
|
|
953f31662b | ||
|
|
97ad2285b9 | ||
|
|
3f1d99273f | ||
|
|
b1df4aa615 | ||
|
|
a006cad19e | ||
|
|
cc86719898 | ||
|
|
480ef9005a | ||
|
|
128fe42d98 | ||
|
|
51fe557b2f | ||
|
|
e42a3315ae | ||
|
|
6b41e8e7bc | ||
|
|
0d1686412e | ||
|
|
04635ae321 | ||
|
|
300d006e6b | ||
|
|
ca85999d8f | ||
|
|
7b9467a972 | ||
|
|
9aa2c7bedd | ||
|
|
51e2333890 | ||
|
|
472df9dbb9 | ||
|
|
819e417d17 | ||
|
|
a430771e03 | ||
|
|
bb83f4bef3 | ||
|
|
4058d66644 | ||
|
|
c73dbd31d2 | ||
|
|
41152cbd56 | ||
|
|
15ef8aabf4 | ||
|
|
fd85572b87 | ||
|
|
8fbff623e2 | ||
|
|
13bf39004e | ||
|
|
403c50b233 | ||
|
|
1849e8ef6a | ||
|
|
d1989c6990 | ||
|
|
40ab19b380 | ||
|
|
fb69061f99 | ||
|
|
6c630cb33a | ||
|
|
3dfa4d83b9 | ||
|
|
85915a2f11 | ||
|
|
8acc1127e5 | ||
|
|
e5b75ca056 | ||
|
|
c3cee07c78 | ||
|
|
48e2c86d5e | ||
|
|
bc79ea515d | ||
|
|
94f98c35b8 | ||
|
|
b15d01c59d | ||
|
|
32592289dc | ||
|
|
dfdfded371 | ||
|
|
e0e31ba5e1 | ||
|
|
4f1306fe5d | ||
|
|
56d05e3dca | ||
|
|
67f6726c25 | ||
|
|
4519c01fd0 | ||
|
|
4db7d77381 | ||
|
|
0417e3035d | ||
|
|
33c4249013 | ||
|
|
6c2071eeb9 | ||
|
|
ad9dad49e3 | ||
|
|
0b8b70a990 | ||
|
|
8d1dca3b8e | ||
|
|
c071a19d54 | ||
|
|
4a0419520f | ||
|
|
ed6ba83768 | ||
|
|
5986bc3b39 | ||
|
|
12cd756920 | ||
|
|
08c238957d | ||
|
|
d08fc72052 | ||
|
|
d2c09c2182 | ||
|
|
c063b19418 | ||
|
|
ca19c4849e | ||
|
|
cc22a9f7ef | ||
|
|
b9e907b401 | ||
|
|
4324c3e006 | ||
|
|
b630dfb229 | ||
|
|
527ef74b6d | ||
|
|
47d1b545d5 | ||
|
|
6c8a41ece1 | ||
|
|
0dcd704c55 | ||
|
|
d1d38cb2e2 | ||
|
|
906c27a9ba | ||
|
|
acaad6c58c | ||
|
|
83e0e6d8ab | ||
|
|
c9dfef95fb | ||
|
|
62fe12fed7 | ||
|
|
106fc7f063 | ||
|
|
7e4b782aed | ||
|
|
59866b8867 | ||
|
|
ea04ce8545 | ||
|
|
df6cbb4660 | ||
|
|
9ab742168c | ||
|
|
f51cabc3d9 | ||
|
|
c8fdbcf548 | ||
|
|
9ac0565a19 | ||
|
|
30cd97004f | ||
|
|
4f189b6216 | ||
|
|
28e7498e9a | ||
|
|
af59922112 | ||
|
|
327fb687ca | ||
|
|
44a7d8d315 | ||
|
|
3224a39ed8 | ||
|
|
c1a3442cd5 | ||
|
|
ad85245743 | ||
|
|
df82aa5ef1 | ||
|
|
16767cc36d | ||
|
|
c494167bee | ||
|
|
a8ee7d45d5 | ||
|
|
536519328c | ||
|
|
1e902f4688 | ||
|
|
4afd8872d0 | ||
|
|
d0d82adc85 | ||
|
|
4b5dbbf4c3 | ||
|
|
d7264dc7d2 | ||
|
|
90ac002c42 | ||
|
|
495a6ffa05 | ||
|
|
0876e8845c | ||
|
|
872017243f | ||
|
|
4a514f4ab3 | ||
|
|
80688be059 | ||
|
|
16f7368514 | ||
|
|
3f2d94687f | ||
|
|
ad9a20337b | ||
|
|
79a0992773 | ||
|
|
55c6b1b498 | ||
|
|
31e13a966a | ||
|
|
ca915a5f6d | ||
|
|
9778c91ac7 | ||
|
|
24544a30a1 | ||
|
|
dc041da402 | ||
|
|
8aa966a31a | ||
|
|
ea70c607eb | ||
|
|
0f9b8df1b8 | ||
|
|
c9e2319c3a | ||
|
|
e85b469df7 | ||
|
|
de59cd2deb | ||
|
|
91db1fc3dc | ||
|
|
9d7e2b439c | ||
|
|
85ffa447f3 | ||
|
|
9880a0e039 | ||
|
|
b14826f05c | ||
|
|
36fa900817 | ||
|
|
50d61d96d9 | ||
|
|
64c37260b7 | ||
|
|
0b8c698392 | ||
|
|
65bde5555c | ||
|
|
5794017cdd | ||
|
|
a9239be2e1 | ||
|
|
fdfa68be2e | ||
|
|
2649b0edb5 | ||
|
|
ed1e3676b1 | ||
|
|
1215ed152b | ||
|
|
938ad4ec44 | ||
|
|
21bceecccb | ||
|
|
e1f265c940 | ||
|
|
dbf0d46a08 | ||
|
|
7eca5ea9dc | ||
|
|
0206b84b63 | ||
|
|
15675927ad | ||
|
|
4e2d5c7107 | ||
|
|
5d3166168e | ||
|
|
570ada9050 | ||
|
|
617bd38a01 | ||
|
|
43e5e7dd55 | ||
|
|
de9f69dd54 | ||
|
|
241c80880c | ||
|
|
2e5c3b2d09 | ||
|
|
85daac92b5 | ||
|
|
aeb81908c7 | ||
|
|
2c1d0bf792 | ||
|
|
d9c8871da0 | ||
|
|
0a5c9c8376 | ||
|
|
d68f7c7b72 | ||
|
|
f7b0bfa76a | ||
|
|
df7e31380b | ||
|
|
a2df4128ed | ||
|
|
069c5377be | ||
|
|
9161b8fffe |
579
.cursorrules
Normal file
579
.cursorrules
Normal file
@@ -0,0 +1,579 @@
|
||||
# RustFS Project Cursor Rules
|
||||
|
||||
## ⚠️ CRITICAL DEVELOPMENT RULES ⚠️
|
||||
|
||||
### 🚨 NEVER COMMIT DIRECTLY TO MASTER/MAIN BRANCH 🚨
|
||||
- **This is the most important rule - NEVER modify code directly on main or master branch**
|
||||
- **Always work on feature branches and use pull requests for all changes**
|
||||
- **Any direct commits to master/main branch are strictly forbidden**
|
||||
- Before starting any development, always:
|
||||
1. `git checkout main` (switch to main branch)
|
||||
2. `git pull` (get latest changes)
|
||||
3. `git checkout -b feat/your-feature-name` (create and switch to feature branch)
|
||||
4. Make your changes on the feature branch
|
||||
5. Commit and push to the feature branch
|
||||
6. Create a pull request for review
|
||||
|
||||
## Project Overview
|
||||
RustFS is a high-performance distributed object storage system written in Rust, compatible with S3 API. The project adopts a modular architecture, supporting erasure coding storage, multi-tenant management, observability, and other enterprise-level features.
|
||||
|
||||
## Core Architecture Principles
|
||||
|
||||
### 1. Modular Design
|
||||
- Project uses Cargo workspace structure, containing multiple independent crates
|
||||
- Core modules: `rustfs` (main service), `ecstore` (erasure coding storage), `common` (shared components)
|
||||
- Functional modules: `iam` (identity management), `madmin` (management interface), `crypto` (encryption), etc.
|
||||
- Tool modules: `cli` (command line tool), `crates/*` (utility libraries)
|
||||
|
||||
### 2. Asynchronous Programming Pattern
|
||||
- Comprehensive use of `tokio` async runtime
|
||||
- Prioritize `async/await` syntax
|
||||
- Use `async-trait` for async methods in traits
|
||||
- Avoid blocking operations, use `spawn_blocking` when necessary
|
||||
|
||||
### 3. Error Handling Strategy
|
||||
- Use unified error type `common::error::Error`
|
||||
- Support error chains and context information
|
||||
- Use `thiserror` to define specific error types
|
||||
- Error conversion uses `downcast_ref` for type checking
|
||||
|
||||
## Code Style Guidelines
|
||||
|
||||
### 1. Formatting Configuration
|
||||
```toml
|
||||
max_width = 130
|
||||
fn_call_width = 90
|
||||
single_line_let_else_max_width = 100
|
||||
```
|
||||
|
||||
### 2. **🔧 MANDATORY Code Formatting Rules**
|
||||
|
||||
**CRITICAL**: All code must be properly formatted before committing. This project enforces strict formatting standards to maintain code consistency and readability.
|
||||
|
||||
#### Pre-commit Requirements (MANDATORY)
|
||||
|
||||
Before every commit, you **MUST**:
|
||||
|
||||
1. **Format your code**:
|
||||
```bash
|
||||
cargo fmt --all
|
||||
```
|
||||
|
||||
2. **Verify formatting**:
|
||||
```bash
|
||||
cargo fmt --all --check
|
||||
```
|
||||
|
||||
3. **Pass clippy checks**:
|
||||
```bash
|
||||
cargo clippy --all-targets --all-features -- -D warnings
|
||||
```
|
||||
|
||||
4. **Ensure compilation**:
|
||||
```bash
|
||||
cargo check --all-targets
|
||||
```
|
||||
|
||||
#### Quick Commands
|
||||
|
||||
Use these convenient Makefile targets for common tasks:
|
||||
|
||||
```bash
|
||||
# Format all code
|
||||
make fmt
|
||||
|
||||
# Check if code is properly formatted
|
||||
make fmt-check
|
||||
|
||||
# Run clippy checks
|
||||
make clippy
|
||||
|
||||
# Run compilation check
|
||||
make check
|
||||
|
||||
# Run tests
|
||||
make test
|
||||
|
||||
# Run all pre-commit checks (format + clippy + check + test)
|
||||
make pre-commit
|
||||
|
||||
# Setup git hooks (one-time setup)
|
||||
make setup-hooks
|
||||
```
|
||||
|
||||
#### 🔒 Automated Pre-commit Hooks
|
||||
|
||||
This project includes a pre-commit hook that automatically runs before each commit to ensure:
|
||||
|
||||
- ✅ Code is properly formatted (`cargo fmt --all --check`)
|
||||
- ✅ No clippy warnings (`cargo clippy --all-targets --all-features -- -D warnings`)
|
||||
- ✅ Code compiles successfully (`cargo check --all-targets`)
|
||||
|
||||
**Setting Up Pre-commit Hooks** (MANDATORY for all developers):
|
||||
|
||||
Run this command once after cloning the repository:
|
||||
|
||||
```bash
|
||||
make setup-hooks
|
||||
```
|
||||
|
||||
Or manually:
|
||||
|
||||
```bash
|
||||
chmod +x .git/hooks/pre-commit
|
||||
```
|
||||
|
||||
#### 🚫 Commit Prevention
|
||||
|
||||
If your code doesn't meet the formatting requirements, the pre-commit hook will:
|
||||
|
||||
1. **Block the commit** and show clear error messages
|
||||
2. **Provide exact commands** to fix the issues
|
||||
3. **Guide you through** the resolution process
|
||||
|
||||
Example output when formatting fails:
|
||||
|
||||
```
|
||||
❌ Code formatting check failed!
|
||||
💡 Please run 'cargo fmt --all' to format your code before committing.
|
||||
|
||||
🔧 Quick fix:
|
||||
cargo fmt --all
|
||||
git add .
|
||||
git commit
|
||||
```
|
||||
|
||||
### 3. Naming Conventions
|
||||
- Use `snake_case` for functions, variables, modules
|
||||
- Use `PascalCase` for types, traits, enums
|
||||
- Constants use `SCREAMING_SNAKE_CASE`
|
||||
- Global variables prefix `GLOBAL_`, e.g., `GLOBAL_Endpoints`
|
||||
- Use meaningful and descriptive names for variables, functions, and methods
|
||||
- Avoid meaningless names like `temp`, `data`, `foo`, `bar`, `test123`
|
||||
- Choose names that clearly express the purpose and intent
|
||||
|
||||
### 4. Type Declaration Guidelines
|
||||
- **Prefer type inference over explicit type declarations** when the type is obvious from context
|
||||
- Let the Rust compiler infer types whenever possible to reduce verbosity and improve maintainability
|
||||
- Only specify types explicitly when:
|
||||
- The type cannot be inferred by the compiler
|
||||
- Explicit typing improves code clarity and readability
|
||||
- Required for API boundaries (function signatures, public struct fields)
|
||||
- Needed to resolve ambiguity between multiple possible types
|
||||
|
||||
**Good examples (prefer these):**
|
||||
```rust
|
||||
// Compiler can infer the type
|
||||
let items = vec![1, 2, 3, 4];
|
||||
let config = Config::default();
|
||||
let result = process_data(&input);
|
||||
|
||||
// Iterator chains with clear context
|
||||
let filtered: Vec<_> = items.iter().filter(|&&x| x > 2).collect();
|
||||
```
|
||||
|
||||
**Avoid unnecessary explicit types:**
|
||||
```rust
|
||||
// Unnecessary - type is obvious
|
||||
let items: Vec<i32> = vec![1, 2, 3, 4];
|
||||
let config: Config = Config::default();
|
||||
let result: ProcessResult = process_data(&input);
|
||||
```
|
||||
|
||||
**When explicit types are beneficial:**
|
||||
```rust
|
||||
// API boundaries - always specify types
|
||||
pub fn process_data(input: &[u8]) -> Result<ProcessResult, Error> { ... }
|
||||
|
||||
// Ambiguous cases - explicit type needed
|
||||
let value: f64 = "3.14".parse().unwrap();
|
||||
|
||||
// Complex generic types - explicit for clarity
|
||||
let cache: HashMap<String, Arc<Mutex<CacheEntry>>> = HashMap::new();
|
||||
```
|
||||
|
||||
### 5. Documentation Comments
|
||||
- Public APIs must have documentation comments
|
||||
- Use `///` for documentation comments
|
||||
- Complex functions add `# Examples` and `# Parameters` descriptions
|
||||
- Error cases use `# Errors` descriptions
|
||||
- Always use English for all comments and documentation
|
||||
- Avoid meaningless comments like "debug 111" or placeholder text
|
||||
|
||||
### 6. Import Guidelines
|
||||
- Standard library imports first
|
||||
- Third-party crate imports in the middle
|
||||
- Project internal imports last
|
||||
- Group `use` statements with blank lines between groups
|
||||
|
||||
## Asynchronous Programming Guidelines
|
||||
|
||||
### 1. Trait Definition
|
||||
```rust
|
||||
#[async_trait::async_trait]
|
||||
pub trait StorageAPI: Send + Sync {
|
||||
async fn get_object(&self, bucket: &str, object: &str) -> Result<ObjectInfo>;
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Error Handling
|
||||
```rust
|
||||
// Use ? operator to propagate errors
|
||||
async fn example_function() -> Result<()> {
|
||||
let data = read_file("path").await?;
|
||||
process_data(data).await?;
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Concurrency Control
|
||||
- Use `Arc` and `Mutex`/`RwLock` for shared state management
|
||||
- Prioritize async locks from `tokio::sync`
|
||||
- Avoid holding locks for long periods
|
||||
|
||||
## Logging and Tracing Guidelines
|
||||
|
||||
### 1. Tracing Usage
|
||||
```rust
|
||||
#[tracing::instrument(skip(self, data))]
|
||||
async fn process_data(&self, data: &[u8]) -> Result<()> {
|
||||
info!("Processing {} bytes", data.len());
|
||||
// Implementation logic
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Log Levels
|
||||
- `error!`: System errors requiring immediate attention
|
||||
- `warn!`: Warning information that may affect functionality
|
||||
- `info!`: Important business information
|
||||
- `debug!`: Debug information for development use
|
||||
- `trace!`: Detailed execution paths
|
||||
|
||||
### 3. Structured Logging
|
||||
```rust
|
||||
info!(
|
||||
counter.rustfs_api_requests_total = 1_u64,
|
||||
key_request_method = %request.method(),
|
||||
key_request_uri_path = %request.uri().path(),
|
||||
"API request processed"
|
||||
);
|
||||
```
|
||||
|
||||
## Error Handling Guidelines
|
||||
|
||||
### 1. Error Type Definition
|
||||
```rust
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum MyError {
|
||||
#[error("IO error: {0}")]
|
||||
Io(#[from] std::io::Error),
|
||||
#[error("Custom error: {message}")]
|
||||
Custom { message: String },
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Error Conversion
|
||||
```rust
|
||||
pub fn to_s3_error(err: Error) -> S3Error {
|
||||
if let Some(storage_err) = err.downcast_ref::<StorageError>() {
|
||||
match storage_err {
|
||||
StorageError::ObjectNotFound(bucket, object) => {
|
||||
s3_error!(NoSuchKey, "{}/{}", bucket, object)
|
||||
}
|
||||
// Other error types...
|
||||
}
|
||||
}
|
||||
// Default error handling
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Error Context
|
||||
```rust
|
||||
// Add error context
|
||||
.map_err(|e| Error::from_string(format!("Failed to process {}: {}", path, e)))?
|
||||
```
|
||||
|
||||
## Performance Optimization Guidelines
|
||||
|
||||
### 1. Memory Management
|
||||
- Use `Bytes` instead of `Vec<u8>` for zero-copy operations
|
||||
- Avoid unnecessary cloning, use reference passing
|
||||
- Use `Arc` for sharing large objects
|
||||
|
||||
### 2. Concurrency Optimization
|
||||
```rust
|
||||
// Use join_all for concurrent operations
|
||||
let futures = disks.iter().map(|disk| disk.operation());
|
||||
let results = join_all(futures).await;
|
||||
```
|
||||
|
||||
### 3. Caching Strategy
|
||||
- Use `lazy_static` or `OnceCell` for global caching
|
||||
- Implement LRU cache to avoid memory leaks
|
||||
|
||||
## Testing Guidelines
|
||||
|
||||
### 1. Unit Tests
|
||||
```rust
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use test_case::test_case;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_async_function() {
|
||||
let result = async_function().await;
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[test_case("input1", "expected1")]
|
||||
#[test_case("input2", "expected2")]
|
||||
fn test_with_cases(input: &str, expected: &str) {
|
||||
assert_eq!(function(input), expected);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Integration Tests
|
||||
- Use `e2e_test` module for end-to-end testing
|
||||
- Simulate real storage environments
|
||||
|
||||
### 3. Test Quality Standards
|
||||
- Write meaningful test cases that verify actual functionality
|
||||
- Avoid placeholder or debug content like "debug 111", "test test", etc.
|
||||
- Use descriptive test names that clearly indicate what is being tested
|
||||
- Each test should have a clear purpose and verify specific behavior
|
||||
- Test data should be realistic and representative of actual use cases
|
||||
|
||||
## Cross-Platform Compatibility Guidelines
|
||||
|
||||
### 1. CPU Architecture Compatibility
|
||||
- **Always consider multi-platform and different CPU architecture compatibility** when writing code
|
||||
- Support major architectures: x86_64, aarch64 (ARM64), and other target platforms
|
||||
- Use conditional compilation for architecture-specific code:
|
||||
```rust
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
fn optimized_x86_64_function() { /* x86_64 specific implementation */ }
|
||||
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
fn optimized_aarch64_function() { /* ARM64 specific implementation */ }
|
||||
|
||||
#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
|
||||
fn generic_function() { /* Generic fallback implementation */ }
|
||||
```
|
||||
|
||||
### 2. Platform-Specific Dependencies
|
||||
- Use feature flags for platform-specific dependencies
|
||||
- Provide fallback implementations for unsupported platforms
|
||||
- Test on multiple architectures in CI/CD pipeline
|
||||
|
||||
### 3. Endianness Considerations
|
||||
- Use explicit byte order conversion when dealing with binary data
|
||||
- Prefer `to_le_bytes()`, `from_le_bytes()` for consistent little-endian format
|
||||
- Use `byteorder` crate for complex binary format handling
|
||||
|
||||
### 4. SIMD and Performance Optimizations
|
||||
- Use portable SIMD libraries like `wide` or `packed_simd`
|
||||
- Provide fallback implementations for non-SIMD architectures
|
||||
- Use runtime feature detection when appropriate
|
||||
|
||||
## Security Guidelines
|
||||
|
||||
### 1. Memory Safety
|
||||
- Disable `unsafe` code (workspace.lints.rust.unsafe_code = "deny")
|
||||
- Use `rustls` instead of `openssl`
|
||||
|
||||
### 2. Authentication and Authorization
|
||||
```rust
|
||||
// Use IAM system for permission checks
|
||||
let identity = iam.authenticate(&access_key, &secret_key).await?;
|
||||
iam.authorize(&identity, &action, &resource).await?;
|
||||
```
|
||||
|
||||
## Configuration Management Guidelines
|
||||
|
||||
### 1. Environment Variables
|
||||
- Use `RUSTFS_` prefix
|
||||
- Support both configuration files and environment variables
|
||||
- Provide reasonable default values
|
||||
|
||||
### 2. Configuration Structure
|
||||
```rust
|
||||
#[derive(Debug, Deserialize, Clone)]
|
||||
pub struct Config {
|
||||
pub address: String,
|
||||
pub volumes: String,
|
||||
#[serde(default)]
|
||||
pub console_enable: bool,
|
||||
}
|
||||
```
|
||||
|
||||
## Dependency Management Guidelines
|
||||
|
||||
### 1. Workspace Dependencies
|
||||
- Manage versions uniformly at workspace level
|
||||
- Use `workspace = true` to inherit configuration
|
||||
|
||||
### 2. Feature Flags
|
||||
```rust
|
||||
[features]
|
||||
default = ["file"]
|
||||
gpu = ["dep:nvml-wrapper"]
|
||||
kafka = ["dep:rdkafka"]
|
||||
```
|
||||
|
||||
## Deployment and Operations Guidelines
|
||||
|
||||
### 1. Containerization
|
||||
- Provide Dockerfile and docker-compose configuration
|
||||
- Support multi-stage builds to optimize image size
|
||||
|
||||
### 2. Observability
|
||||
- Integrate OpenTelemetry for distributed tracing
|
||||
- Support Prometheus metrics collection
|
||||
- Provide Grafana dashboards
|
||||
|
||||
### 3. Health Checks
|
||||
```rust
|
||||
// Implement health check endpoint
|
||||
async fn health_check() -> Result<HealthStatus> {
|
||||
// Check component status
|
||||
}
|
||||
```
|
||||
|
||||
## Code Review Checklist
|
||||
|
||||
### 1. **Code Formatting and Quality (MANDATORY)**
|
||||
- [ ] **Code is properly formatted** (`cargo fmt --all --check` passes)
|
||||
- [ ] **All clippy warnings are resolved** (`cargo clippy --all-targets --all-features -- -D warnings` passes)
|
||||
- [ ] **Code compiles successfully** (`cargo check --all-targets` passes)
|
||||
- [ ] **Pre-commit hooks are working** and all checks pass
|
||||
- [ ] **No formatting-related changes** mixed with functional changes (separate commits)
|
||||
|
||||
### 2. Functionality
|
||||
- [ ] Are all error cases properly handled?
|
||||
- [ ] Is there appropriate logging?
|
||||
- [ ] Is there necessary test coverage?
|
||||
|
||||
### 3. Performance
|
||||
- [ ] Are unnecessary memory allocations avoided?
|
||||
- [ ] Are async operations used correctly?
|
||||
- [ ] Are there potential deadlock risks?
|
||||
|
||||
### 4. Security
|
||||
- [ ] Are input parameters properly validated?
|
||||
- [ ] Are there appropriate permission checks?
|
||||
- [ ] Is information leakage avoided?
|
||||
|
||||
### 5. Cross-Platform Compatibility
|
||||
- [ ] Does the code work on different CPU architectures (x86_64, aarch64)?
|
||||
- [ ] Are platform-specific features properly gated with conditional compilation?
|
||||
- [ ] Is byte order handling correct for binary data?
|
||||
- [ ] Are there appropriate fallback implementations for unsupported platforms?
|
||||
|
||||
### 6. Code Commits and Documentation
|
||||
- [ ] Does it comply with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)?
|
||||
- [ ] Are commit messages concise and under 72 characters for the title line?
|
||||
- [ ] Commit titles should be concise and in English, avoid Chinese
|
||||
- [ ] Is PR description provided in copyable markdown format for easy copying?
|
||||
|
||||
## Common Patterns and Best Practices
|
||||
|
||||
### 1. Resource Management
|
||||
```rust
|
||||
// Use RAII pattern for resource management
|
||||
pub struct ResourceGuard {
|
||||
resource: Resource,
|
||||
}
|
||||
|
||||
impl Drop for ResourceGuard {
|
||||
fn drop(&mut self) {
|
||||
// Clean up resources
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Dependency Injection
|
||||
```rust
|
||||
// Use dependency injection pattern
|
||||
pub struct Service {
|
||||
config: Arc<Config>,
|
||||
storage: Arc<dyn StorageAPI>,
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Graceful Shutdown
|
||||
```rust
|
||||
// Implement graceful shutdown
|
||||
async fn shutdown_gracefully(shutdown_rx: &mut Receiver<()>) {
|
||||
tokio::select! {
|
||||
_ = shutdown_rx.recv() => {
|
||||
info!("Received shutdown signal");
|
||||
// Perform cleanup operations
|
||||
}
|
||||
_ = tokio::time::sleep(SHUTDOWN_TIMEOUT) => {
|
||||
warn!("Shutdown timeout reached");
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Domain-Specific Guidelines
|
||||
|
||||
### 1. Storage Operations
|
||||
- All storage operations must support erasure coding
|
||||
- Implement read/write quorum mechanisms
|
||||
- Support data integrity verification
|
||||
|
||||
### 2. Network Communication
|
||||
- Use gRPC for internal service communication
|
||||
- HTTP/HTTPS support for S3-compatible API
|
||||
- Implement connection pooling and retry mechanisms
|
||||
|
||||
### 3. Metadata Management
|
||||
- Use FlatBuffers for serialization
|
||||
- Support version control and migration
|
||||
- Implement metadata caching
|
||||
|
||||
These rules should serve as guiding principles when developing the RustFS project, ensuring code quality, performance, and maintainability.
|
||||
|
||||
### 4. Code Operations
|
||||
|
||||
#### Branch Management
|
||||
- **🚨 CRITICAL: NEVER modify code directly on main or master branch - THIS IS ABSOLUTELY FORBIDDEN 🚨**
|
||||
- **⚠️ ANY DIRECT COMMITS TO MASTER/MAIN WILL BE REJECTED AND MUST BE REVERTED IMMEDIATELY ⚠️**
|
||||
- **Always work on feature branches - NO EXCEPTIONS**
|
||||
- Always check the .cursorrules file before starting to ensure you understand the project guidelines
|
||||
- **MANDATORY workflow for ALL changes:**
|
||||
1. `git checkout main` (switch to main branch)
|
||||
2. `git pull` (get latest changes)
|
||||
3. `git checkout -b feat/your-feature-name` (create and switch to feature branch)
|
||||
4. Make your changes ONLY on the feature branch
|
||||
5. Test thoroughly before committing
|
||||
6. Commit and push to the feature branch
|
||||
7. Create a pull request for code review
|
||||
- Use descriptive branch names following the pattern: `feat/feature-name`, `fix/issue-name`, `refactor/component-name`, etc.
|
||||
- **Double-check current branch before ANY commit: `git branch` to ensure you're NOT on main/master**
|
||||
- Ensure all changes are made on feature branches and merged through pull requests
|
||||
|
||||
#### Development Workflow
|
||||
- Use English for all code comments, documentation, and variable names
|
||||
- Write meaningful and descriptive names for variables, functions, and methods
|
||||
- Avoid meaningless test content like "debug 111" or placeholder values
|
||||
- Before each change, carefully read the existing code to ensure you understand the code structure and implementation, do not break existing logic implementation, do not introduce new issues
|
||||
- Ensure each change provides sufficient test cases to guarantee code correctness
|
||||
- Do not arbitrarily modify numbers and constants in test cases, carefully analyze their meaning to ensure test case correctness
|
||||
- When writing or modifying tests, check existing test cases to ensure they have scientific naming and rigorous logic testing, if not compliant, modify test cases to ensure scientific and rigorous testing
|
||||
- **Before committing any changes, run `cargo clippy --all-targets --all-features -- -D warnings` to ensure all code passes Clippy checks**
|
||||
- After each development completion, first git add . then git commit -m "feat: feature description" or "fix: issue description", ensure compliance with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
|
||||
- **Keep commit messages concise and under 72 characters** for the title line, use body for detailed explanations if needed
|
||||
- After each development completion, first git push to remote repository
|
||||
- After each change completion, summarize the changes, do not create summary files, provide a brief change description, ensure compliance with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
|
||||
- Provide change descriptions needed for PR in the conversation, ensure compliance with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
|
||||
- **Always provide PR descriptions in English** after completing any changes, including:
|
||||
- Clear and concise title following Conventional Commits format
|
||||
- Detailed description of what was changed and why
|
||||
- List of key changes and improvements
|
||||
- Any breaking changes or migration notes if applicable
|
||||
- Testing information and verification steps
|
||||
- **Provide PR descriptions in copyable markdown format** enclosed in code blocks for easy one-click copying
|
||||
29
.docker/Dockerfile.devenv
Normal file
29
.docker/Dockerfile.devenv
Normal file
@@ -0,0 +1,29 @@
|
||||
FROM m.daocloud.io/docker.io/library/ubuntu:22.04
|
||||
|
||||
ENV LANG C.UTF-8
|
||||
|
||||
RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list
|
||||
|
||||
RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y
|
||||
|
||||
# install protoc
|
||||
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v30.2/protoc-30.2-linux-x86_64.zip \
|
||||
&& unzip protoc-30.2-linux-x86_64.zip -d protoc3 \
|
||||
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-30.2-linux-x86_64.zip protoc3
|
||||
|
||||
# install flatc
|
||||
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
|
||||
&& unzip Linux.flatc.binary.g++-13.zip \
|
||||
&& mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip
|
||||
|
||||
# install rust
|
||||
ENV RUSTUP_DIST_SERVER="https://rsproxy.cn"
|
||||
ENV RUSTUP_UPDATE_ROOT="https://rsproxy.cn/rustup"
|
||||
RUN curl -o rustup-init.sh --proto '=https' --tlsv1.2 -sSf https://rsproxy.cn/rustup-init.sh \
|
||||
&& sh rustup-init.sh -y && rm -rf rustup-init.sh
|
||||
|
||||
COPY .docker/cargo.config.toml /root/.cargo/config.toml
|
||||
|
||||
WORKDIR /root/s3-rustfs
|
||||
|
||||
CMD [ "bash", "-c", "while true; do sleep 1; done" ]
|
||||
35
.docker/Dockerfile.rockylinux9.3
Normal file
35
.docker/Dockerfile.rockylinux9.3
Normal file
@@ -0,0 +1,35 @@
|
||||
FROM m.daocloud.io/docker.io/library/rockylinux:9.3 AS builder
|
||||
|
||||
ENV LANG C.UTF-8
|
||||
|
||||
RUN sed -e 's|^mirrorlist=|#mirrorlist=|g' \
|
||||
-e 's|^#baseurl=http://dl.rockylinux.org/$contentdir|baseurl=https://mirrors.ustc.edu.cn/rocky|g' \
|
||||
-i.bak \
|
||||
/etc/yum.repos.d/rocky-extras.repo \
|
||||
/etc/yum.repos.d/rocky.repo
|
||||
|
||||
RUN dnf makecache
|
||||
|
||||
RUN yum install wget git unzip gcc openssl-devel pkgconf-pkg-config -y
|
||||
|
||||
# install protoc
|
||||
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v30.2/protoc-30.2-linux-x86_64.zip \
|
||||
&& unzip protoc-30.2-linux-x86_64.zip -d protoc3 \
|
||||
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
|
||||
&& rm -rf protoc-30.2-linux-x86_64.zip protoc3
|
||||
|
||||
# install flatc
|
||||
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
|
||||
&& unzip Linux.flatc.binary.g++-13.zip \
|
||||
&& mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc \
|
||||
&& rm -rf Linux.flatc.binary.g++-13.zip
|
||||
|
||||
# install rust
|
||||
ENV RUSTUP_DIST_SERVER="https://rsproxy.cn"
|
||||
ENV RUSTUP_UPDATE_ROOT="https://rsproxy.cn/rustup"
|
||||
RUN curl -o rustup-init.sh --proto '=https' --tlsv1.2 -sSf https://rsproxy.cn/rustup-init.sh \
|
||||
&& sh rustup-init.sh -y && rm -rf rustup-init.sh
|
||||
|
||||
COPY .docker/cargo.config.toml /root/.cargo/config.toml
|
||||
|
||||
WORKDIR /root/s3-rustfs
|
||||
22
.docker/Dockerfile.rustyvault
Normal file
22
.docker/Dockerfile.rustyvault
Normal file
@@ -0,0 +1,22 @@
|
||||
FROM vault:1.13
|
||||
|
||||
# Configure Vault for dev mode
|
||||
ENV VAULT_DEV_ROOT_TOKEN_ID=rustfs-root-token
|
||||
ENV VAULT_DEV_LISTEN_ADDRESS=0.0.0.0:8200
|
||||
|
||||
# Install curl for health checks
|
||||
USER root
|
||||
RUN apk add --no-cache curl jq
|
||||
|
||||
# Copy the Vault initialization script
|
||||
COPY vault-init.sh /usr/local/bin/vault-init.sh
|
||||
RUN chmod +x /usr/local/bin/vault-init.sh
|
||||
|
||||
# Switch back to vault user
|
||||
USER vault
|
||||
|
||||
# Expose Vault port
|
||||
EXPOSE 8200
|
||||
|
||||
# Start Vault in dev mode and run the initialization script
|
||||
ENTRYPOINT ["sh", "-c", "vault server -dev & sleep 5 && vault-init.sh"]
|
||||
27
.docker/Dockerfile.ubuntu22.04
Normal file
27
.docker/Dockerfile.ubuntu22.04
Normal file
@@ -0,0 +1,27 @@
|
||||
FROM m.daocloud.io/docker.io/library/ubuntu:22.04
|
||||
|
||||
ENV LANG C.UTF-8
|
||||
|
||||
RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list
|
||||
|
||||
RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y
|
||||
|
||||
# install protoc
|
||||
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v30.2/protoc-30.2-linux-x86_64.zip \
|
||||
&& unzip protoc-30.2-linux-x86_64.zip -d protoc3 \
|
||||
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-30.2-linux-x86_64.zip protoc3
|
||||
|
||||
# install flatc
|
||||
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
|
||||
&& unzip Linux.flatc.binary.g++-13.zip \
|
||||
&& mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip
|
||||
|
||||
# install rust
|
||||
ENV RUSTUP_DIST_SERVER="https://rsproxy.cn"
|
||||
ENV RUSTUP_UPDATE_ROOT="https://rsproxy.cn/rustup"
|
||||
RUN curl -o rustup-init.sh --proto '=https' --tlsv1.2 -sSf https://rsproxy.cn/rustup-init.sh \
|
||||
&& sh rustup-init.sh -y && rm -rf rustup-init.sh
|
||||
|
||||
COPY .docker/cargo.config.toml /root/.cargo/config.toml
|
||||
|
||||
WORKDIR /root/s3-rustfs
|
||||
13
.docker/cargo.config.toml
Normal file
13
.docker/cargo.config.toml
Normal file
@@ -0,0 +1,13 @@
|
||||
[source.crates-io]
|
||||
registry = "https://github.com/rust-lang/crates.io-index"
|
||||
replace-with = 'rsproxy-sparse'
|
||||
|
||||
[source.rsproxy]
|
||||
registry = "https://rsproxy.cn/crates.io-index"
|
||||
[registries.rsproxy]
|
||||
index = "https://rsproxy.cn/crates.io-index"
|
||||
[source.rsproxy-sparse]
|
||||
registry = "sparse+https://rsproxy.cn/index/"
|
||||
|
||||
[net]
|
||||
git-fetch-with-cli = true
|
||||
26
.docker/kms/docker-compose.yml
Normal file
26
.docker/kms/docker-compose.yml
Normal file
@@ -0,0 +1,26 @@
|
||||
services:
|
||||
rustyvault:
|
||||
build:
|
||||
context: ./.docker
|
||||
dockerfile: Dockerfile.rustyvault
|
||||
container_name: rustyvault
|
||||
hostname: rustyvault
|
||||
ports:
|
||||
- "8200:8200" # Vault API port
|
||||
volumes:
|
||||
- vault-data:/vault/data
|
||||
- vault-config:/vault/config
|
||||
cap_add:
|
||||
- IPC_LOCK # Allow the vault to lock sensitive data in memory
|
||||
environment:
|
||||
- VAULT_DEV_ROOT_TOKEN_ID=rustfs-root-token
|
||||
- VAULT_ADDR=http://0.0.0.0:8200
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-s", "http://127.0.0.1:8200/v1/sys/health"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
|
||||
networks:
|
||||
default:
|
||||
driver: bridge
|
||||
109
.docker/observability/README.md
Normal file
109
.docker/observability/README.md
Normal file
@@ -0,0 +1,109 @@
|
||||
# Observability
|
||||
|
||||
This directory contains the observability stack for the application. The stack is composed of the following components:
|
||||
|
||||
- Prometheus v3.2.1
|
||||
- Grafana 11.6.0
|
||||
- Loki 3.4.2
|
||||
- Jaeger 2.4.0
|
||||
- Otel Collector 0.120.0 # 0.121.0 remove loki
|
||||
|
||||
## Prometheus
|
||||
|
||||
Prometheus is a monitoring and alerting toolkit. It scrapes metrics from instrumented jobs, either directly or via an
|
||||
intermediary push gateway for short-lived jobs. It stores all scraped samples locally and runs rules over this data to
|
||||
either aggregate and record new time series from existing data or generate alerts. Grafana or other API consumers can be
|
||||
used to visualize the collected data.
|
||||
|
||||
## Grafana
|
||||
|
||||
Grafana is a multi-platform open-source analytics and interactive visualization web application. It provides charts,
|
||||
graphs, and alerts for the web when connected to supported data sources.
|
||||
|
||||
## Loki
|
||||
|
||||
Loki is a horizontally-scalable, highly-available, multi-tenant log aggregation system inspired by Prometheus. It is
|
||||
designed to be very cost-effective and easy to operate. It does not index the contents of the logs, but rather a set of
|
||||
labels for each log stream.
|
||||
|
||||
## Jaeger
|
||||
|
||||
Jaeger is a distributed tracing system released as open source by Uber Technologies. It is used for monitoring and
|
||||
troubleshooting microservices-based distributed systems, including:
|
||||
|
||||
- Distributed context propagation
|
||||
- Distributed transaction monitoring
|
||||
- Root cause analysis
|
||||
- Service dependency analysis
|
||||
- Performance / latency optimization
|
||||
|
||||
## Otel Collector
|
||||
|
||||
The OpenTelemetry Collector offers a vendor-agnostic implementation on how to receive, process, and export telemetry
|
||||
data. It removes the need to run, operate, and maintain multiple agents/collectors in order to support open-source
|
||||
observability data formats (e.g. Jaeger, Prometheus, etc.) sending to one or more open-source or commercial back-ends.
|
||||
|
||||
## How to use
|
||||
|
||||
To deploy the observability stack, run the following command:
|
||||
|
||||
- docker latest version
|
||||
|
||||
```bash
|
||||
docker compose -f docker-compose.yml -f docker-compose.override.yml up -d
|
||||
```
|
||||
|
||||
- docker compose v2.0.0 or before
|
||||
|
||||
```bash
|
||||
docker-compose -f docker-compose.yml -f docker-compose.override.yml up -d
|
||||
```
|
||||
|
||||
To access the Grafana dashboard, navigate to `http://localhost:3000` in your browser. The default username and password
|
||||
are `admin` and `admin`, respectively.
|
||||
|
||||
To access the Jaeger dashboard, navigate to `http://localhost:16686` in your browser.
|
||||
|
||||
To access the Prometheus dashboard, navigate to `http://localhost:9090` in your browser.
|
||||
|
||||
## How to stop
|
||||
|
||||
To stop the observability stack, run the following command:
|
||||
|
||||
```bash
|
||||
docker compose -f docker-compose.yml -f docker-compose.override.yml down
|
||||
```
|
||||
|
||||
## How to remove data
|
||||
|
||||
To remove the data generated by the observability stack, run the following command:
|
||||
|
||||
```bash
|
||||
docker compose -f docker-compose.yml -f docker-compose.override.yml down -v
|
||||
```
|
||||
|
||||
## How to configure
|
||||
|
||||
To configure the observability stack, modify the `docker-compose.override.yml` file. The file contains the following configuration:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
prometheus:
|
||||
environment:
|
||||
- PROMETHEUS_CONFIG_FILE=/etc/prometheus/prometheus.yml
|
||||
volumes:
|
||||
- ./prometheus.yml:/etc/prometheus/prometheus.yml
|
||||
|
||||
grafana:
|
||||
environment:
|
||||
- GF_SECURITY_ADMIN_PASSWORD=admin
|
||||
volumes:
|
||||
- ./grafana/provisioning:/etc/grafana/provisioning
|
||||
```
|
||||
|
||||
The `prometheus` service mounts the `prometheus.yml` file to `/etc/prometheus/prometheus.yml`. The `grafana` service
|
||||
mounts the `grafana/provisioning` directory to `/etc/grafana/provisioning`. You can modify these files to configure the
|
||||
observability stack.
|
||||
|
||||
|
||||
|
||||
42
.docker/observability/README_ZH.md
Normal file
42
.docker/observability/README_ZH.md
Normal file
@@ -0,0 +1,42 @@
|
||||
## 部署可观测性系统
|
||||
|
||||
OpenTelemetry Collector 提供了一个厂商中立的遥测数据处理方案,用于接收、处理和导出遥测数据。它消除了为支持多种开源可观测性数据格式(如
|
||||
Jaeger、Prometheus 等)而需要运行和维护多个代理/收集器的必要性。
|
||||
|
||||
### 快速部署
|
||||
|
||||
1. 进入 `.docker/observability` 目录
|
||||
2. 执行以下命令启动服务:
|
||||
|
||||
```bash
|
||||
docker compose -f docker-compose.yml up -d
|
||||
```
|
||||
|
||||
### 访问监控面板
|
||||
|
||||
服务启动后,可通过以下地址访问各个监控面板:
|
||||
|
||||
- Grafana: `http://localhost:3000` (默认账号/密码:`admin`/`admin`)
|
||||
- Jaeger: `http://localhost:16686`
|
||||
- Prometheus: `http://localhost:9090`
|
||||
|
||||
## 配置可观测性
|
||||
|
||||
### 创建配置文件
|
||||
|
||||
1. 进入 `deploy/config` 目录
|
||||
2. 复制示例配置:`cp obs.toml.example obs.toml`
|
||||
3. 编辑 `obs.toml` 配置文件,修改以下关键参数:
|
||||
|
||||
| 配置项 | 说明 | 示例值 |
|
||||
|-----------------|----------------------------|-----------------------|
|
||||
| endpoint | OpenTelemetry Collector 地址 | http://localhost:4317 |
|
||||
| service_name | 服务名称 | rustfs |
|
||||
| service_version | 服务版本 | 1.0.0 |
|
||||
| environment | 运行环境 | production |
|
||||
| meter_interval | 指标导出间隔 (秒) | 30 |
|
||||
| sample_ratio | 采样率 | 1.0 |
|
||||
| use_stdout | 是否输出到控制台 | true/false |
|
||||
| logger_level | 日志级别 | info |
|
||||
|
||||
```
|
||||
34
.docker/observability/config/obs-multi.toml
Normal file
34
.docker/observability/config/obs-multi.toml
Normal file
@@ -0,0 +1,34 @@
|
||||
[observability]
|
||||
endpoint = "http://otel-collector:4317" # Default is "http://localhost:4317" if not specified
|
||||
use_stdout = false # Whether to also write output to stdout: true = output to stdout, false = no stdout output
|
||||
sample_ratio = 2.0
|
||||
meter_interval = 30
|
||||
service_name = "rustfs"
|
||||
service_version = "0.1.0"
|
||||
environments = "production"
|
||||
logger_level = "debug"
|
||||
local_logging_enabled = true
|
||||
|
||||
#[[sinks]]
|
||||
#type = "Kafka"
|
||||
#brokers = "localhost:9092"
|
||||
#topic = "logs"
|
||||
#batch_size = 100 # Default is 100 if not specified
|
||||
#batch_timeout_ms = 1000 # Default is 1000ms if not specified
|
||||
#
|
||||
#[[sinks]]
|
||||
#type = "Webhook"
|
||||
#endpoint = "http://localhost:8080/webhook"
|
||||
#auth_token = ""
|
||||
#batch_size = 100 # Default is 3 if not specified
|
||||
#batch_timeout_ms = 1000 # Default is 100ms if not specified
|
||||
|
||||
[[sinks]]
|
||||
type = "File"
|
||||
path = "/root/data/logs/rustfs.log"
|
||||
buffer_size = 100 # Default is 8192 bytes if not specified
|
||||
flush_interval_ms = 1000
|
||||
flush_threshold = 100
|
||||
|
||||
[logger]
|
||||
queue_capacity = 10
|
||||
34
.docker/observability/config/obs.toml
Normal file
34
.docker/observability/config/obs.toml
Normal file
@@ -0,0 +1,34 @@
|
||||
[observability]
|
||||
endpoint = "http://localhost:4317" # Default is "http://localhost:4317" if not specified
|
||||
use_stdout = false # Whether to also write output to stdout: true = output to stdout, false = no stdout output
|
||||
sample_ratio = 2.0
|
||||
meter_interval = 30
|
||||
service_name = "rustfs"
|
||||
service_version = "0.1.0"
|
||||
environments = "production"
|
||||
logger_level = "debug"
|
||||
local_logging_enabled = true
|
||||
|
||||
#[[sinks]]
|
||||
#type = "Kafka"
|
||||
#brokers = "localhost:9092"
|
||||
#topic = "logs"
|
||||
#batch_size = 100 # Default is 100 if not specified
|
||||
#batch_timeout_ms = 1000 # Default is 1000ms if not specified
|
||||
#
|
||||
#[[sinks]]
|
||||
#type = "Webhook"
|
||||
#endpoint = "http://localhost:8080/webhook"
|
||||
#auth_token = ""
|
||||
#batch_size = 100 # Default is 3 if not specified
|
||||
#batch_timeout_ms = 1000 # Default is 100ms if not specified
|
||||
|
||||
[[sinks]]
|
||||
type = "File"
|
||||
path = "/root/data/logs/rustfs.log"
|
||||
buffer_size = 100 # Default is 8192 bytes if not specified
|
||||
flush_interval_ms = 1000
|
||||
flush_threshold = 100
|
||||
|
||||
[logger]
|
||||
queue_capacity = 10
|
||||
65
.docker/observability/docker-compose.yml
Normal file
65
.docker/observability/docker-compose.yml
Normal file
@@ -0,0 +1,65 @@
|
||||
services:
|
||||
otel-collector:
|
||||
image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.127.0
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
|
||||
ports:
|
||||
- 1888:1888
|
||||
- 8888:8888
|
||||
- 8889:8889
|
||||
- 13133:13133
|
||||
- 4317:4317
|
||||
- 4318:4318
|
||||
- 55679:55679
|
||||
networks:
|
||||
- otel-network
|
||||
jaeger:
|
||||
image: jaegertracing/jaeger:2.6.0
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
ports:
|
||||
- "16686:16686"
|
||||
- "14317:4317"
|
||||
- "14318:4318"
|
||||
networks:
|
||||
- otel-network
|
||||
prometheus:
|
||||
image: prom/prometheus:v3.4.1
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
volumes:
|
||||
- ./prometheus.yml:/etc/prometheus/prometheus.yml
|
||||
ports:
|
||||
- "9090:9090"
|
||||
networks:
|
||||
- otel-network
|
||||
loki:
|
||||
image: grafana/loki:3.5.1
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
volumes:
|
||||
- ./loki-config.yaml:/etc/loki/local-config.yaml
|
||||
ports:
|
||||
- "3100:3100"
|
||||
command: -config.file=/etc/loki/local-config.yaml
|
||||
networks:
|
||||
- otel-network
|
||||
grafana:
|
||||
image: grafana/grafana:12.0.1
|
||||
ports:
|
||||
- "3000:3000" # Web UI
|
||||
environment:
|
||||
- GF_SECURITY_ADMIN_PASSWORD=admin
|
||||
- TZ=Asia/Shanghai
|
||||
networks:
|
||||
- otel-network
|
||||
|
||||
|
||||
networks:
|
||||
otel-network:
|
||||
driver: bridge
|
||||
name: "network_otel_config"
|
||||
driver_opts:
|
||||
com.docker.network.enable_ipv6: "true"
|
||||
98
.docker/observability/jaeger-config.yaml
Normal file
98
.docker/observability/jaeger-config.yaml
Normal file
@@ -0,0 +1,98 @@
|
||||
service:
|
||||
extensions: [ jaeger_storage, jaeger_query, remote_sampling, healthcheckv2 ]
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [ otlp, jaeger, zipkin ]
|
||||
processors: [ batch, adaptive_sampling ]
|
||||
exporters: [ jaeger_storage_exporter ]
|
||||
telemetry:
|
||||
resource:
|
||||
service.name: jaeger
|
||||
metrics:
|
||||
level: detailed
|
||||
readers:
|
||||
- pull:
|
||||
exporter:
|
||||
prometheus:
|
||||
host: 0.0.0.0
|
||||
port: 8888
|
||||
logs:
|
||||
level: debug
|
||||
# TODO Initialize telemetry tracer once OTEL released new feature.
|
||||
# https://github.com/open-telemetry/opentelemetry-collector/issues/10663
|
||||
|
||||
extensions:
|
||||
healthcheckv2:
|
||||
use_v2: true
|
||||
http:
|
||||
|
||||
# pprof:
|
||||
# endpoint: 0.0.0.0:1777
|
||||
# zpages:
|
||||
# endpoint: 0.0.0.0:55679
|
||||
|
||||
jaeger_query:
|
||||
storage:
|
||||
traces: some_store
|
||||
traces_archive: another_store
|
||||
ui:
|
||||
config_file: ./cmd/jaeger/config-ui.json
|
||||
log_access: true
|
||||
# The maximum duration that is considered for clock skew adjustments.
|
||||
# Defaults to 0 seconds, which means it's disabled.
|
||||
max_clock_skew_adjust: 0s
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:16685
|
||||
http:
|
||||
endpoint: 0.0.0.0:16686
|
||||
|
||||
jaeger_storage:
|
||||
backends:
|
||||
some_store:
|
||||
memory:
|
||||
max_traces: 100000
|
||||
another_store:
|
||||
memory:
|
||||
max_traces: 100000
|
||||
metric_backends:
|
||||
some_metrics_storage:
|
||||
prometheus:
|
||||
endpoint: http://prometheus:9090
|
||||
normalize_calls: true
|
||||
normalize_duration: true
|
||||
|
||||
remote_sampling:
|
||||
# You can either use file or adaptive sampling strategy in remote_sampling
|
||||
# file:
|
||||
# path: ./cmd/jaeger/sampling-strategies.json
|
||||
adaptive:
|
||||
sampling_store: some_store
|
||||
initial_sampling_probability: 0.1
|
||||
http:
|
||||
grpc:
|
||||
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
http:
|
||||
|
||||
jaeger:
|
||||
protocols:
|
||||
grpc:
|
||||
thrift_binary:
|
||||
thrift_compact:
|
||||
thrift_http:
|
||||
|
||||
zipkin:
|
||||
|
||||
processors:
|
||||
batch:
|
||||
# Adaptive Sampling Processor is required to support adaptive sampling.
|
||||
# It expects remote_sampling extension with `adaptive:` config to be enabled.
|
||||
adaptive_sampling:
|
||||
|
||||
exporters:
|
||||
jaeger_storage_exporter:
|
||||
trace_storage: some_store
|
||||
|
||||
63
.docker/observability/loki-config.yaml
Normal file
63
.docker/observability/loki-config.yaml
Normal file
@@ -0,0 +1,63 @@
|
||||
auth_enabled: false
|
||||
|
||||
server:
|
||||
http_listen_port: 3100
|
||||
grpc_listen_port: 9096
|
||||
log_level: debug
|
||||
grpc_server_max_concurrent_streams: 1000
|
||||
|
||||
common:
|
||||
instance_addr: 127.0.0.1
|
||||
path_prefix: /tmp/loki
|
||||
storage:
|
||||
filesystem:
|
||||
chunks_directory: /tmp/loki/chunks
|
||||
rules_directory: /tmp/loki/rules
|
||||
replication_factor: 1
|
||||
ring:
|
||||
kvstore:
|
||||
store: inmemory
|
||||
|
||||
query_range:
|
||||
results_cache:
|
||||
cache:
|
||||
embedded_cache:
|
||||
enabled: true
|
||||
max_size_mb: 100
|
||||
|
||||
limits_config:
|
||||
metric_aggregation_enabled: true
|
||||
|
||||
schema_config:
|
||||
configs:
|
||||
- from: 2020-10-24
|
||||
store: tsdb
|
||||
object_store: filesystem
|
||||
schema: v13
|
||||
index:
|
||||
prefix: index_
|
||||
period: 24h
|
||||
|
||||
pattern_ingester:
|
||||
enabled: true
|
||||
metric_aggregation:
|
||||
loki_address: localhost:3100
|
||||
|
||||
ruler:
|
||||
alertmanager_url: http://localhost:9093
|
||||
|
||||
frontend:
|
||||
encoding: protobuf
|
||||
|
||||
# By default, Loki will send anonymous, but uniquely-identifiable usage and configuration
|
||||
# analytics to Grafana Labs. These statistics are sent to https://stats.grafana.org/
|
||||
#
|
||||
# Statistics help us better understand how Loki is used, and they show us performance
|
||||
# levels for most users. This helps us prioritize features and documentation.
|
||||
# For more information on what's sent, look at
|
||||
# https://github.com/grafana/loki/blob/main/pkg/analytics/stats.go
|
||||
# Refer to the buildReport method to see what goes into a report.
|
||||
#
|
||||
# If you would like to disable reporting, uncomment the following lines:
|
||||
#analytics:
|
||||
# reporting_enabled: false
|
||||
57
.docker/observability/otel-collector-config.yaml
Normal file
57
.docker/observability/otel-collector-config.yaml
Normal file
@@ -0,0 +1,57 @@
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc: # OTLP gRPC 接收器
|
||||
endpoint: 0.0.0.0:4317
|
||||
http: # OTLP HTTP 接收器
|
||||
endpoint: 0.0.0.0:4318
|
||||
|
||||
processors:
|
||||
batch: # 批处理处理器,提升吞吐量
|
||||
timeout: 5s
|
||||
send_batch_size: 1000
|
||||
memory_limiter:
|
||||
check_interval: 1s
|
||||
limit_mib: 512
|
||||
|
||||
exporters:
|
||||
otlp/traces: # OTLP 导出器,用于跟踪数据
|
||||
endpoint: "jaeger:4317" # Jaeger 的 OTLP gRPC 端点
|
||||
tls:
|
||||
insecure: true # 开发环境禁用 TLS,生产环境需配置证书
|
||||
prometheus: # Prometheus 导出器,用于指标数据
|
||||
endpoint: "0.0.0.0:8889" # Prometheus 刮取端点
|
||||
namespace: "rustfs" # 指标前缀
|
||||
send_timestamps: true # 发送时间戳
|
||||
# enable_open_metrics: true
|
||||
loki: # Loki 导出器,用于日志数据
|
||||
# endpoint: "http://loki:3100/otlp/v1/logs"
|
||||
endpoint: "http://loki:3100/loki/api/v1/push"
|
||||
tls:
|
||||
insecure: true
|
||||
extensions:
|
||||
health_check:
|
||||
pprof:
|
||||
zpages:
|
||||
service:
|
||||
extensions: [ health_check, pprof, zpages ] # 启用扩展
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [ otlp ]
|
||||
processors: [ memory_limiter,batch ]
|
||||
exporters: [ otlp/traces ]
|
||||
metrics:
|
||||
receivers: [ otlp ]
|
||||
processors: [ batch ]
|
||||
exporters: [ prometheus ]
|
||||
logs:
|
||||
receivers: [ otlp ]
|
||||
processors: [ batch ]
|
||||
exporters: [ loki ]
|
||||
telemetry:
|
||||
logs:
|
||||
level: "info" # Collector 日志级别
|
||||
metrics:
|
||||
address: "0.0.0.0:8888" # Collector 自身指标暴露
|
||||
|
||||
|
||||
11
.docker/observability/prometheus.yml
Normal file
11
.docker/observability/prometheus.yml
Normal file
@@ -0,0 +1,11 @@
|
||||
global:
|
||||
scrape_interval: 5s # 刮取间隔
|
||||
|
||||
scrape_configs:
|
||||
- job_name: 'otel-collector'
|
||||
static_configs:
|
||||
- targets: ['otel-collector:8888'] # 从 Collector 刮取指标
|
||||
- job_name: 'otel-metrics'
|
||||
static_configs:
|
||||
- targets: ['otel-collector:8889'] # 应用指标
|
||||
|
||||
75
.docker/openobserve-otel/README.md
Normal file
75
.docker/openobserve-otel/README.md
Normal file
@@ -0,0 +1,75 @@
|
||||
# OpenObserve + OpenTelemetry Collector
|
||||
|
||||
[](https://openobserve.org)
|
||||
[](https://opentelemetry.io/)
|
||||
|
||||
English | [中文](README_ZH.md)
|
||||
|
||||
This directory contains the configuration files for setting up an observability stack with OpenObserve and OpenTelemetry
|
||||
Collector.
|
||||
|
||||
### Overview
|
||||
|
||||
This setup provides a complete observability solution for your applications:
|
||||
|
||||
- **OpenObserve**: A modern, open-source observability platform for logs, metrics, and traces.
|
||||
- **OpenTelemetry Collector**: Collects and processes telemetry data before sending it to OpenObserve.
|
||||
|
||||
### Setup Instructions
|
||||
|
||||
1. **Prerequisites**:
|
||||
- Docker and Docker Compose installed
|
||||
- Sufficient memory resources (minimum 2GB recommended)
|
||||
|
||||
2. **Starting the Services**:
|
||||
```bash
|
||||
cd .docker/openobserve-otel
|
||||
docker compose -f docker-compose.yml up -d
|
||||
```
|
||||
|
||||
3. **Accessing the Dashboard**:
|
||||
- OpenObserve UI: http://localhost:5080
|
||||
- Default credentials:
|
||||
- Username: root@rustfs.com
|
||||
- Password: rustfs123
|
||||
|
||||
### Configuration
|
||||
|
||||
#### OpenObserve Configuration
|
||||
|
||||
The OpenObserve service is configured with:
|
||||
|
||||
- Root user credentials
|
||||
- Data persistence through a volume mount
|
||||
- Memory cache enabled
|
||||
- Health checks
|
||||
- Exposed ports:
|
||||
- 5080: HTTP API and UI
|
||||
- 5081: OTLP gRPC
|
||||
|
||||
#### OpenTelemetry Collector Configuration
|
||||
|
||||
The collector is configured to:
|
||||
|
||||
- Receive telemetry data via OTLP (HTTP and gRPC)
|
||||
- Collect logs from files
|
||||
- Process data in batches
|
||||
- Export data to OpenObserve
|
||||
- Manage memory usage
|
||||
|
||||
### Integration with Your Application
|
||||
|
||||
To send telemetry data from your application, configure your OpenTelemetry SDK to send data to:
|
||||
|
||||
- OTLP gRPC: `localhost:4317`
|
||||
- OTLP HTTP: `localhost:4318`
|
||||
|
||||
For example, in a Rust application using the `rustfs-obs` library:
|
||||
|
||||
```bash
|
||||
export RUSTFS_OBS_ENDPOINT=http://localhost:4317
|
||||
export RUSTFS_OBS_SERVICE_NAME=yourservice
|
||||
export RUSTFS_OBS_SERVICE_VERSION=1.0.0
|
||||
export RUSTFS_OBS_ENVIRONMENT=development
|
||||
```
|
||||
|
||||
75
.docker/openobserve-otel/README_ZH.md
Normal file
75
.docker/openobserve-otel/README_ZH.md
Normal file
@@ -0,0 +1,75 @@
|
||||
# OpenObserve + OpenTelemetry Collector
|
||||
|
||||
[](https://openobserve.org)
|
||||
[](https://opentelemetry.io/)
|
||||
|
||||
[English](README.md) | 中文
|
||||
|
||||
## 中文
|
||||
|
||||
本目录包含搭建 OpenObserve 和 OpenTelemetry Collector 可观测性栈的配置文件。
|
||||
|
||||
### 概述
|
||||
|
||||
此设置为应用程序提供了完整的可观测性解决方案:
|
||||
|
||||
- **OpenObserve**:现代化、开源的可观测性平台,用于日志、指标和追踪。
|
||||
- **OpenTelemetry Collector**:收集和处理遥测数据,然后将其发送到 OpenObserve。
|
||||
|
||||
### 设置说明
|
||||
|
||||
1. **前提条件**:
|
||||
- 已安装 Docker 和 Docker Compose
|
||||
- 足够的内存资源(建议至少 2GB)
|
||||
|
||||
2. **启动服务**:
|
||||
```bash
|
||||
cd .docker/openobserve-otel
|
||||
docker compose -f docker-compose.yml up -d
|
||||
```
|
||||
|
||||
3. **访问仪表板**:
|
||||
- OpenObserve UI:http://localhost:5080
|
||||
- 默认凭据:
|
||||
- 用户名:root@rustfs.com
|
||||
- 密码:rustfs123
|
||||
|
||||
### 配置
|
||||
|
||||
#### OpenObserve 配置
|
||||
|
||||
OpenObserve 服务配置:
|
||||
|
||||
- 根用户凭据
|
||||
- 通过卷挂载实现数据持久化
|
||||
- 启用内存缓存
|
||||
- 健康检查
|
||||
- 暴露端口:
|
||||
- 5080:HTTP API 和 UI
|
||||
- 5081:OTLP gRPC
|
||||
|
||||
#### OpenTelemetry Collector 配置
|
||||
|
||||
收集器配置为:
|
||||
|
||||
- 通过 OTLP(HTTP 和 gRPC)接收遥测数据
|
||||
- 从文件中收集日志
|
||||
- 批处理数据
|
||||
- 将数据导出到 OpenObserve
|
||||
- 管理内存使用
|
||||
|
||||
### 与应用程序集成
|
||||
|
||||
要从应用程序发送遥测数据,将 OpenTelemetry SDK 配置为发送数据到:
|
||||
|
||||
- OTLP gRPC:`localhost:4317`
|
||||
- OTLP HTTP:`localhost:4318`
|
||||
|
||||
例如,在使用 `rustfs-obs` 库的 Rust 应用程序中:
|
||||
|
||||
```bash
|
||||
export RUSTFS_OBS_ENDPOINT=http://localhost:4317
|
||||
export RUSTFS_OBS_SERVICE_NAME=yourservice
|
||||
export RUSTFS_OBS_SERVICE_VERSION=1.0.0
|
||||
export RUSTFS_OBS_ENVIRONMENT=development
|
||||
```
|
||||
73
.docker/openobserve-otel/docker-compose.yml
Normal file
73
.docker/openobserve-otel/docker-compose.yml
Normal file
@@ -0,0 +1,73 @@
|
||||
services:
|
||||
openobserve:
|
||||
image: public.ecr.aws/zinclabs/openobserve:latest
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
ZO_ROOT_USER_EMAIL: "root@rustfs.com"
|
||||
ZO_ROOT_USER_PASSWORD: "rustfs123"
|
||||
ZO_TRACING_HEADER_KEY: "Authorization"
|
||||
ZO_TRACING_HEADER_VALUE: "Basic cm9vdEBydXN0ZnMuY29tOmQ4SXlCSEJTUkk3RGVlcEQ="
|
||||
ZO_DATA_DIR: "/data"
|
||||
ZO_MEMORY_CACHE_ENABLED: "true"
|
||||
ZO_MEMORY_CACHE_MAX_SIZE: "256"
|
||||
RUST_LOG: "info"
|
||||
TZ: Asia/Shanghai
|
||||
ports:
|
||||
- "5080:5080"
|
||||
- "5081:5081"
|
||||
volumes:
|
||||
- ./data:/data
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://localhost:5080/health" ]
|
||||
start_period: 60s
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 6
|
||||
networks:
|
||||
- otel-network
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 1024M
|
||||
reservations:
|
||||
memory: 512M
|
||||
|
||||
otel-collector:
|
||||
image: otel/opentelemetry-collector-contrib:latest
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
|
||||
ports:
|
||||
- "4317:4317" # OTLP gRPC
|
||||
- "4318:4318" # OTLP HTTP
|
||||
- "13133:13133" # Health check
|
||||
- "1777:1777" # pprof
|
||||
- "55679:55679" # zpages
|
||||
- "1888:1888" # Metrics
|
||||
- "8888:8888" # Prometheus metrics
|
||||
- "8889:8889" # Additional metrics endpoint
|
||||
depends_on:
|
||||
- openobserve
|
||||
networks:
|
||||
- otel-network
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 10240M
|
||||
reservations:
|
||||
memory: 512M
|
||||
|
||||
networks:
|
||||
otel-network:
|
||||
driver: bridge
|
||||
name: otel-network
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 172.28.0.0/16
|
||||
gateway: 172.28.0.1
|
||||
labels:
|
||||
com.example.description: "Network for OpenObserve and OpenTelemetry Collector"
|
||||
volumes:
|
||||
data:
|
||||
78
.docker/openobserve-otel/otel-collector-config.yaml
Normal file
78
.docker/openobserve-otel/otel-collector-config.yaml
Normal file
@@ -0,0 +1,78 @@
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317
|
||||
http:
|
||||
endpoint: 0.0.0.0:4318
|
||||
filelog:
|
||||
include: [ "/var/log/app/*.log" ]
|
||||
start_at: end
|
||||
|
||||
processors:
|
||||
batch:
|
||||
timeout: 1s
|
||||
send_batch_size: 1024
|
||||
memory_limiter:
|
||||
check_interval: 1s
|
||||
limit_mib: 400
|
||||
spike_limit_mib: 100
|
||||
|
||||
exporters:
|
||||
otlphttp/openobserve:
|
||||
endpoint: http://openobserve:5080/api/default # http://127.0.0.1:5080/api/default
|
||||
headers:
|
||||
Authorization: "Basic cm9vdEBydXN0ZnMuY29tOmQ4SXlCSEJTUkk3RGVlcEQ="
|
||||
stream-name: default
|
||||
organization: default
|
||||
compression: gzip
|
||||
retry_on_failure:
|
||||
enabled: true
|
||||
initial_interval: 5s
|
||||
max_interval: 30s
|
||||
max_elapsed_time: 300s
|
||||
timeout: 10s
|
||||
otlp/openobserve:
|
||||
endpoint: openobserve:5081 # http://127.0.0.1:5080/api/default
|
||||
headers:
|
||||
Authorization: "Basic cm9vdEBydXN0ZnMuY29tOmQ4SXlCSEJTUkk3RGVlcEQ="
|
||||
stream-name: default
|
||||
organization: default
|
||||
compression: gzip
|
||||
retry_on_failure:
|
||||
enabled: true
|
||||
initial_interval: 5s
|
||||
max_interval: 30s
|
||||
max_elapsed_time: 300s
|
||||
timeout: 10s
|
||||
tls:
|
||||
insecure: true
|
||||
|
||||
extensions:
|
||||
health_check:
|
||||
endpoint: 0.0.0.0:13133
|
||||
pprof:
|
||||
endpoint: 0.0.0.0:1777
|
||||
zpages:
|
||||
endpoint: 0.0.0.0:55679
|
||||
|
||||
service:
|
||||
extensions: [ health_check, pprof, zpages ]
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [ otlp ]
|
||||
processors: [ memory_limiter, batch ]
|
||||
exporters: [ otlp/openobserve ]
|
||||
metrics:
|
||||
receivers: [ otlp ]
|
||||
processors: [ memory_limiter, batch ]
|
||||
exporters: [ otlp/openobserve ]
|
||||
logs:
|
||||
receivers: [ otlp, filelog ]
|
||||
processors: [ memory_limiter, batch ]
|
||||
exporters: [ otlp/openobserve ]
|
||||
telemetry:
|
||||
logs:
|
||||
level: "info" # Collector 日志级别
|
||||
metrics:
|
||||
address: "0.0.0.0:8888" # Collector 自身指标暴露
|
||||
71
.docker/vault-init.sh
Normal file
71
.docker/vault-init.sh
Normal file
@@ -0,0 +1,71 @@
|
||||
#!/bin/sh
|
||||
# vault-init.sh - Initialize Vault for RustFS SSE-KMS
|
||||
|
||||
# Wait for Vault to start
|
||||
until curl -s http://127.0.0.1:8200/v1/sys/health | grep "initialized" > /dev/null; do
|
||||
echo "Waiting for Vault to start..."
|
||||
sleep 1
|
||||
done
|
||||
|
||||
# Set the Vault token
|
||||
export VAULT_TOKEN="$VAULT_DEV_ROOT_TOKEN_ID"
|
||||
export VAULT_ADDR="http://127.0.0.1:8200"
|
||||
|
||||
echo "Vault is running and initialized"
|
||||
|
||||
# Enable the Transit secrets engine (for encryption operations)
|
||||
vault secrets enable transit
|
||||
echo "Transit secrets engine enabled"
|
||||
|
||||
# Create a key for RustFS encryption
|
||||
vault write -f transit/keys/rustfs-encryption-key
|
||||
echo "Created rustfs-encryption-key"
|
||||
|
||||
# Create another key for RustFS with rotation capability
|
||||
vault write -f transit/keys/rustfs-rotating-key
|
||||
echo "Created rustfs-rotating-key"
|
||||
|
||||
# Set up key rotation policy
|
||||
vault write transit/keys/rustfs-rotating-key/config auto_rotate_period="30d"
|
||||
echo "Set up auto rotation for rustfs-rotating-key"
|
||||
|
||||
# Create a policy for RustFS to access these keys
|
||||
cat > /tmp/rustfs-policy.hcl << EOF
|
||||
# Policy for RustFS encryption operations
|
||||
path "transit/encrypt/rustfs-encryption-key" {
|
||||
capabilities = ["create", "update"]
|
||||
}
|
||||
|
||||
path "transit/decrypt/rustfs-encryption-key" {
|
||||
capabilities = ["create", "update"]
|
||||
}
|
||||
|
||||
path "transit/encrypt/rustfs-rotating-key" {
|
||||
capabilities = ["create", "update"]
|
||||
}
|
||||
|
||||
path "transit/decrypt/rustfs-rotating-key" {
|
||||
capabilities = ["create", "update"]
|
||||
}
|
||||
EOF
|
||||
|
||||
# Create the policy
|
||||
vault policy write rustfs-encryption-policy /tmp/rustfs-policy.hcl
|
||||
echo "Created rustfs-encryption-policy"
|
||||
|
||||
# Create a token for RustFS to use
|
||||
RUSTFS_TOKEN=$(vault token create -policy=rustfs-encryption-policy -field=token)
|
||||
echo "Created token for RustFS: $RUSTFS_TOKEN"
|
||||
|
||||
# Store the token for RustFS to use
|
||||
echo "RUSTFS_KMS_VAULT_TOKEN=$RUSTFS_TOKEN" > /vault/config/rustfs-kms.env
|
||||
echo "RUSTFS_KMS_VAULT_ENDPOINT=http://rustyvault:8200" >> /vault/config/rustfs-kms.env
|
||||
echo "RUSTFS_KMS_VAULT_KEY_NAME=rustfs-encryption-key" >> /vault/config/rustfs-kms.env
|
||||
|
||||
echo "RustFS KMS configuration has been created"
|
||||
echo "============================================"
|
||||
echo "Vault is ready for use with RustFS SSE-KMS"
|
||||
echo "============================================"
|
||||
|
||||
# Keep the container running
|
||||
tail -f /dev/null
|
||||
54
.github/actions/setup/action.yml
vendored
Normal file
54
.github/actions/setup/action.yml
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
name: "setup"
|
||||
|
||||
description: "setup environment for rustfs"
|
||||
|
||||
inputs:
|
||||
rust-version:
|
||||
required: true
|
||||
default: "stable"
|
||||
description: "Rust version to use"
|
||||
cache-shared-key:
|
||||
required: true
|
||||
default: ""
|
||||
description: "Cache key for shared cache"
|
||||
cache-save-if:
|
||||
required: true
|
||||
default: true
|
||||
description: "Cache save condition"
|
||||
run-os:
|
||||
required: true
|
||||
default: "ubuntu-latest"
|
||||
description: "Running system"
|
||||
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Install system dependencies
|
||||
if: inputs.run-os == 'ubuntu-latest'
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt update
|
||||
sudo apt install -y musl-tools build-essential lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev
|
||||
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
version: "30.2"
|
||||
|
||||
- uses: Nugine/setup-flatc@v1
|
||||
with:
|
||||
version: "24.3.25"
|
||||
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ inputs.rust-version }}
|
||||
components: rustfmt, clippy
|
||||
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
cache-all-crates: true
|
||||
shared-key: ${{ inputs.cache-shared-key }}
|
||||
save-if: ${{ inputs.cache-save-if }}
|
||||
|
||||
- uses: mlugg/setup-zig@v2
|
||||
- uses: taiki-e/install-action@cargo-zigbuild
|
||||
15
.github/dependabot.yml
vendored
Normal file
15
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
# To get started with Dependabot version updates, you'll need to specify which
|
||||
# package ecosystems to update and where the package manifests are located.
|
||||
# Please see the documentation for all configuration options:
|
||||
# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
|
||||
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "cargo" # See documentation for possible values
|
||||
directory: "/" # Location of package manifests
|
||||
schedule:
|
||||
interval: "monthly"
|
||||
groups:
|
||||
dependencies:
|
||||
patterns:
|
||||
- "*"
|
||||
25
.github/workflows/audit.yml
vendored
Normal file
25
.github/workflows/audit.yml
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
name: Audit
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- '**/Cargo.toml'
|
||||
- '**/Cargo.lock'
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- '**/Cargo.toml'
|
||||
- '**/Cargo.lock'
|
||||
schedule:
|
||||
- cron: '0 0 * * 0' # at midnight of each sunday
|
||||
|
||||
jobs:
|
||||
audit:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: taiki-e/install-action@cargo-audit
|
||||
- run: cargo audit -D warnings
|
||||
416
.github/workflows/build.yml
vendored
Normal file
416
.github/workflows/build.yml
vendored
Normal file
@@ -0,0 +1,416 @@
|
||||
name: Build RustFS And GUI
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: "0 0 * * 0" # at midnight of each sunday
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
tags: [ "v*", "*" ]
|
||||
|
||||
jobs:
|
||||
build-rustfs:
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [ ubuntu-latest, macos-latest, windows-latest ]
|
||||
variant:
|
||||
- { profile: release, target: x86_64-unknown-linux-musl, glibc: "default" }
|
||||
- { profile: release, target: x86_64-unknown-linux-gnu, glibc: "default" }
|
||||
- { profile: release, target: aarch64-apple-darwin, glibc: "default" }
|
||||
#- { profile: release, target: aarch64-unknown-linux-gnu, glibc: "default" }
|
||||
- { profile: release, target: aarch64-unknown-linux-musl, glibc: "default" }
|
||||
#- { profile: release, target: x86_64-pc-windows-msvc, glibc: "default" }
|
||||
exclude:
|
||||
# Linux targets on non-Linux systems
|
||||
- os: macos-latest
|
||||
variant: { profile: release, target: x86_64-unknown-linux-gnu, glibc: "default" }
|
||||
- os: macos-latest
|
||||
variant: { profile: release, target: x86_64-unknown-linux-musl, glibc: "default" }
|
||||
- os: macos-latest
|
||||
variant: { profile: release, target: aarch64-unknown-linux-gnu, glibc: "default" }
|
||||
- os: macos-latest
|
||||
variant: { profile: release, target: aarch64-unknown-linux-musl, glibc: "default" }
|
||||
- os: windows-latest
|
||||
variant: { profile: release, target: x86_64-unknown-linux-gnu, glibc: "default" }
|
||||
- os: windows-latest
|
||||
variant: { profile: release, target: x86_64-unknown-linux-musl, glibc: "default" }
|
||||
- os: windows-latest
|
||||
variant: { profile: release, target: aarch64-unknown-linux-gnu, glibc: "default" }
|
||||
- os: windows-latest
|
||||
variant: { profile: release, target: aarch64-unknown-linux-musl, glibc: "default" }
|
||||
|
||||
# Apple targets on non-macOS systems
|
||||
- os: ubuntu-latest
|
||||
variant: { profile: release, target: aarch64-apple-darwin, glibc: "default" }
|
||||
- os: windows-latest
|
||||
variant: { profile: release, target: aarch64-apple-darwin, glibc: "default" }
|
||||
|
||||
# Windows targets on non-Windows systems
|
||||
- os: ubuntu-latest
|
||||
variant: { profile: release, target: x86_64-pc-windows-msvc, glibc: "default" }
|
||||
- os: macos-latest
|
||||
variant: { profile: release, target: x86_64-pc-windows-msvc, glibc: "default" }
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4.2.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
# Installation system dependencies
|
||||
- name: Install system dependencies (Ubuntu)
|
||||
if: runner.os == 'Linux'
|
||||
run: |
|
||||
sudo apt update
|
||||
sudo apt install -y musl-tools build-essential lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev
|
||||
shell: bash
|
||||
|
||||
#Install Rust using dtolnay/rust-toolchain
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: stable
|
||||
targets: ${{ matrix.variant.target }}
|
||||
components: rustfmt, clippy
|
||||
|
||||
# Install system dependencies
|
||||
- name: Cache Protoc
|
||||
id: cache-protoc
|
||||
uses: actions/cache@v4.2.3
|
||||
with:
|
||||
path: /Users/runner/hostedtoolcache/protoc
|
||||
key: protoc-${{ runner.os }}-30.2
|
||||
restore-keys: |
|
||||
protoc-${{ runner.os }}-
|
||||
|
||||
- name: Install Protoc
|
||||
if: steps.cache-protoc.outputs.cache-hit != 'true'
|
||||
uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
version: '30.2'
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Setup Flatc
|
||||
uses: Nugine/setup-flatc@v1
|
||||
with:
|
||||
version: "25.2.10"
|
||||
|
||||
# Cache Cargo dependencies
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
cache-all-crates: true
|
||||
shared-key: rustfs-${{ matrix.os }}-${{ matrix.variant.profile }}-${{ matrix.variant.target }}-${{ matrix.variant.glibc }}-${{ hashFiles('**/Cargo.lock') }}
|
||||
save-if: ${{ github.event_name != 'pull_request' }}
|
||||
|
||||
# Set up Zig for cross-compilation
|
||||
- uses: mlugg/setup-zig@v2
|
||||
if: matrix.variant.glibc != 'default' || contains(matrix.variant.target, 'linux')
|
||||
|
||||
- uses: taiki-e/install-action@cargo-zigbuild
|
||||
if: matrix.variant.glibc != 'default' || contains(matrix.variant.target, 'linux')
|
||||
|
||||
# Download static resources
|
||||
- name: Download and Extract Static Assets
|
||||
run: |
|
||||
url="https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip"
|
||||
|
||||
# Create a static resource directory
|
||||
mkdir -p ./rustfs/static
|
||||
|
||||
# Download static resources
|
||||
echo "::group::Downloading static assets"
|
||||
curl -L -o static_assets.zip "$url" --retry 3
|
||||
|
||||
# Unzip static resources
|
||||
echo "::group::Extracting static assets"
|
||||
if [ "${{ runner.os }}" = "Windows" ]; then
|
||||
7z x static_assets.zip -o./rustfs/static
|
||||
del static_assets.zip
|
||||
else
|
||||
unzip -o static_assets.zip -d ./rustfs/static
|
||||
rm static_assets.zip
|
||||
fi
|
||||
|
||||
echo "::group::Static assets content"
|
||||
ls -la ./rustfs/static
|
||||
shell: bash
|
||||
|
||||
# Build rustfs
|
||||
- name: Build rustfs
|
||||
id: build
|
||||
shell: bash
|
||||
run: |
|
||||
echo "::group::Setting up build parameters"
|
||||
PROFILE="${{ matrix.variant.profile }}"
|
||||
TARGET="${{ matrix.variant.target }}"
|
||||
GLIBC="${{ matrix.variant.glibc }}"
|
||||
|
||||
# Determine whether to use zigbuild
|
||||
USE_ZIGBUILD=false
|
||||
if [[ "$GLIBC" != "default" || "$TARGET" == *"linux"* ]]; then
|
||||
USE_ZIGBUILD=true
|
||||
echo "Using zigbuild for cross-compilation"
|
||||
fi
|
||||
|
||||
# Determine the target parameters
|
||||
TARGET_ARG="$TARGET"
|
||||
if [[ "$GLIBC" != "default" ]]; then
|
||||
TARGET_ARG="${TARGET}.${GLIBC}"
|
||||
echo "Using custom glibc target: $TARGET_ARG"
|
||||
fi
|
||||
|
||||
# Confirm the profile directory name
|
||||
if [[ "$PROFILE" == "dev" ]]; then
|
||||
PROFILE_DIR="debug"
|
||||
else
|
||||
PROFILE_DIR="$PROFILE"
|
||||
fi
|
||||
|
||||
# Determine the binary suffix
|
||||
BIN_SUFFIX=""
|
||||
if [[ "${{ matrix.variant.target }}" == *"windows"* ]]; then
|
||||
BIN_SUFFIX=".exe"
|
||||
fi
|
||||
|
||||
# Determine the binary name - Use the appropriate extension for Windows
|
||||
BIN_NAME="rustfs.${PROFILE}.${TARGET}"
|
||||
if [[ "$GLIBC" != "default" ]]; then
|
||||
BIN_NAME="${BIN_NAME}.glibc${GLIBC}"
|
||||
fi
|
||||
|
||||
# Windows systems use exe suffix, and other systems do not have suffix
|
||||
if [[ "${{ matrix.variant.target }}" == *"windows"* ]]; then
|
||||
BIN_NAME="${BIN_NAME}.exe"
|
||||
else
|
||||
BIN_NAME="${BIN_NAME}.bin"
|
||||
fi
|
||||
|
||||
echo "Binary name will be: $BIN_NAME"
|
||||
|
||||
echo "::group::Building rustfs"
|
||||
# Refresh build information
|
||||
touch rustfs/build.rs
|
||||
|
||||
# Identify the build command and execute it
|
||||
if [[ "$USE_ZIGBUILD" == "true" ]]; then
|
||||
echo "Build command: cargo zigbuild --profile $PROFILE --target $TARGET_ARG -p rustfs --bins"
|
||||
cargo zigbuild --profile $PROFILE --target $TARGET_ARG -p rustfs --bins
|
||||
else
|
||||
echo "Build command: cargo build --profile $PROFILE --target $TARGET_ARG -p rustfs --bins"
|
||||
cargo build --profile $PROFILE --target $TARGET_ARG -p rustfs --bins
|
||||
fi
|
||||
|
||||
# Determine the binary path and output path
|
||||
BIN_PATH="target/${TARGET_ARG}/${PROFILE_DIR}/rustfs${BIN_SUFFIX}"
|
||||
OUT_PATH="target/artifacts/${BIN_NAME}"
|
||||
|
||||
# Create a target directory
|
||||
mkdir -p target/artifacts
|
||||
|
||||
echo "Copying binary from ${BIN_PATH} to ${OUT_PATH}"
|
||||
cp "${BIN_PATH}" "${OUT_PATH}"
|
||||
|
||||
# Record the output path for use in the next steps
|
||||
echo "bin_path=${OUT_PATH}" >> $GITHUB_OUTPUT
|
||||
echo "bin_name=${BIN_NAME}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Package Binary and Static Assets
|
||||
id: package
|
||||
run: |
|
||||
# Create component file name
|
||||
ARTIFACT_NAME="rustfs-${{ matrix.variant.profile }}-${{ matrix.variant.target }}"
|
||||
if [ "${{ matrix.variant.glibc }}" != "default" ]; then
|
||||
ARTIFACT_NAME="${ARTIFACT_NAME}-glibc${{ matrix.variant.glibc }}"
|
||||
fi
|
||||
echo "artifact_name=${ARTIFACT_NAME}" >> $GITHUB_OUTPUT
|
||||
|
||||
# Get the binary path
|
||||
BIN_PATH="${{ steps.build.outputs.bin_path }}"
|
||||
|
||||
# Create a packaged directory structure - only contains bin and docs directories
|
||||
mkdir -p ${ARTIFACT_NAME}/{bin,docs}
|
||||
|
||||
# Copy binary files (note the difference between Windows and other systems)
|
||||
if [[ "${{ matrix.variant.target }}" == *"windows"* ]]; then
|
||||
cp "${BIN_PATH}" ${ARTIFACT_NAME}/bin/rustfs.exe
|
||||
else
|
||||
cp "${BIN_PATH}" ${ARTIFACT_NAME}/bin/rustfs
|
||||
fi
|
||||
|
||||
# copy documents and licenses
|
||||
if [ -f "LICENSE" ]; then
|
||||
cp LICENSE ${ARTIFACT_NAME}/docs/
|
||||
fi
|
||||
if [ -f "README.md" ]; then
|
||||
cp README.md ${ARTIFACT_NAME}/docs/
|
||||
fi
|
||||
|
||||
# Packaged as zip
|
||||
if [ "${{ runner.os }}" = "Windows" ]; then
|
||||
7z a ${ARTIFACT_NAME}.zip ${ARTIFACT_NAME}
|
||||
else
|
||||
zip -r ${ARTIFACT_NAME}.zip ${ARTIFACT_NAME}
|
||||
fi
|
||||
|
||||
echo "Created artifact: ${ARTIFACT_NAME}.zip"
|
||||
ls -la ${ARTIFACT_NAME}.zip
|
||||
shell: bash
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ steps.package.outputs.artifact_name }}
|
||||
path: ${{ steps.package.outputs.artifact_name }}.zip
|
||||
retention-days: 7
|
||||
|
||||
- name: Upload to Aliyun OSS
|
||||
if: startsWith(github.ref, 'refs/tags/') || github.ref == 'refs/heads/main'
|
||||
uses: JohnGuan/oss-upload-action@main
|
||||
with:
|
||||
key-id: ${{ secrets.ALICLOUDOSS_KEY_ID }}
|
||||
key-secret: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
|
||||
region: oss-cn-beijing
|
||||
bucket: rustfs-artifacts
|
||||
assets: |
|
||||
${{ steps.package.outputs.artifact_name }}.zip:/artifacts/rustfs/${{ steps.package.outputs.artifact_name }}.zip
|
||||
${{ steps.package.outputs.artifact_name }}.zip:/artifacts/rustfs/${{ steps.package.outputs.artifact_name }}.latest.zip
|
||||
|
||||
# Determine whether to perform GUI construction based on conditions
|
||||
- name: Prepare for GUI build
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
id: prepare_gui
|
||||
run: |
|
||||
# Create a target directory
|
||||
mkdir -p ./cli/rustfs-gui/embedded-rustfs/
|
||||
|
||||
# Copy the currently built binary to the embedded-rustfs directory
|
||||
if [[ "${{ matrix.variant.target }}" == *"windows"* ]]; then
|
||||
cp "${{ steps.build.outputs.bin_path }}" ./cli/rustfs-gui/embedded-rustfs/rustfs.exe
|
||||
else
|
||||
cp "${{ steps.build.outputs.bin_path }}" ./cli/rustfs-gui/embedded-rustfs/rustfs
|
||||
fi
|
||||
|
||||
echo "Copied binary to embedded-rustfs directory"
|
||||
ls -la ./cli/rustfs-gui/embedded-rustfs/
|
||||
shell: bash
|
||||
|
||||
#Install the dioxus-cli tool
|
||||
- uses: taiki-e/cache-cargo-install-action@v2
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
with:
|
||||
tool: dioxus-cli
|
||||
|
||||
# Build and package GUI applications
|
||||
- name: Build and Bundle rustfs-gui
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
id: build_gui
|
||||
shell: bash
|
||||
run: |
|
||||
echo "::group::Setting up build parameters for GUI"
|
||||
PROFILE="${{ matrix.variant.profile }}"
|
||||
TARGET="${{ matrix.variant.target }}"
|
||||
GLIBC="${{ matrix.variant.glibc }}"
|
||||
RELEASE_PATH="target/artifacts/$TARGET"
|
||||
|
||||
# Make sure the output directory exists
|
||||
mkdir -p ${RELEASE_PATH}
|
||||
|
||||
# Configure the target platform linker
|
||||
echo "::group::Configuring linker for $TARGET"
|
||||
case "$TARGET" in
|
||||
"x86_64-unknown-linux-gnu")
|
||||
export CC_x86_64_unknown_linux_gnu=gcc
|
||||
export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=gcc
|
||||
;;
|
||||
"x86_64-unknown-linux-musl")
|
||||
export CC_x86_64_unknown_linux_musl=musl-gcc
|
||||
export CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER=musl-gcc
|
||||
;;
|
||||
"aarch64-unknown-linux-gnu")
|
||||
export CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc
|
||||
export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc
|
||||
;;
|
||||
"aarch64-unknown-linux-musl")
|
||||
export CC_aarch64_unknown_linux_musl=aarch64-linux-musl-gcc
|
||||
export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_LINKER=aarch64-linux-musl-gcc
|
||||
;;
|
||||
"aarch64-apple-darwin")
|
||||
export CC_aarch64_apple_darwin=clang
|
||||
export CARGO_TARGET_AARCH64_APPLE_DARWIN_LINKER=clang
|
||||
;;
|
||||
"x86_64-pc-windows-msvc")
|
||||
export CC_x86_64_pc_windows_msvc=cl
|
||||
export CARGO_TARGET_X86_64_PC_WINDOWS_MSVC_LINKER=link
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "::group::Building GUI application"
|
||||
cd cli/rustfs-gui
|
||||
|
||||
# Building according to the target platform
|
||||
if [[ "$TARGET" == *"apple-darwin"* ]]; then
|
||||
echo "Building for macOS"
|
||||
dx bundle --platform macos --package-types "macos" --package-types "dmg" --release --profile ${PROFILE} --out-dir ../../${RELEASE_PATH}
|
||||
elif [[ "$TARGET" == *"windows-msvc"* ]]; then
|
||||
echo "Building for Windows"
|
||||
dx bundle --platform windows --package-types "msi" --release --profile ${PROFILE} --out-dir ../../${RELEASE_PATH}
|
||||
elif [[ "$TARGET" == *"linux"* ]]; then
|
||||
echo "Building for Linux"
|
||||
dx bundle --platform linux --package-types "deb" --package-types "rpm" --package-types "appimage" --release --profile ${PROFILE} --out-dir ../../${RELEASE_PATH}
|
||||
fi
|
||||
|
||||
cd ../..
|
||||
|
||||
# Create component name
|
||||
GUI_ARTIFACT_NAME="rustfs-gui-${PROFILE}-${TARGET}"
|
||||
|
||||
if [ "$GLIBC" != "default" ]; then
|
||||
GUI_ARTIFACT_NAME="${GUI_ARTIFACT_NAME}-glibc${GLIBC}"
|
||||
fi
|
||||
|
||||
echo "::group::Packaging GUI application"
|
||||
# Select packaging method according to the operating system
|
||||
if [ "${{ runner.os }}" = "Windows" ]; then
|
||||
7z a ${GUI_ARTIFACT_NAME}.zip ${RELEASE_PATH}/*
|
||||
else
|
||||
zip -r ${GUI_ARTIFACT_NAME}.zip ${RELEASE_PATH}/*
|
||||
fi
|
||||
|
||||
echo "gui_artifact_name=${GUI_ARTIFACT_NAME}" >> $GITHUB_OUTPUT
|
||||
echo "Created GUI artifact: ${GUI_ARTIFACT_NAME}.zip"
|
||||
ls -la ${GUI_ARTIFACT_NAME}.zip
|
||||
|
||||
# Upload GUI components
|
||||
- uses: actions/upload-artifact@v4
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
with:
|
||||
name: ${{ steps.build_gui.outputs.gui_artifact_name }}
|
||||
path: ${{ steps.build_gui.outputs.gui_artifact_name }}.zip
|
||||
retention-days: 7
|
||||
|
||||
# Upload GUI to Alibaba Cloud OSS
|
||||
- name: Upload GUI to Aliyun OSS
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
uses: JohnGuan/oss-upload-action@main
|
||||
with:
|
||||
key-id: ${{ secrets.ALICLOUDOSS_KEY_ID }}
|
||||
key-secret: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
|
||||
region: oss-cn-beijing
|
||||
bucket: rustfs-artifacts
|
||||
assets: |
|
||||
${{ steps.build_gui.outputs.gui_artifact_name }}.zip:/artifacts/rustfs/${{ steps.build_gui.outputs.gui_artifact_name }}.zip
|
||||
${{ steps.build_gui.outputs.gui_artifact_name }}.zip:/artifacts/rustfs/${{ steps.build_gui.outputs.gui_artifact_name }}.latest.zip
|
||||
|
||||
|
||||
merge:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [ build-rustfs ]
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
steps:
|
||||
- uses: actions/upload-artifact/merge@v4
|
||||
with:
|
||||
name: rustfs-packages
|
||||
pattern: "rustfs-*"
|
||||
delete-merged: true
|
||||
122
.github/workflows/ci.yml
vendored
Normal file
122
.github/workflows/ci.yml
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
schedule:
|
||||
- cron: '0 0 * * 0' # at midnight of each sunday
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
skip-check:
|
||||
permissions:
|
||||
actions: write
|
||||
contents: read
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
should_skip: ${{ steps.skip_check.outputs.should_skip }}
|
||||
steps:
|
||||
- id: skip_check
|
||||
uses: fkirc/skip-duplicate-actions@v5
|
||||
with:
|
||||
concurrent_skipping: 'same_content_newer'
|
||||
cancel_others: true
|
||||
paths_ignore: '["*.md"]'
|
||||
|
||||
# Quality checks for pull requests
|
||||
pr-checks:
|
||||
name: Pull Request Quality Checks
|
||||
if: github.event_name == 'pull_request'
|
||||
runs-on: self-hosted
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: ./.github/actions/setup
|
||||
|
||||
- name: Format Check
|
||||
run: cargo fmt --all --check
|
||||
|
||||
- name: Lint Check
|
||||
run: cargo check --all-targets
|
||||
|
||||
- name: Clippy Check
|
||||
run: cargo clippy --all-targets --all-features -- -D warnings
|
||||
|
||||
- name: Unit Tests
|
||||
run: cargo test --all --exclude e2e_test
|
||||
|
||||
develop:
|
||||
needs: skip-check
|
||||
if: needs.skip-check.outputs.should_skip != 'true'
|
||||
runs-on: self-hosted
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: ./.github/actions/setup
|
||||
|
||||
- name: Format
|
||||
run: cargo fmt --all --check
|
||||
|
||||
- name: Lint
|
||||
run: cargo check --all-targets
|
||||
|
||||
- name: Clippy
|
||||
run: cargo clippy --all-targets --all-features -- -D warnings
|
||||
|
||||
- name: Test
|
||||
run: cargo test --all --exclude e2e_test
|
||||
|
||||
- name: Build debug
|
||||
run: |
|
||||
touch rustfs/build.rs
|
||||
cargo build -p rustfs --bins
|
||||
|
||||
- name: Pack artifacts
|
||||
run: |
|
||||
mkdir -p ./target/artifacts
|
||||
cp target/debug/rustfs ./target/artifacts/rustfs-debug
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: rustfs
|
||||
path: ./target/artifacts/*
|
||||
|
||||
s3s-e2e:
|
||||
name: E2E (s3s-e2e)
|
||||
needs:
|
||||
- skip-check
|
||||
- develop
|
||||
if: needs.skip-check.outputs.should_skip != 'true'
|
||||
runs-on: self-hosted
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
cache-all-crates: true
|
||||
|
||||
- name: Install s3s-e2e
|
||||
run: |
|
||||
cargo install s3s-e2e --git https://github.com/Nugine/s3s.git
|
||||
s3s-e2e --version
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: rustfs
|
||||
path: ./target/artifacts
|
||||
|
||||
- name: Run s3s-e2e
|
||||
timeout-minutes: 10
|
||||
run: |
|
||||
./scripts/e2e-run.sh ./target/artifacts/rustfs-debug /tmp/rustfs
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: s3s-e2e.logs
|
||||
path: /tmp/rustfs.log
|
||||
68
.github/workflows/samply.yml
vendored
Normal file
68
.github/workflows/samply.yml
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
name: Profile with Samply
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
workflow_dispatch:
|
||||
jobs:
|
||||
profile:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: dtolnay/rust-toolchain@nightly
|
||||
with:
|
||||
components: llvm-tools-preview
|
||||
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
|
||||
- name: Install samply
|
||||
uses: taiki-e/cache-cargo-install-action@v2
|
||||
with:
|
||||
tool: samply
|
||||
|
||||
- name: Configure kernel for profiling
|
||||
run: echo '1' | sudo tee /proc/sys/kernel/perf_event_paranoid
|
||||
|
||||
- name: Create test volumes
|
||||
run: |
|
||||
for i in {0..4}; do
|
||||
mkdir -p ./target/volume/test$i
|
||||
done
|
||||
|
||||
- name: Set environment variables
|
||||
run: |
|
||||
echo "RUSTFS_VOLUMES=./target/volume/test{0...4}" >> $GITHUB_ENV
|
||||
echo "RUST_LOG=rustfs=info,ecstore=info,s3s=info,iam=info,rustfs-obs=info" >> $GITHUB_ENV
|
||||
|
||||
- name: Download static files
|
||||
run: |
|
||||
curl -L "https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip" -o tempfile.zip && unzip -o tempfile.zip -d ./rustfs/static && rm tempfile.zip
|
||||
|
||||
- name: Build with profiling
|
||||
run: |
|
||||
RUSTFLAGS="-C force-frame-pointers=yes" cargo +nightly build --profile profiling -p rustfs --bins
|
||||
|
||||
- name: Run samply with timeout
|
||||
id: samply_record
|
||||
run: |
|
||||
timeout 120s samply record --output samply.json ./target/profiling/rustfs ${RUSTFS_VOLUMES}
|
||||
if [ -f "samply.json" ]; then
|
||||
echo "profile_generated=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "profile_generated=false" >> $GITHUB_OUTPUT
|
||||
echo "::error::Failed to generate profile data"
|
||||
fi
|
||||
|
||||
- name: Upload profile data
|
||||
if: steps.samply_record.outputs.profile_generated == 'true'
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: samply-profile-${{ github.run_number }}
|
||||
path: samply.json
|
||||
retention-days: 7
|
||||
20
.gitignore
vendored
20
.gitignore
vendored
@@ -1 +1,21 @@
|
||||
/target
|
||||
.DS_Store
|
||||
.idea
|
||||
.vscode
|
||||
/test
|
||||
/logs
|
||||
/data
|
||||
.devcontainer
|
||||
rustfs/static/*
|
||||
!rustfs/static/.gitkeep
|
||||
vendor
|
||||
cli/rustfs-gui/embedded-rustfs/rustfs
|
||||
deploy/config/obs.toml
|
||||
*.log
|
||||
deploy/certs/*
|
||||
*jsonl
|
||||
.env
|
||||
.rustfs.sys
|
||||
.cargo
|
||||
profile.json
|
||||
.docker/openobserve-otel/data
|
||||
|
||||
91
.vscode/launch.json
vendored
Normal file
91
.vscode/launch.json
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
{
|
||||
// 使用 IntelliSense 了解相关属性。
|
||||
// 悬停以查看现有属性的描述。
|
||||
// 欲了解更多信息,请访问: https://go.microsoft.com/fwlink/?linkid=830387
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"type": "lldb",
|
||||
"request": "launch",
|
||||
"name": "Debug executable 'rustfs'",
|
||||
"cargo": {
|
||||
"args": [
|
||||
"build",
|
||||
"--bin=rustfs",
|
||||
"--package=rustfs"
|
||||
],
|
||||
"filter": {
|
||||
"name": "rustfs",
|
||||
"kind": "bin"
|
||||
}
|
||||
},
|
||||
"env": {
|
||||
"RUST_LOG": "rustfs=debug,ecstore=info,s3s=debug",
|
||||
"RUSTFS_VOLUMES": "./target/volume/test{0...3}",
|
||||
"RUSTFS_ADDRESS": "[::]:9000",
|
||||
"RUSTFS_CONSOLE_ENABLE": "true",
|
||||
"RUSTFS_CONSOLE_ADDRESS": "[::]:9002",
|
||||
"RUSTFS_SERVER_DOMAINS": "localhost:9000",
|
||||
"RUSTFS_TLS_PATH": "./deploy/certs",
|
||||
"RUSTFS_OBS_CONFIG": "./deploy/config/obs.example.toml",
|
||||
"RUSTFS__OBSERVABILITY__ENDPOINT": "http://localhost:4317",
|
||||
"RUSTFS__OBSERVABILITY__USE_STDOUT": "true",
|
||||
"RUSTFS__OBSERVABILITY__SAMPLE_RATIO": "2.0",
|
||||
"RUSTFS__OBSERVABILITY__METER_INTERVAL": "30",
|
||||
"RUSTFS__OBSERVABILITY__SERVICE_NAME": "rustfs",
|
||||
"RUSTFS__OBSERVABILITY__SERVICE_VERSION": "0.1.0",
|
||||
"RUSTFS__OBSERVABILITY__ENVIRONMENT": "develop",
|
||||
"RUSTFS__OBSERVABILITY__LOGGER_LEVEL": "info",
|
||||
"RUSTFS__SINKS__FILE__ENABLED": "true",
|
||||
"RUSTFS__SINKS__FILE__PATH": "./deploy/logs/rustfs.log",
|
||||
"RUSTFS__SINKS__WEBHOOK__ENABLED": "false",
|
||||
"RUSTFS__SINKS__WEBHOOK__ENDPOINT": "",
|
||||
"RUSTFS__SINKS__WEBHOOK__AUTH_TOKEN": "",
|
||||
"RUSTFS__SINKS__KAFKA__ENABLED": "false",
|
||||
"RUSTFS__SINKS__KAFKA__BOOTSTRAP_SERVERS": "",
|
||||
"RUSTFS__SINKS__KAFKA__TOPIC": "",
|
||||
"RUSTFS__LOGGER__QUEUE_CAPACITY": "10"
|
||||
|
||||
},
|
||||
"cwd": "${workspaceFolder}"
|
||||
},
|
||||
{
|
||||
"type": "lldb",
|
||||
"request": "launch",
|
||||
"name": "Debug unit tests in executable 'rustfs'",
|
||||
"cargo": {
|
||||
"args": [
|
||||
"test",
|
||||
"--no-run",
|
||||
"--bin=rustfs",
|
||||
"--package=rustfs"
|
||||
],
|
||||
"filter": {
|
||||
"name": "rustfs",
|
||||
"kind": "bin"
|
||||
}
|
||||
},
|
||||
"args": [],
|
||||
"cwd": "${workspaceFolder}"
|
||||
},
|
||||
{
|
||||
"type": "lldb",
|
||||
"request": "launch",
|
||||
"name": "Debug unit tests in library 'ecstore'",
|
||||
"cargo": {
|
||||
"args": [
|
||||
"test",
|
||||
"--no-run",
|
||||
"--lib",
|
||||
"--package=ecstore"
|
||||
],
|
||||
"filter": {
|
||||
"name": "ecstore",
|
||||
"kind": "lib"
|
||||
}
|
||||
},
|
||||
"args": [],
|
||||
"cwd": "${workspaceFolder}"
|
||||
}
|
||||
]
|
||||
}
|
||||
11319
Cargo.lock
generated
11319
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
238
Cargo.toml
238
Cargo.toml
@@ -1,21 +1,235 @@
|
||||
[workspace]
|
||||
members = [
|
||||
"appauth", # Application authentication and authorization
|
||||
"cli/rustfs-gui", # Graphical user interface client
|
||||
"common/common", # Shared utilities and data structures
|
||||
"common/lock", # Distributed locking implementation
|
||||
"common/protos", # Protocol buffer definitions
|
||||
"common/workers", # Worker thread pools and task scheduling
|
||||
"crates/config", # Configuration management
|
||||
"crates/event-notifier", # Event notification system
|
||||
"crates/obs", # Observability utilities
|
||||
"crates/utils", # Utility functions and helpers
|
||||
"crypto", # Cryptography and security features
|
||||
"ecstore", # Erasure coding storage implementation
|
||||
"e2e_test", # End-to-end test suite
|
||||
"iam", # Identity and Access Management
|
||||
"madmin", # Management dashboard and admin API interface
|
||||
"rustfs", # Core file system implementation
|
||||
"s3select/api", # S3 Select API interface
|
||||
"s3select/query", # S3 Select query engine
|
||||
"crates/zip",
|
||||
]
|
||||
resolver = "2"
|
||||
members = ["rustfs", "store"]
|
||||
|
||||
[workspace.package]
|
||||
edition = "2021"
|
||||
license = "MIT OR Apache-2.0"
|
||||
license = "Apache-2.0"
|
||||
repository = "https://github.com/rustfs/rustfs"
|
||||
rust-version = "1.75"
|
||||
version = "0.0.1"
|
||||
|
||||
[workspace.lints.rust]
|
||||
unsafe_code = "deny"
|
||||
|
||||
[workspace.lints.clippy]
|
||||
all = "warn"
|
||||
|
||||
[workspace.dependencies]
|
||||
serde = { version = "1.0.203", features = ["derive"] }
|
||||
serde_json = "1.0.117"
|
||||
tracing = "0.1.40"
|
||||
futures = "0.3.30"
|
||||
bytes = "1.6.0"
|
||||
http = "1.1.0"
|
||||
thiserror = "1.0.61"
|
||||
time = "0.3.36"
|
||||
async-trait = "0.1.80"
|
||||
tokio = { version = "1.38.0", features = ["macros", "rt", "rt-multi-thread"] }
|
||||
api = { path = "./s3select/api", version = "0.0.1" }
|
||||
appauth = { path = "./appauth", version = "0.0.1" }
|
||||
common = { path = "./common/common", version = "0.0.1" }
|
||||
crypto = { path = "./crypto", version = "0.0.1" }
|
||||
ecstore = { path = "./ecstore", version = "0.0.1" }
|
||||
iam = { path = "./iam", version = "0.0.1" }
|
||||
lock = { path = "./common/lock", version = "0.0.1" }
|
||||
madmin = { path = "./madmin", version = "0.0.1" }
|
||||
policy = { path = "./policy", version = "0.0.1" }
|
||||
protos = { path = "./common/protos", version = "0.0.1" }
|
||||
query = { path = "./s3select/query", version = "0.0.1" }
|
||||
rustfs = { path = "./rustfs", version = "0.0.1" }
|
||||
rustfs-zip = { path = "./crates/zip", version = "0.0.1" }
|
||||
rustfs-config = { path = "./crates/config", version = "0.0.1" }
|
||||
rustfs-obs = { path = "crates/obs", version = "0.0.1" }
|
||||
rustfs-event-notifier = { path = "crates/event-notifier", version = "0.0.1" }
|
||||
rustfs-utils = { path = "crates/utils", version = "0.0.1" }
|
||||
workers = { path = "./common/workers", version = "0.0.1" }
|
||||
tokio-tar = "0.3.1"
|
||||
atoi = "2.0.0"
|
||||
async-recursion = "1.1.1"
|
||||
async-trait = "0.1.88"
|
||||
atomic_enum = "0.3.0"
|
||||
aws-sdk-s3 = "1.29.0"
|
||||
axum = "0.8.4"
|
||||
axum-extra = "0.10.1"
|
||||
axum-server = { version = "0.7.2", features = ["tls-rustls"] }
|
||||
backon = "1.5.1"
|
||||
blake2 = "0.10.6"
|
||||
bytes = "1.10.1"
|
||||
bytesize = "2.0.1"
|
||||
byteorder = "1.5.0"
|
||||
chrono = { version = "0.4.41", features = ["serde"] }
|
||||
clap = { version = "4.5.39", features = ["derive", "env"] }
|
||||
config = "0.15.11"
|
||||
const-str = { version = "0.6.2", features = ["std", "proc"] }
|
||||
datafusion = "46.0.1"
|
||||
derive_builder = "0.20.2"
|
||||
dioxus = { version = "0.6.3", features = ["router"] }
|
||||
dirs = "6.0.0"
|
||||
flatbuffers = "25.2.10"
|
||||
flexi_logger = { version = "0.30.2", features = ["trc"] }
|
||||
futures = "0.3.31"
|
||||
futures-core = "0.3.31"
|
||||
futures-util = "0.3.31"
|
||||
glob = "0.3.2"
|
||||
hex = "0.4.3"
|
||||
highway = { version = "1.3.0" }
|
||||
hyper = "1.6.0"
|
||||
hyper-util = { version = "0.1.14", features = [
|
||||
"tokio",
|
||||
"server-auto",
|
||||
"server-graceful",
|
||||
] }
|
||||
http = "1.3.1"
|
||||
http-body = "1.0.1"
|
||||
humantime = "2.2.0"
|
||||
include_dir = "0.7.4"
|
||||
jsonwebtoken = "9.3.1"
|
||||
keyring = { version = "3.6.2", features = [
|
||||
"apple-native",
|
||||
"windows-native",
|
||||
"sync-secret-service",
|
||||
] }
|
||||
lazy_static = "1.5.0"
|
||||
libsystemd = { version = "0.7.2" }
|
||||
local-ip-address = "0.6.5"
|
||||
matchit = "0.8.4"
|
||||
md-5 = "0.10.6"
|
||||
mime = "0.3.17"
|
||||
mime_guess = "2.0.5"
|
||||
netif = "0.1.6"
|
||||
nix = { version = "0.30.1", features = ["fs"] }
|
||||
nu-ansi-term = "0.50.1"
|
||||
num_cpus = { version = "1.17.0" }
|
||||
nvml-wrapper = "0.11.0"
|
||||
object_store = "0.11.2"
|
||||
once_cell = "1.21.3"
|
||||
opentelemetry = { version = "0.30.0" }
|
||||
opentelemetry-appender-tracing = { version = "0.30.1", features = [
|
||||
"experimental_use_tracing_span_context",
|
||||
"experimental_metadata_attributes",
|
||||
"spec_unstable_logs_enabled"
|
||||
] }
|
||||
opentelemetry_sdk = { version = "0.30.0" }
|
||||
opentelemetry-stdout = { version = "0.30.0" }
|
||||
opentelemetry-otlp = { version = "0.30.0", default-features = false, features = [
|
||||
"grpc-tonic", "gzip-tonic", "trace", "metrics", "logs", "internal-logs"
|
||||
] }
|
||||
opentelemetry-semantic-conventions = { version = "0.30.0", features = [
|
||||
"semconv_experimental",
|
||||
] }
|
||||
parking_lot = "0.12.4"
|
||||
percent-encoding = "2.3.1"
|
||||
pin-project-lite = "0.2.16"
|
||||
# pin-utils = "0.1.0"
|
||||
prost = "0.13.5"
|
||||
prost-build = "0.13.5"
|
||||
protobuf = "3.7"
|
||||
rand = "0.8.5"
|
||||
rdkafka = { version = "0.37.0", features = ["tokio"] }
|
||||
reed-solomon-erasure = { version = "6.0.0", features = ["simd-accel"] }
|
||||
regex = { version = "1.11.1" }
|
||||
reqwest = { version = "0.12.19", default-features = false, features = [
|
||||
"rustls-tls",
|
||||
"charset",
|
||||
"http2",
|
||||
"system-proxy",
|
||||
"stream",
|
||||
"json",
|
||||
"blocking",
|
||||
] }
|
||||
rfd = { version = "0.15.3", default-features = false, features = [
|
||||
"xdg-portal",
|
||||
"tokio",
|
||||
] }
|
||||
rmp = "0.8.14"
|
||||
rmp-serde = "1.3.0"
|
||||
rumqttc = { version = "0.24" }
|
||||
rust-embed = { version = "8.7.2" }
|
||||
rustfs-rsc = "2025.506.1"
|
||||
rustls = { version = "0.23.27" }
|
||||
rustls-pki-types = "1.12.0"
|
||||
rustls-pemfile = "2.2.0"
|
||||
s3s = { git = "https://github.com/Nugine/s3s.git", rev = "4733cdfb27b2713e832967232cbff413bb768c10" }
|
||||
s3s-policy = { git = "https://github.com/Nugine/s3s.git", rev = "4733cdfb27b2713e832967232cbff413bb768c10" }
|
||||
shadow-rs = { version = "1.1.1", default-features = false }
|
||||
serde = { version = "1.0.219", features = ["derive"] }
|
||||
serde_json = "1.0.140"
|
||||
serde_urlencoded = "0.7.1"
|
||||
serde_with = "3.12.0"
|
||||
sha2 = "0.10.9"
|
||||
smallvec = { version = "1.15.0", features = ["serde"] }
|
||||
snafu = "0.8.6"
|
||||
socket2 = "0.5.10"
|
||||
strum = { version = "0.27.1", features = ["derive"] }
|
||||
sysinfo = "0.35.2"
|
||||
tempfile = "3.20.0"
|
||||
test-case = "3.3.1"
|
||||
thiserror = "2.0.12"
|
||||
time = { version = "0.3.41", features = [
|
||||
"std",
|
||||
"parsing",
|
||||
"formatting",
|
||||
"macros",
|
||||
"serde",
|
||||
] }
|
||||
tokio = { version = "1.45.1", features = ["fs", "rt-multi-thread"] }
|
||||
tonic = { version = "0.13.1", features = ["gzip"] }
|
||||
tonic-build = { version = "0.13.1" }
|
||||
tokio-rustls = { version = "0.26.2", default-features = false }
|
||||
tokio-stream = { version = "0.1.17" }
|
||||
tokio-util = { version = "0.7.15", features = ["io", "compat"] }
|
||||
tower = { version = "0.5.2", features = ["timeout"] }
|
||||
tower-http = { version = "0.6.6", features = ["cors"] }
|
||||
tracing = "0.1.41"
|
||||
tracing-core = "0.1.34"
|
||||
tracing-error = "0.2.1"
|
||||
tracing-subscriber = { version = "0.3.19", features = ["env-filter", "time"] }
|
||||
tracing-appender = "0.2.3"
|
||||
tracing-opentelemetry = "0.31.0"
|
||||
transform-stream = "0.3.1"
|
||||
url = "2.5.4"
|
||||
urlencoding = "2.1.3"
|
||||
uuid = { version = "1.17.0", features = [
|
||||
"v4",
|
||||
"fast-rng",
|
||||
"macro-diagnostics",
|
||||
] }
|
||||
winapi = { version = "0.3.9" }
|
||||
|
||||
|
||||
[profile.wasm-dev]
|
||||
inherits = "dev"
|
||||
opt-level = 1
|
||||
|
||||
[profile.server-dev]
|
||||
inherits = "dev"
|
||||
|
||||
[profile.android-dev]
|
||||
inherits = "dev"
|
||||
|
||||
[profile.release]
|
||||
opt-level = 3
|
||||
lto = "thin"
|
||||
codegen-units = 1
|
||||
panic = "abort" # Optional, remove the panic expansion code
|
||||
strip = true # strip symbol information to reduce binary size
|
||||
|
||||
[profile.production]
|
||||
inherits = "release"
|
||||
lto = "fat"
|
||||
codegen-units = 1
|
||||
|
||||
[profile.profiling]
|
||||
inherits = "release"
|
||||
debug = true
|
||||
|
||||
184
DEVELOPMENT.md
Normal file
184
DEVELOPMENT.md
Normal file
@@ -0,0 +1,184 @@
|
||||
# RustFS Development Guide
|
||||
|
||||
## 📋 Code Quality Requirements
|
||||
|
||||
### 🔧 Code Formatting Rules
|
||||
|
||||
**MANDATORY**: All code must be properly formatted before committing. This project enforces strict formatting standards to maintain code consistency and readability.
|
||||
|
||||
#### Pre-commit Requirements
|
||||
|
||||
Before every commit, you **MUST**:
|
||||
|
||||
1. **Format your code**:
|
||||
```bash
|
||||
cargo fmt --all
|
||||
```
|
||||
|
||||
2. **Verify formatting**:
|
||||
```bash
|
||||
cargo fmt --all --check
|
||||
```
|
||||
|
||||
3. **Pass clippy checks**:
|
||||
```bash
|
||||
cargo clippy --all-targets --all-features -- -D warnings
|
||||
```
|
||||
|
||||
4. **Ensure compilation**:
|
||||
```bash
|
||||
cargo check --all-targets
|
||||
```
|
||||
|
||||
#### Quick Commands
|
||||
|
||||
We provide convenient Makefile targets for common tasks:
|
||||
|
||||
```bash
|
||||
# Format all code
|
||||
make fmt
|
||||
|
||||
# Check if code is properly formatted
|
||||
make fmt-check
|
||||
|
||||
# Run clippy checks
|
||||
make clippy
|
||||
|
||||
# Run compilation check
|
||||
make check
|
||||
|
||||
# Run tests
|
||||
make test
|
||||
|
||||
# Run all pre-commit checks (format + clippy + check + test)
|
||||
make pre-commit
|
||||
|
||||
# Setup git hooks (one-time setup)
|
||||
make setup-hooks
|
||||
```
|
||||
|
||||
### 🔒 Automated Pre-commit Hooks
|
||||
|
||||
This project includes a pre-commit hook that automatically runs before each commit to ensure:
|
||||
|
||||
- ✅ Code is properly formatted (`cargo fmt --all --check`)
|
||||
- ✅ No clippy warnings (`cargo clippy --all-targets --all-features -- -D warnings`)
|
||||
- ✅ Code compiles successfully (`cargo check --all-targets`)
|
||||
|
||||
#### Setting Up Pre-commit Hooks
|
||||
|
||||
Run this command once after cloning the repository:
|
||||
|
||||
```bash
|
||||
make setup-hooks
|
||||
```
|
||||
|
||||
Or manually:
|
||||
|
||||
```bash
|
||||
chmod +x .git/hooks/pre-commit
|
||||
```
|
||||
|
||||
### 📝 Formatting Configuration
|
||||
|
||||
The project uses the following rustfmt configuration (defined in `rustfmt.toml`):
|
||||
|
||||
```toml
|
||||
max_width = 130
|
||||
fn_call_width = 90
|
||||
single_line_let_else_max_width = 100
|
||||
```
|
||||
|
||||
### 🚫 Commit Prevention
|
||||
|
||||
If your code doesn't meet the formatting requirements, the pre-commit hook will:
|
||||
|
||||
1. **Block the commit** and show clear error messages
|
||||
2. **Provide exact commands** to fix the issues
|
||||
3. **Guide you through** the resolution process
|
||||
|
||||
Example output when formatting fails:
|
||||
|
||||
```
|
||||
❌ Code formatting check failed!
|
||||
💡 Please run 'cargo fmt --all' to format your code before committing.
|
||||
|
||||
🔧 Quick fix:
|
||||
cargo fmt --all
|
||||
git add .
|
||||
git commit
|
||||
```
|
||||
|
||||
### 🔄 Development Workflow
|
||||
|
||||
1. **Make your changes**
|
||||
2. **Format your code**: `make fmt` or `cargo fmt --all`
|
||||
3. **Run pre-commit checks**: `make pre-commit`
|
||||
4. **Commit your changes**: `git commit -m "your message"`
|
||||
5. **Push to your branch**: `git push`
|
||||
|
||||
### 🛠️ IDE Integration
|
||||
|
||||
#### VS Code
|
||||
|
||||
Install the `rust-analyzer` extension and add to your `settings.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"rust-analyzer.rustfmt.extraArgs": ["--config-path", "./rustfmt.toml"],
|
||||
"editor.formatOnSave": true,
|
||||
"[rust]": {
|
||||
"editor.defaultFormatter": "rust-lang.rust-analyzer"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Other IDEs
|
||||
|
||||
Configure your IDE to:
|
||||
- Use the project's `rustfmt.toml` configuration
|
||||
- Format on save
|
||||
- Run clippy checks
|
||||
|
||||
### ❗ Important Notes
|
||||
|
||||
- **Never bypass formatting checks** - they are there for a reason
|
||||
- **All CI/CD pipelines** will also enforce these same checks
|
||||
- **Pull requests** will be automatically rejected if formatting checks fail
|
||||
- **Consistent formatting** improves code readability and reduces merge conflicts
|
||||
|
||||
### 🆘 Troubleshooting
|
||||
|
||||
#### Pre-commit hook not running?
|
||||
|
||||
```bash
|
||||
# Check if hook is executable
|
||||
ls -la .git/hooks/pre-commit
|
||||
|
||||
# Make it executable if needed
|
||||
chmod +x .git/hooks/pre-commit
|
||||
```
|
||||
|
||||
#### Formatting issues?
|
||||
|
||||
```bash
|
||||
# Format all code
|
||||
cargo fmt --all
|
||||
|
||||
# Check specific issues
|
||||
cargo fmt --all --check --verbose
|
||||
```
|
||||
|
||||
#### Clippy issues?
|
||||
|
||||
```bash
|
||||
# See detailed clippy output
|
||||
cargo clippy --all-targets --all-features -- -D warnings
|
||||
|
||||
# Fix automatically fixable issues
|
||||
cargo clippy --fix --all-targets --all-features
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
Following these guidelines ensures high code quality and smooth collaboration across the RustFS project! 🚀
|
||||
17
Dockerfile
Normal file
17
Dockerfile
Normal file
@@ -0,0 +1,17 @@
|
||||
FROM alpine:latest
|
||||
|
||||
# RUN apk add --no-cache <package-name>
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
RUN mkdir -p /data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
|
||||
|
||||
COPY ./target/x86_64-unknown-linux-musl/release/rustfs /app/rustfs
|
||||
|
||||
RUN chmod +x /app/rustfs
|
||||
|
||||
EXPOSE 9000
|
||||
EXPOSE 9001
|
||||
|
||||
|
||||
CMD ["/app/rustfs"]
|
||||
21
Dockerfile.obs
Normal file
21
Dockerfile.obs
Normal file
@@ -0,0 +1,21 @@
|
||||
FROM ubuntu:latest
|
||||
|
||||
# RUN apk add --no-cache <package-name>
|
||||
# 如果 rustfs 有依赖,可以在这里添加,例如:
|
||||
# RUN apk add --no-cache openssl
|
||||
# RUN apk add --no-cache bash # 安装 Bash
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# 创建与 RUSTFS_VOLUMES 一致的目录
|
||||
RUN mkdir -p /root/data/target/volume/test1 /root/data/target/volume/test2 /root/data/target/volume/test3 /root/data/target/volume/test4
|
||||
|
||||
# COPY ./target/x86_64-unknown-linux-musl/release/rustfs /app/rustfs
|
||||
COPY ./target/x86_64-unknown-linux-gnu/release/rustfs /app/rustfs
|
||||
|
||||
RUN chmod +x /app/rustfs
|
||||
|
||||
EXPOSE 9000
|
||||
EXPOSE 9002
|
||||
|
||||
CMD ["/app/rustfs"]
|
||||
81
Makefile
Normal file
81
Makefile
Normal file
@@ -0,0 +1,81 @@
|
||||
###########
|
||||
# 远程开发,需要 VSCode 安装 Dev Containers, Remote SSH, Remote Explorer
|
||||
# https://code.visualstudio.com/docs/remote/containers
|
||||
###########
|
||||
DOCKER_CLI ?= docker
|
||||
IMAGE_NAME ?= rustfs:v1.0.0
|
||||
CONTAINER_NAME ?= rustfs-dev
|
||||
DOCKERFILE_PATH = $(shell pwd)/.docker
|
||||
|
||||
# Code quality and formatting targets
|
||||
.PHONY: fmt
|
||||
fmt:
|
||||
@echo "🔧 Formatting code..."
|
||||
cargo fmt --all
|
||||
|
||||
.PHONY: fmt-check
|
||||
fmt-check:
|
||||
@echo "📝 Checking code formatting..."
|
||||
cargo fmt --all --check
|
||||
|
||||
.PHONY: clippy
|
||||
clippy:
|
||||
@echo "🔍 Running clippy checks..."
|
||||
cargo clippy --all-targets --all-features -- -D warnings
|
||||
|
||||
.PHONY: check
|
||||
check:
|
||||
@echo "🔨 Running compilation check..."
|
||||
cargo check --all-targets
|
||||
|
||||
.PHONY: test
|
||||
test:
|
||||
@echo "🧪 Running tests..."
|
||||
cargo test --all --exclude e2e_test
|
||||
|
||||
.PHONY: pre-commit
|
||||
pre-commit: fmt clippy check test
|
||||
@echo "✅ All pre-commit checks passed!"
|
||||
|
||||
.PHONY: setup-hooks
|
||||
setup-hooks:
|
||||
@echo "🔧 Setting up git hooks..."
|
||||
chmod +x .git/hooks/pre-commit
|
||||
@echo "✅ Git hooks setup complete!"
|
||||
|
||||
.PHONY: init-devenv
|
||||
init-devenv:
|
||||
$(DOCKER_CLI) build -t $(IMAGE_NAME) -f $(DOCKERFILE_PATH)/Dockerfile.devenv .
|
||||
$(DOCKER_CLI) stop $(CONTAINER_NAME)
|
||||
$(DOCKER_CLI) rm $(CONTAINER_NAME)
|
||||
$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) -p 9010:9010 -p 9000:9000 -v $(shell pwd):/root/s3-rustfs -it $(IMAGE_NAME)
|
||||
|
||||
.PHONY: start
|
||||
start:
|
||||
$(DOCKER_CLI) start $(CONTAINER_NAME)
|
||||
|
||||
.PHONY: stop
|
||||
stop:
|
||||
$(DOCKER_CLI) stop $(CONTAINER_NAME)
|
||||
|
||||
.PHONY: e2e-server
|
||||
e2e-server:
|
||||
sh $(shell pwd)/scripts/run.sh
|
||||
|
||||
.PHONY: probe-e2e
|
||||
probe-e2e:
|
||||
sh $(shell pwd)/scripts/probe.sh
|
||||
|
||||
# make BUILD_OS=ubuntu22.04 build
|
||||
# in target/ubuntu22.04/release/rustfs
|
||||
|
||||
# make BUILD_OS=rockylinux9.3 build
|
||||
# in target/rockylinux9.3/release/rustfs
|
||||
BUILD_OS ?= rockylinux9.3
|
||||
.PHONY: build
|
||||
build: ROCKYLINUX_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1
|
||||
build: ROCKYLINUX_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
|
||||
build: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
|
||||
build:
|
||||
$(DOCKER_CLI) build -t $(ROCKYLINUX_BUILD_IMAGE_NAME) -f $(DOCKERFILE_PATH)/Dockerfile.$(BUILD_OS) .
|
||||
$(DOCKER_CLI) run --rm --name $(ROCKYLINUX_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(ROCKYLINUX_BUILD_IMAGE_NAME) $(BUILD_CMD)
|
||||
95
README.md
95
README.md
@@ -1 +1,94 @@
|
||||
# s3-rustfs
|
||||
# RustFS
|
||||
|
||||
## English Documentation |[中文文档](README_ZH.md)
|
||||
|
||||
### Prerequisites
|
||||
|
||||
| Package | Version | Download Link |
|
||||
|---------|---------|----------------------------------------------------------------------------------------------------------------------------------|
|
||||
| Rust | 1.8.5+ | [rust-lang.org/tools/install](https://www.rust-lang.org/tools/install) |
|
||||
| protoc | 30.2+ | [protoc-30.2-linux-x86_64.zip](https://github.com/protocolbuffers/protobuf/releases/download/v30.2/protoc-30.2-linux-x86_64.zip) |
|
||||
| flatc | 24.0+ | [Linux.flatc.binary.g++-13.zip](https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip) |
|
||||
|
||||
### Building RustFS
|
||||
|
||||
#### Generate Protobuf Code
|
||||
|
||||
```bash
|
||||
cargo run --bin gproto
|
||||
```
|
||||
|
||||
#### Using Docker for Prerequisites
|
||||
|
||||
```yaml
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
version: "30.2"
|
||||
|
||||
- uses: Nugine/setup-flatc@v1
|
||||
with:
|
||||
version: "25.2.10"
|
||||
```
|
||||
|
||||
#### Adding Console Web UI
|
||||
|
||||
1. Download the latest console UI:
|
||||
```bash
|
||||
wget https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip
|
||||
```
|
||||
2. Create the static directory:
|
||||
```bash
|
||||
mkdir -p ./rustfs/static
|
||||
```
|
||||
3. Extract and compile RustFS:
|
||||
```bash
|
||||
unzip rustfs-console-latest.zip -d ./rustfs/static
|
||||
cargo build
|
||||
```
|
||||
|
||||
### Running RustFS
|
||||
|
||||
#### Configuration
|
||||
|
||||
Set the required environment variables:
|
||||
|
||||
```bash
|
||||
# Basic config
|
||||
export RUSTFS_VOLUMES="./target/volume/test"
|
||||
export RUSTFS_ADDRESS="0.0.0.0:9000"
|
||||
export RUSTFS_CONSOLE_ENABLE=true
|
||||
export RUSTFS_CONSOLE_ADDRESS="0.0.0.0:9001"
|
||||
|
||||
# Observability config
|
||||
export RUSTFS_OBS_ENDPOINT="http://localhost:4317"
|
||||
|
||||
# Event message configuration
|
||||
#export RUSTFS_EVENT_CONFIG="./deploy/config/event.toml"
|
||||
|
||||
```
|
||||
|
||||
#### Start the service
|
||||
|
||||
```bash
|
||||
./rustfs /data/rustfs
|
||||
```
|
||||
|
||||
### Observability Stack
|
||||
|
||||
#### Deployment
|
||||
|
||||
1. Navigate to the observability directory:
|
||||
```bash
|
||||
cd .docker/observability
|
||||
```
|
||||
|
||||
2. Start the observability stack:
|
||||
```bash
|
||||
docker compose -f docker-compose.yml up -d
|
||||
```
|
||||
|
||||
#### Access Monitoring Dashboards
|
||||
|
||||
- Grafana: `http://localhost:3000` (credentials: `admin`/`admin`)
|
||||
- Jaeger: `http://localhost:16686`
|
||||
- Prometheus: `http://localhost:9090`
|
||||
|
||||
99
README_ZH.md
Normal file
99
README_ZH.md
Normal file
@@ -0,0 +1,99 @@
|
||||
# RustFS
|
||||
|
||||
## [English Documentation](README.md) |中文文档
|
||||
|
||||
### 前置要求
|
||||
|
||||
| 软件包 | 版本 | 下载链接 |
|
||||
|--------|--------|----------------------------------------------------------------------------------------------------------------------------------|
|
||||
| Rust | 1.8.5+ | [rust-lang.org/tools/install](https://www.rust-lang.org/tools/install) |
|
||||
| protoc | 30.2+ | [protoc-30.2-linux-x86_64.zip](https://github.com/protocolbuffers/protobuf/releases/download/v30.2/protoc-30.2-linux-x86_64.zip) |
|
||||
| flatc | 24.0+ | [Linux.flatc.binary.g++-13.zip](https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip) |
|
||||
|
||||
### 构建 RustFS
|
||||
|
||||
#### 生成 Protobuf 代码
|
||||
|
||||
```bash
|
||||
cargo run --bin gproto
|
||||
```
|
||||
|
||||
#### 使用 Docker 安装依赖
|
||||
|
||||
```yaml
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
version: "30.2"
|
||||
|
||||
- uses: Nugine/setup-flatc@v1
|
||||
with:
|
||||
version: "25.2.10"
|
||||
```
|
||||
|
||||
#### 添加控制台 Web UI
|
||||
|
||||
1. 下载最新的控制台 UI:
|
||||
```bash
|
||||
wget https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip
|
||||
```
|
||||
2. 创建静态资源目录:
|
||||
```bash
|
||||
mkdir -p ./rustfs/static
|
||||
```
|
||||
3. 解压并编译 RustFS:
|
||||
```bash
|
||||
unzip rustfs-console-latest.zip -d ./rustfs/static
|
||||
cargo build
|
||||
```
|
||||
|
||||
### 运行 RustFS
|
||||
|
||||
#### 配置
|
||||
|
||||
设置必要的环境变量:
|
||||
|
||||
```bash
|
||||
# 基础配置
|
||||
export RUSTFS_VOLUMES="./target/volume/test"
|
||||
export RUSTFS_ADDRESS="0.0.0.0:9000"
|
||||
export RUSTFS_CONSOLE_ENABLE=true
|
||||
export RUSTFS_CONSOLE_ADDRESS="0.0.0.0:9001"
|
||||
|
||||
# 可观测性配置
|
||||
export RUSTFS_OBS_ENDPOINT="http://localhost:4317"
|
||||
|
||||
# 事件消息配置
|
||||
#export RUSTFS_EVENT_CONFIG="./deploy/config/event.toml"
|
||||
```
|
||||
|
||||
#### 启动服务
|
||||
|
||||
```bash
|
||||
./rustfs /data/rustfs
|
||||
```
|
||||
|
||||
### 可观测性系统
|
||||
|
||||
#### 部署
|
||||
|
||||
1. 进入可观测性目录:
|
||||
```bash
|
||||
cd .docker/observability
|
||||
```
|
||||
|
||||
2. 启动可观测性系统:
|
||||
```bash
|
||||
docker compose -f docker-compose.yml up -d
|
||||
```
|
||||
|
||||
#### 访问监控面板
|
||||
|
||||
- Grafana: `http://localhost:3000` (默认账号/密码:`admin`/`admin`)
|
||||
- Jaeger: `http://localhost:16686`
|
||||
- Prometheus: `http://localhost:9090`
|
||||
|
||||
#### 配置可观测性
|
||||
|
||||
```
|
||||
OpenTelemetry Collector 地址(endpoint): http://localhost:4317
|
||||
```
|
||||
154
SSE_KMS_IMPROVEMENTS.md
Normal file
154
SSE_KMS_IMPROVEMENTS.md
Normal file
@@ -0,0 +1,154 @@
|
||||
# RustFS SSE-KMS 改进实现总结
|
||||
|
||||
本次改进针对 RustFS 的 SSE-KMS 系统进行了四个主要增强,使其更符合 MinIO 标准并支持动态配置管理。
|
||||
|
||||
## 实现的改进
|
||||
|
||||
### 1. 创建 KMS 配置子系统 ✅
|
||||
|
||||
**实现位置**: `crates/config/src/lib.rs`
|
||||
|
||||
- 创建了统一的配置管理器 `ConfigManager`
|
||||
- 支持动态 KMS 配置的读取、设置和持久化
|
||||
- 提供线程安全的全局配置访问
|
||||
- 支持配置验证和错误处理
|
||||
|
||||
**主要功能**:
|
||||
|
||||
```rust
|
||||
// 全局配置管理器
|
||||
ConfigManager::global().get_kms_config("vault").await
|
||||
ConfigManager::global().set_kms_config("vault", config).await
|
||||
ConfigManager::global().validate_all_configs().await
|
||||
```
|
||||
|
||||
### 2. KMS 配置查找和验证 ✅
|
||||
|
||||
**实现位置**: `ecstore/src/config/kms.rs`
|
||||
|
||||
- 实现了完整的 KMS 配置结构 `Config`
|
||||
- 支持环境变量和配置文件双重配置源
|
||||
- 提供配置验证和连接测试功能
|
||||
- 兼容 MinIO 的配置参数命名
|
||||
|
||||
**主要特性**:
|
||||
|
||||
- 支持 Vault 端点、密钥名称、认证 token 等配置
|
||||
- 自动验证配置完整性和有效性
|
||||
- 支持 TLS 配置和证书验证
|
||||
- 提供连接测试功能
|
||||
|
||||
### 3. S3 标准元数据格式支持 ✅
|
||||
|
||||
**实现位置**: `crypto/src/sse_kms.rs`
|
||||
|
||||
- 实现了 MinIO 兼容的元数据格式
|
||||
- 支持标准 S3 SSE-KMS HTTP 头部
|
||||
- 提供元数据与 HTTP 头部的双向转换
|
||||
- 支持分片加密的元数据管理
|
||||
|
||||
**标准头部支持**:
|
||||
|
||||
```
|
||||
x-amz-server-side-encryption: aws:kms
|
||||
x-amz-server-side-encryption-aws-kms-key-id: key-id
|
||||
x-amz-server-side-encryption-context: context
|
||||
x-amz-meta-sse-kms-encrypted-key: encrypted-data-key
|
||||
x-amz-meta-sse-kms-iv: initialization-vector
|
||||
```
|
||||
|
||||
### 4. 管理 API 支持动态配置 ✅
|
||||
|
||||
**实现位置**: `rustfs/src/admin/handlers.rs` 和 `rustfs/src/admin/mod.rs`
|
||||
|
||||
- 添加了 KMS 配置管理的 REST API 端点
|
||||
- 支持 MinIO 兼容的配置管理路径
|
||||
- 提供获取和设置配置的 HTTP 接口
|
||||
- 支持实时配置更新和验证
|
||||
|
||||
**API 端点**:
|
||||
|
||||
```
|
||||
GET /minio/admin/v3/config # 获取所有配置
|
||||
POST /minio/admin/v3/config/kms_vault/{target} # 设置KMS配置
|
||||
GET /rustfs/admin/v3/config # RustFS原生配置API
|
||||
POST /rustfs/admin/v3/config/kms_vault/{target} # RustFS原生设置API
|
||||
```
|
||||
|
||||
## 技术特性
|
||||
|
||||
### 兼容性
|
||||
|
||||
- ✅ 完全兼容 MinIO 的 SSE-KMS 配置格式
|
||||
- ✅ 支持标准 S3 SSE-KMS HTTP 头部
|
||||
- ✅ 兼容 MinIO Admin API 配置管理接口
|
||||
|
||||
### 安全性
|
||||
|
||||
- ✅ 支持 RustyVault KMS 集成
|
||||
- ✅ 数据密钥的安全生成和加密存储
|
||||
- ✅ 支持 TLS 连接和证书验证
|
||||
- ✅ 敏感配置信息的安全处理
|
||||
|
||||
### 性能
|
||||
|
||||
- ✅ 异步配置操作
|
||||
- ✅ 线程安全的全局配置缓存
|
||||
- ✅ 高效的元数据序列化/反序列化
|
||||
- ✅ 支持分片并行加密
|
||||
|
||||
### 可维护性
|
||||
|
||||
- ✅ 模块化设计,职责分离
|
||||
- ✅ 完整的错误处理和日志记录
|
||||
- ✅ 丰富的单元测试覆盖
|
||||
- ✅ 详细的文档和注释
|
||||
|
||||
## 使用示例
|
||||
|
||||
### 配置 KMS
|
||||
|
||||
```bash
|
||||
# 通过环境变量配置
|
||||
export RUSTFS_KMS_ENABLED=true
|
||||
export RUSTFS_KMS_VAULT_ENDPOINT=http://vault:8200
|
||||
export RUSTFS_KMS_VAULT_KEY_NAME=rustfs-key
|
||||
export RUSTFS_KMS_VAULT_TOKEN=vault-token
|
||||
|
||||
# 通过API配置
|
||||
curl -X POST "http://rustfs:9000/minio/admin/v3/config/kms_vault/default" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"endpoint": "http://vault:8200",
|
||||
"key_name": "rustfs-encryption-key",
|
||||
"token": "vault-token",
|
||||
"enabled": true
|
||||
}'
|
||||
```
|
||||
|
||||
### 使用 SSE-KMS 上传对象
|
||||
|
||||
```bash
|
||||
# 使用aws-cli上传加密对象
|
||||
aws s3 cp file.txt s3://bucket/file.txt \
|
||||
--server-side-encryption aws:kms \
|
||||
--ssekms-key-id rustfs-encryption-key
|
||||
```
|
||||
|
||||
## 部署注意事项
|
||||
|
||||
1. **RustyVault 集成**: 确保 RustyVault 服务可访问且已正确配置 transit 引擎
|
||||
2. **网络安全**: 建议在生产环境中使用 TLS 连接到 Vault
|
||||
3. **权限管理**: 确保 RustFS 具有访问 Vault 密钥的适当权限
|
||||
4. **监控**: 建议监控 KMS 连接状态和加密操作性能
|
||||
|
||||
## 后续发展
|
||||
|
||||
这次实现为 RustFS 的企业级加密功能奠定了坚实基础。未来可以考虑:
|
||||
|
||||
- 支持多个 KMS 提供商(AWS KMS, Azure Key Vault 等)
|
||||
- 实现密钥轮换功能
|
||||
- 添加加密性能监控和优化
|
||||
- 支持更复杂的访问控制策略
|
||||
|
||||
通过这些改进,RustFS 现在具备了与 MinIO 相当的 SSE-KMS 功能,可以满足企业级数据加密需求。
|
||||
68
TODO.md
Normal file
68
TODO.md
Normal file
@@ -0,0 +1,68 @@
|
||||
# TODO LIST
|
||||
|
||||
## 基础存储
|
||||
|
||||
- [x] EC 可用读写数量判断 Read/WriteQuorum
|
||||
- [ ] 优化后台并发执行,可中断,传引用?
|
||||
- [x] 小文件存储到 metafile, inlinedata
|
||||
- [x] 完善 bucketmeta
|
||||
- [x] 对象锁
|
||||
- [x] 边读写边 hash,实现 reader 嵌套
|
||||
- [x] 远程 rpc
|
||||
- [x] 错误类型判断,程序中判断错误类型,如何统一错误
|
||||
- [x] 优化 xlmeta, 自定义 msg 数据结构
|
||||
- [ ] 优化 io.reader 参考 GetObjectNInfo 方便 io copy 如果 异步写,再平衡
|
||||
- [ ] 代码优化 使用范型?
|
||||
- [ ] 抽象出 metafile 存储
|
||||
|
||||
## 基础功能
|
||||
|
||||
- [ ] 桶操作
|
||||
- [x] 创建 CreateBucket
|
||||
- [x] 列表 ListBuckets
|
||||
- [ ] 桶下面的文件列表 ListObjects
|
||||
- [x] 简单实现功能
|
||||
- [ ] 优化并发读取
|
||||
- [ ] 删除
|
||||
- [x] 详情 HeadBucket
|
||||
- [ ] 文件操作
|
||||
- [x] 上传 PutObject
|
||||
- [x] 大文件上传
|
||||
- [x] 创建分片上传 CreateMultipartUpload
|
||||
- [x] 上传分片 PubObjectPart
|
||||
- [x] 提交完成 CompleteMultipartUpload
|
||||
- [x] 取消上传 AbortMultipartUpload
|
||||
- [x] 下载 GetObject
|
||||
- [x] 删除 DeleteObjects
|
||||
- [ ] 版本控制
|
||||
- [ ] 对象锁
|
||||
- [ ] 复制 CopyObject
|
||||
- [ ] 详情 HeadObject
|
||||
- [ ] 对象预先签名(get、put、head、post)
|
||||
|
||||
## 扩展功能
|
||||
|
||||
- [ ] 用户管理
|
||||
- [ ] Policy 管理
|
||||
- [ ] AK/SK分配管理
|
||||
- [ ] data scanner 统计和对象修复
|
||||
- [ ] 桶配额
|
||||
- [ ] 桶只读
|
||||
- [ ] 桶复制
|
||||
- [ ] 桶事件通知
|
||||
- [ ] 桶公开、桶私有
|
||||
- [ ] 对象生命周期管理
|
||||
- [ ] prometheus 对接
|
||||
- [ ] 日志收集和日志外发
|
||||
- [ ] 对象压缩
|
||||
- [ ] STS
|
||||
- [ ] 分层(阿里云、腾讯云、S3 远程对接)
|
||||
|
||||
|
||||
|
||||
## 性能优化
|
||||
- [ ] bitrot impl AsyncRead/AsyncWrite
|
||||
- [ ] erasure 并发读写
|
||||
- [x] 完善删除逻辑,并发处理,先移动到回收站,
|
||||
- [ ] 空间不足时清空回收站
|
||||
- [ ] list_object 使用 reader 传输
|
||||
19
appauth/Cargo.toml
Normal file
19
appauth/Cargo.toml
Normal file
@@ -0,0 +1,19 @@
|
||||
[package]
|
||||
name = "appauth"
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
rust-version.workspace = true
|
||||
version.workspace = true
|
||||
|
||||
[dependencies]
|
||||
base64-simd = "0.8.0"
|
||||
common.workspace = true
|
||||
hex-simd = "0.8.0"
|
||||
rand.workspace = true
|
||||
rsa = "0.9.8"
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
1
appauth/src/lib.rs
Normal file
1
appauth/src/lib.rs
Normal file
@@ -0,0 +1 @@
|
||||
pub mod token;
|
||||
110
appauth/src/token.rs
Normal file
110
appauth/src/token.rs
Normal file
@@ -0,0 +1,110 @@
|
||||
use common::error::Result;
|
||||
use rsa::Pkcs1v15Encrypt;
|
||||
use rsa::{
|
||||
pkcs8::{DecodePrivateKey, DecodePublicKey},
|
||||
RsaPrivateKey, RsaPublicKey,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone)]
|
||||
pub struct Token {
|
||||
pub name: String, // 应用 ID
|
||||
pub expired: u64, // 到期时间 (UNIX 时间戳)
|
||||
}
|
||||
|
||||
// 公钥生成 Token
|
||||
// [token] Token 对象
|
||||
// [key] 公钥字符串
|
||||
// 返回 base64 处理的加密字符串
|
||||
pub fn gencode(token: &Token, key: &str) -> Result<String> {
|
||||
let data = serde_json::to_vec(token)?;
|
||||
let public_key = RsaPublicKey::from_public_key_pem(key)?;
|
||||
let encrypted_data = public_key.encrypt(&mut rand::thread_rng(), Pkcs1v15Encrypt, &data)?;
|
||||
Ok(base64_simd::URL_SAFE_NO_PAD.encode_to_string(&encrypted_data))
|
||||
}
|
||||
|
||||
// 私钥解析 Token
|
||||
// [token] base64 处理的加密字符串
|
||||
// [key] 私钥字符串
|
||||
// 返回 Token 对象
|
||||
pub fn parse(token: &str, key: &str) -> Result<Token> {
|
||||
let encrypted_data = base64_simd::URL_SAFE_NO_PAD.decode_to_vec(token.as_bytes())?;
|
||||
let private_key = RsaPrivateKey::from_pkcs8_pem(key)?;
|
||||
let decrypted_data = private_key.decrypt(Pkcs1v15Encrypt, &encrypted_data)?;
|
||||
let res: Token = serde_json::from_slice(&decrypted_data)?;
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
pub fn parse_license(license: &str) -> Result<Token> {
|
||||
parse(license, TEST_PRIVATE_KEY)
|
||||
// match parse(license, TEST_PRIVATE_KEY) {
|
||||
// Ok(token) => {
|
||||
// if token.expired > SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs() {
|
||||
// Ok(token)
|
||||
// } else {
|
||||
// Err("Token expired".into())
|
||||
// }
|
||||
// }
|
||||
// Err(e) => Err(e),
|
||||
// }
|
||||
}
|
||||
|
||||
static TEST_PRIVATE_KEY:&str ="-----BEGIN PRIVATE KEY-----\nMIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCj86SrJIuxSxR6\nBJ/dlJEUIj6NeBRnhLQlCDdovuz61+7kJXVcxaR66w4m8W7SLEUP+IlPtnn6vmiG\n7XMhGNHIr7r1JsEVVLhZmL3tKI66DEZl786ZhG81BWqUlmcooIPS8UEPZNqJXLuz\nVGhxNyVGbj/tV7QC2pSISnKaixc+nrhxvo7w56p5qrm9tik0PjTgfZsUePkoBsSN\npoRkAauS14MAzK6HGB75CzG3dZqXUNWSWVocoWtQbZUwFGXyzU01ammsHQDvc2xu\nK1RQpd1qYH5bOWZ0N0aPFwT0r59HztFXg9sbjsnuhO1A7OiUOkc6iGVuJ0wm/9nA\nwZIBqzgjAgMBAAECggEAPMpeSEbotPhNw2BrllE76ec4omPfzPJbiU+em+wPGoNu\nRJHPDnMKJbl6Kd5jZPKdOOrCnxfd6qcnQsBQa/kz7+GYxMV12l7ra+1Cnujm4v0i\nLTHZvPpp8ZLsjeOmpF3AAzsJEJgon74OqtOlVjVIUPEYKvzV9ijt4gsYq0zfdYv0\nhrTMzyrGM4/UvKLsFIBROAfCeWfA7sXLGH8JhrRAyDrtCPzGtyyAmzoHKHtHafcB\nuyPFw/IP8otAgpDk5iiQPNkH0WwzAQIm12oHuNUa66NwUK4WEjXTnDg8KeWLHHNv\nIfN8vdbZchMUpMIvvkr7is315d8f2cHCB5gEO+GWAQKBgQDR/0xNll+FYaiUKCPZ\nvkOCAd3l5mRhsqnjPQ/6Ul1lAyYWpoJSFMrGGn/WKTa/FVFJRTGbBjwP+Mx10bfb\ngUg2GILDTISUh54fp4zngvTi9w4MWGKXrb7I1jPkM3vbJfC/v2fraQ/r7qHPpO2L\nf6ZbGxasIlSvr37KeGoelwcAQQKBgQDH3hmOTS2Hl6D4EXdq5meHKrfeoicGN7m8\noQK7u8iwn1R9zK5nh6IXxBhKYNXNwdCQtBZVRvFjjZ56SZJb7lKqa1BcTsgJfZCy\nnI3Uu4UykrECAH8AVCVqBXUDJmeA2yE+gDAtYEjvhSDHpUfWxoGHr0B/Oqk2Lxc/\npRy1qV5fYwKBgBWSL/hYVf+RhIuTg/s9/BlCr9SJ0g3nGGRrRVTlWQqjRCpXeFOO\nJzYqSq9pFGKUggEQxoOyJEFPwVDo9gXqRcyov+Xn2kaXl7qQr3yoixc1YZALFDWY\nd1ySBEqQr0xXnV9U/gvEgwotPRnjSzNlLWV2ZuHPtPtG/7M0o1H5GZMBAoGAKr3N\nW0gX53o+my4pCnxRQW+aOIsWq1a5aqRIEFudFGBOUkS2Oz+fI1P1GdrRfhnnfzpz\n2DK+plp/vIkFOpGhrf4bBlJ2psjqa7fdANRFLMaAAfyXLDvScHTQTCcnVUAHQPVq\n2BlSH56pnugyj7SNuLV6pnql+wdhAmRN2m9o1h8CgYAbX2juSr4ioXwnYjOUdrIY\n4+ERvHcXdjoJmmPcAm4y5NbSqLXyU0FQmplNMt2A5LlniWVJ9KNdjAQUt60FZw/+\nr76LdxXaHNZghyx0BOs7mtq5unSQXamZ8KixasfhE9uz3ij1jXjG6hafWkS8/68I\nuWbaZqgvy7a9oPHYlKH7Jg==\n-----END PRIVATE KEY-----\n";
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rsa::{
|
||||
pkcs8::{EncodePrivateKey, EncodePublicKey, LineEnding},
|
||||
RsaPrivateKey,
|
||||
};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
#[test]
|
||||
fn test_gencode_and_parse() {
|
||||
let mut rng = rand::thread_rng();
|
||||
let bits = 2048;
|
||||
let private_key = RsaPrivateKey::new(&mut rng, bits).expect("Failed to generate private key");
|
||||
let public_key = RsaPublicKey::from(&private_key);
|
||||
|
||||
let private_key_pem = private_key.to_pkcs8_pem(LineEnding::LF).unwrap();
|
||||
let public_key_pem = public_key.to_public_key_pem(LineEnding::LF).unwrap();
|
||||
|
||||
let token = Token {
|
||||
name: "test_app".to_string(),
|
||||
expired: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() + 3600, // 1 hour from now
|
||||
};
|
||||
|
||||
let encoded = gencode(&token, &public_key_pem).expect("Failed to encode token");
|
||||
|
||||
let decoded = parse(&encoded, &private_key_pem).expect("Failed to decode token");
|
||||
|
||||
assert_eq!(token.name, decoded.name);
|
||||
assert_eq!(token.expired, decoded.expired);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_invalid_token() {
|
||||
let private_key_pem = RsaPrivateKey::new(&mut rand::thread_rng(), 2048)
|
||||
.expect("Failed to generate private key")
|
||||
.to_pkcs8_pem(LineEnding::LF)
|
||||
.unwrap();
|
||||
|
||||
let invalid_token = "invalid_base64_token";
|
||||
let result = parse(invalid_token, &private_key_pem);
|
||||
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_gencode_with_invalid_key() {
|
||||
let token = Token {
|
||||
name: "test_app".to_string(),
|
||||
expired: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() + 3600, // 1 hour from now
|
||||
};
|
||||
|
||||
let invalid_key = "invalid_public_key";
|
||||
let result = gencode(&token, invalid_key);
|
||||
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
||||
45
bucket_replicate_test.md
Normal file
45
bucket_replicate_test.md
Normal file
@@ -0,0 +1,45 @@
|
||||
启动两个rustfs
|
||||
rustfs --address 0.0.0.0:9000 /rustfs-data9000
|
||||
rustfs --address 0.0.0.0:9001 /rustfs-data9001
|
||||
|
||||
|
||||
### 使用 minio mc 设置 alias 分别为 rustfs 和 rustfs2
|
||||
|
||||
|
||||
### 创建 bucket
|
||||
mc mb rustfs/srcbucket
|
||||
|
||||
### 创建 desc bucket
|
||||
|
||||
mc mb rustfs2/destbucket
|
||||
|
||||
|
||||
|
||||
### 开启版本控制
|
||||
|
||||
mc version enable rustfs/srcbucket
|
||||
mc version enable rustfs2/destbucket
|
||||
|
||||
#### 使用修改过的 mc 才能 add bucket replication
|
||||
|
||||
./mc replication add rustfs/srcbucket --remote-bucket rustfs2/destbucket
|
||||
|
||||
|
||||
|
||||
###### 复制一个小文件;
|
||||
mc cp ./1.txt rustfs/srcbucket
|
||||
|
||||
###### 查看是否成功
|
||||
mc ls --versions rustfs/srcbucket/1.txt
|
||||
mc ls --versions rustfs/destbucket/1.txt
|
||||
|
||||
|
||||
##### 复制一个大文件
|
||||
1 创建一个大文件
|
||||
dd if=/dev/zero of=./dd.out bs=4096000 count=1000
|
||||
|
||||
mc cp ./dd.out rustfs/srcbucket/
|
||||
|
||||
##### 查看是否成功
|
||||
mc ls --versions rustfs/srcbucket/dd.out
|
||||
mc ls --versions rustfs2/destbucket/dd.out
|
||||
21
build_rustfs.sh
Executable file
21
build_rustfs.sh
Executable file
@@ -0,0 +1,21 @@
|
||||
#!/bin/bash
|
||||
clear
|
||||
|
||||
# 获取当前平台架构
|
||||
ARCH=$(uname -m)
|
||||
|
||||
# 根据架构设置 target 目录
|
||||
if [ "$ARCH" == "x86_64" ]; then
|
||||
TARGET_DIR="target/x86_64"
|
||||
elif [ "$ARCH" == "aarch64" ]; then
|
||||
TARGET_DIR="target/arm64"
|
||||
else
|
||||
TARGET_DIR="target/unknown"
|
||||
fi
|
||||
|
||||
# 设置 CARGO_TARGET_DIR 并构建项目
|
||||
CARGO_TARGET_DIR=$TARGET_DIR RUSTFLAGS="-C link-arg=-fuse-ld=mold" cargo build --package rustfs
|
||||
|
||||
echo -e "\a"
|
||||
echo -e "\a"
|
||||
echo -e "\a"
|
||||
32
cli/rustfs-gui/Cargo.toml
Normal file
32
cli/rustfs-gui/Cargo.toml
Normal file
@@ -0,0 +1,32 @@
|
||||
[package]
|
||||
name = "rustfs-gui"
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
rust-version.workspace = true
|
||||
version.workspace = true
|
||||
|
||||
[dependencies]
|
||||
chrono = { workspace = true }
|
||||
dioxus = { workspace = true, features = ["router"] }
|
||||
dirs = { workspace = true }
|
||||
hex = { workspace = true }
|
||||
keyring = { workspace = true }
|
||||
lazy_static = { workspace = true }
|
||||
rfd = { workspace = true }
|
||||
rust-embed = { workspace = true, features = ["interpolate-folder-path"] }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
tokio = { workspace = true, features = ["io-util", "net", "process", "sync"] }
|
||||
tracing-subscriber = { workspace = true, features = ["fmt", "env-filter", "tracing-log", "time", "local-time", "json"] }
|
||||
tracing-appender = { workspace = true }
|
||||
|
||||
[features]
|
||||
default = ["desktop"]
|
||||
web = ["dioxus/web"]
|
||||
desktop = ["dioxus/desktop"]
|
||||
mobile = ["dioxus/mobile"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
50
cli/rustfs-gui/Dioxus.toml
Normal file
50
cli/rustfs-gui/Dioxus.toml
Normal file
@@ -0,0 +1,50 @@
|
||||
[application]
|
||||
|
||||
# App (Project) Name
|
||||
name = "rustfs-gui"
|
||||
|
||||
# The static resource path
|
||||
asset_dir = "public"
|
||||
|
||||
[web.app]
|
||||
|
||||
# HTML title tag content
|
||||
title = "rustfs-gui"
|
||||
|
||||
# include `assets` in web platform
|
||||
[web.resource]
|
||||
|
||||
# Additional CSS style files
|
||||
style = []
|
||||
|
||||
# Additional JavaScript files
|
||||
script = []
|
||||
|
||||
[web.resource.dev]
|
||||
|
||||
# Javascript code file
|
||||
# serve: [dev-server] only
|
||||
script = []
|
||||
|
||||
[bundle]
|
||||
identifier = "com.rustfs.cli.gui"
|
||||
|
||||
publisher = "RustFsGUI"
|
||||
|
||||
category = "Utility"
|
||||
|
||||
copyright = "Copyright 2025 rustfs.com"
|
||||
|
||||
icon = [
|
||||
"assets/icons/icon.icns",
|
||||
"assets/icons/icon.ico"
|
||||
]
|
||||
#[bundle.macos]
|
||||
#provider_short_name = "RustFs"
|
||||
[bundle.windows]
|
||||
tsp = true
|
||||
icon_path = "assets/icons/icon.ico"
|
||||
allow_downgrades = true
|
||||
[bundle.windows.webview_install_mode]
|
||||
[bundle.windows.webview_install_mode.EmbedBootstrapper]
|
||||
silent = true
|
||||
34
cli/rustfs-gui/README.md
Normal file
34
cli/rustfs-gui/README.md
Normal file
@@ -0,0 +1,34 @@
|
||||
## Rustfs GUI
|
||||
|
||||
### Tailwind
|
||||
|
||||
1. Install npm: https://docs.npmjs.com/downloading-and-installing-node-js-and-npm
|
||||
2. Install the Tailwind CSS CLI: https://tailwindcss.com/docs/installation
|
||||
3. Run the following command in the root of the project to start the Tailwind CSS compiler:
|
||||
|
||||
```bash
|
||||
npx tailwindcss -i ./input.css -o ./assets/tailwind.css --watch
|
||||
```
|
||||
|
||||
### Dioxus CLI
|
||||
|
||||
#### Install the stable version (recommended)
|
||||
|
||||
```shell
|
||||
cargo install dioxus-cli
|
||||
```
|
||||
|
||||
### Serving Your App
|
||||
|
||||
Run the following command in the root of your project to start developing with the default platform:
|
||||
|
||||
```bash
|
||||
dx serve
|
||||
```
|
||||
|
||||
To run for a different platform, use the `--platform platform` flag. E.g.
|
||||
|
||||
```bash
|
||||
dx serve --platform desktop
|
||||
```
|
||||
|
||||
BIN
cli/rustfs-gui/assets/favicon.ico
Normal file
BIN
cli/rustfs-gui/assets/favicon.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 80 KiB |
BIN
cli/rustfs-gui/assets/icons/icon-all.icns
Normal file
BIN
cli/rustfs-gui/assets/icons/icon-all.icns
Normal file
Binary file not shown.
BIN
cli/rustfs-gui/assets/icons/icon-all.ico
Normal file
BIN
cli/rustfs-gui/assets/icons/icon-all.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 80 KiB |
BIN
cli/rustfs-gui/assets/icons/icon.icns
Normal file
BIN
cli/rustfs-gui/assets/icons/icon.icns
Normal file
Binary file not shown.
BIN
cli/rustfs-gui/assets/icons/icon.ico
Normal file
BIN
cli/rustfs-gui/assets/icons/icon.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 80 KiB |
32
cli/rustfs-gui/assets/js/sts.js
Normal file
32
cli/rustfs-gui/assets/js/sts.js
Normal file
@@ -0,0 +1,32 @@
|
||||
window.switchTab = function (tabId) {
|
||||
// Hide everything
|
||||
document.querySelectorAll('.tab-content').forEach(content => {
|
||||
content.classList.add('hidden');
|
||||
});
|
||||
|
||||
// Reset all label styles
|
||||
document.querySelectorAll('.tab-btn').forEach(btn => {
|
||||
btn.classList.remove('border-b-2', 'border-black');
|
||||
btn.classList.add('text-gray-500');
|
||||
});
|
||||
|
||||
// Displays the selected content
|
||||
const activeContent = document.getElementById(tabId);
|
||||
if (activeContent) {
|
||||
activeContent.classList.remove('hidden');
|
||||
}
|
||||
|
||||
// Updates the selected label style
|
||||
const activeBtn = document.querySelector(`[data-tab="${tabId}"]`);
|
||||
if (activeBtn) {
|
||||
activeBtn.classList.add('border-b-2', 'border-black');
|
||||
activeBtn.classList.remove('text-gray-500');
|
||||
}
|
||||
};
|
||||
|
||||
window.togglePassword = function (button) {
|
||||
const input = button.parentElement.querySelector('input[type="password"], input[type="text"]');
|
||||
if (input) {
|
||||
input.type = input.type === 'password' ? 'text' : 'password';
|
||||
}
|
||||
};
|
||||
BIN
cli/rustfs-gui/assets/rustfs-logo-square.png
Normal file
BIN
cli/rustfs-gui/assets/rustfs-logo-square.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 34 KiB |
20
cli/rustfs-gui/assets/rustfs-logo.svg
Normal file
20
cli/rustfs-gui/assets/rustfs-logo.svg
Normal file
@@ -0,0 +1,20 @@
|
||||
<svg width="1558" height="260" viewBox="0 0 1558 260" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<g clip-path="url(#clip0_0_3)">
|
||||
<path d="M1288.5 112.905H1159.75V58.4404H1262L1270 0L1074 0V260H1159.75V162.997H1296.95L1288.5 112.905Z"
|
||||
fill="#0196D0"/>
|
||||
<path d="M1058.62 58.4404V0H789V58.4404H881.133V260H966.885V58.4404H1058.62Z" fill="#0196D0"/>
|
||||
<path d="M521 179.102V0L454.973 15V161C454.973 181.124 452.084 193.146 443.5 202C434.916 211.257 419.318 214.5 400.5 214.5C381.022 214.5 366.744 210.854 357.5 202C348.916 193.548 346.357 175.721 346.357 156V0L280 15V175.48C280 208.08 290.234 229.412 309.712 241.486C329.19 253.56 358.903 260 400.5 260C440.447 260 470.159 253.56 490.297 241.486C510.766 229.412 521 208.483 521 179.102Z"
|
||||
fill="#0196D0"/>
|
||||
<path d="M172.84 84.2813C172.84 97.7982 168.249 107.737 158.41 113.303C149.883 118.471 137.092 121.254 120.693 122.049V162.997C129.876 163.792 138.076 166.177 144.307 176.514L184.647 260H265L225.316 180.489C213.181 155.046 201.374 149.48 178.744 143.517C212.197 138.349 241.386 118.471 241.386 73.1499C241.386 53.2722 233.843 30.2141 218.756 17.8899C203.998 5.56575 183.991 0 159.394 0H120.693V48.5015H127.58C142.23 48.5015 153.6 51.4169 161.689 57.2477C169.233 62.8135 172.84 71.5596 172.84 84.2813ZM120.693 122.049C119.163 122.049 117.741 122.049 116.43 122.049H68.5457V48.5015H120.693V0H0V260H70.5137V162.997H110.526C113.806 162.997 117.741 162.997 120.693 162.997V122.049Z"
|
||||
fill="#0196D0"/>
|
||||
<path d="M774 179.297C774 160.829 766.671 144.669 752.013 131.972C738.127 119.66 712.025 110.169 673.708 103.5C662.136 101.191 651.722 99.6523 643.235 97.3437C586.532 84.6467 594.632 52.7118 650.564 52.7118C680.651 52.7118 709.582 61.946 738.127 66.9478C742.37 67.7174 743.913 68.1021 744.298 68.1021L750.47 12.697C720.383 3.46282 684.895 0 654.036 0C616.619 0 587.689 6.54088 567.245 19.2379C546.801 31.9349 536 57.7137 536 82.3382C536 103.5 543.715 119.66 559.916 131.972C575.731 143.515 604.276 152.749 645.55 160.059C658.279 162.368 668.694 163.907 676.794 166.215C685.023 168.524 691.066 170.704 694.924 172.756C702.253 176.604 706.11 182.375 706.11 188.531C706.11 196.611 701.481 202.767 692.224 207C664.836 220.081 587.689 212.001 556.83 198.15L543.715 247.784C547.186 248.169 552.972 249.323 559.916 250.477C616.619 259.327 690.681 270.869 741.212 238.935C762.814 225.468 774 206.23 774 179.297Z"
|
||||
fill="#0196D0"/>
|
||||
<path d="M1558 179.568C1558 160.383 1550.42 144.268 1535.67 131.99C1521.32 119.968 1494.34 110.631 1454.74 103.981C1442.38 101.679 1432.01 99.3764 1422.84 97.8416C1422.44 97.8416 1422.04 97.8416 1422.04 97.4579V112.422L1361.04 75.2038L1422.04 38.3692V52.9496C1424.7 52.9496 1427.49 52.9496 1430.41 52.9496C1461.51 52.9496 1491.42 62.5419 1521.32 67.5299C1525.31 67.9136 1526.9 67.9136 1527.3 67.9136L1533.68 12.6619C1502.98 3.83692 1465.9 0 1434 0C1395.33 0 1365.43 6.52277 1345.09 19.5683C1323.16 32.6139 1312 57.9376 1312 82.8776C1312 103.981 1320.37 120.096 1336.72 131.607C1353.46 143.885 1382.97 153.093 1425.23 160.383C1434 161.535 1441.18 162.686 1447.56 164.22L1448.36 150.791L1507.36 190.312L1445.57 224.844L1445.96 212.949C1409.68 215.635 1357.45 209.112 1333.53 197.985L1320.37 247.482C1323.56 248.249 1329.54 248.633 1336.72 250.551C1395.33 259.376 1471.88 270.887 1524.11 238.657C1546.84 225.611 1558 205.659 1558 179.568Z"
|
||||
fill="#0196D0"/>
|
||||
</g>
|
||||
<defs>
|
||||
<clipPath id="clip0_0_3">
|
||||
<rect width="1558" height="260" fill="white"/>
|
||||
</clipPath>
|
||||
</defs>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 3.5 KiB |
17
cli/rustfs-gui/assets/styling/navbar.css
Normal file
17
cli/rustfs-gui/assets/styling/navbar.css
Normal file
@@ -0,0 +1,17 @@
|
||||
#navbar {
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
}
|
||||
|
||||
#navbar a {
|
||||
color: #ffffff;
|
||||
margin-right: 20px;
|
||||
text-decoration: none;
|
||||
transition: color 0.2s ease;
|
||||
}
|
||||
|
||||
#navbar a:hover {
|
||||
cursor: pointer;
|
||||
color: #ffffff;
|
||||
/ / #91a4d2;
|
||||
}
|
||||
956
cli/rustfs-gui/assets/tailwind.css
Normal file
956
cli/rustfs-gui/assets/tailwind.css
Normal file
@@ -0,0 +1,956 @@
|
||||
*, ::before, ::after {
|
||||
--tw-border-spacing-x: 0;
|
||||
--tw-border-spacing-y: 0;
|
||||
--tw-translate-x: 0;
|
||||
--tw-translate-y: 0;
|
||||
--tw-rotate: 0;
|
||||
--tw-skew-x: 0;
|
||||
--tw-skew-y: 0;
|
||||
--tw-scale-x: 1;
|
||||
--tw-scale-y: 1;
|
||||
--tw-pan-x: ;
|
||||
--tw-pan-y: ;
|
||||
--tw-pinch-zoom: ;
|
||||
--tw-scroll-snap-strictness: proximity;
|
||||
--tw-gradient-from-position: ;
|
||||
--tw-gradient-via-position: ;
|
||||
--tw-gradient-to-position: ;
|
||||
--tw-ordinal: ;
|
||||
--tw-slashed-zero: ;
|
||||
--tw-numeric-figure: ;
|
||||
--tw-numeric-spacing: ;
|
||||
--tw-numeric-fraction: ;
|
||||
--tw-ring-inset: ;
|
||||
--tw-ring-offset-width: 0px;
|
||||
--tw-ring-offset-color: #fff;
|
||||
--tw-ring-color: rgb(59 130 246 / 0.5);
|
||||
--tw-ring-offset-shadow: 0 0 #0000;
|
||||
--tw-ring-shadow: 0 0 #0000;
|
||||
--tw-shadow: 0 0 #0000;
|
||||
--tw-shadow-colored: 0 0 #0000;
|
||||
--tw-blur: ;
|
||||
--tw-brightness: ;
|
||||
--tw-contrast: ;
|
||||
--tw-grayscale: ;
|
||||
--tw-hue-rotate: ;
|
||||
--tw-invert: ;
|
||||
--tw-saturate: ;
|
||||
--tw-sepia: ;
|
||||
--tw-drop-shadow: ;
|
||||
--tw-backdrop-blur: ;
|
||||
--tw-backdrop-brightness: ;
|
||||
--tw-backdrop-contrast: ;
|
||||
--tw-backdrop-grayscale: ;
|
||||
--tw-backdrop-hue-rotate: ;
|
||||
--tw-backdrop-invert: ;
|
||||
--tw-backdrop-opacity: ;
|
||||
--tw-backdrop-saturate: ;
|
||||
--tw-backdrop-sepia: ;
|
||||
--tw-contain-size: ;
|
||||
--tw-contain-layout: ;
|
||||
--tw-contain-paint: ;
|
||||
--tw-contain-style: ;
|
||||
}
|
||||
|
||||
::backdrop {
|
||||
--tw-border-spacing-x: 0;
|
||||
--tw-border-spacing-y: 0;
|
||||
--tw-translate-x: 0;
|
||||
--tw-translate-y: 0;
|
||||
--tw-rotate: 0;
|
||||
--tw-skew-x: 0;
|
||||
--tw-skew-y: 0;
|
||||
--tw-scale-x: 1;
|
||||
--tw-scale-y: 1;
|
||||
--tw-pan-x: ;
|
||||
--tw-pan-y: ;
|
||||
--tw-pinch-zoom: ;
|
||||
--tw-scroll-snap-strictness: proximity;
|
||||
--tw-gradient-from-position: ;
|
||||
--tw-gradient-via-position: ;
|
||||
--tw-gradient-to-position: ;
|
||||
--tw-ordinal: ;
|
||||
--tw-slashed-zero: ;
|
||||
--tw-numeric-figure: ;
|
||||
--tw-numeric-spacing: ;
|
||||
--tw-numeric-fraction: ;
|
||||
--tw-ring-inset: ;
|
||||
--tw-ring-offset-width: 0px;
|
||||
--tw-ring-offset-color: #fff;
|
||||
--tw-ring-color: rgb(59 130 246 / 0.5);
|
||||
--tw-ring-offset-shadow: 0 0 #0000;
|
||||
--tw-ring-shadow: 0 0 #0000;
|
||||
--tw-shadow: 0 0 #0000;
|
||||
--tw-shadow-colored: 0 0 #0000;
|
||||
--tw-blur: ;
|
||||
--tw-brightness: ;
|
||||
--tw-contrast: ;
|
||||
--tw-grayscale: ;
|
||||
--tw-hue-rotate: ;
|
||||
--tw-invert: ;
|
||||
--tw-saturate: ;
|
||||
--tw-sepia: ;
|
||||
--tw-drop-shadow: ;
|
||||
--tw-backdrop-blur: ;
|
||||
--tw-backdrop-brightness: ;
|
||||
--tw-backdrop-contrast: ;
|
||||
--tw-backdrop-grayscale: ;
|
||||
--tw-backdrop-hue-rotate: ;
|
||||
--tw-backdrop-invert: ;
|
||||
--tw-backdrop-opacity: ;
|
||||
--tw-backdrop-saturate: ;
|
||||
--tw-backdrop-sepia: ;
|
||||
--tw-contain-size: ;
|
||||
--tw-contain-layout: ;
|
||||
--tw-contain-paint: ;
|
||||
--tw-contain-style: ;
|
||||
}
|
||||
|
||||
/*
|
||||
! tailwindcss v3.4.17 | MIT License | https://tailwindcss.com
|
||||
*/
|
||||
|
||||
/*
|
||||
1. Prevent padding and border from affecting element width. (https://github.com/mozdevs/cssremedy/issues/4)
|
||||
2. Allow adding a border to an element by just adding a border-width. (https://github.com/tailwindcss/tailwindcss/pull/116)
|
||||
*/
|
||||
|
||||
*,
|
||||
::before,
|
||||
::after {
|
||||
box-sizing: border-box;
|
||||
/* 1 */
|
||||
border-width: 0;
|
||||
/* 2 */
|
||||
border-style: solid;
|
||||
/* 2 */
|
||||
border-color: #e5e7eb;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
::before,
|
||||
::after {
|
||||
--tw-content: '';
|
||||
}
|
||||
|
||||
/*
|
||||
1. Use a consistent sensible line-height in all browsers.
|
||||
2. Prevent adjustments of font size after orientation changes in iOS.
|
||||
3. Use a more readable tab size.
|
||||
4. Use the user's configured `sans` font-family by default.
|
||||
5. Use the user's configured `sans` font-feature-settings by default.
|
||||
6. Use the user's configured `sans` font-variation-settings by default.
|
||||
7. Disable tap highlights on iOS
|
||||
*/
|
||||
|
||||
html,
|
||||
:host {
|
||||
line-height: 1.5;
|
||||
/* 1 */
|
||||
-webkit-text-size-adjust: 100%;
|
||||
/* 2 */
|
||||
-moz-tab-size: 4;
|
||||
/* 3 */
|
||||
-o-tab-size: 4;
|
||||
tab-size: 4;
|
||||
/* 3 */
|
||||
font-family: ui-sans-serif, system-ui, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
|
||||
/* 4 */
|
||||
font-feature-settings: normal;
|
||||
/* 5 */
|
||||
font-variation-settings: normal;
|
||||
/* 6 */
|
||||
-webkit-tap-highlight-color: transparent;
|
||||
/* 7 */
|
||||
}
|
||||
|
||||
/*
|
||||
1. Remove the margin in all browsers.
|
||||
2. Inherit line-height from `html` so users can set them as a class directly on the `html` element.
|
||||
*/
|
||||
|
||||
body {
|
||||
margin: 0;
|
||||
/* 1 */
|
||||
line-height: inherit;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
1. Add the correct height in Firefox.
|
||||
2. Correct the inheritance of border color in Firefox. (https://bugzilla.mozilla.org/show_bug.cgi?id=190655)
|
||||
3. Ensure horizontal rules are visible by default.
|
||||
*/
|
||||
|
||||
hr {
|
||||
height: 0;
|
||||
/* 1 */
|
||||
color: inherit;
|
||||
/* 2 */
|
||||
border-top-width: 1px;
|
||||
/* 3 */
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct text decoration in Chrome, Edge, and Safari.
|
||||
*/
|
||||
|
||||
abbr:where([title]) {
|
||||
-webkit-text-decoration: underline dotted;
|
||||
text-decoration: underline dotted;
|
||||
}
|
||||
|
||||
/*
|
||||
Remove the default font size and weight for headings.
|
||||
*/
|
||||
|
||||
h1,
|
||||
h2,
|
||||
h3,
|
||||
h4,
|
||||
h5,
|
||||
h6 {
|
||||
font-size: inherit;
|
||||
font-weight: inherit;
|
||||
}
|
||||
|
||||
/*
|
||||
Reset links to optimize for opt-in styling instead of opt-out.
|
||||
*/
|
||||
|
||||
a {
|
||||
color: inherit;
|
||||
text-decoration: inherit;
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct font weight in Edge and Safari.
|
||||
*/
|
||||
|
||||
b,
|
||||
strong {
|
||||
font-weight: bolder;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Use the user's configured `mono` font-family by default.
|
||||
2. Use the user's configured `mono` font-feature-settings by default.
|
||||
3. Use the user's configured `mono` font-variation-settings by default.
|
||||
4. Correct the odd `em` font sizing in all browsers.
|
||||
*/
|
||||
|
||||
code,
|
||||
kbd,
|
||||
samp,
|
||||
pre {
|
||||
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
|
||||
/* 1 */
|
||||
font-feature-settings: normal;
|
||||
/* 2 */
|
||||
font-variation-settings: normal;
|
||||
/* 3 */
|
||||
font-size: 1em;
|
||||
/* 4 */
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct font size in all browsers.
|
||||
*/
|
||||
|
||||
small {
|
||||
font-size: 80%;
|
||||
}
|
||||
|
||||
/*
|
||||
Prevent `sub` and `sup` elements from affecting the line height in all browsers.
|
||||
*/
|
||||
|
||||
sub,
|
||||
sup {
|
||||
font-size: 75%;
|
||||
line-height: 0;
|
||||
position: relative;
|
||||
vertical-align: baseline;
|
||||
}
|
||||
|
||||
sub {
|
||||
bottom: -0.25em;
|
||||
}
|
||||
|
||||
sup {
|
||||
top: -0.5em;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Remove text indentation from table contents in Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=999088, https://bugs.webkit.org/show_bug.cgi?id=201297)
|
||||
2. Correct table border color inheritance in all Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=935729, https://bugs.webkit.org/show_bug.cgi?id=195016)
|
||||
3. Remove gaps between table borders by default.
|
||||
*/
|
||||
|
||||
table {
|
||||
text-indent: 0;
|
||||
/* 1 */
|
||||
border-color: inherit;
|
||||
/* 2 */
|
||||
border-collapse: collapse;
|
||||
/* 3 */
|
||||
}
|
||||
|
||||
/*
|
||||
1. Change the font styles in all browsers.
|
||||
2. Remove the margin in Firefox and Safari.
|
||||
3. Remove default padding in all browsers.
|
||||
*/
|
||||
|
||||
button,
|
||||
input,
|
||||
optgroup,
|
||||
select,
|
||||
textarea {
|
||||
font-family: inherit;
|
||||
/* 1 */
|
||||
font-feature-settings: inherit;
|
||||
/* 1 */
|
||||
font-variation-settings: inherit;
|
||||
/* 1 */
|
||||
font-size: 100%;
|
||||
/* 1 */
|
||||
font-weight: inherit;
|
||||
/* 1 */
|
||||
line-height: inherit;
|
||||
/* 1 */
|
||||
letter-spacing: inherit;
|
||||
/* 1 */
|
||||
color: inherit;
|
||||
/* 1 */
|
||||
margin: 0;
|
||||
/* 2 */
|
||||
padding: 0;
|
||||
/* 3 */
|
||||
}
|
||||
|
||||
/*
|
||||
Remove the inheritance of text transform in Edge and Firefox.
|
||||
*/
|
||||
|
||||
button,
|
||||
select {
|
||||
text-transform: none;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Correct the inability to style clickable types in iOS and Safari.
|
||||
2. Remove default button styles.
|
||||
*/
|
||||
|
||||
button,
|
||||
input:where([type='button']),
|
||||
input:where([type='reset']),
|
||||
input:where([type='submit']) {
|
||||
-webkit-appearance: button;
|
||||
/* 1 */
|
||||
background-color: transparent;
|
||||
/* 2 */
|
||||
background-image: none;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Use the modern Firefox focus style for all focusable elements.
|
||||
*/
|
||||
|
||||
:-moz-focusring {
|
||||
outline: auto;
|
||||
}
|
||||
|
||||
/*
|
||||
Remove the additional `:invalid` styles in Firefox. (https://github.com/mozilla/gecko-dev/blob/2f9eacd9d3d995c937b4251a5557d95d494c9be1/layout/style/res/forms.css#L728-L737)
|
||||
*/
|
||||
|
||||
:-moz-ui-invalid {
|
||||
box-shadow: none;
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct vertical alignment in Chrome and Firefox.
|
||||
*/
|
||||
|
||||
progress {
|
||||
vertical-align: baseline;
|
||||
}
|
||||
|
||||
/*
|
||||
Correct the cursor style of increment and decrement buttons in Safari.
|
||||
*/
|
||||
|
||||
::-webkit-inner-spin-button,
|
||||
::-webkit-outer-spin-button {
|
||||
height: auto;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Correct the odd appearance in Chrome and Safari.
|
||||
2. Correct the outline style in Safari.
|
||||
*/
|
||||
|
||||
[type='search'] {
|
||||
-webkit-appearance: textfield;
|
||||
/* 1 */
|
||||
outline-offset: -2px;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Remove the inner padding in Chrome and Safari on macOS.
|
||||
*/
|
||||
|
||||
::-webkit-search-decoration {
|
||||
-webkit-appearance: none;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Correct the inability to style clickable types in iOS and Safari.
|
||||
2. Change font properties to `inherit` in Safari.
|
||||
*/
|
||||
|
||||
::-webkit-file-upload-button {
|
||||
-webkit-appearance: button;
|
||||
/* 1 */
|
||||
font: inherit;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct display in Chrome and Safari.
|
||||
*/
|
||||
|
||||
summary {
|
||||
display: list-item;
|
||||
}
|
||||
|
||||
/*
|
||||
Removes the default spacing and border for appropriate elements.
|
||||
*/
|
||||
|
||||
blockquote,
|
||||
dl,
|
||||
dd,
|
||||
h1,
|
||||
h2,
|
||||
h3,
|
||||
h4,
|
||||
h5,
|
||||
h6,
|
||||
hr,
|
||||
figure,
|
||||
p,
|
||||
pre {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
fieldset {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
legend {
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
ol,
|
||||
ul,
|
||||
menu {
|
||||
list-style: none;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
/*
|
||||
Reset default styling for dialogs.
|
||||
*/
|
||||
|
||||
dialog {
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
/*
|
||||
Prevent resizing textareas horizontally by default.
|
||||
*/
|
||||
|
||||
textarea {
|
||||
resize: vertical;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Reset the default placeholder opacity in Firefox. (https://github.com/tailwindlabs/tailwindcss/issues/3300)
|
||||
2. Set the default placeholder color to the user's configured gray 400 color.
|
||||
*/
|
||||
|
||||
input::-moz-placeholder, textarea::-moz-placeholder {
|
||||
opacity: 1;
|
||||
/* 1 */
|
||||
color: #9ca3af;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
input::placeholder,
|
||||
textarea::placeholder {
|
||||
opacity: 1;
|
||||
/* 1 */
|
||||
color: #9ca3af;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Set the default cursor for buttons.
|
||||
*/
|
||||
|
||||
button,
|
||||
[role="button"] {
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
/*
|
||||
Make sure disabled buttons don't get the pointer cursor.
|
||||
*/
|
||||
|
||||
:disabled {
|
||||
cursor: default;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Make replaced elements `display: block` by default. (https://github.com/mozdevs/cssremedy/issues/14)
|
||||
2. Add `vertical-align: middle` to align replaced elements more sensibly by default. (https://github.com/jensimmons/cssremedy/issues/14#issuecomment-634934210)
|
||||
This can trigger a poorly considered lint error in some tools but is included by design.
|
||||
*/
|
||||
|
||||
img,
|
||||
svg,
|
||||
video,
|
||||
canvas,
|
||||
audio,
|
||||
iframe,
|
||||
embed,
|
||||
object {
|
||||
display: block;
|
||||
/* 1 */
|
||||
vertical-align: middle;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Constrain images and videos to the parent width and preserve their intrinsic aspect ratio. (https://github.com/mozdevs/cssremedy/issues/14)
|
||||
*/
|
||||
|
||||
img,
|
||||
video {
|
||||
max-width: 100%;
|
||||
height: auto;
|
||||
}
|
||||
|
||||
/* Make elements with the HTML hidden attribute stay hidden by default */
|
||||
|
||||
[hidden]:where(:not([hidden="until-found"])) {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.static {
|
||||
position: static;
|
||||
}
|
||||
|
||||
.absolute {
|
||||
position: absolute;
|
||||
}
|
||||
|
||||
.relative {
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.right-2 {
|
||||
right: 0.5rem;
|
||||
}
|
||||
|
||||
.right-6 {
|
||||
right: 1.5rem;
|
||||
}
|
||||
|
||||
.top-1\/2 {
|
||||
top: 50%;
|
||||
}
|
||||
|
||||
.top-4 {
|
||||
top: 1rem;
|
||||
}
|
||||
|
||||
.z-10 {
|
||||
z-index: 10;
|
||||
}
|
||||
|
||||
.mb-2 {
|
||||
margin-bottom: 0.5rem;
|
||||
}
|
||||
|
||||
.mb-4 {
|
||||
margin-bottom: 1rem;
|
||||
}
|
||||
|
||||
.mb-6 {
|
||||
margin-bottom: 1.5rem;
|
||||
}
|
||||
|
||||
.mb-8 {
|
||||
margin-bottom: 2rem;
|
||||
}
|
||||
|
||||
.ml-2 {
|
||||
margin-left: 0.5rem;
|
||||
}
|
||||
|
||||
.flex {
|
||||
display: flex;
|
||||
}
|
||||
|
||||
.hidden {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.h-16 {
|
||||
height: 4rem;
|
||||
}
|
||||
|
||||
.h-24 {
|
||||
height: 6rem;
|
||||
}
|
||||
|
||||
.h-4 {
|
||||
height: 1rem;
|
||||
}
|
||||
|
||||
.h-5 {
|
||||
height: 1.25rem;
|
||||
}
|
||||
|
||||
.h-6 {
|
||||
height: 1.5rem;
|
||||
}
|
||||
|
||||
.min-h-screen {
|
||||
min-height: 100vh;
|
||||
}
|
||||
|
||||
.w-16 {
|
||||
width: 4rem;
|
||||
}
|
||||
|
||||
.w-20 {
|
||||
width: 5rem;
|
||||
}
|
||||
|
||||
.w-24 {
|
||||
width: 6rem;
|
||||
}
|
||||
|
||||
.w-4 {
|
||||
width: 1rem;
|
||||
}
|
||||
|
||||
.w-48 {
|
||||
width: 12rem;
|
||||
}
|
||||
|
||||
.w-5 {
|
||||
width: 1.25rem;
|
||||
}
|
||||
|
||||
.w-6 {
|
||||
width: 1.5rem;
|
||||
}
|
||||
|
||||
.w-full {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.flex-1 {
|
||||
flex: 1 1 0%;
|
||||
}
|
||||
|
||||
.-translate-y-1\/2 {
|
||||
--tw-translate-y: -50%;
|
||||
transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));
|
||||
}
|
||||
|
||||
.transform {
|
||||
transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));
|
||||
}
|
||||
|
||||
@keyframes spin {
|
||||
to {
|
||||
transform: rotate(360deg);
|
||||
}
|
||||
}
|
||||
|
||||
.animate-spin {
|
||||
animation: spin 1s linear infinite;
|
||||
}
|
||||
|
||||
.flex-col {
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
.items-center {
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.justify-center {
|
||||
justify-content: center;
|
||||
}
|
||||
|
||||
.space-x-2 > :not([hidden]) ~ :not([hidden]) {
|
||||
--tw-space-x-reverse: 0;
|
||||
margin-right: calc(0.5rem * var(--tw-space-x-reverse));
|
||||
margin-left: calc(0.5rem * calc(1 - var(--tw-space-x-reverse)));
|
||||
}
|
||||
|
||||
.space-x-4 > :not([hidden]) ~ :not([hidden]) {
|
||||
--tw-space-x-reverse: 0;
|
||||
margin-right: calc(1rem * var(--tw-space-x-reverse));
|
||||
margin-left: calc(1rem * calc(1 - var(--tw-space-x-reverse)));
|
||||
}
|
||||
|
||||
.space-x-8 > :not([hidden]) ~ :not([hidden]) {
|
||||
--tw-space-x-reverse: 0;
|
||||
margin-right: calc(2rem * var(--tw-space-x-reverse));
|
||||
margin-left: calc(2rem * calc(1 - var(--tw-space-x-reverse)));
|
||||
}
|
||||
|
||||
.space-y-4 > :not([hidden]) ~ :not([hidden]) {
|
||||
--tw-space-y-reverse: 0;
|
||||
margin-top: calc(1rem * calc(1 - var(--tw-space-y-reverse)));
|
||||
margin-bottom: calc(1rem * var(--tw-space-y-reverse));
|
||||
}
|
||||
|
||||
.space-y-6 > :not([hidden]) ~ :not([hidden]) {
|
||||
--tw-space-y-reverse: 0;
|
||||
margin-top: calc(1.5rem * calc(1 - var(--tw-space-y-reverse)));
|
||||
margin-bottom: calc(1.5rem * var(--tw-space-y-reverse));
|
||||
}
|
||||
|
||||
.rounded {
|
||||
border-radius: 0.25rem;
|
||||
}
|
||||
|
||||
.rounded-full {
|
||||
border-radius: 9999px;
|
||||
}
|
||||
|
||||
.rounded-lg {
|
||||
border-radius: 0.5rem;
|
||||
}
|
||||
|
||||
.rounded-md {
|
||||
border-radius: 0.375rem;
|
||||
}
|
||||
|
||||
.border {
|
||||
border-width: 1px;
|
||||
}
|
||||
|
||||
.border-b {
|
||||
border-bottom-width: 1px;
|
||||
}
|
||||
|
||||
.border-b-2 {
|
||||
border-bottom-width: 2px;
|
||||
}
|
||||
|
||||
.border-black {
|
||||
--tw-border-opacity: 1;
|
||||
border-color: rgb(0 0 0 / var(--tw-border-opacity, 1));
|
||||
}
|
||||
|
||||
.border-gray-200 {
|
||||
--tw-border-opacity: 1;
|
||||
border-color: rgb(229 231 235 / var(--tw-border-opacity, 1));
|
||||
}
|
||||
|
||||
.bg-\[\#111827\] {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(17 24 39 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.bg-gray-100 {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(243 244 246 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.bg-gray-900 {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(17 24 39 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.bg-red-500 {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(239 68 68 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.bg-white {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(255 255 255 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.p-2 {
|
||||
padding: 0.5rem;
|
||||
}
|
||||
|
||||
.p-4 {
|
||||
padding: 1rem;
|
||||
}
|
||||
|
||||
.p-8 {
|
||||
padding: 2rem;
|
||||
}
|
||||
|
||||
.px-1 {
|
||||
padding-left: 0.25rem;
|
||||
padding-right: 0.25rem;
|
||||
}
|
||||
|
||||
.px-3 {
|
||||
padding-left: 0.75rem;
|
||||
padding-right: 0.75rem;
|
||||
}
|
||||
|
||||
.px-4 {
|
||||
padding-left: 1rem;
|
||||
padding-right: 1rem;
|
||||
}
|
||||
|
||||
.py-0\.5 {
|
||||
padding-top: 0.125rem;
|
||||
padding-bottom: 0.125rem;
|
||||
}
|
||||
|
||||
.py-2 {
|
||||
padding-top: 0.5rem;
|
||||
padding-bottom: 0.5rem;
|
||||
}
|
||||
|
||||
.py-4 {
|
||||
padding-top: 1rem;
|
||||
padding-bottom: 1rem;
|
||||
}
|
||||
|
||||
.py-6 {
|
||||
padding-top: 1.5rem;
|
||||
padding-bottom: 1.5rem;
|
||||
}
|
||||
|
||||
.pr-10 {
|
||||
padding-right: 2.5rem;
|
||||
}
|
||||
|
||||
.text-2xl {
|
||||
font-size: 1.5rem;
|
||||
line-height: 2rem;
|
||||
}
|
||||
|
||||
.text-base {
|
||||
font-size: 1rem;
|
||||
line-height: 1.5rem;
|
||||
}
|
||||
|
||||
.text-sm {
|
||||
font-size: 0.875rem;
|
||||
line-height: 1.25rem;
|
||||
}
|
||||
|
||||
.font-medium {
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
.font-semibold {
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.text-blue-500 {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(59 130 246 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.text-blue-600 {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(37 99 235 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.text-gray-400 {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(156 163 175 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.text-gray-500 {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(107 114 128 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.text-gray-600 {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(75 85 99 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.text-white {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(255 255 255 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.opacity-25 {
|
||||
opacity: 0.25;
|
||||
}
|
||||
|
||||
.opacity-75 {
|
||||
opacity: 0.75;
|
||||
}
|
||||
|
||||
.filter {
|
||||
filter: var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow);
|
||||
}
|
||||
|
||||
.hover\:bg-\[\#1f2937\]:hover {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(31 41 55 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.hover\:bg-gray-100:hover {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(243 244 246 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.hover\:bg-red-600:hover {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(220 38 38 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.hover\:text-gray-700:hover {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(55 65 81 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.hover\:text-gray-900:hover {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(17 24 39 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.focus\:outline-none:focus {
|
||||
outline: 2px solid transparent;
|
||||
outline-offset: 2px;
|
||||
}
|
||||
|
||||
.focus\:ring-2:focus {
|
||||
--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
|
||||
--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);
|
||||
box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
|
||||
}
|
||||
|
||||
.focus\:ring-blue-500:focus {
|
||||
--tw-ring-opacity: 1;
|
||||
--tw-ring-color: rgb(59 130 246 / var(--tw-ring-opacity, 1));
|
||||
}
|
||||
1
cli/rustfs-gui/embedded-rustfs/README.md
Normal file
1
cli/rustfs-gui/embedded-rustfs/README.md
Normal file
@@ -0,0 +1 @@
|
||||
rustfs bin path, do not delete
|
||||
3
cli/rustfs-gui/input.css
Normal file
3
cli/rustfs-gui/input.css
Normal file
@@ -0,0 +1,3 @@
|
||||
@tailwind base;
|
||||
@tailwind components;
|
||||
@tailwind utilities;
|
||||
315
cli/rustfs-gui/src/components/home.rs
Normal file
315
cli/rustfs-gui/src/components/home.rs
Normal file
@@ -0,0 +1,315 @@
|
||||
use crate::components::navbar::LoadingSpinner;
|
||||
use crate::route::Route;
|
||||
use crate::utils::{RustFSConfig, ServiceManager};
|
||||
use chrono::Datelike;
|
||||
use dioxus::logger::tracing::debug;
|
||||
use dioxus::prelude::*;
|
||||
use std::time::Duration;
|
||||
|
||||
const HEADER_LOGO: Asset = asset!("/assets/rustfs-logo.svg");
|
||||
const TAILWIND_CSS: Asset = asset!("/assets/tailwind.css");
|
||||
|
||||
/// Define the state of the service
|
||||
#[derive(PartialEq, Debug, Clone)]
|
||||
enum ServiceState {
|
||||
Start,
|
||||
Stop,
|
||||
}
|
||||
|
||||
/// Define the Home component
|
||||
/// The Home component is the main component of the application
|
||||
/// It is responsible for starting and stopping the service
|
||||
/// It also displays the service status and provides a button to toggle the service
|
||||
/// The Home component also displays the footer of the application
|
||||
/// The footer contains links to the official site, documentation, GitHub, and license
|
||||
/// The footer also displays the version of the application
|
||||
/// The Home component also contains a button to change the theme of the application
|
||||
/// The Home component also contains a button to go to the settings page
|
||||
#[component]
|
||||
pub fn Home() -> Element {
|
||||
#[allow(clippy::redundant_closure)]
|
||||
let service = use_signal(|| ServiceManager::new());
|
||||
let conf = RustFSConfig::load().unwrap_or_else(|e| {
|
||||
ServiceManager::show_error(&format!("加载配置失败:{}", e));
|
||||
RustFSConfig::default()
|
||||
});
|
||||
|
||||
debug!("loaded configurations:{:?}", conf);
|
||||
let config = use_signal(|| conf.clone());
|
||||
|
||||
use dioxus_router::prelude::Link;
|
||||
use document::{Meta, Stylesheet, Title};
|
||||
let mut service_state = use_signal(|| ServiceState::Start);
|
||||
// Create a periodic check on the effect of the service status
|
||||
use_effect(move || {
|
||||
spawn(async move {
|
||||
loop {
|
||||
if let Some(pid) = ServiceManager::check_service_status().await {
|
||||
debug!("service_running true pid: {:?}", pid);
|
||||
service_state.set(ServiceState::Stop);
|
||||
} else {
|
||||
debug!("service_running true pid: 0");
|
||||
service_state.set(ServiceState::Start);
|
||||
}
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
}
|
||||
});
|
||||
});
|
||||
debug!("project start service_state: {:?}", service_state.read());
|
||||
// Use 'use_signal' to manage service status
|
||||
let mut loading = use_signal(|| false);
|
||||
let mut start_service = move |_| {
|
||||
let service = service;
|
||||
let config = config.read().clone();
|
||||
let mut service_state = service_state;
|
||||
// set the loading status
|
||||
loading.set(true);
|
||||
debug!("stop loading_state: {:?}", loading.read());
|
||||
spawn(async move {
|
||||
match service.read().start(config).await {
|
||||
Ok(result) => {
|
||||
if result.success {
|
||||
let duration = result.end_time - result.start_time;
|
||||
debug!("The service starts successfully and takes a long time:{}ms", duration.num_milliseconds());
|
||||
service_state.set(ServiceState::Stop);
|
||||
} else {
|
||||
ServiceManager::show_error(&result.message);
|
||||
service_state.set(ServiceState::Start);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
ServiceManager::show_error(&format!("服务启动失败:{}", e));
|
||||
}
|
||||
}
|
||||
// Only set loading to false when it's actually done
|
||||
loading.set(false);
|
||||
debug!("start loading_state: {:?}", loading.read());
|
||||
});
|
||||
};
|
||||
|
||||
let mut stop_service = move |_| {
|
||||
let service = service;
|
||||
let mut service_state = service_state;
|
||||
// set the loading status
|
||||
loading.set(true);
|
||||
spawn(async move {
|
||||
match service.read().stop().await {
|
||||
Ok(result) => {
|
||||
if result.success {
|
||||
let duration = result.end_time - result.start_time;
|
||||
debug!("The service stops successfully and takes a long time:{}ms", duration.num_milliseconds());
|
||||
service_state.set(ServiceState::Start);
|
||||
} else {
|
||||
ServiceManager::show_error(&result.message);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
ServiceManager::show_error(&format!("服务停止失败:{}", e));
|
||||
}
|
||||
}
|
||||
debug!("service_state: {:?}", service_state.read());
|
||||
// Only set loading to false when it's actually done
|
||||
loading.set(false);
|
||||
debug!("stop loading_state: {:?}", loading.read());
|
||||
});
|
||||
};
|
||||
|
||||
// Toggle the state when the button is clicked
|
||||
let toggle_service = {
|
||||
let mut service_state = service_state;
|
||||
debug!("toggle_service service_state: {:?}", service_state.read());
|
||||
move |_| {
|
||||
if service_state.read().eq(&ServiceState::Stop) {
|
||||
// If the service status is started, you need to run a command to stop the service
|
||||
stop_service(());
|
||||
service_state.set(ServiceState::Start);
|
||||
} else {
|
||||
start_service(());
|
||||
service_state.set(ServiceState::Stop);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Define dynamic styles based on state
|
||||
let button_class = if service_state.read().eq(&ServiceState::Start) {
|
||||
"bg-[#111827] hover:bg-[#1f2937] text-white px-4 py-2 rounded-md flex items-center space-x-2"
|
||||
} else {
|
||||
"bg-red-500 hover:bg-red-600 text-white px-4 py-2 rounded-md flex items-center space-x-2"
|
||||
};
|
||||
|
||||
rsx! {
|
||||
// The Stylesheet component inserts a style link into the head of the document
|
||||
Stylesheet {href: TAILWIND_CSS,}
|
||||
Title { "RustFS APP" }
|
||||
Meta {
|
||||
name: "description",
|
||||
content: "RustFS RustFS 用热门安全的 Rust 语言开发,兼容 S3 协议。适用于 AI/ML 及海量数据存储、大数据、互联网、工业和保密存储等全部场景。近乎免费使用。遵循 Apache 2 协议,支持国产保密设备和系统。",
|
||||
}
|
||||
div { class: "min-h-screen flex flex-col items-center bg-white",
|
||||
div { class: "absolute top-4 right-6 flex space-x-2",
|
||||
// change theme
|
||||
button { class: "p-2 hover:bg-gray-100 rounded-lg", ChangeThemeButton {} }
|
||||
// setting button
|
||||
Link {
|
||||
class: "p-2 hover:bg-gray-100 rounded-lg",
|
||||
to: Route::SettingViews {},
|
||||
SettingButton {}
|
||||
}
|
||||
}
|
||||
main { class: "flex-1 flex flex-col items-center justify-center space-y-6 p-4",
|
||||
div { class: "w-24 h-24 bg-gray-900 rounded-full flex items-center justify-center",
|
||||
img { alt: "Logo", class: "w-16 h-16", src: HEADER_LOGO }
|
||||
}
|
||||
div { class: "text-gray-600",
|
||||
"Service is running on "
|
||||
span { class: "text-blue-600", " 127.0.0.1:9000 " }
|
||||
}
|
||||
LoadingSpinner {
|
||||
loading: loading.read().to_owned(),
|
||||
text: "服务处理中...",
|
||||
}
|
||||
button { class: button_class, onclick: toggle_service,
|
||||
svg {
|
||||
class: "h-4 w-4",
|
||||
fill: "none",
|
||||
stroke: "currentColor",
|
||||
view_box: "0 0 24 24",
|
||||
xmlns: "http://www.w3.org/2000/svg",
|
||||
if service_state.read().eq(&ServiceState::Start) {
|
||||
path {
|
||||
d: "M14.752 11.168l-3.197-2.132A1 1 0 0010 9.87v4.263a1 1 0 001.555.832l3.197-2.132a1 1 0 000-1.664z",
|
||||
stroke_linecap: "round",
|
||||
stroke_linejoin: "round",
|
||||
stroke_width: "2",
|
||||
}
|
||||
path {
|
||||
d: "M21 12a9 9 0 11-18 0 9 9 0 0118 0z",
|
||||
stroke_linecap: "round",
|
||||
stroke_linejoin: "round",
|
||||
stroke_width: "2",
|
||||
}
|
||||
} else {
|
||||
path {
|
||||
stroke_linecap: "round",
|
||||
stroke_linejoin: "round",
|
||||
stroke_width: "2",
|
||||
d: "M21 12a9 9 0 11-18 0 9 9 0 0118 0z",
|
||||
}
|
||||
path {
|
||||
stroke_linecap: "round",
|
||||
stroke_linejoin: "round",
|
||||
stroke_width: "2",
|
||||
d: "M9 10h6v4H9z",
|
||||
}
|
||||
}
|
||||
}
|
||||
span { id: "serviceStatus",
|
||||
if service_state.read().eq(&ServiceState::Start) {
|
||||
"Start service"
|
||||
} else {
|
||||
"Stop service"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Footer { version: "v1.0.0".to_string() }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[component]
|
||||
pub fn Footer(version: String) -> Element {
|
||||
let now = chrono::Local::now();
|
||||
let year = now.naive_local().year();
|
||||
rsx! {
|
||||
footer { class: "w-full py-6 flex flex-col items-center space-y-4 mb-6",
|
||||
nav { class: "flex space-x-4 text-gray-600",
|
||||
a { class: "hover:text-gray-900", href: "https://rustfs.com", "Official Site" }
|
||||
a {
|
||||
class: "hover:text-gray-900",
|
||||
href: "https://rustfs.com/docs",
|
||||
"Documentation"
|
||||
}
|
||||
a {
|
||||
class: "hover:text-gray-900",
|
||||
href: "https://github.com/rustfs/rustfs",
|
||||
"GitHub"
|
||||
}
|
||||
a {
|
||||
class: "hover:text-gray-900",
|
||||
href: "https://rustfs.com/docs/license/",
|
||||
"License"
|
||||
}
|
||||
a { class: "hover:text-gray-900", href: "#", "Sponsors" }
|
||||
}
|
||||
div { class: "text-gray-500 text-sm", " © rustfs.com {year}, All rights reserved." }
|
||||
div { class: "text-gray-400 text-sm mb-8", " version {version} " }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[component]
|
||||
pub fn GoBackButtons() -> Element {
|
||||
rsx! {
|
||||
button {
|
||||
class: "p-2 hover:bg-gray-100 rounded-lg",
|
||||
"onclick": "window.history.back()",
|
||||
"Back to the Past"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[component]
|
||||
pub fn GoForwardButtons() -> Element {
|
||||
rsx! {
|
||||
button {
|
||||
class: "p-2 hover:bg-gray-100 rounded-lg",
|
||||
"onclick": "window.history.forward()",
|
||||
"Back to the Future"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[component]
|
||||
pub fn ChangeThemeButton() -> Element {
|
||||
rsx! {
|
||||
svg {
|
||||
class: "h-6 w-6 text-gray-600",
|
||||
fill: "none",
|
||||
stroke: "currentColor",
|
||||
view_box: "0 0 24 24",
|
||||
xmlns: "http://www.w3.org/2000/svg",
|
||||
path {
|
||||
d: "M9 3v2m6-2v2M9 19v2m6-2v2M5 9H3m2 6H3m18-6h-2m2 6h-2M7 19h10a2 2 0 002-2V7a2 2 0 00-2-2H7a2 2 0 00-2 2v10a2 2 0 002 2zM9 9h6v6H9V9z",
|
||||
stroke_linecap: "round",
|
||||
stroke_linejoin: "round",
|
||||
stroke_width: "2",
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[component]
|
||||
pub fn SettingButton() -> Element {
|
||||
rsx! {
|
||||
svg {
|
||||
class: "h-6 w-6 text-gray-600",
|
||||
fill: "none",
|
||||
stroke: "currentColor",
|
||||
view_box: "0 0 24 24",
|
||||
xmlns: "http://www.w3.org/2000/svg",
|
||||
path {
|
||||
d: "M10.325 4.317c.426-1.756 2.924-1.756 3.35 0a1.724 1.724 0 002.573 1.066c1.543-.94 3.31.826 2.37 2.37a1.724 1.724 0 001.065 2.572c1.756.426 1.756 2.924 0 3.35a1.724 1.724 0 00-1.066 2.573c.94 1.543-.826 3.31-2.37 2.37a1.724 1.724 0 00-2.572 1.065c-.426 1.756-2.924 1.756-3.35 0a1.724 1.724 0 00-2.573-1.066c-1.543.94-3.31-.826-2.37-2.37a1.724 1.724 0 00-1.065-2.572c-1.756-.426-1.756-2.924 0-3.35a1.724 1.724 0 001.066-2.573c-.94-1.543.826-3.31 2.37-2.37.996.608 2.296.07 2.572-1.065z",
|
||||
stroke_linecap: "round",
|
||||
stroke_linejoin: "round",
|
||||
stroke_width: "2",
|
||||
}
|
||||
path {
|
||||
d: "M15 12a3 3 0 11-6 0 3 3 0 016 0z",
|
||||
stroke_linecap: "round",
|
||||
stroke_linejoin: "round",
|
||||
stroke_width: "2",
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
6
cli/rustfs-gui/src/components/mod.rs
Normal file
6
cli/rustfs-gui/src/components/mod.rs
Normal file
@@ -0,0 +1,6 @@
|
||||
mod home;
|
||||
pub use home::Home;
|
||||
mod navbar;
|
||||
pub use navbar::Navbar;
|
||||
mod setting;
|
||||
pub use setting::Setting;
|
||||
60
cli/rustfs-gui/src/components/navbar.rs
Normal file
60
cli/rustfs-gui/src/components/navbar.rs
Normal file
@@ -0,0 +1,60 @@
|
||||
use crate::route::Route;
|
||||
use dioxus::logger::tracing::debug;
|
||||
use dioxus::prelude::*;
|
||||
|
||||
const NAVBAR_CSS: Asset = asset!("/assets/styling/navbar.css");
|
||||
|
||||
#[component]
|
||||
pub fn Navbar() -> Element {
|
||||
rsx! {
|
||||
document::Link { rel: "stylesheet", href: NAVBAR_CSS }
|
||||
|
||||
div { id: "navbar", class: "hidden", style: "display: none;",
|
||||
Link { to: Route::HomeViews {}, "Home" }
|
||||
Link { to: Route::SettingViews {}, "Setting" }
|
||||
}
|
||||
|
||||
Outlet::<Route> {}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Props, PartialEq, Debug, Clone)]
|
||||
pub struct LoadingSpinnerProps {
|
||||
#[props(default = true)]
|
||||
loading: bool,
|
||||
#[props(default = "正在处理中...")]
|
||||
text: &'static str,
|
||||
}
|
||||
|
||||
#[component]
|
||||
pub fn LoadingSpinner(props: LoadingSpinnerProps) -> Element {
|
||||
debug!("loading: {}", props.loading);
|
||||
if !props.loading {
|
||||
debug!("LoadingSpinner false loading: {}", props.loading);
|
||||
return rsx! {};
|
||||
}
|
||||
rsx! {
|
||||
div { class: "flex items-center justify-center z-10",
|
||||
svg {
|
||||
class: "animate-spin h-5 w-5 text-blue-500",
|
||||
xmlns: "http://www.w3.org/2000/svg",
|
||||
fill: "none",
|
||||
view_box: "0 0 24 24",
|
||||
circle {
|
||||
class: "opacity-25",
|
||||
cx: "12",
|
||||
cy: "12",
|
||||
r: "10",
|
||||
stroke: "currentColor",
|
||||
stroke_width: "4",
|
||||
}
|
||||
path {
|
||||
class: "opacity-75",
|
||||
fill: "currentColor",
|
||||
d: "M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z",
|
||||
}
|
||||
}
|
||||
span { class: "ml-2 text-gray-600", "{props.text}" }
|
||||
}
|
||||
}
|
||||
}
|
||||
202
cli/rustfs-gui/src/components/setting.rs
Normal file
202
cli/rustfs-gui/src/components/setting.rs
Normal file
@@ -0,0 +1,202 @@
|
||||
use crate::components::navbar::LoadingSpinner;
|
||||
use dioxus::logger::tracing::{debug, error};
|
||||
use dioxus::prelude::*;
|
||||
|
||||
const SETTINGS_JS: Asset = asset!("/assets/js/sts.js");
|
||||
const TAILWIND_CSS: Asset = asset!("/assets/tailwind.css");
|
||||
#[component]
|
||||
pub fn Setting() -> Element {
|
||||
use crate::utils::{RustFSConfig, ServiceManager};
|
||||
use document::{Meta, Script, Stylesheet, Title};
|
||||
|
||||
#[allow(clippy::redundant_closure)]
|
||||
let service = use_signal(|| ServiceManager::new());
|
||||
let conf = RustFSConfig::load().unwrap_or_else(|e| {
|
||||
error!("load config error: {}", e);
|
||||
RustFSConfig::default_config()
|
||||
});
|
||||
debug!("conf address: {:?}", conf.clone().address);
|
||||
|
||||
let config = use_signal(|| conf.clone());
|
||||
let address_state = use_signal(|| conf.address.to_string());
|
||||
let mut host_state = use_signal(|| conf.host.to_string());
|
||||
let mut port_state = use_signal(|| conf.port.to_string());
|
||||
let mut access_key_state = use_signal(|| conf.access_key.to_string());
|
||||
let mut secret_key_state = use_signal(|| conf.secret_key.to_string());
|
||||
let mut volume_name_state = use_signal(|| conf.volume_name.to_string());
|
||||
let loading = use_signal(|| false);
|
||||
|
||||
let save_and_restart = {
|
||||
let host_state = host_state;
|
||||
let port_state = port_state;
|
||||
let access_key_state = access_key_state;
|
||||
let secret_key_state = secret_key_state;
|
||||
let volume_name_state = volume_name_state;
|
||||
let mut loading = loading;
|
||||
debug!("save_and_restart access_key:{}", access_key_state.read());
|
||||
move |_| {
|
||||
// set the loading status
|
||||
loading.set(true);
|
||||
let mut config = config;
|
||||
config.write().address = format!("{}:{}", host_state.read(), port_state.read());
|
||||
config.write().host = host_state.read().to_string();
|
||||
config.write().port = port_state.read().to_string();
|
||||
config.write().access_key = access_key_state.read().to_string();
|
||||
config.write().secret_key = secret_key_state.read().to_string();
|
||||
config.write().volume_name = volume_name_state.read().to_string();
|
||||
// restart service
|
||||
let service = service;
|
||||
let config = config.read().clone();
|
||||
spawn(async move {
|
||||
if let Err(e) = service.read().restart(config).await {
|
||||
ServiceManager::show_error(&format!("发送重启命令失败:{}", e));
|
||||
}
|
||||
// reset the status when you're done
|
||||
loading.set(false);
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
rsx! {
|
||||
Title { "Settings - RustFS App" }
|
||||
Meta { name: "description", content: "Settings - RustFS App." }
|
||||
// The Stylesheet component inserts a style link into the head of the document
|
||||
Stylesheet { href: TAILWIND_CSS }
|
||||
Script { src: SETTINGS_JS }
|
||||
div { class: "bg-white p-8",
|
||||
h1 { class: "text-2xl font-semibold mb-6", "Settings" }
|
||||
div { class: "border-b border-gray-200 mb-6",
|
||||
nav { class: "flex space-x-8",
|
||||
button {
|
||||
class: "tab-btn px-1 py-4 text-sm font-medium border-b-2 border-black",
|
||||
"data-tab": "service",
|
||||
"onclick": "switchTab('service')",
|
||||
"Service "
|
||||
}
|
||||
button {
|
||||
class: "tab-btn px-1 py-4 text-sm font-medium text-gray-500 hover:text-gray-700",
|
||||
"data-tab": "user",
|
||||
"onclick": "switchTab('user')",
|
||||
"User "
|
||||
}
|
||||
button {
|
||||
class: "tab-btn px-1 py-4 text-sm font-medium text-gray-500 hover:text-gray-700 hidden",
|
||||
"data-tab": "logs",
|
||||
"onclick": "switchTab('logs')",
|
||||
"Logs "
|
||||
}
|
||||
}
|
||||
}
|
||||
div { id: "tabContent",
|
||||
div { class: "tab-content", id: "service",
|
||||
div { class: "mb-8",
|
||||
h2 { class: "text-base font-medium mb-2", "Service address" }
|
||||
p { class: "text-gray-600 mb-4",
|
||||
" The service address is the IP address and port number of the service. the default address is "
|
||||
code { class: "bg-gray-100 px-1 py-0.5 rounded", {address_state} }
|
||||
". "
|
||||
}
|
||||
div { class: "flex space-x-2",
|
||||
input {
|
||||
class: "border rounded px-3 py-2 w-48 focus:outline-none focus:ring-2 focus:ring-blue-500",
|
||||
r#type: "text",
|
||||
value: host_state,
|
||||
oninput: move |evt| host_state.set(evt.value().clone()),
|
||||
}
|
||||
span { class: "flex items-center", ":" }
|
||||
input {
|
||||
class: "border rounded px-3 py-2 w-20 focus:outline-none focus:ring-2 focus:ring-blue-500",
|
||||
r#type: "text",
|
||||
value: port_state,
|
||||
oninput: move |evt| port_state.set(evt.value().clone()),
|
||||
}
|
||||
}
|
||||
}
|
||||
div { class: "mb-8",
|
||||
h2 { class: "text-base font-medium mb-2", "Storage path" }
|
||||
p { class: "text-gray-600 mb-4",
|
||||
"Update the storage path of the service. the default path is {volume_name_state}."
|
||||
}
|
||||
input {
|
||||
class: "border rounded px-3 py-2 w-full focus:outline-none focus:ring-2 focus:ring-blue-500",
|
||||
r#type: "text",
|
||||
value: volume_name_state,
|
||||
oninput: move |evt| volume_name_state.set(evt.value().clone()),
|
||||
}
|
||||
}
|
||||
}
|
||||
div { class: "tab-content hidden", id: "user",
|
||||
div { class: "mb-8",
|
||||
h2 { class: "text-base font-medium mb-2", "User" }
|
||||
p { class: "text-gray-600 mb-4",
|
||||
"The user is the owner of the service. the default user is "
|
||||
code { class: "bg-gray-100 px-1 py-0.5 rounded", {access_key_state} }
|
||||
}
|
||||
input {
|
||||
class: "border rounded px-3 py-2 w-full focus:outline-none focus:ring-2 focus:ring-blue-500",
|
||||
r#type: "text",
|
||||
value: access_key_state,
|
||||
oninput: move |evt| access_key_state.set(evt.value().clone()),
|
||||
}
|
||||
}
|
||||
div { class: "mb-8",
|
||||
h2 { class: "text-base font-medium mb-2", "Password" }
|
||||
p { class: "text-gray-600 mb-4",
|
||||
"The password is the password of the user. the default password is "
|
||||
code { class: "bg-gray-100 px-1 py-0.5 rounded", {secret_key_state} }
|
||||
}
|
||||
div { class: "relative",
|
||||
input {
|
||||
class: "border rounded px-3 py-2 w-full pr-10 focus:outline-none focus:ring-2 focus:ring-blue-500",
|
||||
r#type: "password",
|
||||
value: secret_key_state,
|
||||
oninput: move |evt| secret_key_state.set(evt.value().clone()),
|
||||
}
|
||||
button {
|
||||
class: "absolute right-2 top-1/2 transform -translate-y-1/2 text-gray-500 hover:text-gray-700",
|
||||
"onclick": "togglePassword(this)",
|
||||
svg {
|
||||
class: "h-5 w-5",
|
||||
fill: "currentColor",
|
||||
view_box: "0 0 20 20",
|
||||
xmlns: "http://www.w3.org/2000/svg",
|
||||
path { d: "M10 12a2 2 0 100-4 2 2 0 000 4z" }
|
||||
path {
|
||||
clip_rule: "evenodd",
|
||||
d: "M.458 10C1.732 5.943 5.522 3 10 3s8.268 2.943 9.542 7c-1.274 4.057-5.064 7-9.542 7S1.732 14.057.458 10zM14 10a4 4 0 11-8 0 4 4 0 018 0z",
|
||||
fill_rule: "evenodd",
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
div { class: "tab-content hidden", id: "logs",
|
||||
div { class: "mb-8",
|
||||
h2 { class: "text-base font-medium mb-2", "Logs storage path" }
|
||||
p { class: "text-gray-600 mb-4",
|
||||
"The logs storage path is the path where the logs are stored. the default path is /var/log/rustfs. "
|
||||
}
|
||||
input {
|
||||
class: "border rounded px-3 py-2 w-full focus:outline-none focus:ring-2 focus:ring-blue-500",
|
||||
r#type: "text",
|
||||
value: "/var/logs/rustfs",
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
div { class: "flex space-x-4",
|
||||
button {
|
||||
class: "bg-[#111827] text-white px-4 py-2 rounded hover:bg-[#1f2937]",
|
||||
onclick: save_and_restart,
|
||||
" Save and restart "
|
||||
}
|
||||
GoBackButton { "Back" }
|
||||
}
|
||||
LoadingSpinner {
|
||||
loading: loading.read().to_owned(),
|
||||
text: "服务处理中...",
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
9
cli/rustfs-gui/src/main.rs
Normal file
9
cli/rustfs-gui/src/main.rs
Normal file
@@ -0,0 +1,9 @@
|
||||
mod components;
|
||||
mod route;
|
||||
mod utils;
|
||||
mod views;
|
||||
|
||||
fn main() {
|
||||
let _worker_guard = utils::init_logger();
|
||||
dioxus::launch(views::App);
|
||||
}
|
||||
3
cli/rustfs-gui/src/route/mod.rs
Normal file
3
cli/rustfs-gui/src/route/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
mod router;
|
||||
|
||||
pub use router::Route;
|
||||
14
cli/rustfs-gui/src/route/router.rs
Normal file
14
cli/rustfs-gui/src/route/router.rs
Normal file
@@ -0,0 +1,14 @@
|
||||
use crate::components::Navbar;
|
||||
use crate::views::{HomeViews, SettingViews};
|
||||
use dioxus::prelude::*;
|
||||
|
||||
/// The router for the application
|
||||
#[derive(Debug, Clone, Routable, PartialEq)]
|
||||
#[rustfmt::skip]
|
||||
pub enum Route {
|
||||
#[layout(Navbar)]
|
||||
#[route("/")]
|
||||
HomeViews {},
|
||||
#[route("/settings")]
|
||||
SettingViews {},
|
||||
}
|
||||
550
cli/rustfs-gui/src/utils/config.rs
Normal file
550
cli/rustfs-gui/src/utils/config.rs
Normal file
@@ -0,0 +1,550 @@
|
||||
use keyring::Entry;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::error::Error;
|
||||
|
||||
/// Configuration for the RustFS service
|
||||
///
|
||||
/// # Fields
|
||||
/// * `address` - The address of the RustFS service
|
||||
/// * `host` - The host of the RustFS service
|
||||
/// * `port` - The port of the RustFS service
|
||||
/// * `access_key` - The access key of the RustFS service
|
||||
/// * `secret_key` - The secret key of the RustFS service
|
||||
/// * `domain_name` - The domain name of the RustFS service
|
||||
/// * `volume_name` - The volume name of the RustFS service
|
||||
/// * `console_address` - The console address of the RustFS service
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig {
|
||||
/// address: "127.0.0.1:9000".to_string(),
|
||||
/// host: "127.0.0.1".to_string(),
|
||||
/// port: "9000".to_string(),
|
||||
/// access_key: "rustfsadmin".to_string(),
|
||||
/// secret_key: "rustfsadmin".to_string(),
|
||||
/// domain_name: "demo.rustfs.com".to_string(),
|
||||
/// volume_name: "data".to_string(),
|
||||
/// console_address: "127.0.0.1:9001".to_string(),
|
||||
/// };
|
||||
/// println!("{:?}", config);
|
||||
/// assert_eq!(config.address, "127.0.0.1:9000");
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Default, Deserialize, Serialize, Ord, PartialOrd, Eq, PartialEq)]
|
||||
pub struct RustFSConfig {
|
||||
pub address: String,
|
||||
pub host: String,
|
||||
pub port: String,
|
||||
pub access_key: String,
|
||||
pub secret_key: String,
|
||||
pub domain_name: String,
|
||||
pub volume_name: String,
|
||||
pub console_address: String,
|
||||
}
|
||||
|
||||
impl RustFSConfig {
|
||||
/// keyring the name of the service
|
||||
const SERVICE_NAME: &'static str = "rustfs-service";
|
||||
/// keyring the key of the service
|
||||
const SERVICE_KEY: &'static str = "rustfs_key";
|
||||
/// default domain name
|
||||
const DEFAULT_DOMAIN_NAME_VALUE: &'static str = "demo.rustfs.com";
|
||||
/// default address value
|
||||
const DEFAULT_ADDRESS_VALUE: &'static str = "127.0.0.1:9000";
|
||||
/// default port value
|
||||
const DEFAULT_PORT_VALUE: &'static str = "9000";
|
||||
/// default host value
|
||||
const DEFAULT_HOST_VALUE: &'static str = "127.0.0.1";
|
||||
/// default access key value
|
||||
const DEFAULT_ACCESS_KEY_VALUE: &'static str = "rustfsadmin";
|
||||
/// default secret key value
|
||||
const DEFAULT_SECRET_KEY_VALUE: &'static str = "rustfsadmin";
|
||||
/// default console address value
|
||||
const DEFAULT_CONSOLE_ADDRESS_VALUE: &'static str = "127.0.0.1:9001";
|
||||
|
||||
/// get the default volume_name
|
||||
///
|
||||
/// # Returns
|
||||
/// * The default volume name
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let volume_name = RustFSConfig::default_volume_name();
|
||||
/// ```
|
||||
pub fn default_volume_name() -> String {
|
||||
dirs::home_dir()
|
||||
.map(|home| home.join("rustfs").join("data"))
|
||||
.and_then(|path| path.to_str().map(String::from))
|
||||
.unwrap_or_else(|| "data".to_string())
|
||||
}
|
||||
|
||||
/// create a default configuration
|
||||
///
|
||||
/// # Returns
|
||||
/// * The default configuration
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig::default_config();
|
||||
/// println!("{:?}", config);
|
||||
/// assert_eq!(config.address, "127.0.0.1:9000");
|
||||
/// ```
|
||||
pub fn default_config() -> Self {
|
||||
Self {
|
||||
address: Self::DEFAULT_ADDRESS_VALUE.to_string(),
|
||||
host: Self::DEFAULT_HOST_VALUE.to_string(),
|
||||
port: Self::DEFAULT_PORT_VALUE.to_string(),
|
||||
access_key: Self::DEFAULT_ACCESS_KEY_VALUE.to_string(),
|
||||
secret_key: Self::DEFAULT_SECRET_KEY_VALUE.to_string(),
|
||||
domain_name: Self::DEFAULT_DOMAIN_NAME_VALUE.to_string(),
|
||||
volume_name: Self::default_volume_name(),
|
||||
console_address: Self::DEFAULT_CONSOLE_ADDRESS_VALUE.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Load the configuration from the keyring
|
||||
///
|
||||
/// # Errors
|
||||
/// * If the configuration cannot be loaded from the keyring
|
||||
/// * If the configuration cannot be deserialized
|
||||
/// * If the address cannot be extracted from the configuration
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig::load().unwrap();
|
||||
/// println!("{:?}", config);
|
||||
/// assert_eq!(config.address, "127.0.0.1:9000");
|
||||
/// ```
|
||||
pub fn load() -> Result<Self, Box<dyn Error>> {
|
||||
let mut config = Self::default_config();
|
||||
|
||||
// Try to get the configuration of the storage from the keyring
|
||||
let entry = Entry::new(Self::SERVICE_NAME, Self::SERVICE_KEY)?;
|
||||
if let Ok(stored_json) = entry.get_password() {
|
||||
if let Ok(stored_config) = serde_json::from_str::<RustFSConfig>(&stored_json) {
|
||||
// update fields that are not empty and non default
|
||||
if !stored_config.address.is_empty() && stored_config.address != Self::DEFAULT_ADDRESS_VALUE {
|
||||
config.address = stored_config.address;
|
||||
let (host, port) = Self::extract_host_port(config.address.as_str())
|
||||
.ok_or_else(|| format!("无法从地址 '{}' 中提取主机和端口", config.address))?;
|
||||
config.host = host.to_string();
|
||||
config.port = port.to_string();
|
||||
}
|
||||
if !stored_config.access_key.is_empty() && stored_config.access_key != Self::DEFAULT_ACCESS_KEY_VALUE {
|
||||
config.access_key = stored_config.access_key;
|
||||
}
|
||||
if !stored_config.secret_key.is_empty() && stored_config.secret_key != Self::DEFAULT_SECRET_KEY_VALUE {
|
||||
config.secret_key = stored_config.secret_key;
|
||||
}
|
||||
if !stored_config.domain_name.is_empty() && stored_config.domain_name != Self::DEFAULT_DOMAIN_NAME_VALUE {
|
||||
config.domain_name = stored_config.domain_name;
|
||||
}
|
||||
// The stored volume_name is updated only if it is not empty and different from the default
|
||||
if !stored_config.volume_name.is_empty() && stored_config.volume_name != Self::default_volume_name() {
|
||||
config.volume_name = stored_config.volume_name;
|
||||
}
|
||||
if !stored_config.console_address.is_empty()
|
||||
&& stored_config.console_address != Self::DEFAULT_CONSOLE_ADDRESS_VALUE
|
||||
{
|
||||
config.console_address = stored_config.console_address;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
/// Auxiliary method: Extract the host and port from the address string
|
||||
/// # Arguments
|
||||
/// * `address` - The address string
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Some((host, port))` - The host and port
|
||||
///
|
||||
/// # Errors
|
||||
/// * If the address is not in the form 'host:port'
|
||||
/// * If the port is not a valid u16
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let (host, port) = RustFSConfig::extract_host_port("127.0.0.1:9000").unwrap();
|
||||
/// assert_eq!(host, "127.0.0.1");
|
||||
/// assert_eq!(port, 9000);
|
||||
/// ```
|
||||
pub fn extract_host_port(address: &str) -> Option<(&str, u16)> {
|
||||
let parts: Vec<&str> = address.split(':').collect();
|
||||
if parts.len() == 2 {
|
||||
if let Ok(port) = parts[1].parse::<u16>() {
|
||||
return Some((parts[0], port));
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// save the configuration to keyring
|
||||
///
|
||||
/// # Errors
|
||||
/// * If the configuration cannot be serialized
|
||||
/// * If the configuration cannot be saved to the keyring
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig::default_config();
|
||||
/// config.save().unwrap();
|
||||
/// ```
|
||||
pub fn save(&self) -> Result<(), Box<dyn Error>> {
|
||||
let entry = Entry::new(Self::SERVICE_NAME, Self::SERVICE_KEY)?;
|
||||
let json = serde_json::to_string(self)?;
|
||||
entry.set_password(&json)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Clear the stored configuration from the system keyring
|
||||
///
|
||||
/// # Returns
|
||||
/// Returns `Ok(())` if the configuration was successfully cleared, or an error if the operation failed.
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// RustFSConfig::clear().unwrap();
|
||||
/// ```
|
||||
#[allow(dead_code)]
|
||||
pub fn clear() -> Result<(), Box<dyn Error>> {
|
||||
let entry = Entry::new(Self::SERVICE_NAME, Self::SERVICE_KEY)?;
|
||||
entry.delete_credential()?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_rustfs_config_default() {
|
||||
let config = RustFSConfig::default();
|
||||
assert!(config.address.is_empty());
|
||||
assert!(config.host.is_empty());
|
||||
assert!(config.port.is_empty());
|
||||
assert!(config.access_key.is_empty());
|
||||
assert!(config.secret_key.is_empty());
|
||||
assert!(config.domain_name.is_empty());
|
||||
assert!(config.volume_name.is_empty());
|
||||
assert!(config.console_address.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rustfs_config_creation() {
|
||||
let config = RustFSConfig {
|
||||
address: "192.168.1.100:9000".to_string(),
|
||||
host: "192.168.1.100".to_string(),
|
||||
port: "9000".to_string(),
|
||||
access_key: "testuser".to_string(),
|
||||
secret_key: "testpass".to_string(),
|
||||
domain_name: "test.rustfs.com".to_string(),
|
||||
volume_name: "/data/rustfs".to_string(),
|
||||
console_address: "192.168.1.100:9001".to_string(),
|
||||
};
|
||||
|
||||
assert_eq!(config.address, "192.168.1.100:9000");
|
||||
assert_eq!(config.host, "192.168.1.100");
|
||||
assert_eq!(config.port, "9000");
|
||||
assert_eq!(config.access_key, "testuser");
|
||||
assert_eq!(config.secret_key, "testpass");
|
||||
assert_eq!(config.domain_name, "test.rustfs.com");
|
||||
assert_eq!(config.volume_name, "/data/rustfs");
|
||||
assert_eq!(config.console_address, "192.168.1.100:9001");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_volume_name() {
|
||||
let volume_name = RustFSConfig::default_volume_name();
|
||||
assert!(!volume_name.is_empty());
|
||||
// Should either be the home directory path or fallback to "data"
|
||||
assert!(volume_name.contains("rustfs") || volume_name == "data");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_config() {
|
||||
let config = RustFSConfig::default_config();
|
||||
assert_eq!(config.address, RustFSConfig::DEFAULT_ADDRESS_VALUE);
|
||||
assert_eq!(config.host, RustFSConfig::DEFAULT_HOST_VALUE);
|
||||
assert_eq!(config.port, RustFSConfig::DEFAULT_PORT_VALUE);
|
||||
assert_eq!(config.access_key, RustFSConfig::DEFAULT_ACCESS_KEY_VALUE);
|
||||
assert_eq!(config.secret_key, RustFSConfig::DEFAULT_SECRET_KEY_VALUE);
|
||||
assert_eq!(config.domain_name, RustFSConfig::DEFAULT_DOMAIN_NAME_VALUE);
|
||||
assert_eq!(config.console_address, RustFSConfig::DEFAULT_CONSOLE_ADDRESS_VALUE);
|
||||
assert!(!config.volume_name.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_host_port_valid() {
|
||||
let test_cases = vec![
|
||||
("127.0.0.1:9000", Some(("127.0.0.1", 9000))),
|
||||
("localhost:8080", Some(("localhost", 8080))),
|
||||
("192.168.1.100:3000", Some(("192.168.1.100", 3000))),
|
||||
("0.0.0.0:80", Some(("0.0.0.0", 80))),
|
||||
("example.com:443", Some(("example.com", 443))),
|
||||
];
|
||||
|
||||
for (input, expected) in test_cases {
|
||||
let result = RustFSConfig::extract_host_port(input);
|
||||
assert_eq!(result, expected, "Failed for input: {}", input);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_host_port_invalid() {
|
||||
let invalid_cases = vec![
|
||||
"127.0.0.1", // Missing port
|
||||
"127.0.0.1:", // Empty port
|
||||
"127.0.0.1:abc", // Invalid port
|
||||
"127.0.0.1:99999", // Port out of range
|
||||
"", // Empty string
|
||||
"127.0.0.1:9000:extra", // Too many parts
|
||||
"invalid", // No colon
|
||||
];
|
||||
|
||||
for input in invalid_cases {
|
||||
let result = RustFSConfig::extract_host_port(input);
|
||||
assert_eq!(result, None, "Should be None for input: {}", input);
|
||||
}
|
||||
|
||||
// Special case: empty host but valid port should still work
|
||||
let result = RustFSConfig::extract_host_port(":9000");
|
||||
assert_eq!(result, Some(("", 9000)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_host_port_edge_cases() {
|
||||
// Test edge cases for port numbers
|
||||
assert_eq!(RustFSConfig::extract_host_port("host:0"), Some(("host", 0)));
|
||||
assert_eq!(RustFSConfig::extract_host_port("host:65535"), Some(("host", 65535)));
|
||||
assert_eq!(RustFSConfig::extract_host_port("host:65536"), None); // Out of range
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialization() {
|
||||
let config = RustFSConfig {
|
||||
address: "127.0.0.1:9000".to_string(),
|
||||
host: "127.0.0.1".to_string(),
|
||||
port: "9000".to_string(),
|
||||
access_key: "admin".to_string(),
|
||||
secret_key: "password".to_string(),
|
||||
domain_name: "test.com".to_string(),
|
||||
volume_name: "/data".to_string(),
|
||||
console_address: "127.0.0.1:9001".to_string(),
|
||||
};
|
||||
|
||||
let json = serde_json::to_string(&config).unwrap();
|
||||
assert!(json.contains("127.0.0.1:9000"));
|
||||
assert!(json.contains("admin"));
|
||||
assert!(json.contains("test.com"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_deserialization() {
|
||||
let json = r#"{
|
||||
"address": "192.168.1.100:9000",
|
||||
"host": "192.168.1.100",
|
||||
"port": "9000",
|
||||
"access_key": "testuser",
|
||||
"secret_key": "testpass",
|
||||
"domain_name": "example.com",
|
||||
"volume_name": "/opt/data",
|
||||
"console_address": "192.168.1.100:9001"
|
||||
}"#;
|
||||
|
||||
let config: RustFSConfig = serde_json::from_str(json).unwrap();
|
||||
assert_eq!(config.address, "192.168.1.100:9000");
|
||||
assert_eq!(config.host, "192.168.1.100");
|
||||
assert_eq!(config.port, "9000");
|
||||
assert_eq!(config.access_key, "testuser");
|
||||
assert_eq!(config.secret_key, "testpass");
|
||||
assert_eq!(config.domain_name, "example.com");
|
||||
assert_eq!(config.volume_name, "/opt/data");
|
||||
assert_eq!(config.console_address, "192.168.1.100:9001");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialization_deserialization_roundtrip() {
|
||||
let original_config = RustFSConfig {
|
||||
address: "10.0.0.1:8080".to_string(),
|
||||
host: "10.0.0.1".to_string(),
|
||||
port: "8080".to_string(),
|
||||
access_key: "roundtrip_user".to_string(),
|
||||
secret_key: "roundtrip_pass".to_string(),
|
||||
domain_name: "roundtrip.test".to_string(),
|
||||
volume_name: "/tmp/roundtrip".to_string(),
|
||||
console_address: "10.0.0.1:8081".to_string(),
|
||||
};
|
||||
|
||||
let json = serde_json::to_string(&original_config).unwrap();
|
||||
let deserialized_config: RustFSConfig = serde_json::from_str(&json).unwrap();
|
||||
|
||||
assert_eq!(original_config, deserialized_config);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_config_ordering() {
|
||||
let config1 = RustFSConfig {
|
||||
address: "127.0.0.1:9000".to_string(),
|
||||
host: "127.0.0.1".to_string(),
|
||||
port: "9000".to_string(),
|
||||
access_key: "admin".to_string(),
|
||||
secret_key: "password".to_string(),
|
||||
domain_name: "test.com".to_string(),
|
||||
volume_name: "/data".to_string(),
|
||||
console_address: "127.0.0.1:9001".to_string(),
|
||||
};
|
||||
|
||||
let config2 = RustFSConfig {
|
||||
address: "127.0.0.1:9000".to_string(),
|
||||
host: "127.0.0.1".to_string(),
|
||||
port: "9000".to_string(),
|
||||
access_key: "admin".to_string(),
|
||||
secret_key: "password".to_string(),
|
||||
domain_name: "test.com".to_string(),
|
||||
volume_name: "/data".to_string(),
|
||||
console_address: "127.0.0.1:9001".to_string(),
|
||||
};
|
||||
|
||||
let config3 = RustFSConfig {
|
||||
address: "127.0.0.1:9001".to_string(), // Different port
|
||||
host: "127.0.0.1".to_string(),
|
||||
port: "9001".to_string(),
|
||||
access_key: "admin".to_string(),
|
||||
secret_key: "password".to_string(),
|
||||
domain_name: "test.com".to_string(),
|
||||
volume_name: "/data".to_string(),
|
||||
console_address: "127.0.0.1:9002".to_string(),
|
||||
};
|
||||
|
||||
assert_eq!(config1, config2);
|
||||
assert_ne!(config1, config3);
|
||||
assert!(config1 < config3); // Lexicographic ordering
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_clone() {
|
||||
let original = RustFSConfig::default_config();
|
||||
let cloned = original.clone();
|
||||
|
||||
assert_eq!(original, cloned);
|
||||
assert_eq!(original.address, cloned.address);
|
||||
assert_eq!(original.access_key, cloned.access_key);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_debug_format() {
|
||||
let config = RustFSConfig::default_config();
|
||||
let debug_str = format!("{:?}", config);
|
||||
|
||||
assert!(debug_str.contains("RustFSConfig"));
|
||||
assert!(debug_str.contains("address"));
|
||||
assert!(debug_str.contains("127.0.0.1:9000"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_constants() {
|
||||
assert_eq!(RustFSConfig::SERVICE_NAME, "rustfs-service");
|
||||
assert_eq!(RustFSConfig::SERVICE_KEY, "rustfs_key");
|
||||
assert_eq!(RustFSConfig::DEFAULT_DOMAIN_NAME_VALUE, "demo.rustfs.com");
|
||||
assert_eq!(RustFSConfig::DEFAULT_ADDRESS_VALUE, "127.0.0.1:9000");
|
||||
assert_eq!(RustFSConfig::DEFAULT_PORT_VALUE, "9000");
|
||||
assert_eq!(RustFSConfig::DEFAULT_HOST_VALUE, "127.0.0.1");
|
||||
assert_eq!(RustFSConfig::DEFAULT_ACCESS_KEY_VALUE, "rustfsadmin");
|
||||
assert_eq!(RustFSConfig::DEFAULT_SECRET_KEY_VALUE, "rustfsadmin");
|
||||
assert_eq!(RustFSConfig::DEFAULT_CONSOLE_ADDRESS_VALUE, "127.0.0.1:9001");
|
||||
}
|
||||
|
||||
#[test]
fn test_empty_strings() {
    // A config built from empty strings keeps every field empty.
    let config = RustFSConfig {
        address: String::new(),
        host: String::new(),
        port: String::new(),
        access_key: String::new(),
        secret_key: String::new(),
        domain_name: String::new(),
        volume_name: String::new(),
        console_address: String::new(),
    };

    let fields = [
        &config.address,
        &config.host,
        &config.port,
        &config.access_key,
        &config.secret_key,
        &config.domain_name,
        &config.volume_name,
        &config.console_address,
    ];
    for field in fields {
        assert!(field.is_empty());
    }
}
|
||||
|
||||
#[test]
fn test_very_long_strings() {
    // 1000-character field values must be stored without truncation.
    let long_string = "a".repeat(1000);
    let config = RustFSConfig {
        address: format!("{}:9000", long_string),
        host: long_string.clone(),
        port: "9000".to_string(),
        access_key: long_string.clone(),
        secret_key: long_string.clone(),
        domain_name: format!("{}.com", long_string),
        volume_name: format!("/data/{}", long_string),
        console_address: format!("{}:9001", long_string),
    };

    for field in [&config.host, &config.access_key, &config.secret_key] {
        assert_eq!(field.len(), 1000);
    }
}
|
||||
|
||||
#[test]
fn test_special_characters() {
    // Punctuation commonly found in credentials and paths must survive.
    let config = RustFSConfig {
        address: "127.0.0.1:9000".to_string(),
        host: "127.0.0.1".to_string(),
        port: "9000".to_string(),
        access_key: "user@domain.com".to_string(),
        secret_key: "p@ssw0rd!#$%".to_string(),
        domain_name: "test-domain.example.com".to_string(),
        volume_name: "/data/rust-fs/storage".to_string(),
        console_address: "127.0.0.1:9001".to_string(),
    };

    assert!(config.access_key.contains('@'));
    assert!(config.secret_key.contains("!#$%"));
    assert!(config.domain_name.contains('-'));
    assert!(config.volume_name.contains('/'));
}
|
||||
|
||||
#[test]
fn test_unicode_strings() {
    // Non-ASCII credentials and paths must round-trip unchanged.
    let config = RustFSConfig {
        address: "127.0.0.1:9000".to_string(),
        host: "127.0.0.1".to_string(),
        port: "9000".to_string(),
        access_key: "用户名".to_string(),
        secret_key: "密码123".to_string(),
        domain_name: "测试.com".to_string(),
        volume_name: "/数据/存储".to_string(),
        console_address: "127.0.0.1:9001".to_string(),
    };

    assert_eq!(config.access_key, "用户名");
    assert_eq!(config.secret_key, "密码123");
    assert_eq!(config.domain_name, "测试.com");
    assert_eq!(config.volume_name, "/数据/存储");
}
|
||||
|
||||
#[test]
fn test_memory_efficiency() {
    // The config struct holds eight Strings; keep it well under 1 KiB.
    assert!(std::mem::size_of::<RustFSConfig>() < 1000);
}
|
||||
|
||||
// Note: Keyring-related tests (load, save, clear) are not included here
|
||||
// because they require actual keyring access and would be integration tests
|
||||
// rather than unit tests. They should be tested separately in an integration
|
||||
// test environment where keyring access can be properly mocked or controlled.
|
||||
}
|
||||
887
cli/rustfs-gui/src/utils/helper.rs
Normal file
887
cli/rustfs-gui/src/utils/helper.rs
Normal file
@@ -0,0 +1,887 @@
|
||||
use crate::utils::RustFSConfig;
|
||||
use dioxus::logger::tracing::{debug, error, info};
|
||||
use lazy_static::lazy_static;
|
||||
use rust_embed::RustEmbed;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::error::Error;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command as StdCommand;
|
||||
use std::time::Duration;
|
||||
use tokio::fs;
|
||||
use tokio::fs::File;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
use tokio::net::TcpStream;
|
||||
use tokio::sync::{mpsc, Mutex};
|
||||
|
||||
#[derive(RustEmbed)]
|
||||
#[folder = "$CARGO_MANIFEST_DIR/embedded-rustfs/"]
|
||||
struct Asset;
|
||||
|
||||
// Use `lazy_static` to cache the checksum of embedded resources
|
||||
lazy_static! {
|
||||
static ref RUSTFS_HASH: Mutex<String> = {
|
||||
let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
|
||||
let rustfs_data = Asset::get(rustfs_file).expect("RustFs binary not embedded");
|
||||
let hash = hex::encode(Sha256::digest(&rustfs_data.data));
|
||||
Mutex::new(hash)
|
||||
};
|
||||
}
|
||||
|
||||
/// Service command
|
||||
/// This enum represents the commands that can be sent to the service manager
|
||||
/// to start, stop, or restart the service
|
||||
/// The `Start` variant contains the configuration for the service
|
||||
/// The `Restart` variant contains the configuration for the service
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig {
|
||||
/// address: "127.0.0.1:9000".to_string(),
|
||||
/// host: "127.0.0.1".to_string(),
|
||||
/// port: "9000".to_string(),
|
||||
/// access_key: "rustfsadmin".to_string(),
|
||||
/// secret_key: "rustfsadmin".to_string(),
|
||||
/// domain_name: "demo.rustfs.com".to_string(),
|
||||
/// volume_name: "data".to_string(),
|
||||
/// console_address: "127.0.0.1:9001".to_string(),
|
||||
/// };
|
||||
///
|
||||
/// let command = ServiceCommand::Start(config);
|
||||
/// println!("{:?}", command);
|
||||
///
|
||||
/// assert_eq!(command, ServiceCommand::Start(config));
|
||||
/// ```
|
||||
pub enum ServiceCommand {
|
||||
Start(RustFSConfig),
|
||||
Stop,
|
||||
Restart(RustFSConfig),
|
||||
}
|
||||
|
||||
/// Service operation result
|
||||
/// This struct represents the result of a service operation
|
||||
/// It contains information about the success of the operation,
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use chrono::Local;
|
||||
///
|
||||
/// let result = ServiceOperationResult {
|
||||
/// success: true,
|
||||
/// start_time: chrono::Local::now(),
|
||||
/// end_time: chrono::Local::now(),
|
||||
/// message: "服务启动成功".to_string(),
|
||||
/// };
|
||||
///
|
||||
/// println!("{:?}", result);
|
||||
/// assert_eq!(result.success, true);
|
||||
/// ```
|
||||
#[derive(Debug)]
|
||||
pub struct ServiceOperationResult {
|
||||
pub success: bool,
|
||||
pub start_time: chrono::DateTime<chrono::Local>,
|
||||
pub end_time: chrono::DateTime<chrono::Local>,
|
||||
pub message: String,
|
||||
}
|
||||
|
||||
/// Service manager
|
||||
/// This struct represents a service manager that can be used to start, stop, or restart a service
|
||||
/// It contains a command sender that can be used to send commands to the service manager
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let service_manager = ServiceManager::new();
|
||||
/// println!("{:?}", service_manager);
|
||||
/// ```
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ServiceManager {
|
||||
command_tx: mpsc::Sender<ServiceCommand>,
|
||||
// process: Arc<Mutex<Option<Child>>>,
|
||||
// pid: Arc<Mutex<Option<u32>>>, // Add PID storage
|
||||
// current_config: Arc<Mutex<Option<RustFSConfig>>>, // Add configuration storage
|
||||
}
|
||||
|
||||
impl ServiceManager {
|
||||
/// check if the service is running and return a pid
|
||||
/// This function is platform dependent
|
||||
/// On Unix systems, it uses the `ps` command to check for the service
|
||||
/// On Windows systems, it uses the `wmic` command to check for the service
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let pid = check_service_status().await;
|
||||
/// println!("{:?}", pid);
|
||||
/// ```
|
||||
pub async fn check_service_status() -> Option<u32> {
|
||||
#[cfg(unix)]
|
||||
{
|
||||
// use the ps command on a unix system
|
||||
if let Ok(output) = StdCommand::new("ps").arg("-ef").output() {
|
||||
let output_str = String::from_utf8_lossy(&output.stdout);
|
||||
for line in output_str.lines() {
|
||||
// match contains `rustfs/bin/rustfs` of the line
|
||||
if line.contains("rustfs/bin/rustfs") && !line.contains("grep") {
|
||||
if let Some(pid_str) = line.split_whitespace().nth(1) {
|
||||
if let Ok(pid) = pid_str.parse::<u32>() {
|
||||
return Some(pid);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
{
|
||||
if let Ok(output) = StdCommand::new("wmic")
|
||||
.arg("process")
|
||||
.arg("where")
|
||||
.arg("caption='rustfs.exe'")
|
||||
.arg("get")
|
||||
.arg("processid")
|
||||
.output()
|
||||
{
|
||||
let output_str = String::from_utf8_lossy(&output.stdout);
|
||||
for line in output_str.lines() {
|
||||
if let Ok(pid) = line.trim().parse::<u32>() {
|
||||
return Some(pid);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Prepare the service
|
||||
/// This function downloads the service executable if it doesn't exist
|
||||
/// It also creates the necessary directories for the service
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let executable_path = prepare_service().await;
|
||||
/// println!("{:?}", executable_path);
|
||||
/// ```
|
||||
async fn prepare_service() -> Result<PathBuf, Box<dyn Error>> {
|
||||
// get the user directory
|
||||
let home_dir = dirs::home_dir().ok_or("无法获取用户目录")?;
|
||||
let rustfs_dir = home_dir.join("rustfs");
|
||||
let bin_dir = rustfs_dir.join("bin");
|
||||
let data_dir = rustfs_dir.join("data");
|
||||
let logs_dir = rustfs_dir.join("logs");
|
||||
|
||||
// create the necessary directories
|
||||
for dir in [&bin_dir, &data_dir, &logs_dir] {
|
||||
if !dir.exists() {
|
||||
tokio::fs::create_dir_all(dir).await?;
|
||||
}
|
||||
}
|
||||
|
||||
let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
|
||||
let executable_path = bin_dir.join(rustfs_file);
|
||||
let hash_path = bin_dir.join("embedded_rustfs.sha256");
|
||||
|
||||
if executable_path.exists() && hash_path.exists() {
|
||||
let cached_hash = fs::read_to_string(&hash_path).await?;
|
||||
let expected_hash = RUSTFS_HASH.lock().await;
|
||||
if cached_hash == *expected_hash {
|
||||
println!("Use cached rustfs: {:?}", executable_path);
|
||||
return Ok(executable_path);
|
||||
}
|
||||
}
|
||||
|
||||
// Extract and write files
|
||||
let rustfs_data = Asset::get(rustfs_file).expect("RustFS binary not embedded");
|
||||
let mut file = File::create(&executable_path).await?;
|
||||
file.write_all(&rustfs_data.data).await?;
|
||||
let expected_hash = hex::encode(Sha256::digest(&rustfs_data.data));
|
||||
fs::write(&hash_path, expected_hash).await?;
|
||||
|
||||
// set execution permissions on unix systems
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
let mut perms = std::fs::metadata(&executable_path)?.permissions();
|
||||
perms.set_mode(0o755);
|
||||
std::fs::set_permissions(&executable_path, perms)?;
|
||||
}
|
||||
|
||||
Ok(executable_path)
|
||||
}
|
||||
|
||||
/// Helper function: Extracts the port from the address string
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let address = "127.0.0.1:9000";
|
||||
/// let port = extract_port(address);
|
||||
/// println!("{:?}", port);
|
||||
/// ```
|
||||
fn extract_port(address: &str) -> Option<u16> {
|
||||
address.split(':').nth(1)?.parse().ok()
|
||||
}
|
||||
|
||||
/// Create a new instance of the service manager
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let service_manager = ServiceManager::new();
|
||||
/// println!("{:?}", service_manager);
|
||||
/// ```
|
||||
pub(crate) fn new() -> Self {
|
||||
let (command_tx, mut command_rx) = mpsc::channel(10);
|
||||
// Start the control loop
|
||||
tokio::spawn(async move {
|
||||
while let Some(cmd) = command_rx.recv().await {
|
||||
match cmd {
|
||||
ServiceCommand::Start(config) => {
|
||||
if let Err(e) = Self::start_service(&config).await {
|
||||
Self::show_error(&format!("启动服务失败:{}", e));
|
||||
}
|
||||
}
|
||||
ServiceCommand::Stop => {
|
||||
if let Err(e) = Self::stop_service().await {
|
||||
Self::show_error(&format!("停止服务失败:{}", e));
|
||||
}
|
||||
}
|
||||
ServiceCommand::Restart(config) => {
|
||||
if Self::check_service_status().await.is_some() {
|
||||
if let Err(e) = Self::stop_service().await {
|
||||
Self::show_error(&format!("重启服务失败:{}", e));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if let Err(e) = Self::start_service(&config).await {
|
||||
Self::show_error(&format!("重启服务失败:{}", e));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
ServiceManager { command_tx }
|
||||
}
|
||||
|
||||
/// Start the service
|
||||
/// This function starts the service with the given configuration
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig {
|
||||
/// address: "127.0.0.1:9000".to_string(),
|
||||
/// host: "127.0.0.1".to_string(),
|
||||
/// port: "9000".to_string(),
|
||||
/// access_key: "rustfsadmin".to_string(),
|
||||
/// secret_key: "rustfsadmin".to_string(),
|
||||
/// domain_name: "demo.rustfs.com".to_string(),
|
||||
/// volume_name: "data".to_string(),
|
||||
/// console_address: "127.0.0.1:9001".to_string(),
|
||||
/// };
|
||||
///
|
||||
/// let result = start_service(&config).await;
|
||||
/// println!("{:?}", result);
|
||||
/// ```
|
||||
async fn start_service(config: &RustFSConfig) -> Result<(), Box<dyn Error>> {
|
||||
// Check if the service is already running
|
||||
if let Some(existing_pid) = Self::check_service_status().await {
|
||||
return Err(format!("服务已经在运行,PID: {}", existing_pid).into());
|
||||
}
|
||||
|
||||
// Prepare the service program
|
||||
let executable_path = Self::prepare_service().await?;
|
||||
// Check the data catalog
|
||||
let volume_name_path = Path::new(&config.volume_name);
|
||||
if !volume_name_path.exists() {
|
||||
tokio::fs::create_dir_all(&config.volume_name).await?;
|
||||
}
|
||||
|
||||
// Extract the port from the configuration
|
||||
let main_port = Self::extract_port(&config.address).ok_or("无法解析主服务端口")?;
|
||||
let console_port = Self::extract_port(&config.console_address).ok_or("无法解析控制台端口")?;
|
||||
|
||||
let host = config.address.split(':').next().ok_or("无法解析主机地址")?;
|
||||
|
||||
// Check the port
|
||||
let ports = vec![main_port, console_port];
|
||||
for port in ports {
|
||||
if Self::is_port_in_use(host, port).await {
|
||||
return Err(format!("端口 {} 已被占用", port).into());
|
||||
}
|
||||
}
|
||||
|
||||
// Start the service
|
||||
let mut child = tokio::process::Command::new(executable_path)
|
||||
.arg("--address")
|
||||
.arg(&config.address)
|
||||
.arg("--access-key")
|
||||
.arg(&config.access_key)
|
||||
.arg("--secret-key")
|
||||
.arg(&config.secret_key)
|
||||
.arg("--console-address")
|
||||
.arg(&config.console_address)
|
||||
.arg(config.volume_name.clone())
|
||||
.spawn()?;
|
||||
|
||||
let process_pid = child.id().unwrap();
|
||||
// Wait for the service to start
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
|
||||
// Check if the service started successfully
|
||||
if Self::is_port_in_use(host, main_port).await {
|
||||
Self::show_info(&format!("服务启动成功!进程 ID: {}", process_pid));
|
||||
|
||||
Ok(())
|
||||
} else {
|
||||
child.kill().await?;
|
||||
Err("服务启动失败".into())
|
||||
}
|
||||
}
|
||||
|
||||
/// Stop the service
|
||||
/// This function stops the service
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let result = stop_service().await;
|
||||
/// println!("{:?}", result);
|
||||
/// ```
|
||||
async fn stop_service() -> Result<(), Box<dyn Error>> {
|
||||
let existing_pid = Self::check_service_status().await;
|
||||
debug!("existing_pid: {:?}", existing_pid);
|
||||
if let Some(service_pid) = existing_pid {
|
||||
// An attempt was made to terminate the process
|
||||
#[cfg(unix)]
|
||||
{
|
||||
StdCommand::new("kill").arg("-9").arg(service_pid.to_string()).output()?;
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
{
|
||||
StdCommand::new("taskkill")
|
||||
.arg("/F")
|
||||
.arg("/PID")
|
||||
.arg(&service_pid.to_string())
|
||||
.output()?;
|
||||
}
|
||||
|
||||
// Verify that the service is indeed stopped
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
if Self::check_service_status().await.is_some() {
|
||||
return Err("服务停止失败".into());
|
||||
}
|
||||
Self::show_info("服务已成功停止");
|
||||
|
||||
Ok(())
|
||||
} else {
|
||||
Err("服务未运行".into())
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if the port is in use
|
||||
/// This function checks if the given port is in use on the given host
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let host = "127.0.0.1";
|
||||
/// let port = 9000;
|
||||
/// let result = is_port_in_use(host, port).await;
|
||||
/// println!("{:?}", result);
|
||||
/// ```
|
||||
async fn is_port_in_use(host: &str, port: u16) -> bool {
|
||||
TcpStream::connect(format!("{}:{}", host, port)).await.is_ok()
|
||||
}
|
||||
|
||||
/// Show an error message
|
||||
/// This function shows an error message dialog
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// show_error("This is an error message");
|
||||
/// ```
|
||||
pub(crate) fn show_error(message: &str) {
|
||||
rfd::MessageDialog::new()
|
||||
.set_title("错误")
|
||||
.set_description(message)
|
||||
.set_level(rfd::MessageLevel::Error)
|
||||
.show();
|
||||
}
|
||||
|
||||
/// Show an information message
|
||||
/// This function shows an information message dialog
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// show_info("This is an information message");
|
||||
/// ```
|
||||
pub(crate) fn show_info(message: &str) {
|
||||
rfd::MessageDialog::new()
|
||||
.set_title("成功")
|
||||
.set_description(message)
|
||||
.set_level(rfd::MessageLevel::Info)
|
||||
.show();
|
||||
}
|
||||
|
||||
/// Start the service
|
||||
/// This function sends a `Start` command to the service manager
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig {
|
||||
/// address: "127.0.0.1:9000".to_string(),
|
||||
/// host: "127.0.0.1".to_string(),
|
||||
/// port: "9000".to_string(),
|
||||
/// access_key: "rustfsadmin".to_string(),
|
||||
/// secret_key: "rustfsadmin".to_string(),
|
||||
/// domain_name: "demo.rustfs.com".to_string(),
|
||||
/// volume_name: "data".to_string(),
|
||||
/// console_address: "127.0.0.1:9001".to_string(),
|
||||
/// };
|
||||
///
|
||||
/// let service_manager = ServiceManager::new();
|
||||
/// let result = service_manager.start(config).await;
|
||||
/// println!("{:?}", result);
|
||||
/// ```
|
||||
///
|
||||
/// # Errors
|
||||
/// This function returns an error if the service fails to start
|
||||
///
|
||||
/// # Panics
|
||||
/// This function panics if the port number is invalid
|
||||
///
|
||||
/// # Safety
|
||||
/// This function is not marked as unsafe
|
||||
///
|
||||
/// # Performance
|
||||
/// This function is not optimized for performance
|
||||
///
|
||||
/// # Design
|
||||
/// This function is designed to be simple and easy to use
|
||||
///
|
||||
/// # Security
|
||||
/// This function does not have any security implications
|
||||
pub async fn start(&self, config: RustFSConfig) -> Result<ServiceOperationResult, Box<dyn Error>> {
|
||||
let start_time = chrono::Local::now();
|
||||
self.command_tx.send(ServiceCommand::Start(config.clone())).await?;
|
||||
|
||||
let host = &config.host;
|
||||
let port = config.port.parse::<u16>().expect("无效的端口号");
|
||||
// wait for the service to actually start
|
||||
let mut retries = 0;
|
||||
while retries < 30 {
|
||||
// wait up to 30 seconds
|
||||
if Self::check_service_status().await.is_some() && Self::is_port_in_use(host, port).await {
|
||||
let end_time = chrono::Local::now();
|
||||
return Ok(ServiceOperationResult {
|
||||
success: true,
|
||||
start_time,
|
||||
end_time,
|
||||
message: "服务启动成功".to_string(),
|
||||
});
|
||||
}
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
retries += 1;
|
||||
}
|
||||
|
||||
Err("服务启动超时".into())
|
||||
}
|
||||
|
||||
/// Stop the service
|
||||
/// This function sends a `Stop` command to the service manager
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let service_manager = ServiceManager::new();
|
||||
/// let result = service_manager.stop().await;
|
||||
/// println!("{:?}", result);
|
||||
/// ```
|
||||
///
|
||||
/// # Errors
|
||||
/// This function returns an error if the service fails to stop
|
||||
///
|
||||
/// # Panics
|
||||
/// This function panics if the port number is invalid
|
||||
///
|
||||
/// # Safety
|
||||
/// This function is not marked as unsafe
|
||||
///
|
||||
/// # Performance
|
||||
/// This function is not optimized for performance
|
||||
///
|
||||
/// # Design
|
||||
/// This function is designed to be simple and easy to use
|
||||
///
|
||||
/// # Security
|
||||
/// This function does not have any security implications
|
||||
pub async fn stop(&self) -> Result<ServiceOperationResult, Box<dyn Error>> {
|
||||
let start_time = chrono::Local::now();
|
||||
self.command_tx.send(ServiceCommand::Stop).await?;
|
||||
|
||||
// Wait for the service to actually stop
|
||||
let mut retries = 0;
|
||||
while retries < 15 {
|
||||
// Wait up to 15 seconds
|
||||
if Self::check_service_status().await.is_none() {
|
||||
let end_time = chrono::Local::now();
|
||||
return Ok(ServiceOperationResult {
|
||||
success: true,
|
||||
start_time,
|
||||
end_time,
|
||||
message: "服务停止成功".to_string(),
|
||||
});
|
||||
}
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
retries += 1;
|
||||
}
|
||||
|
||||
Err("服务停止超时".into())
|
||||
}
|
||||
|
||||
/// Restart the service
|
||||
/// This function sends a `Restart` command to the service manager
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig {
|
||||
/// address: "127.0.0.1:9000".to_string(),
|
||||
/// host: "127.0.0.1".to_string(),
|
||||
/// port: "9000".to_string(),
|
||||
/// access_key: "rustfsadmin".to_string(),
|
||||
/// secret_key: "rustfsadmin".to_string(),
|
||||
/// domain_name: "demo.rustfs.com".to_string(),
|
||||
/// volume_name: "data".to_string(),
|
||||
/// console_address: "127.0.0.1:9001".to_string(),
|
||||
/// };
|
||||
///
|
||||
/// let service_manager = ServiceManager::new();
|
||||
/// let result = service_manager.restart(config).await;
|
||||
/// println!("{:?}", result);
|
||||
/// ```
|
||||
///
|
||||
/// # Errors
|
||||
/// This function returns an error if the service fails to restart
|
||||
///
|
||||
/// # Panics
|
||||
/// This function panics if the port number is invalid
|
||||
///
|
||||
/// # Safety
|
||||
/// This function is not marked as unsafe
|
||||
///
|
||||
/// # Performance
|
||||
/// This function is not optimized for performance
|
||||
///
|
||||
/// # Design
|
||||
/// This function is designed to be simple and easy to use
|
||||
///
|
||||
/// # Security
|
||||
/// This function does not have any security implications
|
||||
pub async fn restart(&self, config: RustFSConfig) -> Result<ServiceOperationResult, Box<dyn Error>> {
|
||||
let start_time = chrono::Local::now();
|
||||
self.command_tx.send(ServiceCommand::Restart(config.clone())).await?;
|
||||
|
||||
let host = &config.host;
|
||||
let port = config.port.parse::<u16>().expect("无效的端口号");
|
||||
|
||||
// wait for the service to restart
|
||||
let mut retries = 0;
|
||||
while retries < 45 {
|
||||
// Longer waiting time is given as both the stop and start processes are involved
|
||||
if Self::check_service_status().await.is_some() && Self::is_port_in_use(host, port).await {
|
||||
match config.save() {
|
||||
Ok(_) => info!("save config success"),
|
||||
Err(e) => {
|
||||
error!("save config error: {}", e);
|
||||
self.command_tx.send(ServiceCommand::Stop).await?;
|
||||
Self::show_error("保存配置失败");
|
||||
return Err("保存配置失败".into());
|
||||
}
|
||||
}
|
||||
let end_time = chrono::Local::now();
|
||||
return Ok(ServiceOperationResult {
|
||||
success: true,
|
||||
start_time,
|
||||
end_time,
|
||||
message: "服务重启成功".to_string(),
|
||||
});
|
||||
}
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
retries += 1;
|
||||
}
|
||||
Err("服务重启超时".into())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::time::Duration;

    #[test]
    fn test_service_command_creation() {
        // Every command variant should be constructible from a config.
        let config = RustFSConfig::default_config();

        let start_cmd = ServiceCommand::Start(config.clone());
        let stop_cmd = ServiceCommand::Stop;
        let restart_cmd = ServiceCommand::Restart(config);

        assert!(matches!(start_cmd, ServiceCommand::Start(_)), "Expected Start command");
        assert!(matches!(stop_cmd, ServiceCommand::Stop), "Expected Stop command");
        assert!(matches!(restart_cmd, ServiceCommand::Restart(_)), "Expected Restart command");
    }

    #[test]
    fn test_service_operation_result_creation() {
        let start_time = chrono::Local::now();
        let end_time = chrono::Local::now();

        // One successful and one failed result sharing the same timestamps.
        let success_result = ServiceOperationResult {
            success: true,
            start_time,
            end_time,
            message: "Operation successful".to_string(),
        };
        let failure_result = ServiceOperationResult {
            success: false,
            start_time,
            end_time,
            message: "Operation failed".to_string(),
        };

        assert!(success_result.success);
        assert_eq!(success_result.message, "Operation successful");
        assert!(!failure_result.success);
        assert_eq!(failure_result.message, "Operation failed");
    }

    #[test]
    fn test_service_operation_result_debug() {
        let result = ServiceOperationResult {
            success: true,
            start_time: chrono::Local::now(),
            end_time: chrono::Local::now(),
            message: "Test message".to_string(),
        };

        // Debug output should expose the type name and field values.
        let rendered = format!("{:?}", result);
        for fragment in ["ServiceOperationResult", "success: true", "Test message"] {
            assert!(rendered.contains(fragment));
        }
    }

    #[test]
    fn test_service_manager_creation() {
        // ServiceManager::new spawns a task, so a runtime is required.
        let rt = tokio::runtime::Runtime::new().unwrap();
        rt.block_on(async {
            let manager = ServiceManager::new();
            let cloned = manager.clone();

            // Both handles should be valid, debuggable values.
            assert!(format!("{:?}", manager).contains("ServiceManager"));
            assert!(format!("{:?}", cloned).contains("ServiceManager"));
        });
    }

    #[test]
    fn test_extract_port_valid() {
        let cases = [
            ("127.0.0.1:9000", Some(9000)),
            ("localhost:8080", Some(8080)),
            ("192.168.1.100:3000", Some(3000)),
            ("0.0.0.0:80", Some(80)),
            ("example.com:443", Some(443)),
            ("host:65535", Some(65535)),
            ("host:1", Some(1)),
        ];

        for (input, expected) in cases {
            assert_eq!(ServiceManager::extract_port(input), expected, "Failed for input: {}", input);
        }
    }

    #[test]
    fn test_extract_port_invalid() {
        let invalid_inputs = [
            "127.0.0.1",       // Missing port
            "127.0.0.1:",      // Empty port
            "127.0.0.1:abc",   // Invalid port
            "127.0.0.1:99999", // Port out of range
            "",                // Empty string
            "invalid",         // No colon
            "host:-1",         // Negative port
            "host:0.5",        // Decimal port
        ];

        for input in invalid_inputs {
            assert_eq!(ServiceManager::extract_port(input), None, "Should be None for input: {}", input);
        }

        // An empty host with a valid port still parses.
        assert_eq!(ServiceManager::extract_port(":9000"), Some(9000));

        // With multiple colons only the second segment is considered,
        // so "127.0.0.1:9000:extra" yields 9000.
        assert_eq!(ServiceManager::extract_port("127.0.0.1:9000:extra"), Some(9000));
    }

    #[test]
    fn test_extract_port_edge_cases() {
        // Boundary port values.
        assert_eq!(ServiceManager::extract_port("host:0"), Some(0));
        assert_eq!(ServiceManager::extract_port("host:65535"), Some(65535));
        assert_eq!(ServiceManager::extract_port("host:65536"), None); // Out of range

        // IPv6-style addresses: split(':') makes the second segment empty,
        // so parsing fails and None is returned.
        assert_eq!(ServiceManager::extract_port("::1:8080"), None);
        assert_eq!(ServiceManager::extract_port("[::1]:8080"), None);
    }

    #[test]
    fn test_show_error() {
        // show_error opens a native dialog, which cannot run in a unit-test
        // environment; this placeholder only documents that the API exists.
    }

    #[test]
    fn test_show_info() {
        // show_info opens a native dialog, which cannot run in a unit-test
        // environment; this placeholder only documents that the API exists.
    }

    #[test]
    fn test_service_operation_result_timing() {
        let start_time = chrono::Local::now();
        std::thread::sleep(Duration::from_millis(10)); // Let the clock advance
        let end_time = chrono::Local::now();

        let result = ServiceOperationResult {
            success: true,
            start_time,
            end_time,
            message: "Timing test".to_string(),
        };

        // The end timestamp must not precede the start timestamp.
        assert!(result.end_time >= result.start_time);
    }

    #[test]
    fn test_service_operation_result_with_unicode() {
        let result = ServiceOperationResult {
            success: true,
            start_time: chrono::Local::now(),
            end_time: chrono::Local::now(),
            message: "操作成功 🎉".to_string(),
        };

        assert_eq!(result.message, "操作成功 🎉");
        assert!(result.success);
    }

    #[test]
    fn test_service_operation_result_with_long_message() {
        let long_message = "A".repeat(10000);
        let result = ServiceOperationResult {
            success: false,
            start_time: chrono::Local::now(),
            end_time: chrono::Local::now(),
            message: long_message.clone(),
        };

        assert_eq!(result.message.len(), 10000);
        assert_eq!(result.message, long_message);
        assert!(!result.success);
    }

    #[test]
    fn test_service_command_with_different_configs() {
        let config1 = RustFSConfig {
            address: "127.0.0.1:9000".to_string(),
            host: "127.0.0.1".to_string(),
            port: "9000".to_string(),
            access_key: "admin1".to_string(),
            secret_key: "pass1".to_string(),
            domain_name: "test1.com".to_string(),
            volume_name: "/data1".to_string(),
            console_address: "127.0.0.1:9001".to_string(),
        };
        let config2 = RustFSConfig {
            address: "192.168.1.100:8080".to_string(),
            host: "192.168.1.100".to_string(),
            port: "8080".to_string(),
            access_key: "admin2".to_string(),
            secret_key: "pass2".to_string(),
            domain_name: "test2.com".to_string(),
            volume_name: "/data2".to_string(),
            console_address: "192.168.1.100:8081".to_string(),
        };

        // The payload carried by a command must round-trip unchanged.
        match ServiceCommand::Start(config1) {
            ServiceCommand::Start(config) => {
                assert_eq!(config.address, "127.0.0.1:9000");
                assert_eq!(config.access_key, "admin1");
            }
            _ => panic!("Expected Start command"),
        }

        match ServiceCommand::Restart(config2) {
            ServiceCommand::Restart(config) => {
                assert_eq!(config.address, "192.168.1.100:8080");
                assert_eq!(config.access_key, "admin2");
            }
            _ => panic!("Expected Restart command"),
        }
    }

    #[test]
    fn test_memory_efficiency() {
        // The service types should stay comfortably small.
        assert!(std::mem::size_of::<ServiceCommand>() < 2000);
        assert!(std::mem::size_of::<ServiceOperationResult>() < 1000);
        assert!(std::mem::size_of::<ServiceManager>() < 1000);
    }

    // Note: check_service_status, prepare_service, start_service,
    // stop_service, is_port_in_use and ServiceManager::start/stop/restart
    // need an async runtime, filesystem, network and process access, so they
    // belong in integration tests. The RUSTFS_HASH lazy_static also depends
    // on embedded assets that may be absent in a unit-test environment.
}
|
||||
286
cli/rustfs-gui/src/utils/logger.rs
Normal file
286
cli/rustfs-gui/src/utils/logger.rs
Normal file
@@ -0,0 +1,286 @@
|
||||
use dioxus::logger::tracing::debug;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
use tracing_appender::rolling::{RollingFileAppender, Rotation};
|
||||
use tracing_subscriber::fmt;
|
||||
use tracing_subscriber::layer::SubscriberExt;
|
||||
use tracing_subscriber::util::SubscriberInitExt;
|
||||
|
||||
/// Initialize the logger with a rolling file appender
|
||||
/// that rotates log files daily
|
||||
pub fn init_logger() -> WorkerGuard {
|
||||
// configuring rolling logs rolling by day
|
||||
let home_dir = dirs::home_dir().expect("无法获取用户目录");
|
||||
let rustfs_dir = home_dir.join("rustfs");
|
||||
let logs_dir = rustfs_dir.join("logs");
|
||||
let file_appender = RollingFileAppender::builder()
|
||||
.rotation(Rotation::DAILY) // rotate log files once every hour
|
||||
.filename_prefix("rustfs-cli") // log file names will be prefixed with `myapp.`
|
||||
.filename_suffix("log") // log file names will be suffixed with `.log`
|
||||
.build(logs_dir) // try to build an appender that stores log files in `/ var/ log`
|
||||
.expect("initializing rolling file appender failed");
|
||||
// non-blocking writer for improved performance
|
||||
let (non_blocking_file, worker_guard) = tracing_appender::non_blocking(file_appender);
|
||||
|
||||
// console output layer
|
||||
let console_layer = fmt::layer()
|
||||
.with_writer(std::io::stdout)
|
||||
.with_ansi(true)
|
||||
.with_line_number(true); // enable colors in the console
|
||||
|
||||
// file output layer
|
||||
let file_layer = fmt::layer()
|
||||
.with_writer(non_blocking_file)
|
||||
.with_ansi(false)
|
||||
.with_thread_names(true)
|
||||
.with_target(true)
|
||||
.with_thread_ids(true)
|
||||
.with_level(true)
|
||||
.with_line_number(true); // disable colors in the file
|
||||
|
||||
// Combine all tiers and initialize global subscribers
|
||||
tracing_subscriber::registry()
|
||||
.with(console_layer)
|
||||
.with(file_layer)
|
||||
.with(tracing_subscriber::EnvFilter::new("info")) // filter the log level by environment variables
|
||||
.init();
|
||||
debug!("Logger initialized");
|
||||
worker_guard
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::sync::Once;
|
||||
|
||||
static INIT: Once = Once::new();
|
||||
|
||||
// Helper function to ensure logger is only initialized once in tests
|
||||
fn ensure_logger_init() {
|
||||
INIT.call_once(|| {
|
||||
// Initialize a simple test logger to avoid conflicts
|
||||
let _ = tracing_subscriber::fmt().with_test_writer().try_init();
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_logger_initialization_components() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test that we can create the components used in init_logger
|
||||
// without actually initializing the global logger again
|
||||
|
||||
// Test home directory access
|
||||
let home_dir_result = dirs::home_dir();
|
||||
assert!(home_dir_result.is_some(), "Should be able to get home directory");
|
||||
|
||||
let home_dir = home_dir_result.unwrap();
|
||||
let rustfs_dir = home_dir.join("rustfs");
|
||||
let logs_dir = rustfs_dir.join("logs");
|
||||
|
||||
// Test path construction
|
||||
assert!(rustfs_dir.to_string_lossy().contains("rustfs"));
|
||||
assert!(logs_dir.to_string_lossy().contains("logs"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rolling_file_appender_builder() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test that we can create a RollingFileAppender builder
|
||||
let builder = RollingFileAppender::builder()
|
||||
.rotation(Rotation::DAILY)
|
||||
.filename_prefix("test-rustfs-cli")
|
||||
.filename_suffix("log");
|
||||
|
||||
// We can't actually build it without creating directories,
|
||||
// but we can verify the builder pattern works
|
||||
let debug_str = format!("{:?}", builder);
|
||||
// The actual debug format might be different, so just check it's not empty
|
||||
assert!(!debug_str.is_empty());
|
||||
// Check that it contains some expected parts
|
||||
assert!(debug_str.contains("Builder") || debug_str.contains("builder") || debug_str.contains("RollingFileAppender"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rotation_types() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test different rotation types
|
||||
let daily = Rotation::DAILY;
|
||||
let hourly = Rotation::HOURLY;
|
||||
let minutely = Rotation::MINUTELY;
|
||||
let never = Rotation::NEVER;
|
||||
|
||||
// Test that rotation types can be created and formatted
|
||||
assert!(!format!("{:?}", daily).is_empty());
|
||||
assert!(!format!("{:?}", hourly).is_empty());
|
||||
assert!(!format!("{:?}", minutely).is_empty());
|
||||
assert!(!format!("{:?}", never).is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fmt_layer_configuration() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test that we can create fmt layers with different configurations
|
||||
// We can't actually test the layers directly due to type complexity,
|
||||
// but we can test that the configuration values are correct
|
||||
|
||||
// Test console layer settings
|
||||
let console_ansi = true;
|
||||
let console_line_number = true;
|
||||
assert!(console_ansi);
|
||||
assert!(console_line_number);
|
||||
|
||||
// Test file layer settings
|
||||
let file_ansi = false;
|
||||
let file_thread_names = true;
|
||||
let file_target = true;
|
||||
let file_thread_ids = true;
|
||||
let file_level = true;
|
||||
let file_line_number = true;
|
||||
|
||||
assert!(!file_ansi);
|
||||
assert!(file_thread_names);
|
||||
assert!(file_target);
|
||||
assert!(file_thread_ids);
|
||||
assert!(file_level);
|
||||
assert!(file_line_number);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_env_filter_creation() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test that EnvFilter can be created with different levels
|
||||
let info_filter = tracing_subscriber::EnvFilter::new("info");
|
||||
let debug_filter = tracing_subscriber::EnvFilter::new("debug");
|
||||
let warn_filter = tracing_subscriber::EnvFilter::new("warn");
|
||||
let error_filter = tracing_subscriber::EnvFilter::new("error");
|
||||
|
||||
// Test that filters can be created
|
||||
assert!(!format!("{:?}", info_filter).is_empty());
|
||||
assert!(!format!("{:?}", debug_filter).is_empty());
|
||||
assert!(!format!("{:?}", warn_filter).is_empty());
|
||||
assert!(!format!("{:?}", error_filter).is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_path_construction() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test path construction logic used in init_logger
|
||||
if let Some(home_dir) = dirs::home_dir() {
|
||||
let rustfs_dir = home_dir.join("rustfs");
|
||||
let logs_dir = rustfs_dir.join("logs");
|
||||
|
||||
// Test that paths are constructed correctly
|
||||
assert!(rustfs_dir.ends_with("rustfs"));
|
||||
assert!(logs_dir.ends_with("logs"));
|
||||
assert!(logs_dir.parent().unwrap().ends_with("rustfs"));
|
||||
|
||||
// Test path string representation
|
||||
let rustfs_str = rustfs_dir.to_string_lossy();
|
||||
let logs_str = logs_dir.to_string_lossy();
|
||||
|
||||
assert!(rustfs_str.contains("rustfs"));
|
||||
assert!(logs_str.contains("rustfs"));
|
||||
assert!(logs_str.contains("logs"));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_filename_patterns() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test the filename patterns used in the logger
|
||||
let prefix = "rustfs-cli";
|
||||
let suffix = "log";
|
||||
|
||||
assert_eq!(prefix, "rustfs-cli");
|
||||
assert_eq!(suffix, "log");
|
||||
|
||||
// Test that these would create valid filenames
|
||||
let sample_filename = format!("{}.2024-01-01.{}", prefix, suffix);
|
||||
assert_eq!(sample_filename, "rustfs-cli.2024-01-01.log");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_worker_guard_type() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test that WorkerGuard type exists and can be referenced
|
||||
// We can't actually create one without the full setup, but we can test the type
|
||||
let guard_size = std::mem::size_of::<WorkerGuard>();
|
||||
assert!(guard_size > 0, "WorkerGuard should have non-zero size");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_logger_configuration_constants() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test the configuration values used in the logger
|
||||
let default_log_level = "info";
|
||||
let filename_prefix = "rustfs-cli";
|
||||
let filename_suffix = "log";
|
||||
let rotation = Rotation::DAILY;
|
||||
|
||||
assert_eq!(default_log_level, "info");
|
||||
assert_eq!(filename_prefix, "rustfs-cli");
|
||||
assert_eq!(filename_suffix, "log");
|
||||
assert!(matches!(rotation, Rotation::DAILY));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_directory_names() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test the directory names used in the logger setup
|
||||
let rustfs_dir_name = "rustfs";
|
||||
let logs_dir_name = "logs";
|
||||
|
||||
assert_eq!(rustfs_dir_name, "rustfs");
|
||||
assert_eq!(logs_dir_name, "logs");
|
||||
|
||||
// Test path joining
|
||||
let combined = format!("{}/{}", rustfs_dir_name, logs_dir_name);
|
||||
assert_eq!(combined, "rustfs/logs");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_layer_settings() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test the boolean settings used in layer configuration
|
||||
let console_ansi = true;
|
||||
let console_line_number = true;
|
||||
let file_ansi = false;
|
||||
let file_thread_names = true;
|
||||
let file_target = true;
|
||||
let file_thread_ids = true;
|
||||
let file_level = true;
|
||||
let file_line_number = true;
|
||||
|
||||
// Verify the settings
|
||||
assert!(console_ansi);
|
||||
assert!(console_line_number);
|
||||
assert!(!file_ansi);
|
||||
assert!(file_thread_names);
|
||||
assert!(file_target);
|
||||
assert!(file_thread_ids);
|
||||
assert!(file_level);
|
||||
assert!(file_line_number);
|
||||
}
|
||||
|
||||
// Note: The actual init_logger() function is not tested here because:
|
||||
// 1. It initializes a global tracing subscriber which can only be done once
|
||||
// 2. It requires file system access to create directories
|
||||
// 3. It has side effects that would interfere with other tests
|
||||
// 4. It returns a WorkerGuard that needs to be kept alive
|
||||
//
|
||||
// This function should be tested in integration tests where:
|
||||
// - File system access can be properly controlled
|
||||
// - The global state can be managed
|
||||
// - The actual logging behavior can be verified
|
||||
// - The WorkerGuard lifecycle can be properly managed
|
||||
}
|
||||
7
cli/rustfs-gui/src/utils/mod.rs
Normal file
7
cli/rustfs-gui/src/utils/mod.rs
Normal file
@@ -0,0 +1,7 @@
|
||||
mod config;
|
||||
mod helper;
|
||||
mod logger;
|
||||
|
||||
pub use config::RustFSConfig;
|
||||
pub use helper::ServiceManager;
|
||||
pub use logger::init_logger;
|
||||
24
cli/rustfs-gui/src/views/app.rs
Normal file
24
cli/rustfs-gui/src/views/app.rs
Normal file
@@ -0,0 +1,24 @@
|
||||
use crate::route::Route;
|
||||
use dioxus::logger::tracing::info;
|
||||
use dioxus::prelude::*;
|
||||
|
||||
const FAVICON: Asset = asset!("/assets/favicon.ico");
|
||||
const TAILWIND_CSS: Asset = asset!("/assets/tailwind.css");
|
||||
|
||||
/// The main application component
|
||||
/// This is the root component of the application
|
||||
/// It contains the global resources and the router
|
||||
/// for the application
|
||||
#[component]
|
||||
pub fn App() -> Element {
|
||||
// Build cool things ✌️
|
||||
use document::{Link, Title};
|
||||
info!("App rendered");
|
||||
rsx! {
|
||||
// Global app resources
|
||||
Link { rel: "icon", href: FAVICON }
|
||||
Link { rel: "stylesheet", href: TAILWIND_CSS }
|
||||
Title { "RustFS" }
|
||||
Router::<Route> {}
|
||||
}
|
||||
}
|
||||
9
cli/rustfs-gui/src/views/home.rs
Normal file
9
cli/rustfs-gui/src/views/home.rs
Normal file
@@ -0,0 +1,9 @@
|
||||
use crate::components::Home;
|
||||
use dioxus::prelude::*;
|
||||
|
||||
#[component]
|
||||
pub fn HomeViews() -> Element {
|
||||
rsx! {
|
||||
Home {}
|
||||
}
|
||||
}
|
||||
7
cli/rustfs-gui/src/views/mod.rs
Normal file
7
cli/rustfs-gui/src/views/mod.rs
Normal file
@@ -0,0 +1,7 @@
|
||||
mod app;
|
||||
mod home;
|
||||
mod setting;
|
||||
|
||||
pub use app::App;
|
||||
pub use home::HomeViews;
|
||||
pub use setting::SettingViews;
|
||||
9
cli/rustfs-gui/src/views/setting.rs
Normal file
9
cli/rustfs-gui/src/views/setting.rs
Normal file
@@ -0,0 +1,9 @@
|
||||
use crate::components::Setting;
|
||||
use dioxus::prelude::*;
|
||||
|
||||
#[component]
|
||||
pub fn SettingViews() -> Element {
|
||||
rsx! {
|
||||
Setting {}
|
||||
}
|
||||
}
|
||||
8
cli/rustfs-gui/tailwind.config.js
Normal file
8
cli/rustfs-gui/tailwind.config.js
Normal file
@@ -0,0 +1,8 @@
|
||||
module.exports = {
|
||||
mode: "all",
|
||||
content: ["./src/**/*.{rs,html,css}", "./dist/**/*.html"],
|
||||
theme: {
|
||||
extend: {},
|
||||
},
|
||||
plugins: [],
|
||||
};
|
||||
15
common/common/Cargo.toml
Normal file
15
common/common/Cargo.toml
Normal file
@@ -0,0 +1,15 @@
|
||||
[package]
|
||||
name = "common"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
async-trait.workspace = true
|
||||
lazy_static.workspace = true
|
||||
scopeguard = "1.2.0"
|
||||
tokio.workspace = true
|
||||
tonic = { workspace = true }
|
||||
tracing-error.workspace = true
|
||||
73
common/common/src/bucket_stats.rs
Normal file
73
common/common/src/bucket_stats.rs
Normal file
@@ -0,0 +1,73 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use crate::last_minute::{self};
|
||||
pub struct ReplicationLatency {
|
||||
// 单个和多部分 PUT 请求的延迟
|
||||
upload_histogram: last_minute::LastMinuteHistogram,
|
||||
}
|
||||
|
||||
impl ReplicationLatency {
|
||||
// 合并两个 ReplicationLatency
|
||||
pub fn merge(&mut self, other: &mut ReplicationLatency) -> &ReplicationLatency {
|
||||
self.upload_histogram.merge(&other.upload_histogram);
|
||||
self
|
||||
}
|
||||
|
||||
// 获取上传延迟(按对象大小区间分类)
|
||||
pub fn get_upload_latency(&mut self) -> HashMap<String, u64> {
|
||||
let mut ret = HashMap::new();
|
||||
let avg = self.upload_histogram.get_avg_data();
|
||||
for (i, v) in avg.iter().enumerate() {
|
||||
let avg_duration = v.avg();
|
||||
ret.insert(self.size_tag_to_string(i), avg_duration.as_millis() as u64);
|
||||
}
|
||||
ret
|
||||
}
|
||||
pub fn update(&mut self, size: i64, during: std::time::Duration) {
|
||||
self.upload_histogram.add(size, during);
|
||||
}
|
||||
|
||||
// 模拟从 size tag 到字符串的转换
|
||||
fn size_tag_to_string(&self, tag: usize) -> String {
|
||||
match tag {
|
||||
0 => String::from("Size < 1 KiB"),
|
||||
1 => String::from("Size < 1 MiB"),
|
||||
2 => String::from("Size < 10 MiB"),
|
||||
3 => String::from("Size < 100 MiB"),
|
||||
4 => String::from("Size < 1 GiB"),
|
||||
_ => String::from("Size > 1 GiB"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// #[derive(Debug, Clone, Default)]
|
||||
// pub struct ReplicationLastMinute {
|
||||
// pub last_minute: LastMinuteLatency,
|
||||
// }
|
||||
|
||||
// impl ReplicationLastMinute {
|
||||
// pub fn merge(&mut self, other: ReplicationLastMinute) -> ReplicationLastMinute {
|
||||
// let mut nl = ReplicationLastMinute::default();
|
||||
// nl.last_minute = self.last_minute.merge(&mut other.last_minute);
|
||||
// nl
|
||||
// }
|
||||
|
||||
// pub fn add_size(&mut self, n: i64) {
|
||||
// let t = SystemTime::now()
|
||||
// .duration_since(UNIX_EPOCH)
|
||||
// .expect("Time went backwards")
|
||||
// .as_secs();
|
||||
// self.last_minute.add_all(t - 1, &AccElem { total: t - 1, size: n as u64, n: 1 });
|
||||
// }
|
||||
|
||||
// pub fn get_total(&self) -> AccElem {
|
||||
// self.last_minute.get_total()
|
||||
// }
|
||||
// }
|
||||
|
||||
// impl fmt::Display for ReplicationLastMinute {
|
||||
// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
// let t = self.last_minute.get_total();
|
||||
// write!(f, "ReplicationLastMinute sz= {}, n= {}, dur= {}", t.size, t.n, t.total)
|
||||
// }
|
||||
// }
|
||||
339
common/common/src/error.rs
Normal file
339
common/common/src/error.rs
Normal file
@@ -0,0 +1,339 @@
|
||||
use tracing_error::{SpanTrace, SpanTraceStatus};
|
||||
|
||||
pub type StdError = Box<dyn std::error::Error + Send + Sync + 'static>;
|
||||
|
||||
pub type Result<T = (), E = Error> = std::result::Result<T, E>;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Error {
|
||||
inner: Box<dyn std::error::Error + Send + Sync + 'static>,
|
||||
span_trace: SpanTrace,
|
||||
}
|
||||
|
||||
impl Error {
|
||||
/// Create a new error from a `std::error::Error`.
|
||||
#[must_use]
|
||||
#[track_caller]
|
||||
pub fn new<T: std::error::Error + Send + Sync + 'static>(source: T) -> Self {
|
||||
Self::from_std_error(source.into())
|
||||
}
|
||||
|
||||
/// Create a new error from a `std::error::Error`.
|
||||
#[must_use]
|
||||
#[track_caller]
|
||||
pub fn from_std_error(inner: StdError) -> Self {
|
||||
Self {
|
||||
inner,
|
||||
span_trace: SpanTrace::capture(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new error from a string.
|
||||
#[must_use]
|
||||
#[track_caller]
|
||||
pub fn from_string(s: impl Into<String>) -> Self {
|
||||
Self::msg(s)
|
||||
}
|
||||
|
||||
/// Create a new error from a string.
|
||||
#[must_use]
|
||||
#[track_caller]
|
||||
pub fn msg(s: impl Into<String>) -> Self {
|
||||
Self::from_std_error(s.into().into())
|
||||
}
|
||||
|
||||
/// Returns `true` if the inner type is the same as `T`.
|
||||
#[inline]
|
||||
pub fn is<T: std::error::Error + 'static>(&self) -> bool {
|
||||
self.inner.is::<T>()
|
||||
}
|
||||
|
||||
/// Returns some reference to the inner value if it is of type `T`, or
|
||||
/// `None` if it isn't.
|
||||
#[inline]
|
||||
pub fn downcast_ref<T: std::error::Error + 'static>(&self) -> Option<&T> {
|
||||
self.inner.downcast_ref()
|
||||
}
|
||||
|
||||
/// Returns some mutable reference to the inner value if it is of type `T`, or
|
||||
/// `None` if it isn't.
|
||||
#[inline]
|
||||
pub fn downcast_mut<T: std::error::Error + 'static>(&mut self) -> Option<&mut T> {
|
||||
self.inner.downcast_mut()
|
||||
}
|
||||
|
||||
pub fn to_io_err(&self) -> Option<std::io::Error> {
|
||||
self.downcast_ref::<std::io::Error>()
|
||||
.map(|e| std::io::Error::new(e.kind(), e.to_string()))
|
||||
}
|
||||
|
||||
pub fn inner_string(&self) -> String {
|
||||
self.inner.to_string()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: std::error::Error + Send + Sync + 'static> From<T> for Error {
|
||||
fn from(e: T) -> Self {
|
||||
Self::new(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", self.inner)?;
|
||||
|
||||
if self.span_trace.status() != SpanTraceStatus::EMPTY {
|
||||
write!(f, "\nspan_trace:\n{}", self.span_trace)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::io;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct CustomTestError {
|
||||
message: String,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for CustomTestError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "Custom test error: {}", self.message)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for CustomTestError {}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct AnotherTestError;
|
||||
|
||||
impl std::fmt::Display for AnotherTestError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "Another test error")
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for AnotherTestError {}
|
||||
|
||||
#[test]
|
||||
fn test_error_new_from_std_error() {
|
||||
let io_error = io::Error::new(io::ErrorKind::NotFound, "File not found");
|
||||
let error = Error::new(io_error);
|
||||
|
||||
assert!(error.inner_string().contains("File not found"));
|
||||
assert!(error.is::<io::Error>());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_from_std_error() {
|
||||
let io_error = io::Error::new(io::ErrorKind::PermissionDenied, "Permission denied");
|
||||
let boxed_error: StdError = Box::new(io_error);
|
||||
let error = Error::from_std_error(boxed_error);
|
||||
|
||||
assert!(error.inner_string().contains("Permission denied"));
|
||||
assert!(error.is::<io::Error>());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_from_string() {
|
||||
let error = Error::from_string("Test error message");
|
||||
assert_eq!(error.inner_string(), "Test error message");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_msg() {
|
||||
let error = Error::msg("Another test message");
|
||||
assert_eq!(error.inner_string(), "Another test message");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_msg_with_string() {
|
||||
let message = String::from("String message");
|
||||
let error = Error::msg(message);
|
||||
assert_eq!(error.inner_string(), "String message");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_is_type_checking() {
|
||||
let io_error = io::Error::new(io::ErrorKind::InvalidInput, "Invalid input");
|
||||
let error = Error::new(io_error);
|
||||
|
||||
assert!(error.is::<io::Error>());
|
||||
assert!(!error.is::<CustomTestError>());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_downcast_ref() {
|
||||
let io_error = io::Error::new(io::ErrorKind::TimedOut, "Operation timed out");
|
||||
let error = Error::new(io_error);
|
||||
|
||||
let downcast_io = error.downcast_ref::<io::Error>();
|
||||
assert!(downcast_io.is_some());
|
||||
assert_eq!(downcast_io.unwrap().kind(), io::ErrorKind::TimedOut);
|
||||
|
||||
let downcast_custom = error.downcast_ref::<CustomTestError>();
|
||||
assert!(downcast_custom.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_downcast_mut() {
|
||||
let io_error = io::Error::new(io::ErrorKind::Interrupted, "Operation interrupted");
|
||||
let mut error = Error::new(io_error);
|
||||
|
||||
let downcast_io = error.downcast_mut::<io::Error>();
|
||||
assert!(downcast_io.is_some());
|
||||
assert_eq!(downcast_io.unwrap().kind(), io::ErrorKind::Interrupted);
|
||||
|
||||
let downcast_custom = error.downcast_mut::<CustomTestError>();
|
||||
assert!(downcast_custom.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_to_io_err() {
|
||||
// Test with IO error
|
||||
let original_io_error = io::Error::new(io::ErrorKind::BrokenPipe, "Broken pipe");
|
||||
let error = Error::new(original_io_error);
|
||||
|
||||
let converted_io_error = error.to_io_err();
|
||||
assert!(converted_io_error.is_some());
|
||||
let io_err = converted_io_error.unwrap();
|
||||
assert_eq!(io_err.kind(), io::ErrorKind::BrokenPipe);
|
||||
assert!(io_err.to_string().contains("Broken pipe"));
|
||||
|
||||
// Test with non-IO error
|
||||
let custom_error = CustomTestError {
|
||||
message: "Not an IO error".to_string(),
|
||||
};
|
||||
let error = Error::new(custom_error);
|
||||
|
||||
let converted_io_error = error.to_io_err();
|
||||
assert!(converted_io_error.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_inner_string() {
|
||||
let custom_error = CustomTestError {
|
||||
message: "Test message".to_string(),
|
||||
};
|
||||
let error = Error::new(custom_error);
|
||||
|
||||
assert_eq!(error.inner_string(), "Custom test error: Test message");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_from_trait() {
|
||||
let io_error = io::Error::new(io::ErrorKind::UnexpectedEof, "Unexpected EOF");
|
||||
let error: Error = io_error.into();
|
||||
|
||||
assert!(error.is::<io::Error>());
|
||||
assert!(error.inner_string().contains("Unexpected EOF"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_display() {
|
||||
let custom_error = CustomTestError {
|
||||
message: "Display test".to_string(),
|
||||
};
|
||||
let error = Error::new(custom_error);
|
||||
|
||||
let display_string = format!("{}", error);
|
||||
assert!(display_string.contains("Custom test error: Display test"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_debug() {
|
||||
let error = Error::msg("Debug test");
|
||||
let debug_string = format!("{:?}", error);
|
||||
|
||||
assert!(debug_string.contains("Error"));
|
||||
assert!(debug_string.contains("inner"));
|
||||
assert!(debug_string.contains("span_trace"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_multiple_error_types() {
|
||||
let errors = vec![
|
||||
Error::new(io::Error::new(io::ErrorKind::NotFound, "Not found")),
|
||||
Error::new(CustomTestError {
|
||||
message: "Custom".to_string(),
|
||||
}),
|
||||
Error::new(AnotherTestError),
|
||||
Error::msg("String error"),
|
||||
];
|
||||
|
||||
assert!(errors[0].is::<io::Error>());
|
||||
assert!(errors[1].is::<CustomTestError>());
|
||||
assert!(errors[2].is::<AnotherTestError>());
|
||||
assert!(!errors[3].is::<io::Error>());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_chain_compatibility() {
|
||||
// Test that our Error type works well with error chains
|
||||
let io_error = io::Error::new(io::ErrorKind::InvalidData, "Invalid data");
|
||||
let error = Error::new(io_error);
|
||||
|
||||
// Should be able to convert back to Result
|
||||
let result: Result<(), Error> = Err(error);
|
||||
assert!(result.is_err());
|
||||
|
||||
// Test the error from the result
|
||||
if let Err(err) = result {
|
||||
assert!(err.is::<io::Error>());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_result_type_alias() {
|
||||
// Test the Result type alias
|
||||
fn test_function() -> Result<String> {
|
||||
Ok("Success".to_string())
|
||||
}
|
||||
|
||||
fn test_function_with_error() -> Result<String> {
|
||||
Err(Error::msg("Test error"))
|
||||
}
|
||||
|
||||
let success_result = test_function();
|
||||
assert!(success_result.is_ok());
|
||||
assert_eq!(success_result.unwrap(), "Success");
|
||||
|
||||
let error_result = test_function_with_error();
|
||||
assert!(error_result.is_err());
|
||||
assert_eq!(error_result.unwrap_err().inner_string(), "Test error");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_with_empty_message() {
|
||||
let error = Error::msg("");
|
||||
assert_eq!(error.inner_string(), "");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_with_unicode_message() {
|
||||
let unicode_message = "错误信息 🚨 Error message with émojis and ñon-ASCII";
|
||||
let error = Error::msg(unicode_message);
|
||||
assert_eq!(error.inner_string(), unicode_message);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_with_very_long_message() {
|
||||
let long_message = "A".repeat(10000);
|
||||
let error = Error::msg(&long_message);
|
||||
assert_eq!(error.inner_string(), long_message);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_span_trace_capture() {
|
||||
// Test that span trace is captured (though we can't easily test the content)
|
||||
let error = Error::msg("Span trace test");
|
||||
let display_string = format!("{}", error);
|
||||
|
||||
// The error should at least contain the message
|
||||
assert!(display_string.contains("Span trace test"));
|
||||
}
|
||||
}
|
||||
17
common/common/src/globals.rs
Normal file
17
common/common/src/globals.rs
Normal file
@@ -0,0 +1,17 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
use tokio::sync::RwLock;
|
||||
use tonic::transport::Channel;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref GLOBAL_Local_Node_Name: RwLock<String> = RwLock::new("".to_string());
|
||||
pub static ref GLOBAL_Rustfs_Host: RwLock<String> = RwLock::new("".to_string());
|
||||
pub static ref GLOBAL_Rustfs_Port: RwLock<String> = RwLock::new("9000".to_string());
|
||||
pub static ref GLOBAL_Rustfs_Addr: RwLock<String> = RwLock::new("".to_string());
|
||||
pub static ref GLOBAL_Conn_Map: RwLock<HashMap<String, Channel>> = RwLock::new(HashMap::new());
|
||||
}
|
||||
|
||||
pub async fn set_global_addr(addr: &str) {
|
||||
*GLOBAL_Rustfs_Addr.write().await = addr.to_string();
|
||||
}
|
||||
873
common/common/src/last_minute.rs
Normal file
873
common/common/src/last_minute.rs
Normal file
@@ -0,0 +1,873 @@
|
||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[derive(Debug, Default)]
|
||||
struct TimedAction {
|
||||
count: u64,
|
||||
acc_time: u64,
|
||||
min_time: Option<u64>,
|
||||
max_time: Option<u64>,
|
||||
bytes: u64,
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
impl TimedAction {
|
||||
// Avg returns the average time spent on the action.
|
||||
pub fn avg(&self) -> Option<std::time::Duration> {
|
||||
if self.count == 0 {
|
||||
return None;
|
||||
}
|
||||
Some(std::time::Duration::from_nanos(self.acc_time / self.count))
|
||||
}
|
||||
|
||||
// AvgBytes returns the average bytes processed.
|
||||
pub fn avg_bytes(&self) -> u64 {
|
||||
if self.count == 0 {
|
||||
return 0;
|
||||
}
|
||||
self.bytes / self.count
|
||||
}
|
||||
|
||||
// Merge other into t.
|
||||
pub fn merge(&mut self, other: TimedAction) {
|
||||
self.count += other.count;
|
||||
self.acc_time += other.acc_time;
|
||||
self.bytes += other.bytes;
|
||||
|
||||
if self.count == 0 {
|
||||
self.min_time = other.min_time;
|
||||
}
|
||||
if let Some(other_min) = other.min_time {
|
||||
self.min_time = self.min_time.map_or(Some(other_min), |min| Some(min.min(other_min)));
|
||||
}
|
||||
|
||||
self.max_time = self
|
||||
.max_time
|
||||
.map_or(other.max_time, |max| Some(max.max(other.max_time.unwrap_or(0))));
|
||||
}
|
||||
}
|
||||
|
||||
/// Size buckets used to classify objects when recording latency histograms.
#[allow(dead_code)]
#[derive(Debug)]
enum SizeCategory {
    SizeLessThan1KiB = 0,
    SizeLessThan1MiB,
    SizeLessThan10MiB,
    SizeLessThan100MiB,
    SizeLessThan1GiB,
    SizeGreaterThan1GiB,
    // Add new entries here
    SizeLastElemMarker,
}

impl std::fmt::Display for SizeCategory {
    /// Writes the variant name verbatim.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            SizeCategory::SizeLessThan1KiB => "SizeLessThan1KiB",
            SizeCategory::SizeLessThan1MiB => "SizeLessThan1MiB",
            SizeCategory::SizeLessThan10MiB => "SizeLessThan10MiB",
            SizeCategory::SizeLessThan100MiB => "SizeLessThan100MiB",
            SizeCategory::SizeLessThan1GiB => "SizeLessThan1GiB",
            SizeCategory::SizeGreaterThan1GiB => "SizeGreaterThan1GiB",
            SizeCategory::SizeLastElemMarker => "SizeLastElemMarker",
        })
    }
}
|
||||
|
||||
/// Accumulator for one one-second window slot: summed whole seconds,
/// summed bytes, and the number of samples.
#[derive(Clone, Debug, Default, Copy)]
pub struct AccElem {
    pub total: u64,
    pub size: u64,
    pub n: u64,
}

impl AccElem {
    /// Records a single duration; only whole seconds are counted
    /// (sub-second parts are truncated by `as_secs`).
    pub fn add(&mut self, dur: &Duration) {
        let secs = dur.as_secs();
        self.total = self.total.wrapping_add(secs);
        self.n = self.n.wrapping_add(1);
    }

    /// Folds another accumulator into this one (wrapping on overflow).
    pub fn merge(&mut self, other: &AccElem) {
        self.n = self.n.wrapping_add(other.n);
        self.total = self.total.wrapping_add(other.total);
        self.size = self.size.wrapping_add(other.size);
    }

    /// Mean duration per sample, or zero when nothing meaningful was recorded.
    pub fn avg(&self) -> Duration {
        match (self.n, self.total) {
            (n, total) if n >= 1 && total > 0 => Duration::from_secs(total / n),
            _ => Duration::from_secs(0),
        }
    }
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct LastMinuteLatency {
|
||||
pub totals: Vec<AccElem>,
|
||||
pub last_sec: u64,
|
||||
}
|
||||
|
||||
impl Default for LastMinuteLatency {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
totals: vec![AccElem::default(); 60],
|
||||
last_sec: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl LastMinuteLatency {
|
||||
pub fn merge(&mut self, o: &LastMinuteLatency) -> LastMinuteLatency {
|
||||
let mut merged = LastMinuteLatency::default();
|
||||
let mut x = o.clone();
|
||||
if self.last_sec > o.last_sec {
|
||||
x.forward_to(self.last_sec);
|
||||
merged.last_sec = self.last_sec;
|
||||
} else {
|
||||
self.forward_to(o.last_sec);
|
||||
merged.last_sec = o.last_sec;
|
||||
}
|
||||
|
||||
for i in 0..merged.totals.len() {
|
||||
merged.totals[i] = AccElem {
|
||||
total: self.totals[i].total + o.totals[i].total,
|
||||
n: self.totals[i].n + o.totals[i].n,
|
||||
size: self.totals[i].size + o.totals[i].size,
|
||||
}
|
||||
}
|
||||
merged
|
||||
}
|
||||
|
||||
pub fn add(&mut self, t: &Duration) {
|
||||
let sec = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("Time went backwards")
|
||||
.as_secs();
|
||||
self.forward_to(sec);
|
||||
let win_idx = sec % 60;
|
||||
self.totals[win_idx as usize].add(t);
|
||||
self.last_sec = sec;
|
||||
}
|
||||
|
||||
pub fn add_all(&mut self, sec: u64, a: &AccElem) {
|
||||
self.forward_to(sec);
|
||||
let win_idx = sec % 60;
|
||||
self.totals[win_idx as usize].merge(a);
|
||||
self.last_sec = sec;
|
||||
}
|
||||
|
||||
pub fn get_total(&mut self) -> AccElem {
|
||||
let mut res = AccElem::default();
|
||||
let sec = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("Time went backwards")
|
||||
.as_secs();
|
||||
self.forward_to(sec);
|
||||
for elem in self.totals.iter() {
|
||||
res.merge(elem);
|
||||
}
|
||||
res
|
||||
}
|
||||
|
||||
pub fn forward_to(&mut self, t: u64) {
|
||||
if self.last_sec >= t {
|
||||
return;
|
||||
}
|
||||
if t - self.last_sec >= 60 {
|
||||
self.totals = vec![AccElem::default(); 60];
|
||||
self.last_sec = t;
|
||||
return;
|
||||
}
|
||||
while self.last_sec != t {
|
||||
let idx = (self.last_sec + 1) % 60;
|
||||
self.totals[idx as usize] = AccElem::default();
|
||||
self.last_sec += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::time::Duration;

    /// Shorthand constructor used throughout these tests.
    fn acc(total: u64, size: u64, n: u64) -> AccElem {
        AccElem { total, size, n }
    }

    #[test]
    fn test_acc_elem_default() {
        let e = AccElem::default();
        assert_eq!((e.total, e.size, e.n), (0, 0, 0));
    }

    #[test]
    fn test_acc_elem_add_single_duration() {
        let mut e = AccElem::default();
        e.add(&Duration::from_secs(5));
        // size is never touched by add()
        assert_eq!((e.total, e.n, e.size), (5, 1, 0));
    }

    #[test]
    fn test_acc_elem_add_multiple_durations() {
        let mut e = AccElem::default();
        for secs in [3, 7, 2] {
            e.add(&Duration::from_secs(secs));
        }
        assert_eq!((e.total, e.n, e.size), (12, 3, 0));
    }

    #[test]
    fn test_acc_elem_add_zero_duration() {
        let mut e = AccElem::default();
        e.add(&Duration::from_secs(0));
        assert_eq!((e.total, e.n), (0, 1));
    }

    #[test]
    fn test_acc_elem_add_subsecond_duration() {
        let mut e = AccElem::default();
        // as_secs() truncates, so anything under one second counts as 0.
        e.add(&Duration::from_millis(500));
        assert_eq!((e.total, e.n), (0, 1));
    }

    #[test]
    fn test_acc_elem_merge_empty_elements() {
        let mut a = AccElem::default();
        a.merge(&AccElem::default());
        assert_eq!((a.total, a.size, a.n), (0, 0, 0));
    }

    #[test]
    fn test_acc_elem_merge_with_data() {
        let mut a = acc(10, 100, 2);
        a.merge(&acc(15, 200, 3));
        assert_eq!((a.total, a.size, a.n), (25, 300, 5));
    }

    #[test]
    fn test_acc_elem_merge_one_empty() {
        let mut a = acc(10, 100, 2);
        a.merge(&AccElem::default());
        assert_eq!((a.total, a.size, a.n), (10, 100, 2));
    }

    #[test]
    fn test_acc_elem_avg_with_data() {
        assert_eq!(acc(15, 0, 3).avg(), Duration::from_secs(5)); // 15 / 3
    }

    #[test]
    fn test_acc_elem_avg_zero_count() {
        assert_eq!(acc(10, 0, 0).avg(), Duration::from_secs(0));
    }

    #[test]
    fn test_acc_elem_avg_zero_total() {
        assert_eq!(acc(0, 0, 5).avg(), Duration::from_secs(0));
    }

    #[test]
    fn test_acc_elem_avg_rounding() {
        // Integer division truncates: 10 / 3 == 3.
        assert_eq!(acc(10, 0, 3).avg(), Duration::from_secs(3));
    }

    #[test]
    fn test_last_minute_latency_default() {
        let l = LastMinuteLatency::default();
        assert_eq!(l.totals.len(), 60);
        assert_eq!(l.last_sec, 0);
        for e in &l.totals {
            assert_eq!((e.total, e.size, e.n), (0, 0, 0));
        }
    }

    #[test]
    fn test_last_minute_latency_forward_to_same_time() {
        let mut l = LastMinuteLatency {
            last_sec: 100,
            ..Default::default()
        };
        l.totals[0] = acc(10, 0, 1);

        l.forward_to(100); // no-op: same second

        assert_eq!(l.last_sec, 100);
        assert_eq!((l.totals[0].total, l.totals[0].n), (10, 1));
    }

    #[test]
    fn test_last_minute_latency_forward_to_past_time() {
        let mut l = LastMinuteLatency {
            last_sec: 100,
            ..Default::default()
        };
        l.totals[0] = acc(10, 0, 1);

        l.forward_to(50); // going backwards must change nothing

        assert_eq!(l.last_sec, 100);
        assert_eq!((l.totals[0].total, l.totals[0].n), (10, 1));
    }

    #[test]
    fn test_last_minute_latency_forward_to_large_gap() {
        let mut l = LastMinuteLatency {
            last_sec: 100,
            ..Default::default()
        };
        l.totals[0] = acc(10, 0, 1);

        l.forward_to(200); // gap >= 60s resets the whole window

        assert_eq!(l.last_sec, 200);
        for e in &l.totals {
            assert_eq!((e.total, e.size, e.n), (0, 0, 0));
        }
    }

    #[test]
    fn test_last_minute_latency_forward_to_small_gap() {
        let mut l = LastMinuteLatency {
            last_sec: 100,
            ..Default::default()
        };
        // Slots stepped over by the forward: (100 + 1) % 60 == 41 and
        // (100 + 2) % 60 == 42.
        l.totals[41].total = 10;
        l.totals[42].total = 20;

        l.forward_to(102);

        assert_eq!(l.last_sec, 102);
        assert_eq!(l.totals[41].total, 0);
        assert_eq!(l.totals[42].total, 0);
    }

    #[test]
    fn test_last_minute_latency_add_all() {
        let mut l = LastMinuteLatency::default();
        l.add_all(1000, &acc(15, 100, 3));

        assert_eq!(l.last_sec, 1000);
        let idx = (1000 % 60) as usize; // 40
        assert_eq!((l.totals[idx].total, l.totals[idx].size, l.totals[idx].n), (15, 100, 3));
    }

    #[test]
    fn test_last_minute_latency_add_all_multiple() {
        let mut l = LastMinuteLatency::default();
        l.add_all(1000, &acc(10, 50, 2));
        l.add_all(1000, &acc(20, 100, 4)); // same second: accumulates

        let idx = (1000 % 60) as usize;
        assert_eq!((l.totals[idx].total, l.totals[idx].size, l.totals[idx].n), (30, 150, 6));
    }

    #[test]
    fn test_last_minute_latency_merge_same_time() {
        let mut a = LastMinuteLatency::default();
        let mut b = LastMinuteLatency::default();
        a.last_sec = 1000;
        b.last_sec = 1000;
        a.totals[0] = acc(10, 0, 2);
        b.totals[0] = acc(20, 0, 3);

        let merged = a.merge(&b);

        assert_eq!(merged.last_sec, 1000);
        assert_eq!((merged.totals[0].total, merged.totals[0].n), (30, 5));
    }

    #[test]
    fn test_last_minute_latency_merge_different_times() {
        let mut a = LastMinuteLatency::default();
        let mut b = LastMinuteLatency::default();
        a.last_sec = 1000;
        b.last_sec = 1010; // 10 seconds apart
        a.totals[0].total = 10;
        b.totals[0].total = 20;

        let merged = a.merge(&b);

        assert_eq!(merged.last_sec, 1010); // later timestamp wins
        assert_eq!(merged.totals[0].total, 30);
    }

    #[test]
    fn test_last_minute_latency_merge_empty() {
        let mut a = LastMinuteLatency::default();
        let merged = a.merge(&LastMinuteLatency::default());

        assert_eq!(merged.last_sec, 0);
        for e in &merged.totals {
            assert_eq!((e.total, e.size, e.n), (0, 0, 0));
        }
    }

    #[test]
    fn test_last_minute_latency_window_wraparound() {
        let mut l = LastMinuteLatency::default();
        // Two minutes of data: indices must wrap modulo 60.
        for sec in 0..120 {
            l.add_all(sec, &acc(sec, 0, 1));
            assert_eq!(l.totals[(sec % 60) as usize].total, sec);
        }
    }

    #[test]
    fn test_last_minute_latency_time_progression() {
        let mut l = LastMinuteLatency::default();
        l.add_all(1000, &acc(10, 0, 1));

        // 30 seconds later the original slot must survive.
        l.forward_to(1030);
        assert_eq!(l.totals[(1000 % 60) as usize].total, 10);

        // 70 seconds after the first write the whole window is stale.
        l.forward_to(1070);
        for e in &l.totals {
            assert_eq!((e.total, e.n), (0, 0));
        }
    }

    #[test]
    fn test_last_minute_latency_realistic_scenario() {
        let mut l = LastMinuteLatency::default();
        let base_time = 1000u64;

        // Fill exactly one minute with varying durations and sizes.
        for i in 0..60 {
            l.add_all(base_time + i, &acc(i % 10 + 1, 1024 * (i % 5 + 1), 1));
        }

        let mut non_empty = 0;
        let mut total_n = 0;
        let mut total_sum = 0;
        for e in &l.totals {
            if e.n > 0 {
                non_empty += 1;
                total_n += e.n;
                total_sum += e.total;
            }
        }

        // One data point per second for the full window.
        assert_eq!(non_empty, 60);
        assert_eq!(total_n, 60);
        assert!(total_sum > 0);

        // Aggregate by hand: get_total() consults the wall clock, which would
        // clear this synthetic window.
        let mut manual = AccElem::default();
        for e in &l.totals {
            manual.merge(e);
        }
        assert_eq!(manual.n, 60);
        assert_eq!(manual.total, total_sum);
    }

    #[test]
    fn test_acc_elem_clone_and_debug() {
        let e = acc(100, 200, 5);
        let copy = e; // AccElem is Copy
        assert_eq!((e.total, e.size, e.n), (copy.total, copy.size, copy.n));

        let dbg = format!("{:?}", e);
        assert!(dbg.contains("100"));
        assert!(dbg.contains("200"));
        assert!(dbg.contains("5"));
    }

    #[test]
    fn test_last_minute_latency_clone() {
        let mut l = LastMinuteLatency {
            last_sec: 1000,
            ..Default::default()
        };
        l.totals[0] = acc(100, 0, 5);

        let c = l.clone();
        assert_eq!(l.last_sec, c.last_sec);
        assert_eq!(l.totals[0].total, c.totals[0].total);
        assert_eq!(l.totals[0].n, c.totals[0].n);
    }

    #[test]
    fn test_edge_case_max_values() {
        let mut e = acc(u64::MAX - 50, u64::MAX - 50, u64::MAX - 50);

        // merge() uses wrapping_add, so overflow wraps instead of panicking:
        // (u64::MAX - 50) + 100 wraps to 49.
        e.merge(&acc(100, 100, 100));

        assert_eq!((e.total, e.size, e.n), (49, 49, 49));
    }

    #[test]
    fn test_forward_to_boundary_conditions() {
        let mut l = LastMinuteLatency {
            last_sec: 59,
            ..Default::default()
        };
        l.totals[59] = acc(100, 0, 1);

        l.forward_to(119); // exactly 60 seconds ahead: boundary reset

        for e in &l.totals {
            assert_eq!((e.total, e.n), (0, 0));
        }
    }

    #[test]
    fn test_get_total_with_data() {
        let mut l = LastMinuteLatency::default();

        // Anchor last_sec to the real clock so get_total()'s forward_to does
        // not wipe the synthetic window.
        l.last_sec = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("Time went backwards")
            .as_secs();

        l.totals[0] = acc(10, 100, 1);
        l.totals[1] = acc(20, 200, 2);
        l.totals[59] = acc(30, 300, 3);

        let total = l.get_total();
        assert_eq!((total.total, total.size, total.n), (60, 600, 6));
    }

    #[test]
    fn test_window_index_calculation() {
        // (timestamp, expected window index) pairs spanning the wrap points.
        let test_cases = [(0, 0), (1, 1), (59, 59), (60, 0), (61, 1), (119, 59), (120, 0)];

        for (timestamp, expected_idx) in test_cases {
            let mut l = LastMinuteLatency::default();
            l.add_all(timestamp, &acc(1, 1, 1));

            assert_eq!(
                l.totals[expected_idx].n, 1,
                "Failed for timestamp {} (expected index {})",
                timestamp, expected_idx
            );
        }
    }

    #[test]
    fn test_concurrent_safety_simulation() {
        let mut l = LastMinuteLatency::default();

        // Anchor to the real clock so get_total() keeps the data.
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("Time went backwards")
            .as_secs();

        // Rapid additions, all within the current one-minute window.
        for i in 0..1000 {
            l.add_all(now - (i % 60), &acc((i % 10) + 1, (i % 100) + 1, 1));
        }

        let total = l.get_total();
        assert!(total.n > 0, "Total count should be greater than 0");
        assert!(total.total > 0, "Total time should be greater than 0");
    }

    #[test]
    fn test_acc_elem_debug_format() {
        let dbg = format!("{:?}", acc(123, 456, 789));
        assert!(dbg.contains("123"));
        assert!(dbg.contains("456"));
        assert!(dbg.contains("789"));
    }

    #[test]
    fn test_large_values() {
        let mut e = AccElem::default();
        e.add(&Duration::from_secs(u64::MAX / 2));

        assert_eq!((e.total, e.n), (u64::MAX / 2, 1));
        assert_eq!(e.avg(), Duration::from_secs(u64::MAX / 2));
    }

    #[test]
    fn test_zero_duration_handling() {
        let mut e = AccElem::default();
        e.add(&Duration::from_secs(0));

        assert_eq!((e.total, e.n), (0, 1));
        assert_eq!(e.avg(), Duration::from_secs(0));
    }
}
|
||||
|
||||
// NOTE(review): SizeCategory defines 7 variants (SizeLastElemMarker == 6),
// but 10 buckets are allocated here — confirm the intended bucket count.
const SIZE_LAST_ELEM_MARKER: usize = 10; // Translated comment: "this assumes your marker is 10; adjust to the real value"
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[derive(Debug, Default)]
|
||||
pub struct LastMinuteHistogram {
|
||||
histogram: Vec<LastMinuteLatency>,
|
||||
size: u32,
|
||||
}
|
||||
|
||||
impl LastMinuteHistogram {
|
||||
pub fn merge(&mut self, other: &LastMinuteHistogram) {
|
||||
for i in 0..self.histogram.len() {
|
||||
self.histogram[i].merge(&other.histogram[i]);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add(&mut self, size: i64, t: std::time::Duration) {
|
||||
let index = size_to_tag(size);
|
||||
self.histogram[index].add(&t);
|
||||
}
|
||||
|
||||
pub fn get_avg_data(&mut self) -> [AccElem; SIZE_LAST_ELEM_MARKER] {
|
||||
let mut res = [AccElem::default(); SIZE_LAST_ELEM_MARKER];
|
||||
for (i, elem) in self.histogram.iter_mut().enumerate() {
|
||||
res[i] = elem.get_total();
|
||||
}
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
/// Maps an object size in bytes to its histogram bucket index
/// (see `SizeCategory` for the bucket meanings).
fn size_to_tag(size: i64) -> usize {
    const KIB: i64 = 1024;
    const MIB: i64 = 1024 * 1024;
    const GIB: i64 = 1024 * 1024 * 1024;

    if size < KIB {
        0 // sizeLessThan1KiB
    } else if size < MIB {
        1 // sizeLessThan1MiB
    } else if size < 10 * MIB {
        2 // sizeLessThan10MiB
    } else if size < 100 * MIB {
        3 // sizeLessThan100MiB
    } else if size < GIB {
        4 // sizeLessThan1GiB
    } else {
        5 // sizeGreaterThan1GiB
    }
}
|
||||
27
common/common/src/lib.rs
Normal file
27
common/common/src/lib.rs
Normal file
@@ -0,0 +1,27 @@
|
||||
pub mod bucket_stats;
|
||||
pub mod error;
|
||||
pub mod globals;
|
||||
pub mod last_minute;
|
||||
|
||||
/// Default field delimiter: the ASCII comma (byte value 44).
// Improvement: spell the byte as `b','` instead of the magic number 44;
// the value is unchanged.
pub static DEFAULT_DELIMITER: u8 = b',';
|
||||
|
||||
/// Defers evaluation of a block of code until the end of the scope.
#[macro_export]
macro_rules! defer {
    ($($body:tt)*) => {
        // The guard is bound to `_guard` (not `_`) so it lives until the end
        // of the enclosing scope; `_` would drop it immediately.
        let _guard = {
            // Holds the deferred closure; runs it when dropped.
            pub struct Guard<F: FnOnce()>(Option<F>);

            impl<F: FnOnce()> Drop for Guard<F> {
                fn drop(&mut self) {
                    // take() moves the FnOnce closure out so it runs at most once.
                    (self.0).take().map(|f| f());
                }
            }

            Guard(Some(|| {
                // `let _ = { ... }` discards whatever value the body produces.
                let _ = { $($body)* };
            }))
        };
    };
}
|
||||
23
common/lock/Cargo.toml
Normal file
23
common/lock/Cargo.toml
Normal file
@@ -0,0 +1,23 @@
|
||||
# Manifest for the `lock` crate: local and distributed lock primitives.
[package]
name = "lock"
version.workspace = true
edition.workspace = true

[lints]
workspace = true

# All dependency versions are inherited from the workspace dependency table.
[dependencies]
async-trait.workspace = true
backon.workspace = true
common.workspace = true
lazy_static.workspace = true
protos.workspace = true
rand.workspace = true
serde.workspace = true
serde_json.workspace = true
tokio.workspace = true
tonic.workspace = true
tracing.workspace = true
tracing-error.workspace = true
url.workspace = true
uuid.workspace = true
||||
1196
common/lock/src/drwmutex.rs
Normal file
1196
common/lock/src/drwmutex.rs
Normal file
File diff suppressed because it is too large
Load Diff
117
common/lock/src/lib.rs
Normal file
117
common/lock/src/lib.rs
Normal file
@@ -0,0 +1,117 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common::error::Result;
|
||||
use lazy_static::lazy_static;
|
||||
use local_locker::LocalLocker;
|
||||
use lock_args::LockArgs;
|
||||
use remote_client::RemoteClient;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
pub mod drwmutex;
|
||||
pub mod local_locker;
|
||||
pub mod lock_args;
|
||||
pub mod lrwmutex;
|
||||
pub mod namespace_lock;
|
||||
pub mod remote_client;
|
||||
|
||||
lazy_static! {
    // Process-wide local lock server shared by every LockApi::Local handle.
    pub static ref GLOBAL_LOCAL_SERVER: Arc<RwLock<LocalLocker>> = Arc::new(RwLock::new(LocalLocker::new()));
}

// NOTE(review): this alias appears unused in the visible code — confirm
// before removing.
type LockClient = dyn Locker;
|
||||
|
||||
/// Common async interface implemented by both the in-process lock server and
/// the remote lock client.
#[async_trait]
pub trait Locker {
    /// Acquires an exclusive (write) lock for `args`; `Ok(false)` means contended.
    async fn lock(&mut self, args: &LockArgs) -> Result<bool>;
    /// Releases a previously acquired write lock.
    async fn unlock(&mut self, args: &LockArgs) -> Result<bool>;
    /// Acquires a shared (read) lock for `args`.
    async fn rlock(&mut self, args: &LockArgs) -> Result<bool>;
    /// Releases a previously acquired read lock.
    async fn runlock(&mut self, args: &LockArgs) -> Result<bool>;
    /// Refreshes a held lock to keep it from expiring.
    async fn refresh(&mut self, args: &LockArgs) -> Result<bool>;
    /// Forcibly releases the lock regardless of holder.
    async fn force_unlock(&mut self, args: &LockArgs) -> Result<bool>;
    /// Shuts the locker down.
    async fn close(&self);
    /// Whether the locker backend is currently reachable.
    async fn is_online(&self) -> bool;
    /// Whether this locker runs in the local process.
    async fn is_local(&self) -> bool;
}
|
||||
|
||||
// Dispatch handle for lock operations: either the process-global local
// server or a remote lock client.
#[derive(Debug, Clone)]
pub enum LockApi {
    Local,
    Remote(RemoteClient),
}
|
||||
|
||||
#[async_trait]
|
||||
impl Locker for LockApi {
|
||||
async fn lock(&mut self, args: &LockArgs) -> Result<bool> {
|
||||
match self {
|
||||
LockApi::Local => GLOBAL_LOCAL_SERVER.write().await.lock(args).await,
|
||||
LockApi::Remote(r) => r.lock(args).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn unlock(&mut self, args: &LockArgs) -> Result<bool> {
|
||||
match self {
|
||||
LockApi::Local => GLOBAL_LOCAL_SERVER.write().await.unlock(args).await,
|
||||
LockApi::Remote(r) => r.unlock(args).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn rlock(&mut self, args: &LockArgs) -> Result<bool> {
|
||||
match self {
|
||||
LockApi::Local => GLOBAL_LOCAL_SERVER.write().await.rlock(args).await,
|
||||
LockApi::Remote(r) => r.rlock(args).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn runlock(&mut self, args: &LockArgs) -> Result<bool> {
|
||||
match self {
|
||||
LockApi::Local => GLOBAL_LOCAL_SERVER.write().await.runlock(args).await,
|
||||
LockApi::Remote(r) => r.runlock(args).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn refresh(&mut self, args: &LockArgs) -> Result<bool> {
|
||||
match self {
|
||||
LockApi::Local => GLOBAL_LOCAL_SERVER.write().await.refresh(args).await,
|
||||
LockApi::Remote(r) => r.refresh(args).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn force_unlock(&mut self, args: &LockArgs) -> Result<bool> {
|
||||
match self {
|
||||
LockApi::Local => GLOBAL_LOCAL_SERVER.write().await.force_unlock(args).await,
|
||||
LockApi::Remote(r) => r.force_unlock(args).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn close(&self) {
|
||||
match self {
|
||||
LockApi::Local => GLOBAL_LOCAL_SERVER.read().await.close().await,
|
||||
LockApi::Remote(r) => r.close().await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn is_online(&self) -> bool {
|
||||
match self {
|
||||
LockApi::Local => GLOBAL_LOCAL_SERVER.read().await.is_online().await,
|
||||
LockApi::Remote(r) => r.is_online().await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn is_local(&self) -> bool {
|
||||
match self {
|
||||
LockApi::Local => GLOBAL_LOCAL_SERVER.write().await.is_local().await,
|
||||
LockApi::Remote(r) => r.is_local().await,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_lock_api(is_local: bool, url: Option<url::Url>) -> LockApi {
|
||||
if is_local {
|
||||
return LockApi::Local;
|
||||
}
|
||||
|
||||
LockApi::Remote(RemoteClient::new(url.unwrap()))
|
||||
}
|
||||
415
common/lock/src/local_locker.rs
Normal file
415
common/lock/src/local_locker.rs
Normal file
@@ -0,0 +1,415 @@
|
||||
use async_trait::async_trait;
|
||||
use common::error::{Error, Result};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use crate::{lock_args::LockArgs, Locker};
|
||||
|
||||
/// Upper bound on the number of resources accepted by a single
/// `lock`/`unlock` call (requests above this are rejected).
const MAX_DELETE_LIST: usize = 1000;
|
||||
|
||||
// Book-keeping record for a single requester holding (or sharing) a lock on
// one resource.
#[derive(Clone, Debug)]
struct LockRequesterInfo {
    name: String,               // resource name this entry locks
    writer: bool,               // true for an exclusive (write) lock
    uid: String,                // requester-supplied unique id
    time_stamp: Instant,        // when this entry was created
    time_last_refresh: Instant, // last refresh time; used by expire_old_locks
    source: String,             // caller-provided origin, for diagnostics
    group: bool,                // true when the request covered multiple resources
    owner: String,              // owner identity from the lock request
    quorum: usize,              // quorum carried over from the lock request
    idx: usize,                 // index of this resource within the request batch
}

impl Default for LockRequesterInfo {
    // Manual impl because `Instant` has no `Default`; both timestamps start
    // at "now".
    fn default() -> Self {
        Self {
            name: Default::default(),
            writer: Default::default(),
            uid: Default::default(),
            time_stamp: Instant::now(),
            time_last_refresh: Instant::now(),
            source: Default::default(),
            group: Default::default(),
            owner: Default::default(),
            quorum: Default::default(),
            idx: Default::default(),
        }
    }
}
|
||||
|
||||
fn is_write_lock(lri: &[LockRequesterInfo]) -> bool {
|
||||
lri.len() == 1 && lri[0].writer
|
||||
}
|
||||
|
||||
// Aggregate counters describing the current lock table (see LocalLocker::stats).
#[derive(Debug, Default)]
pub struct LockStats {
    total: usize,  // number of entries in the lock map
    writes: usize, // resources whose first holder is a writer
    reads: usize,  // resources whose first holder is a reader
}
|
||||
|
||||
// In-process lock server: tracks per-resource holders plus a formatted
// uid -> resource reverse index.
#[derive(Debug, Default)]
pub struct LocalLocker {
    lock_map: HashMap<String, Vec<LockRequesterInfo>>, // resource -> current holders
    lock_uid: HashMap<String, String>,                 // formatted uid -> resource
}

impl LocalLocker {
    /// Creates an empty local locker.
    pub fn new() -> Self {
        LocalLocker::default()
    }
}
|
||||
|
||||
impl LocalLocker {
|
||||
fn can_take_lock(&self, resource: &[String]) -> bool {
|
||||
resource.iter().fold(true, |acc, x| !self.lock_map.contains_key(x) && acc)
|
||||
}
|
||||
|
||||
pub fn stats(&self) -> LockStats {
|
||||
let mut st = LockStats {
|
||||
total: self.lock_map.len(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
self.lock_map.iter().for_each(|(_, value)| {
|
||||
if !value.is_empty() {
|
||||
if value[0].writer {
|
||||
st.writes += 1;
|
||||
} else {
|
||||
st.reads += 1;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
st
|
||||
}
|
||||
|
||||
fn dump_lock_map(&mut self) -> HashMap<String, Vec<LockRequesterInfo>> {
|
||||
let mut lock_copy = HashMap::new();
|
||||
self.lock_map.iter().for_each(|(key, value)| {
|
||||
lock_copy.insert(key.to_string(), value.to_vec());
|
||||
});
|
||||
|
||||
lock_copy
|
||||
}
|
||||
|
||||
fn expire_old_locks(&mut self, interval: Duration) {
|
||||
self.lock_map.iter_mut().for_each(|(_, lris)| {
|
||||
lris.retain(|lri| {
|
||||
if Instant::now().duration_since(lri.time_last_refresh) > interval {
|
||||
let mut key = lri.uid.to_string();
|
||||
format_uuid(&mut key, &lri.idx);
|
||||
self.lock_uid.remove(&key);
|
||||
return false;
|
||||
}
|
||||
|
||||
true
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Locker for LocalLocker {
|
||||
async fn lock(&mut self, args: &LockArgs) -> Result<bool> {
|
||||
if args.resources.len() > MAX_DELETE_LIST {
|
||||
return Err(Error::from_string(format!(
|
||||
"internal error: LocalLocker.lock called with more than {} resources",
|
||||
MAX_DELETE_LIST
|
||||
)));
|
||||
}
|
||||
|
||||
if !self.can_take_lock(&args.resources) {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
args.resources.iter().enumerate().for_each(|(idx, resource)| {
|
||||
self.lock_map.insert(
|
||||
resource.to_string(),
|
||||
vec![LockRequesterInfo {
|
||||
name: resource.to_string(),
|
||||
writer: true,
|
||||
source: args.source.to_string(),
|
||||
owner: args.owner.to_string(),
|
||||
uid: args.uid.to_string(),
|
||||
group: args.resources.len() > 1,
|
||||
quorum: args.quorum,
|
||||
idx,
|
||||
..Default::default()
|
||||
}],
|
||||
);
|
||||
|
||||
let mut uuid = args.uid.to_string();
|
||||
format_uuid(&mut uuid, &idx);
|
||||
self.lock_uid.insert(uuid, resource.to_string());
|
||||
});
|
||||
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
async fn unlock(&mut self, args: &LockArgs) -> Result<bool> {
|
||||
if args.resources.len() > MAX_DELETE_LIST {
|
||||
return Err(Error::from_string(format!(
|
||||
"internal error: LocalLocker.unlock called with more than {} resources",
|
||||
MAX_DELETE_LIST
|
||||
)));
|
||||
}
|
||||
|
||||
let mut reply = false;
|
||||
let mut err_info = String::new();
|
||||
for resource in args.resources.iter() {
|
||||
match self.lock_map.get_mut(resource) {
|
||||
Some(lris) => {
|
||||
if !is_write_lock(lris) {
|
||||
if err_info.is_empty() {
|
||||
err_info = format!("unlock attempted on a read locked entity: {}", resource);
|
||||
} else {
|
||||
err_info.push_str(&format!(", {}", resource));
|
||||
}
|
||||
} else {
|
||||
lris.retain(|lri| {
|
||||
if lri.uid == args.uid && (args.owner.is_empty() || lri.owner == args.owner) {
|
||||
let mut key = args.uid.to_string();
|
||||
format_uuid(&mut key, &lri.idx);
|
||||
self.lock_uid.remove(&key).unwrap();
|
||||
reply |= true;
|
||||
return false;
|
||||
}
|
||||
|
||||
true
|
||||
});
|
||||
}
|
||||
if lris.is_empty() {
|
||||
self.lock_map.remove(resource);
|
||||
}
|
||||
}
|
||||
None => {
|
||||
continue;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
Ok(reply)
|
||||
}
|
||||
|
||||
async fn rlock(&mut self, args: &LockArgs) -> Result<bool> {
|
||||
if args.resources.len() != 1 {
|
||||
return Err(Error::from_string("internal error: localLocker.RLock called with more than one resource"));
|
||||
}
|
||||
|
||||
let resource = &args.resources[0];
|
||||
match self.lock_map.get_mut(resource) {
|
||||
Some(lri) => {
|
||||
if !is_write_lock(lri) {
|
||||
lri.push(LockRequesterInfo {
|
||||
name: resource.to_string(),
|
||||
writer: false,
|
||||
source: args.source.to_string(),
|
||||
owner: args.owner.to_string(),
|
||||
uid: args.uid.to_string(),
|
||||
quorum: args.quorum,
|
||||
..Default::default()
|
||||
});
|
||||
} else {
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
None => {
|
||||
self.lock_map.insert(
|
||||
resource.to_string(),
|
||||
vec![LockRequesterInfo {
|
||||
name: resource.to_string(),
|
||||
writer: false,
|
||||
source: args.source.to_string(),
|
||||
owner: args.owner.to_string(),
|
||||
uid: args.uid.to_string(),
|
||||
quorum: args.quorum,
|
||||
..Default::default()
|
||||
}],
|
||||
);
|
||||
}
|
||||
}
|
||||
let mut uuid = args.uid.to_string();
|
||||
format_uuid(&mut uuid, &0);
|
||||
self.lock_uid.insert(uuid, resource.to_string());
|
||||
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
async fn runlock(&mut self, args: &LockArgs) -> Result<bool> {
|
||||
if args.resources.len() != 1 {
|
||||
return Err(Error::from_string("internal error: localLocker.RLock called with more than one resource"));
|
||||
}
|
||||
|
||||
let mut reply = false;
|
||||
let resource = &args.resources[0];
|
||||
match self.lock_map.get_mut(resource) {
|
||||
Some(lris) => {
|
||||
if is_write_lock(lris) {
|
||||
return Err(Error::from_string(format!("runlock attempted on a write locked entity: {}", resource)));
|
||||
} else {
|
||||
lris.retain(|lri| {
|
||||
if lri.uid == args.uid && (args.owner.is_empty() || lri.owner == args.owner) {
|
||||
let mut key = args.uid.to_string();
|
||||
format_uuid(&mut key, &lri.idx);
|
||||
self.lock_uid.remove(&key).unwrap();
|
||||
reply |= true;
|
||||
return false;
|
||||
}
|
||||
|
||||
true
|
||||
});
|
||||
}
|
||||
if lris.is_empty() {
|
||||
self.lock_map.remove(resource);
|
||||
}
|
||||
}
|
||||
None => {
|
||||
return Ok(reply);
|
||||
}
|
||||
};
|
||||
|
||||
Ok(reply)
|
||||
}
|
||||
|
||||
async fn refresh(&mut self, args: &LockArgs) -> Result<bool> {
|
||||
let mut idx = 0;
|
||||
let mut key = args.uid.to_string();
|
||||
format_uuid(&mut key, &idx);
|
||||
match self.lock_uid.get(&key) {
|
||||
Some(resource) => {
|
||||
let mut resource = resource;
|
||||
loop {
|
||||
match self.lock_map.get_mut(resource) {
|
||||
Some(_lris) => {}
|
||||
None => {
|
||||
let mut key = args.uid.to_string();
|
||||
format_uuid(&mut key, &0);
|
||||
self.lock_uid.remove(&key);
|
||||
return Ok(idx > 0);
|
||||
}
|
||||
}
|
||||
|
||||
idx += 1;
|
||||
let mut key = args.uid.to_string();
|
||||
format_uuid(&mut key, &idx);
|
||||
resource = match self.lock_uid.get(&key) {
|
||||
Some(resource) => resource,
|
||||
None => return Ok(true),
|
||||
};
|
||||
}
|
||||
}
|
||||
None => Ok(false),
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: need add timeout mechanism
|
||||
async fn force_unlock(&mut self, args: &LockArgs) -> Result<bool> {
|
||||
if args.uid.is_empty() {
|
||||
args.resources.iter().for_each(|resource| {
|
||||
if let Some(lris) = self.lock_map.get(resource) {
|
||||
lris.iter().for_each(|lri| {
|
||||
let mut key = lri.uid.to_string();
|
||||
format_uuid(&mut key, &lri.idx);
|
||||
self.lock_uid.remove(&key);
|
||||
});
|
||||
if lris.is_empty() {
|
||||
self.lock_map.remove(resource);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
return Ok(true);
|
||||
}
|
||||
let mut idx = 0;
|
||||
let mut need_remove_resource = Vec::new();
|
||||
let mut need_remove_map_id = Vec::new();
|
||||
let reply = loop {
|
||||
let mut map_id = args.uid.to_string();
|
||||
format_uuid(&mut map_id, &idx);
|
||||
match self.lock_uid.get(&map_id) {
|
||||
Some(resource) => match self.lock_map.get_mut(resource) {
|
||||
Some(lris) => {
|
||||
{
|
||||
lris.retain(|lri| {
|
||||
if lri.uid == args.uid && (args.owner.is_empty() || lri.owner == args.owner) {
|
||||
let mut key = args.uid.to_string();
|
||||
format_uuid(&mut key, &lri.idx);
|
||||
need_remove_map_id.push(key);
|
||||
return false;
|
||||
}
|
||||
|
||||
true
|
||||
});
|
||||
}
|
||||
idx += 1;
|
||||
if lris.is_empty() {
|
||||
need_remove_resource.push(resource.to_string());
|
||||
}
|
||||
}
|
||||
None => {
|
||||
need_remove_map_id.push(map_id);
|
||||
idx += 1;
|
||||
continue;
|
||||
}
|
||||
},
|
||||
None => {
|
||||
break idx > 0;
|
||||
}
|
||||
}
|
||||
};
|
||||
need_remove_resource.into_iter().for_each(|resource| {
|
||||
self.lock_map.remove(&resource);
|
||||
});
|
||||
need_remove_map_id.into_iter().for_each(|map_id| {
|
||||
self.lock_uid.remove(&map_id);
|
||||
});
|
||||
|
||||
Ok(reply)
|
||||
}
|
||||
|
||||
async fn close(&self) {}
|
||||
|
||||
async fn is_online(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
async fn is_local(&self) -> bool {
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
/// Append the lock slot index to the uid in place, forming the key used by
/// the `lock_uid` reverse index.
///
/// NOTE(review): there is no separator between uid and index, so uids of
/// differing lengths could in principle collide — confirm uids are
/// fixed-length before relying on uniqueness.
fn format_uuid(s: &mut String, idx: &usize) {
    use std::fmt::Write as _;
    let _ = write!(s, "{}", idx);
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::LocalLocker;
    use crate::{lock_args::LockArgs, Locker};
    use common::error::Result;
    use tokio;

    /// Round-trip a single write lock through lock() and unlock().
    #[tokio::test]
    async fn test_lock_unlock() -> Result<()> {
        let mut locker = LocalLocker::new();
        let args = LockArgs {
            uid: "1111".to_string(),
            resources: vec!["dandan".to_string()],
            owner: "dd".to_string(),
            source: "".to_string(),
            quorum: 3,
        };

        locker.lock(&args).await?;
        println!("lock local_locker: {:?} \n", locker);

        locker.unlock(&args).await?;
        println!("unlock local_locker: {:?}", locker);

        Ok(())
    }
}
|
||||
22
common/lock/src/lock_args.rs
Normal file
22
common/lock/src/lock_args.rs
Normal file
@@ -0,0 +1,22 @@
|
||||
use std::fmt::Display;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Serializable argument bundle passed to every lock operation (local and
/// remote — it is JSON-encoded for the gRPC path).
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct LockArgs {
    // Unique id of the lock acquisition; shared by all resources in a group.
    pub uid: String,
    // Resources to (un)lock; more than one forms a lock group.
    pub resources: Vec<String>,
    // Requesting owner; an empty owner matches any owner on unlock.
    pub owner: String,
    // Free-form origin of the request, for diagnostics.
    pub source: String,
    // Quorum carried along with the request — presumably the node count
    // required by the distributed locker; TODO confirm.
    pub quorum: usize,
}
|
||||
|
||||
impl Display for LockArgs {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"LockArgs[ uid: {}, resources: {:?}, owner: {}, source:{}, quorum: {} ]",
|
||||
self.uid, self.resources, self.owner, self.source, self.quorum
|
||||
)
|
||||
}
|
||||
}
|
||||
178
common/lock/src/lrwmutex.rs
Normal file
178
common/lock/src/lrwmutex.rs
Normal file
@@ -0,0 +1,178 @@
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use rand::Rng;
|
||||
use tokio::{sync::RwLock, time::sleep};
|
||||
use tracing::info;
|
||||
|
||||
/// A timeout-based local read/write mutex (single writer or N readers),
/// acquired by polling with randomized backoff rather than by queueing.
#[derive(Debug, Default)]
pub struct LRWMutex {
    // Identity of the most recent caller; diagnostics only.
    id: RwLock<String>,
    // Origin of the most recent request; diagnostics only.
    source: RwLock<String>,
    // True while a writer holds the mutex.
    is_write: RwLock<bool>,
    // Reader count, or exactly 1 while a writer holds the mutex.
    // NOTE(review): field name is a typo for "reference"; renaming touches
    // the impl below, so it is left as-is here.
    refrence: RwLock<usize>,
}
|
||||
|
||||
impl LRWMutex {
    /// Take the write lock using the previously stored id/source, waiting
    /// effectively forever (10000 s).
    pub async fn lock(&self) -> bool {
        let is_write = true;
        let id = self.id.read().await.clone();
        let source = self.source.read().await.clone();
        let timeout = Duration::from_secs(10000);
        self.look_loop(
            &id, &source, &timeout, // big enough
            is_write,
        )
        .await
    }

    /// Take the write lock for `id`/`source`, giving up after `timeout`.
    pub async fn get_lock(&self, id: &str, source: &str, timeout: &Duration) -> bool {
        let is_write = true;
        self.look_loop(id, source, timeout, is_write).await
    }

    /// Take a read lock using the previously stored id/source, waiting
    /// effectively forever (10000 s).
    pub async fn r_lock(&self) -> bool {
        let is_write: bool = false;
        let id = self.id.read().await.clone();
        let source = self.source.read().await.clone();
        let timeout = Duration::from_secs(10000);
        self.look_loop(
            &id, &source, &timeout, // big enough
            is_write,
        )
        .await
    }

    /// Take a read lock for `id`/`source`, giving up after `timeout`.
    pub async fn get_r_lock(&self, id: &str, source: &str, timeout: &Duration) -> bool {
        let is_write = false;
        self.look_loop(id, source, timeout, is_write).await
    }

    /// Single non-blocking acquisition attempt.
    ///
    /// NOTE(review): the checks and updates span several independent RwLocks
    /// with awaits in between, so two tasks can interleave between the read
    /// and the write (TOCTOU) and both "acquire" — confirm callers serialize
    /// access externally. Also note id/source are overwritten even when the
    /// attempt fails.
    async fn inner_lock(&self, id: &str, source: &str, is_write: bool) -> bool {
        *self.id.write().await = id.to_string();
        *self.source.write().await = source.to_string();

        let mut locked = false;
        if is_write {
            // Writer needs no readers and no current writer.
            if *self.refrence.read().await == 0 && !*self.is_write.read().await {
                *self.refrence.write().await = 1;
                *self.is_write.write().await = true;
                locked = true;
            }
        } else if !*self.is_write.read().await {
            // Readers stack freely while no writer holds the mutex.
            *self.refrence.write().await += 1;
            locked = true;
        }

        locked
    }

    /// Retry inner_lock with a 10-50 ms randomized sleep until success or
    /// until `timeout` has elapsed.
    async fn look_loop(&self, id: &str, source: &str, timeout: &Duration, is_write: bool) -> bool {
        let start = Instant::now();
        loop {
            if self.inner_lock(id, source, is_write).await {
                return true;
            } else {
                if Instant::now().duration_since(start) > *timeout {
                    return false;
                }
                let sleep_time: u64;
                {
                    // Scope the RNG so the non-Send guard is dropped before
                    // the await point.
                    let mut rng = rand::thread_rng();
                    sleep_time = rng.gen_range(10..=50);
                }
                sleep(Duration::from_millis(sleep_time)).await;
            }
        }
    }

    /// Release the write lock; logs (does not panic) if none is held.
    pub async fn un_lock(&self) {
        let is_write = true;
        if !self.unlock(is_write).await {
            info!("Trying to un_lock() while no Lock() is active")
        }
    }

    /// Release one read lock; logs (does not panic) if none is held.
    pub async fn un_r_lock(&self) {
        let is_write = false;
        if !self.unlock(is_write).await {
            info!("Trying to un_r_lock() while no Lock() is active")
        }
    }

    /// Shared release path; returns false when nothing matching was held.
    /// NOTE(review): same multi-RwLock TOCTOU caveat as inner_lock.
    async fn unlock(&self, is_write: bool) -> bool {
        let mut unlocked = false;
        if is_write {
            if *self.is_write.read().await && *self.refrence.read().await == 1 {
                *self.refrence.write().await = 0;
                *self.is_write.write().await = false;
                unlocked = true;
            }
        } else if !*self.is_write.read().await && *self.refrence.read().await > 0 {
            *self.refrence.write().await -= 1;
            unlocked = true;
        }

        unlocked
    }

    /// Unconditionally reset the mutex to the unlocked state.
    pub async fn force_un_lock(&self) {
        *self.refrence.write().await = 0;
        *self.is_write.write().await = false;
    }
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use std::{sync::Arc, time::Duration};

    use common::error::Result;
    use tokio::time::sleep;

    use crate::lrwmutex::LRWMutex;

    /// Write lock excludes readers; after release a read lock succeeds.
    #[tokio::test]
    async fn test_lock_unlock() -> Result<()> {
        let l_rw_lock = LRWMutex::default();
        let id = "foo";
        let source = "dandan";
        let timeout = Duration::from_secs(5);
        assert!(l_rw_lock.get_lock(id, source, &timeout).await);
        l_rw_lock.un_lock().await;

        // Re-take the write lock with the stored id/source.
        l_rw_lock.lock().await;

        // Reader must time out while the writer holds the mutex.
        assert!(!l_rw_lock.get_r_lock(id, source, &timeout).await);
        l_rw_lock.un_lock().await;
        assert!(l_rw_lock.get_r_lock(id, source, &timeout).await);

        Ok(())
    }

    /// Timing choreography: task one holds the write lock for 5 s; task two's
    /// first read attempt (2 s timeout) must fail, the second must succeed.
    /// NOTE(review): wall-clock dependent; could be flaky on slow CI.
    #[tokio::test]
    async fn multi_thread_test() -> Result<()> {
        let l_rw_lock = Arc::new(LRWMutex::default());
        let id = "foo";
        let source = "dandan";

        let one_fn = async {
            let one = Arc::clone(&l_rw_lock);
            let timeout = Duration::from_secs(1);
            assert!(one.get_lock(id, source, &timeout).await);
            sleep(Duration::from_secs(5)).await;
            l_rw_lock.un_lock().await;
        };

        let two_fn = async {
            let two = Arc::clone(&l_rw_lock);
            let timeout = Duration::from_secs(2);
            assert!(!two.get_r_lock(id, source, &timeout).await);
            sleep(Duration::from_secs(5)).await;
            assert!(two.get_r_lock(id, source, &timeout).await);
            two.un_r_lock().await;
        };

        tokio::join!(one_fn, two_fn);

        Ok(())
    }
}
|
||||
293
common/lock/src/namespace_lock.rs
Normal file
293
common/lock/src/namespace_lock.rs
Normal file
@@ -0,0 +1,293 @@
|
||||
use std::{collections::HashMap, path::Path, sync::Arc, time::Duration};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use tokio::sync::RwLock;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
drwmutex::{DRWMutex, Options},
|
||||
lrwmutex::LRWMutex,
|
||||
LockApi,
|
||||
};
|
||||
use common::error::Result;
|
||||
|
||||
pub type RWLockerImpl = Box<dyn RWLocker + Send + Sync>;
|
||||
|
||||
/// Async read/write lock facade; backed either by the purely local lock map
/// or by a distributed mutex, as selected in `new_nslock`.
#[async_trait]
pub trait RWLocker {
    /// Acquire the write lock; Ok(false) means it could not be taken in time.
    async fn get_lock(&mut self, opts: &Options) -> Result<bool>;
    /// Release the write lock.
    async fn un_lock(&mut self) -> Result<()>;
    /// Acquire the read lock; Ok(false) means it could not be taken in time.
    /// NOTE(review): name looks like a typo for `get_r_lock` (it pairs with
    /// `un_r_lock`) — confirm before renaming, callers depend on it.
    async fn get_u_lock(&mut self, opts: &Options) -> Result<bool>;
    /// Release the read lock.
    async fn un_r_lock(&mut self) -> Result<()>;
}
|
||||
|
||||
/// Reference-counted lock entry for a single `volume/path` resource.
#[derive(Debug)]
struct NsLock {
    // Callers currently holding or waiting on this entry; the entry is
    // evicted from the map when this drops to zero.
    reference: usize,
    lock: LRWMutex,
}
|
||||
|
||||
/// Process-wide namespace lock registry, keyed by "volume/path".
#[derive(Debug, Default)]
pub struct NsLockMap {
    // When true, new_nslock() hands out distributed locks instead of local ones.
    is_dist_erasure: bool,
    lock_map: RwLock<HashMap<String, NsLock>>,
}
|
||||
|
||||
impl NsLockMap {
    /// `is_dist_erasure` selects distributed locking in `new_nslock`.
    pub fn new(is_dist_erasure: bool) -> Self {
        Self {
            is_dist_erasure,
            ..Default::default()
        }
    }

    /// Acquire a read or write lock on `volume/path`, waiting up to `timeout`.
    /// On failure the reference count is rolled back and an unused entry is
    /// evicted.
    ///
    /// NOTE(review): the `lock_map` write guard is held across the
    /// get_lock/get_r_lock await, which can spin until `timeout` — this
    /// serializes ALL namespace lock operations process-wide while one caller
    /// waits. Confirm whether the guard should be dropped before waiting.
    async fn lock(
        &mut self,
        volume: &String,
        path: &String,
        lock_source: &str,
        ops_id: &str,
        read_lock: bool,
        timeout: Duration,
    ) -> bool {
        // Key is the joined "volume/path" form; built from Strings, so the
        // to_str() on the path cannot fail on UTF-8 grounds.
        let resource = Path::new(volume).join(path).to_str().unwrap().to_string();
        let mut w_lock_map = self.lock_map.write().await;
        let nslk = w_lock_map.entry(resource.clone()).or_insert(NsLock {
            reference: 0,
            lock: LRWMutex::default(),
        });
        nslk.reference += 1;

        let locked = if read_lock {
            nslk.lock.get_r_lock(ops_id, lock_source, &timeout).await
        } else {
            nslk.lock.get_lock(ops_id, lock_source, &timeout).await
        };

        if !locked {
            // Roll back our reference and garbage-collect an unused entry.
            nslk.reference -= 1;
            if nslk.reference == 0 {
                w_lock_map.remove(&resource);
            }
        }

        locked
    }

    /// Release a previously acquired lock on `volume/path` and drop one
    /// reference, evicting the entry when it becomes unused.
    ///
    /// NOTE(review): the reference count is decremented even when the inner
    /// unlock did not match an active lock (LRWMutex only logs) — confirm
    /// callers never double-unlock, or the count can underflow.
    async fn un_lock(&mut self, volume: &String, path: &String, read_lock: bool) {
        let resource = Path::new(volume).join(path).to_str().unwrap().to_string();
        let mut w_lock_map = self.lock_map.write().await;
        if let Some(nslk) = w_lock_map.get_mut(&resource) {
            if read_lock {
                nslk.lock.un_r_lock().await;
            } else {
                nslk.lock.un_lock().await;
            }

            nslk.reference -= 1;

            if nslk.reference == 0 {
                w_lock_map.remove(&resource);
            }
        }
    }
}
|
||||
|
||||
/// Shared handle around a lock instance; its `Drop` impl schedules a
/// best-effort write-unlock when the handle goes away.
pub struct WrapperLocker(pub Arc<RwLock<RWLockerImpl>>);
|
||||
|
||||
impl Drop for WrapperLocker {
    /// Best-effort release on drop: spawn a task that write-locks the inner
    /// locker and calls `un_lock`.
    ///
    /// NOTE(review): this always performs the *write* unlock, even when the
    /// wrapper was used for a read lock (`get_u_lock`) or when no lock was
    /// ever acquired — confirm that is intended. It also assumes a Tokio
    /// runtime is active; `tokio::spawn` panics outside one.
    fn drop(&mut self) {
        let inner = self.0.clone();
        tokio::spawn(async move {
            let _ = inner.write().await.un_lock().await;
        });
    }
}
|
||||
|
||||
pub async fn new_nslock(
|
||||
ns: Arc<RwLock<NsLockMap>>,
|
||||
owner: String,
|
||||
volume: String,
|
||||
paths: Vec<String>,
|
||||
lockers: Vec<LockApi>,
|
||||
) -> WrapperLocker {
|
||||
if ns.read().await.is_dist_erasure {
|
||||
let names = paths
|
||||
.iter()
|
||||
.map(|path| Path::new(&volume).join(path).to_str().unwrap().to_string())
|
||||
.collect();
|
||||
return WrapperLocker(Arc::new(RwLock::new(Box::new(DistLockInstance::new(owner, names, lockers)))));
|
||||
}
|
||||
|
||||
WrapperLocker(Arc::new(RwLock::new(Box::new(LocalLockInstance::new(ns, volume, paths)))))
|
||||
}
|
||||
|
||||
/// RWLocker backed by a distributed read/write mutex over remote lockers.
struct DistLockInstance {
    lock: Box<DRWMutex>,
    // Per-instance operation id used as the lock owner id on the wire.
    ops_id: String,
}
|
||||
|
||||
impl DistLockInstance {
|
||||
fn new(owner: String, names: Vec<String>, lockers: Vec<LockApi>) -> Self {
|
||||
let ops_id = Uuid::new_v4().to_string();
|
||||
Self {
|
||||
lock: Box::new(DRWMutex::new(owner, names, lockers)),
|
||||
ops_id,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl RWLocker for DistLockInstance {
|
||||
async fn get_lock(&mut self, opts: &Options) -> Result<bool> {
|
||||
let source = "".to_string();
|
||||
|
||||
Ok(self.lock.get_lock(&self.ops_id, &source, opts).await)
|
||||
}
|
||||
|
||||
async fn un_lock(&mut self) -> Result<()> {
|
||||
self.lock.un_lock().await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_u_lock(&mut self, opts: &Options) -> Result<bool> {
|
||||
let source = "".to_string();
|
||||
|
||||
Ok(self.lock.get_r_lock(&self.ops_id, &source, opts).await)
|
||||
}
|
||||
|
||||
async fn un_r_lock(&mut self) -> Result<()> {
|
||||
self.lock.un_r_lock().await;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// RWLocker backed by the in-process `NsLockMap`.
struct LocalLockInstance {
    // Shared namespace lock registry.
    ns: Arc<RwLock<NsLockMap>>,
    volume: String,
    // All paths are locked as a group, all-or-nothing.
    paths: Vec<String>,
    // Per-instance operation id passed to the underlying LRWMutex.
    ops_id: String,
}
|
||||
|
||||
impl LocalLockInstance {
|
||||
fn new(ns: Arc<RwLock<NsLockMap>>, volume: String, paths: Vec<String>) -> Self {
|
||||
let ops_id = Uuid::new_v4().to_string();
|
||||
Self {
|
||||
ns,
|
||||
volume,
|
||||
paths,
|
||||
ops_id,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl RWLocker for LocalLockInstance {
|
||||
async fn get_lock(&mut self, opts: &Options) -> Result<bool> {
|
||||
let source = "".to_string();
|
||||
let read_lock = false;
|
||||
let mut success = vec![false; self.paths.len()];
|
||||
for (idx, path) in self.paths.iter().enumerate() {
|
||||
if !self
|
||||
.ns
|
||||
.write()
|
||||
.await
|
||||
.lock(&self.volume, path, &source, &self.ops_id, read_lock, opts.timeout)
|
||||
.await
|
||||
{
|
||||
for (i, x) in success.iter().enumerate() {
|
||||
if *x {
|
||||
self.ns.write().await.un_lock(&self.volume, &self.paths[i], read_lock).await;
|
||||
}
|
||||
}
|
||||
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
success[idx] = true;
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
async fn un_lock(&mut self) -> Result<()> {
|
||||
let read_lock = false;
|
||||
for path in self.paths.iter() {
|
||||
self.ns.write().await.un_lock(&self.volume, path, read_lock).await;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_u_lock(&mut self, opts: &Options) -> Result<bool> {
|
||||
let source = "".to_string();
|
||||
let read_lock = true;
|
||||
let mut success = Vec::with_capacity(self.paths.len());
|
||||
for (idx, path) in self.paths.iter().enumerate() {
|
||||
if !self
|
||||
.ns
|
||||
.write()
|
||||
.await
|
||||
.lock(&self.volume, path, &source, &self.ops_id, read_lock, opts.timeout)
|
||||
.await
|
||||
{
|
||||
for (i, x) in success.iter().enumerate() {
|
||||
if *x {
|
||||
self.ns.write().await.un_lock(&self.volume, &self.paths[i], read_lock).await;
|
||||
}
|
||||
}
|
||||
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
success[idx] = true;
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
async fn un_r_lock(&mut self) -> Result<()> {
|
||||
let read_lock = true;
|
||||
for path in self.paths.iter() {
|
||||
self.ns.write().await.un_lock(&self.volume, path, read_lock).await;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use std::{sync::Arc, time::Duration};

    use common::error::Result;
    use tokio::sync::RwLock;

    use crate::{
        drwmutex::Options,
        namespace_lock::{new_nslock, NsLockMap},
    };

    /// A local (non-distributed) instance should grant the write lock.
    #[tokio::test]
    async fn test_local_instance() -> Result<()> {
        let map = Arc::new(RwLock::new(NsLockMap::default()));
        let ns = new_nslock(
            Arc::clone(&map),
            "local".to_string(),
            "test".to_string(),
            vec!["foo".to_string()],
            Vec::new(),
        )
        .await;

        let opts = Options {
            timeout: Duration::from_secs(5),
            retry_interval: Duration::from_secs(1),
        };
        let acquired = ns.0.write().await.get_lock(&opts).await?;

        assert!(acquired);
        Ok(())
    }
}
|
||||
134
common/lock/src/remote_client.rs
Normal file
134
common/lock/src/remote_client.rs
Normal file
@@ -0,0 +1,134 @@
|
||||
use async_trait::async_trait;
|
||||
use common::error::{Error, Result};
|
||||
use protos::{node_service_time_out_client, proto_gen::node_service::GenerallyLockRequest};
|
||||
use tonic::Request;
|
||||
use tracing::info;
|
||||
|
||||
use crate::{lock_args::LockArgs, Locker};
|
||||
|
||||
/// `Locker` implementation that forwards every operation to a remote
/// node-service endpoint over gRPC.
#[derive(Debug, Clone)]
pub struct RemoteClient {
    // Endpoint in "scheme://host:port" form.
    addr: String,
}
|
||||
|
||||
impl RemoteClient {
|
||||
pub fn new(url: url::Url) -> Self {
|
||||
let addr = format!("{}://{}:{}", url.scheme(), url.host_str().unwrap(), url.port().unwrap());
|
||||
Self { addr }
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Locker for RemoteClient {
|
||||
async fn lock(&mut self, args: &LockArgs) -> Result<bool> {
|
||||
info!("remote lock");
|
||||
let args = serde_json::to_string(args)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
|
||||
let request = Request::new(GenerallyLockRequest { args });
|
||||
|
||||
let response = client.lock(request).await?.into_inner();
|
||||
|
||||
if let Some(error_info) = response.error_info {
|
||||
return Err(Error::from_string(error_info));
|
||||
}
|
||||
|
||||
Ok(response.success)
|
||||
}
|
||||
|
||||
async fn unlock(&mut self, args: &LockArgs) -> Result<bool> {
|
||||
info!("remote unlock");
|
||||
let args = serde_json::to_string(args)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
|
||||
let request = Request::new(GenerallyLockRequest { args });
|
||||
|
||||
let response = client.un_lock(request).await?.into_inner();
|
||||
|
||||
if let Some(error_info) = response.error_info {
|
||||
return Err(Error::from_string(error_info));
|
||||
}
|
||||
|
||||
Ok(response.success)
|
||||
}
|
||||
|
||||
async fn rlock(&mut self, args: &LockArgs) -> Result<bool> {
|
||||
info!("remote rlock");
|
||||
let args = serde_json::to_string(args)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
|
||||
let request = Request::new(GenerallyLockRequest { args });
|
||||
|
||||
let response = client.r_lock(request).await?.into_inner();
|
||||
|
||||
if let Some(error_info) = response.error_info {
|
||||
return Err(Error::from_string(error_info));
|
||||
}
|
||||
|
||||
Ok(response.success)
|
||||
}
|
||||
|
||||
async fn runlock(&mut self, args: &LockArgs) -> Result<bool> {
|
||||
info!("remote runlock");
|
||||
let args = serde_json::to_string(args)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
|
||||
let request = Request::new(GenerallyLockRequest { args });
|
||||
|
||||
let response = client.r_un_lock(request).await?.into_inner();
|
||||
|
||||
if let Some(error_info) = response.error_info {
|
||||
return Err(Error::from_string(error_info));
|
||||
}
|
||||
|
||||
Ok(response.success)
|
||||
}
|
||||
|
||||
async fn refresh(&mut self, args: &LockArgs) -> Result<bool> {
|
||||
info!("remote refresh");
|
||||
let args = serde_json::to_string(args)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
|
||||
let request = Request::new(GenerallyLockRequest { args });
|
||||
|
||||
let response = client.refresh(request).await?.into_inner();
|
||||
|
||||
if let Some(error_info) = response.error_info {
|
||||
return Err(Error::from_string(error_info));
|
||||
}
|
||||
|
||||
Ok(response.success)
|
||||
}
|
||||
|
||||
async fn force_unlock(&mut self, args: &LockArgs) -> Result<bool> {
|
||||
info!("remote force_unlock");
|
||||
let args = serde_json::to_string(args)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
|
||||
let request = Request::new(GenerallyLockRequest { args });
|
||||
|
||||
let response = client.force_un_lock(request).await?.into_inner();
|
||||
|
||||
if let Some(error_info) = response.error_info {
|
||||
return Err(Error::from_string(error_info));
|
||||
}
|
||||
|
||||
Ok(response.success)
|
||||
}
|
||||
|
||||
async fn close(&self) {}
|
||||
|
||||
async fn is_online(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
async fn is_local(&self) -> bool {
|
||||
false
|
||||
}
|
||||
}
|
||||
23
common/protos/Cargo.toml
Normal file
23
common/protos/Cargo.toml
Normal file
@@ -0,0 +1,23 @@
|
||||
[package]
|
||||
name = "protos"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[[bin]]
|
||||
name = "gproto"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
#async-backtrace = { workspace = true, optional = true }
|
||||
common.workspace = true
|
||||
flatbuffers = { workspace = true }
|
||||
prost = { workspace = true }
|
||||
protobuf = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
tonic = { workspace = true, features = ["transport"] }
|
||||
tower = { workspace = true }
|
||||
# NOTE(review): prost-build and tonic-build are code-generation tools; they
# are conventionally declared under [build-dependencies] (with a build.rs),
# not [dependencies].
prost-build = { workspace = true }
tonic-build = { workspace = true }
|
||||
1
common/protos/src/generated/flatbuffers_generated/mod.rs
Normal file
1
common/protos/src/generated/flatbuffers_generated/mod.rs
Normal file
@@ -0,0 +1 @@
|
||||
pub mod models;
|
||||
124
common/protos/src/generated/flatbuffers_generated/models.rs
Normal file
124
common/protos/src/generated/flatbuffers_generated/models.rs
Normal file
@@ -0,0 +1,124 @@
|
||||
// automatically generated by the FlatBuffers compiler, do not modify
|
||||
|
||||
// @generated
|
||||
|
||||
use core::cmp::Ordering;
|
||||
use core::mem;
|
||||
|
||||
extern crate flatbuffers;
|
||||
use self::flatbuffers::{EndianScalar, Follow};
|
||||
|
||||
#[allow(unused_imports, dead_code)]
|
||||
// NOTE(review): generated by flatc from the PingBody schema — do not hand-edit;
// regenerate from the .fbs source instead. Comments added here will be lost on
// the next regeneration.
#[allow(unused_imports, dead_code)]
pub mod models {

    use core::cmp::Ordering;
    use core::mem;

    extern crate flatbuffers;
    use self::flatbuffers::{EndianScalar, Follow};

    pub enum PingBodyOffset {}
    #[derive(Copy, Clone, PartialEq)]

    pub struct PingBody<'a> {
        pub _tab: flatbuffers::Table<'a>,
    }

    impl<'a> flatbuffers::Follow<'a> for PingBody<'a> {
        type Inner = PingBody<'a>;
        #[inline]
        unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
            Self {
                _tab: flatbuffers::Table::new(buf, loc),
            }
        }
    }

    impl<'a> PingBody<'a> {
        pub const VT_PAYLOAD: flatbuffers::VOffsetT = 4;

        pub const fn get_fully_qualified_name() -> &'static str {
            "models.PingBody"
        }

        #[inline]
        pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
            PingBody { _tab: table }
        }
        #[allow(unused_mut)]
        pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: flatbuffers::Allocator + 'bldr>(
            _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>,
            args: &'args PingBodyArgs<'args>,
        ) -> flatbuffers::WIPOffset<PingBody<'bldr>> {
            let mut builder = PingBodyBuilder::new(_fbb);
            if let Some(x) = args.payload {
                builder.add_payload(x);
            }
            builder.finish()
        }

        #[inline]
        pub fn payload(&self) -> Option<flatbuffers::Vector<'a, u8>> {
            // Safety:
            // Created from valid Table for this object
            // which contains a valid value in this slot
            unsafe {
                self._tab
                    .get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u8>>>(PingBody::VT_PAYLOAD, None)
            }
        }
    }

    impl flatbuffers::Verifiable for PingBody<'_> {
        #[inline]
        fn run_verifier(v: &mut flatbuffers::Verifier, pos: usize) -> Result<(), flatbuffers::InvalidFlatbuffer> {
            use self::flatbuffers::Verifiable;
            v.visit_table(pos)?
                .visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, u8>>>("payload", Self::VT_PAYLOAD, false)?
                .finish();
            Ok(())
        }
    }
    pub struct PingBodyArgs<'a> {
        pub payload: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, u8>>>,
    }
    impl<'a> Default for PingBodyArgs<'a> {
        #[inline]
        fn default() -> Self {
            PingBodyArgs { payload: None }
        }
    }

    pub struct PingBodyBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> {
        fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>,
        start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
    }
    impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> PingBodyBuilder<'a, 'b, A> {
        #[inline]
        pub fn add_payload(&mut self, payload: flatbuffers::WIPOffset<flatbuffers::Vector<'b, u8>>) {
            self.fbb_
                .push_slot_always::<flatbuffers::WIPOffset<_>>(PingBody::VT_PAYLOAD, payload);
        }
        #[inline]
        pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> PingBodyBuilder<'a, 'b, A> {
            let start = _fbb.start_table();
            PingBodyBuilder {
                fbb_: _fbb,
                start_: start,
            }
        }
        #[inline]
        pub fn finish(self) -> flatbuffers::WIPOffset<PingBody<'a>> {
            let o = self.fbb_.end_table(self.start_);
            flatbuffers::WIPOffset::new(o.value())
        }
    }

    impl core::fmt::Debug for PingBody<'_> {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
            let mut ds = f.debug_struct("PingBody");
            ds.field("payload", &self.payload());
            ds.finish()
        }
    }
} // pub mod models
|
||||
6
common/protos/src/generated/mod.rs
Normal file
6
common/protos/src/generated/mod.rs
Normal file
@@ -0,0 +1,6 @@
|
||||
#![allow(unused_imports)]
|
||||
#![allow(clippy::all)]
|
||||
pub mod proto_gen;
|
||||
|
||||
mod flatbuffers_generated;
|
||||
pub use flatbuffers_generated::models::*;
|
||||
1
common/protos/src/generated/proto_gen/mod.rs
Normal file
1
common/protos/src/generated/proto_gen/mod.rs
Normal file
@@ -0,0 +1 @@
|
||||
pub mod node_service;
|
||||
4842
common/protos/src/generated/proto_gen/node_service.rs
Normal file
4842
common/protos/src/generated/proto_gen/node_service.rs
Normal file
File diff suppressed because it is too large
Load Diff
45
common/protos/src/lib.rs
Normal file
45
common/protos/src/lib.rs
Normal file
@@ -0,0 +1,45 @@
|
||||
#[allow(unsafe_code)]
|
||||
mod generated;
|
||||
|
||||
use std::{error::Error, time::Duration};
|
||||
|
||||
use common::globals::GLOBAL_Conn_Map;
|
||||
pub use generated::*;
|
||||
use proto_gen::node_service::node_service_client::NodeServiceClient;
|
||||
use tonic::{
|
||||
metadata::MetadataValue,
|
||||
service::interceptor::InterceptedService,
|
||||
transport::{Channel, Endpoint},
|
||||
Request, Status,
|
||||
};
|
||||
|
||||
// Default 100 MB
|
||||
pub const DEFAULT_GRPC_SERVER_MESSAGE_LEN: usize = 100 * 1024 * 1024;
|
||||
|
||||
pub async fn node_service_time_out_client(
|
||||
addr: &String,
|
||||
) -> Result<
|
||||
NodeServiceClient<
|
||||
InterceptedService<Channel, Box<dyn Fn(Request<()>) -> Result<Request<()>, Status> + Send + Sync + 'static>>,
|
||||
>,
|
||||
Box<dyn Error>,
|
||||
> {
|
||||
let token: MetadataValue<_> = "rustfs rpc".parse()?;
|
||||
let channel = match GLOBAL_Conn_Map.read().await.get(addr) {
|
||||
Some(channel) => channel.clone(),
|
||||
None => {
|
||||
let connector = Endpoint::from_shared(addr.to_string())?.connect_timeout(Duration::from_secs(60));
|
||||
connector.connect().await?
|
||||
}
|
||||
};
|
||||
GLOBAL_Conn_Map.write().await.insert(addr.to_string(), channel.clone());
|
||||
|
||||
// let timeout_channel = Timeout::new(channel, Duration::from_secs(60));
|
||||
Ok(NodeServiceClient::with_interceptor(
|
||||
channel,
|
||||
Box::new(move |mut req: Request<()>| {
|
||||
req.metadata_mut().insert("authorization", token.clone());
|
||||
Ok(req)
|
||||
}),
|
||||
))
|
||||
}
|
||||
270
common/protos/src/main.rs
Normal file
270
common/protos/src/main.rs
Normal file
@@ -0,0 +1,270 @@
|
||||
use std::{cmp, env, fs, io::Write, path::Path, process::Command};
|
||||
|
||||
type AnyError = Box<dyn std::error::Error>;
|
||||
|
||||
const VERSION_PROTOBUF: Version = Version(30, 2, 0); // 30.2.0
|
||||
const VERSION_FLATBUFFERS: Version = Version(24, 3, 25); // 24.3.25
|
||||
/// Build protos if the major version of `flatc` or `protoc` is greater
|
||||
/// or lesser than the expected version.
|
||||
const ENV_BUILD_PROTOS: &str = "BUILD_PROTOS";
|
||||
/// Path of `flatc` binary.
|
||||
const ENV_FLATC_PATH: &str = "FLATC_PATH";
|
||||
|
||||
fn main() -> Result<(), AnyError> {
|
||||
let version = protobuf_compiler_version()?;
|
||||
let need_compile = match version.compare_ext(&VERSION_PROTOBUF) {
|
||||
Ok(cmp::Ordering::Equal) => true,
|
||||
Ok(_) => {
|
||||
let version_err = Version::build_error_message(&version, &VERSION_PROTOBUF).unwrap();
|
||||
println!("cargo:warning=Tool `protoc` {version_err}, skip compiling.");
|
||||
false
|
||||
}
|
||||
Err(version_err) => {
|
||||
// return Err(format!("Tool `protoc` {version_err}, please update it.").into());
|
||||
println!("cargo:warning=Tool `protoc` {version_err}, please update it.");
|
||||
false
|
||||
}
|
||||
};
|
||||
|
||||
if !need_compile {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// path of proto file
|
||||
let project_root_dir = env::current_dir()?.join("common/protos/src");
|
||||
let proto_dir = project_root_dir.clone();
|
||||
let proto_files = &["node.proto"];
|
||||
let proto_out_dir = project_root_dir.join("generated").join("proto_gen");
|
||||
let flatbuffer_out_dir = project_root_dir.join("generated").join("flatbuffers_generated");
|
||||
// let descriptor_set_path = PathBuf::from(env::var(ENV_OUT_DIR).unwrap()).join("proto-descriptor.bin");
|
||||
|
||||
tonic_build::configure()
|
||||
.out_dir(proto_out_dir)
|
||||
// .file_descriptor_set_path(descriptor_set_path)
|
||||
.protoc_arg("--experimental_allow_proto3_optional")
|
||||
.compile_well_known_types(true)
|
||||
.emit_rerun_if_changed(false)
|
||||
.compile_protos(proto_files, &[proto_dir.clone()])
|
||||
.map_err(|e| format!("Failed to generate protobuf file: {e}."))?;
|
||||
|
||||
// protos/gen/mod.rs
|
||||
let generated_mod_rs_path = project_root_dir.join("generated").join("proto_gen").join("mod.rs");
|
||||
|
||||
let mut generated_mod_rs = fs::File::create(generated_mod_rs_path)?;
|
||||
writeln!(&mut generated_mod_rs, "pub mod node_service;")?;
|
||||
generated_mod_rs.flush()?;
|
||||
|
||||
let generated_mod_rs_path = project_root_dir.join("generated").join("mod.rs");
|
||||
|
||||
let mut generated_mod_rs = fs::File::create(generated_mod_rs_path)?;
|
||||
writeln!(&mut generated_mod_rs, "#![allow(unused_imports)]")?;
|
||||
writeln!(&mut generated_mod_rs, "#![allow(clippy::all)]")?;
|
||||
writeln!(&mut generated_mod_rs, "pub mod proto_gen;")?;
|
||||
generated_mod_rs.flush()?;
|
||||
|
||||
let flatc_path = match env::var(ENV_FLATC_PATH) {
|
||||
Ok(path) => {
|
||||
println!("cargo:warning=Specified flatc path by environment {ENV_FLATC_PATH}={path}");
|
||||
path
|
||||
}
|
||||
Err(_) => "flatc".to_string(),
|
||||
};
|
||||
|
||||
compile_flatbuffers_models(
|
||||
&mut generated_mod_rs,
|
||||
&flatc_path,
|
||||
proto_dir.clone(),
|
||||
flatbuffer_out_dir.clone(),
|
||||
vec!["models"],
|
||||
)?;
|
||||
|
||||
fmt();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Compile proto/**.fbs files.
|
||||
fn compile_flatbuffers_models<P: AsRef<Path>, S: AsRef<str>>(
|
||||
generated_mod_rs: &mut fs::File,
|
||||
flatc_path: &str,
|
||||
in_fbs_dir: P,
|
||||
out_rust_dir: P,
|
||||
mod_names: Vec<S>,
|
||||
) -> Result<(), AnyError> {
|
||||
let version = flatbuffers_compiler_version(flatc_path)?;
|
||||
let need_compile = match version.compare_ext(&VERSION_FLATBUFFERS) {
|
||||
Ok(cmp::Ordering::Equal) => true,
|
||||
Ok(_) => {
|
||||
let version_err = Version::build_error_message(&version, &VERSION_FLATBUFFERS).unwrap();
|
||||
println!("cargo:warning=Tool `{flatc_path}` {version_err}, skip compiling.");
|
||||
false
|
||||
}
|
||||
Err(version_err) => {
|
||||
return Err(format!("Tool `{flatc_path}` {version_err}, please update it.").into());
|
||||
}
|
||||
};
|
||||
|
||||
let fbs_dir = in_fbs_dir.as_ref();
|
||||
let rust_dir = out_rust_dir.as_ref();
|
||||
fs::create_dir_all(rust_dir)?;
|
||||
|
||||
// $rust_dir/mod.rs
|
||||
let mut sub_mod_rs = fs::File::create(rust_dir.join("mod.rs"))?;
|
||||
writeln!(generated_mod_rs)?;
|
||||
writeln!(generated_mod_rs, "mod flatbuffers_generated;")?;
|
||||
for mod_name in mod_names.iter() {
|
||||
let mod_name = mod_name.as_ref();
|
||||
writeln!(generated_mod_rs, "pub use flatbuffers_generated::{mod_name}::*;")?;
|
||||
writeln!(&mut sub_mod_rs, "pub mod {mod_name};")?;
|
||||
|
||||
if need_compile {
|
||||
let fbs_file_path = fbs_dir.join(format!("{mod_name}.fbs"));
|
||||
let output = Command::new(flatc_path)
|
||||
.arg("-o")
|
||||
.arg(rust_dir)
|
||||
.arg("--rust")
|
||||
.arg("--gen-mutable")
|
||||
.arg("--gen-onefile")
|
||||
.arg("--gen-name-strings")
|
||||
.arg("--filename-suffix")
|
||||
.arg("")
|
||||
.arg(&fbs_file_path)
|
||||
.output()
|
||||
.map_err(|e| format!("Failed to execute process of flatc: {e}"))?;
|
||||
if !output.status.success() {
|
||||
return Err(format!(
|
||||
"Failed to generate file '{}' by flatc(path: '{flatc_path}'): {}.",
|
||||
fbs_file_path.display(),
|
||||
String::from_utf8_lossy(&output.stderr),
|
||||
)
|
||||
.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
generated_mod_rs.flush()?;
|
||||
sub_mod_rs.flush()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Run command `flatc --version` to get the version of flatc.
|
||||
///
|
||||
/// ```ignore
|
||||
/// $ flatc --version
|
||||
/// flatc version 24.3.25
|
||||
/// ```
|
||||
fn flatbuffers_compiler_version(flatc_path: impl AsRef<Path>) -> Result<Version, String> {
|
||||
let flatc_path = flatc_path.as_ref();
|
||||
Version::try_get(format!("{}", flatc_path.display()), |output| {
|
||||
const PREFIX_OF_VERSION: &str = "flatc version ";
|
||||
let output = output.trim();
|
||||
if let Some(version) = output.strip_prefix(PREFIX_OF_VERSION) {
|
||||
Ok(version.to_string())
|
||||
} else {
|
||||
Err(format!("Failed to get flatc version: {output}"))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
|
||||
struct Version(u32, u32, u32);
|
||||
|
||||
impl Version {
|
||||
fn try_get<F: FnOnce(&str) -> Result<String, String>>(exe: String, output_to_version_string: F) -> Result<Self, String> {
|
||||
let cmd = format!("{exe} --version");
|
||||
let output = std::process::Command::new(exe)
|
||||
.arg("--version")
|
||||
.output()
|
||||
.map_err(|e| format!("Failed to execute `{cmd}`: {e}",))?;
|
||||
let output_utf8 = String::from_utf8(output.stdout).map_err(|e| {
|
||||
let output_lossy = String::from_utf8_lossy(e.as_bytes());
|
||||
format!("Command `{cmd}` returned invalid UTF-8('{output_lossy}'): {e}")
|
||||
})?;
|
||||
if output.status.success() {
|
||||
let version_string = output_to_version_string(&output_utf8)?;
|
||||
Ok(version_string.parse::<Self>()?)
|
||||
} else {
|
||||
Err(format!("Failed to get version by command `{cmd}`: {output_utf8}"))
|
||||
}
|
||||
}
|
||||
|
||||
fn build_error_message(version: &Self, expected: &Self) -> Option<String> {
|
||||
match version.compare_major_version(expected) {
|
||||
cmp::Ordering::Equal => None,
|
||||
cmp::Ordering::Greater => Some(format!("version({version}) is greater than version({expected})")),
|
||||
cmp::Ordering::Less => Some(format!("version({version}) is lesser than version({expected})")),
|
||||
}
|
||||
}
|
||||
|
||||
fn compare_ext(&self, expected_version: &Self) -> Result<cmp::Ordering, String> {
|
||||
match env::var(ENV_BUILD_PROTOS) {
|
||||
Ok(build_protos) => {
|
||||
if build_protos.is_empty() || build_protos == "0" {
|
||||
Ok(self.compare_major_version(expected_version))
|
||||
} else {
|
||||
match self.compare_major_version(expected_version) {
|
||||
cmp::Ordering::Equal => Ok(cmp::Ordering::Equal),
|
||||
_ => Err(Self::build_error_message(self, expected_version).unwrap()),
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(_) => Ok(self.compare_major_version(expected_version)),
|
||||
}
|
||||
}
|
||||
|
||||
fn compare_major_version(&self, other: &Self) -> cmp::Ordering {
|
||||
self.0.cmp(&other.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::str::FromStr for Version {
|
||||
type Err = String;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
let mut version = [0_u32; 3];
|
||||
for (i, v) in s.split('.').take(3).enumerate() {
|
||||
version[i] = v.parse().map_err(|e| format!("Failed to parse version string '{s}': {e}"))?;
|
||||
}
|
||||
Ok(Version(version[0], version[1], version[2]))
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Version {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}.{}.{}", self.0, self.1, self.2)
|
||||
}
|
||||
}
|
||||
|
||||
/// Run command `protoc --version` to get the version of flatc.
|
||||
///
|
||||
/// ```ignore
|
||||
/// $ protoc --version
|
||||
/// libprotoc 27.0
|
||||
/// ```
|
||||
fn protobuf_compiler_version() -> Result<Version, String> {
|
||||
Version::try_get("protoc".to_string(), |output| {
|
||||
const PREFIX_OF_VERSION: &str = "libprotoc ";
|
||||
let output = output.trim();
|
||||
if let Some(version) = output.strip_prefix(PREFIX_OF_VERSION) {
|
||||
Ok(version.to_string())
|
||||
} else {
|
||||
Err(format!("Failed to get protoc version: {output}"))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn fmt() {
|
||||
let output = Command::new("cargo").arg("fmt").arg("-p").arg("protos").status();
|
||||
|
||||
match output {
|
||||
Ok(status) => {
|
||||
if status.success() {
|
||||
println!("cargo fmt executed successfully.");
|
||||
} else {
|
||||
eprintln!("cargo fmt failed with status: {:?}", status);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("Failed to execute cargo fmt: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
5
common/protos/src/models.fbs
Normal file
5
common/protos/src/models.fbs
Normal file
@@ -0,0 +1,5 @@
|
||||
namespace models;
|
||||
|
||||
table PingBody {
|
||||
payload: [ubyte];
|
||||
}
|
||||
832
common/protos/src/node.proto
Normal file
832
common/protos/src/node.proto
Normal file
@@ -0,0 +1,832 @@
|
||||
syntax = "proto3";
|
||||
package node_service;
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
message Error {
|
||||
uint32 code = 1;
|
||||
string error_info = 2;
|
||||
}
|
||||
|
||||
message PingRequest {
|
||||
uint64 version = 1;
|
||||
bytes body = 2;
|
||||
}
|
||||
|
||||
message PingResponse {
|
||||
uint64 version = 1;
|
||||
bytes body = 2;
|
||||
}
|
||||
|
||||
message HealBucketRequest {
|
||||
string bucket = 1;
|
||||
string options = 2;
|
||||
}
|
||||
|
||||
message HealBucketResponse {
|
||||
bool success = 1;
|
||||
optional Error error = 2;
|
||||
}
|
||||
|
||||
message ListBucketRequest {
|
||||
string options = 1;
|
||||
}
|
||||
|
||||
message ListBucketResponse {
|
||||
bool success = 1;
|
||||
repeated string bucket_infos = 2;
|
||||
optional Error error = 3;
|
||||
}
|
||||
|
||||
message MakeBucketRequest {
|
||||
string name = 1;
|
||||
string options = 2;
|
||||
}
|
||||
|
||||
message MakeBucketResponse {
|
||||
bool success = 1;
|
||||
optional Error error = 2;
|
||||
}
|
||||
|
||||
message GetBucketInfoRequest {
|
||||
string bucket = 1;
|
||||
string options = 2;
|
||||
}
|
||||
|
||||
message GetBucketInfoResponse {
|
||||
bool success = 1;
|
||||
string bucket_info = 2;
|
||||
optional Error error = 3;
|
||||
}
|
||||
|
||||
message DeleteBucketRequest {
|
||||
string bucket = 1;
|
||||
}
|
||||
|
||||
message DeleteBucketResponse {
|
||||
bool success = 1;
|
||||
optional Error error = 2;
|
||||
}
|
||||
|
||||
message ReadAllRequest {
|
||||
string disk = 1; // indicate which one in the disks
|
||||
string volume = 2;
|
||||
string path = 3;
|
||||
}
|
||||
|
||||
message ReadAllResponse {
|
||||
bool success = 1;
|
||||
bytes data = 2;
|
||||
optional Error error = 3;
|
||||
}
|
||||
|
||||
message WriteAllRequest {
|
||||
string disk = 1; // indicate which one in the disks
|
||||
string volume = 2;
|
||||
string path = 3;
|
||||
bytes data = 4;
|
||||
}
|
||||
|
||||
message WriteAllResponse {
|
||||
bool success = 1;
|
||||
optional Error error = 2;
|
||||
}
|
||||
|
||||
message DeleteRequest {
|
||||
string disk = 1; // indicate which one in the disks
|
||||
string volume = 2;
|
||||
string path = 3;
|
||||
string options = 4;
|
||||
}
|
||||
|
||||
message DeleteResponse {
|
||||
bool success = 1;
|
||||
optional Error error = 2;
|
||||
}
|
||||
|
||||
message VerifyFileRequest {
|
||||
string disk = 1; // indicate which one in the disks
|
||||
string volume = 2;
|
||||
string path = 3;
|
||||
string file_info = 4;
|
||||
}
|
||||
|
||||
message VerifyFileResponse {
|
||||
bool success = 1;
|
||||
string check_parts_resp = 2;
|
||||
optional Error error = 3;
|
||||
}
|
||||
|
||||
message CheckPartsRequest {
|
||||
string disk = 1; // indicate which one in the disks
|
||||
string volume = 2;
|
||||
string path = 3;
|
||||
string file_info = 4;
|
||||
}
|
||||
|
||||
message CheckPartsResponse {
|
||||
bool success = 1;
|
||||
string check_parts_resp = 2;
|
||||
optional Error error = 3;
|
||||
}
|
||||
|
||||
message RenamePartRequst {
|
||||
string disk = 1;
|
||||
string src_volume = 2;
|
||||
string src_path = 3;
|
||||
string dst_volume = 4;
|
||||
string dst_path = 5;
|
||||
bytes meta = 6;
|
||||
}
|
||||
|
||||
message RenamePartResponse {
|
||||
bool success = 1;
|
||||
optional Error error = 2;
|
||||
}
|
||||
|
||||
message RenameFileRequst {
|
||||
string disk = 1;
|
||||
string src_volume = 2;
|
||||
string src_path = 3;
|
||||
string dst_volume = 4;
|
||||
string dst_path = 5;
|
||||
}
|
||||
|
||||
message RenameFileResponse {
|
||||
bool success = 1;
|
||||
optional Error error = 2;
|
||||
}
|
||||
|
||||
message WriteRequest {
|
||||
string disk = 1; // indicate which one in the disks
|
||||
string volume = 2;
|
||||
string path = 3;
|
||||
bool is_append = 4;
|
||||
bytes data = 5;
|
||||
}
|
||||
|
||||
message WriteResponse {
|
||||
bool success = 1;
|
||||
optional Error error = 2;
|
||||
}
|
||||
|
||||
// message AppendRequest {
|
||||
// string disk = 1; // indicate which one in the disks
|
||||
// string volume = 2;
|
||||
// string path = 3;
|
||||
// bytes data = 4;
|
||||
// }
|
||||
//
|
||||
// message AppendResponse {
|
||||
// bool success = 1;
|
||||
// optional Error error = 2;
|
||||
// }
|
||||
|
||||
message ReadAtRequest {
|
||||
string disk = 1; // indicate which one in the disks
|
||||
string volume = 2;
|
||||
string path = 3;
|
||||
int64 offset = 4;
|
||||
int64 length = 5;
|
||||
}
|
||||
|
||||
message ReadAtResponse {
|
||||
bool success = 1;
|
||||
bytes data = 2;
|
||||
int64 read_size = 3;
|
||||
optional Error error = 4;
|
||||
}
|
||||
|
||||
message ListDirRequest {
|
||||
string disk = 1; // indicate which one in the disks
|
||||
string volume = 2;
|
||||
}
|
||||
|
||||
message ListDirResponse {
|
||||
bool success = 1;
|
||||
repeated string volumes = 2;
|
||||
optional Error error = 3;
|
||||
}
|
||||
|
||||
message WalkDirRequest {
|
||||
string disk = 1; // indicate which one in the disks
|
||||
bytes walk_dir_options = 2;
|
||||
}
|
||||
|
||||
message WalkDirResponse {
|
||||
bool success = 1;
|
||||
string meta_cache_entry = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message RenameDataRequest {
|
||||
string disk = 1; // indicate which one in the disks
|
||||
string src_volume = 2;
|
||||
string src_path = 3;
|
||||
string file_info = 4;
|
||||
string dst_volume = 5;
|
||||
string dst_path = 6;
|
||||
}
|
||||
|
||||
message RenameDataResponse {
|
||||
bool success = 1;
|
||||
string rename_data_resp = 2;
|
||||
optional Error error = 3;
|
||||
}
|
||||
|
||||
message MakeVolumesRequest {
|
||||
string disk = 1; // indicate which one in the disks
|
||||
repeated string volumes = 2;
|
||||
}
|
||||
|
||||
message MakeVolumesResponse {
|
||||
bool success = 1;
|
||||
optional Error error = 2;
|
||||
}
|
||||
|
||||
message MakeVolumeRequest {
|
||||
string disk = 1; // indicate which one in the disks
|
||||
string volume = 2;
|
||||
}
|
||||
|
||||
message MakeVolumeResponse {
|
||||
bool success = 1;
|
||||
optional Error error = 2;
|
||||
}
|
||||
|
||||
message ListVolumesRequest {
|
||||
string disk = 1; // indicate which one in the disks
|
||||
}
|
||||
|
||||
message ListVolumesResponse {
|
||||
bool success = 1;
|
||||
repeated string volume_infos = 2;
|
||||
optional Error error = 3;
|
||||
}
|
||||
|
||||
message StatVolumeRequest {
|
||||
string disk = 1; // indicate which one in the disks
|
||||
string volume = 2;
|
||||
}
|
||||
|
||||
message StatVolumeResponse {
|
||||
bool success = 1;
|
||||
string volume_info = 2;
|
||||
optional Error error = 3;
|
||||
}
|
||||
|
||||
message DeletePathsRequest {
|
||||
string disk = 1;
|
||||
string volume = 2;
|
||||
repeated string paths = 3;
|
||||
}
|
||||
|
||||
message DeletePathsResponse {
|
||||
bool success = 1;
|
||||
optional Error error = 2;
|
||||
}
|
||||
|
||||
message UpdateMetadataRequest {
|
||||
string disk = 1;
|
||||
string volume = 2;
|
||||
string path = 3;
|
||||
string file_info = 4;
|
||||
string opts = 5;
|
||||
}
|
||||
|
||||
message UpdateMetadataResponse {
|
||||
bool success = 1;
|
||||
optional Error error = 2;
|
||||
}
|
||||
|
||||
message WriteMetadataRequest {
|
||||
string disk = 1; // indicate which one in the disks
|
||||
string volume = 2;
|
||||
string path = 3;
|
||||
string file_info = 4;
|
||||
}
|
||||
|
||||
message WriteMetadataResponse {
|
||||
bool success = 1;
|
||||
optional Error error = 2;
|
||||
}
|
||||
|
||||
message ReadVersionRequest {
|
||||
string disk = 1;
|
||||
string volume = 2;
|
||||
string path = 3;
|
||||
string version_id = 4;
|
||||
string opts = 5;
|
||||
}
|
||||
|
||||
message ReadVersionResponse {
|
||||
bool success = 1;
|
||||
string file_info = 2;
|
||||
optional Error error = 3;
|
||||
}
|
||||
|
||||
message ReadXLRequest {
|
||||
string disk = 1;
|
||||
string volume = 2;
|
||||
string path = 3;
|
||||
bool read_data = 4;
|
||||
}
|
||||
|
||||
message ReadXLResponse {
|
||||
bool success = 1;
|
||||
string raw_file_info = 2;
|
||||
optional Error error = 3;
|
||||
}
|
||||
|
||||
message DeleteVersionRequest {
|
||||
string disk = 1;
|
||||
string volume = 2;
|
||||
string path = 3;
|
||||
string file_info = 4;
|
||||
bool force_del_marker = 5;
|
||||
string opts = 6;
|
||||
}
|
||||
|
||||
message DeleteVersionResponse {
|
||||
bool success = 1;
|
||||
string raw_file_info = 2;
|
||||
optional Error error = 3;
|
||||
}
|
||||
|
||||
message DeleteVersionsRequest {
|
||||
string disk = 1;
|
||||
string volume = 2;
|
||||
repeated string versions = 3;
|
||||
string opts = 4;
|
||||
}
|
||||
|
||||
message DeleteVersionsResponse {
|
||||
bool success = 1;
|
||||
repeated string errors = 2;
|
||||
optional Error error = 3;
|
||||
}
|
||||
|
||||
message ReadMultipleRequest {
|
||||
string disk = 1;
|
||||
string read_multiple_req = 2;
|
||||
}
|
||||
|
||||
message ReadMultipleResponse {
|
||||
bool success = 1;
|
||||
repeated string read_multiple_resps = 2;
|
||||
optional Error error = 3;
|
||||
}
|
||||
|
||||
message DeleteVolumeRequest {
|
||||
string disk = 1;
|
||||
string volume = 2;
|
||||
}
|
||||
|
||||
message DeleteVolumeResponse {
|
||||
bool success = 1;
|
||||
optional Error error = 2;
|
||||
}
|
||||
|
||||
message DiskInfoRequest {
|
||||
string disk = 1;
|
||||
string opts = 2;
|
||||
}
|
||||
|
||||
message DiskInfoResponse {
|
||||
bool success = 1;
|
||||
string disk_info = 2;
|
||||
optional Error error = 3;
|
||||
}
|
||||
|
||||
message NsScannerRequest {
|
||||
string disk = 1;
|
||||
string cache = 2;
|
||||
uint64 scan_mode = 3;
|
||||
}
|
||||
|
||||
message NsScannerResponse {
|
||||
bool success = 1;
|
||||
string update = 2;
|
||||
string data_usage_cache = 3;
|
||||
optional Error error = 4;
|
||||
}
|
||||
|
||||
// lock api have same argument type
|
||||
message GenerallyLockRequest {
|
||||
string args = 1;
|
||||
}
|
||||
|
||||
message GenerallyLockResponse {
|
||||
bool success = 1;
|
||||
optional string error_info = 2;
|
||||
}
|
||||
|
||||
message Mss {
|
||||
map<string, string> value = 1;
|
||||
}
|
||||
|
||||
message LocalStorageInfoRequest {
|
||||
bool metrics = 1;
|
||||
}
|
||||
|
||||
message LocalStorageInfoResponse {
|
||||
bool success = 1;
|
||||
bytes storage_info = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message ServerInfoRequest {
|
||||
bool metrics = 1;
|
||||
}
|
||||
|
||||
message ServerInfoResponse {
|
||||
bool success = 1;
|
||||
bytes server_properties = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message GetCpusRequest {}
|
||||
|
||||
message GetCpusResponse {
|
||||
bool success = 1;
|
||||
bytes cpus = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message GetNetInfoRequest {}
|
||||
|
||||
message GetNetInfoResponse {
|
||||
bool success = 1;
|
||||
bytes net_info = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message GetPartitionsRequest {}
|
||||
|
||||
message GetPartitionsResponse {
|
||||
bool success = 1;
|
||||
bytes partitions = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message GetOsInfoRequest {}
|
||||
|
||||
message GetOsInfoResponse {
|
||||
bool success = 1;
|
||||
bytes os_info = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message GetSELinuxInfoRequest {}
|
||||
|
||||
message GetSELinuxInfoResponse {
|
||||
bool success = 1;
|
||||
bytes sys_services = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message GetSysConfigRequest {}
|
||||
|
||||
message GetSysConfigResponse {
|
||||
bool success = 1;
|
||||
bytes sys_config = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message GetSysErrorsRequest {}
|
||||
|
||||
message GetSysErrorsResponse {
|
||||
bool success = 1;
|
||||
bytes sys_errors = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message GetMemInfoRequest {}
|
||||
|
||||
message GetMemInfoResponse {
|
||||
bool success = 1;
|
||||
bytes mem_info = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message GetMetricsRequest {
|
||||
bytes metric_type = 1;
|
||||
bytes opts = 2;
|
||||
}
|
||||
|
||||
message GetMetricsResponse {
|
||||
bool success = 1;
|
||||
bytes realtime_metrics = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message GetProcInfoRequest {}
|
||||
|
||||
message GetProcInfoResponse {
|
||||
bool success = 1;
|
||||
bytes proc_info = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message StartProfilingRequest {
|
||||
string profiler = 1;
|
||||
}
|
||||
|
||||
message StartProfilingResponse {
|
||||
bool success = 1;
|
||||
optional string error_info = 2;
|
||||
}
|
||||
|
||||
message DownloadProfileDataRequest {}
|
||||
|
||||
message DownloadProfileDataResponse {
|
||||
bool success = 1;
|
||||
map<string, bytes> data = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message GetBucketStatsDataRequest {
|
||||
string bucket = 1;
|
||||
}
|
||||
|
||||
message GetBucketStatsDataResponse {
|
||||
bool success = 1;
|
||||
bytes bucket_stats = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message GetSRMetricsDataRequest {}
|
||||
|
||||
message GetSRMetricsDataResponse {
|
||||
bool success = 1;
|
||||
bytes sr_metrics_summary = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message GetAllBucketStatsRequest {}
|
||||
|
||||
message GetAllBucketStatsResponse {
|
||||
bool success = 1;
|
||||
bytes bucket_stats_map = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message LoadBucketMetadataRequest {
|
||||
string bucket = 1;
|
||||
}
|
||||
|
||||
message LoadBucketMetadataResponse {
|
||||
bool success = 1;
|
||||
optional string error_info = 2;
|
||||
}
|
||||
|
||||
message DeleteBucketMetadataRequest {
|
||||
string bucket = 1;
|
||||
}
|
||||
|
||||
message DeleteBucketMetadataResponse {
|
||||
bool success = 1;
|
||||
optional string error_info = 2;
|
||||
}
|
||||
|
||||
message DeletePolicyRequest {
|
||||
string policy_name = 1;
|
||||
}
|
||||
|
||||
message DeletePolicyResponse {
|
||||
bool success = 1;
|
||||
optional string error_info = 2;
|
||||
}
|
||||
|
||||
message LoadPolicyRequest {
|
||||
string policy_name = 1;
|
||||
}
|
||||
|
||||
message LoadPolicyResponse {
|
||||
bool success = 1;
|
||||
optional string error_info = 2;
|
||||
}
|
||||
|
||||
message LoadPolicyMappingRequest {
|
||||
string user_or_group = 1;
|
||||
uint64 user_type = 2;
|
||||
bool is_group = 3;
|
||||
}
|
||||
|
||||
message LoadPolicyMappingResponse {
|
||||
bool success = 1;
|
||||
optional string error_info = 2;
|
||||
}
|
||||
|
||||
message DeleteUserRequest {
|
||||
string access_key = 1;
|
||||
}
|
||||
|
||||
message DeleteUserResponse {
|
||||
bool success = 1;
|
||||
optional string error_info = 2;
|
||||
}
|
||||
|
||||
message DeleteServiceAccountRequest {
|
||||
string access_key = 1;
|
||||
}
|
||||
|
||||
message DeleteServiceAccountResponse {
|
||||
bool success = 1;
|
||||
optional string error_info = 2;
|
||||
}
|
||||
|
||||
message LoadUserRequest {
|
||||
string access_key = 1;
|
||||
bool temp = 2;
|
||||
}
|
||||
|
||||
message LoadUserResponse {
|
||||
bool success = 1;
|
||||
optional string error_info = 2;
|
||||
}
|
||||
|
||||
message LoadServiceAccountRequest {
|
||||
string access_key = 1;
|
||||
}
|
||||
|
||||
message LoadServiceAccountResponse {
|
||||
bool success = 1;
|
||||
optional string error_info = 2;
|
||||
}
|
||||
|
||||
message LoadGroupRequest {
|
||||
string group = 1;
|
||||
}
|
||||
|
||||
message LoadGroupResponse {
|
||||
bool success = 1;
|
||||
optional string error_info = 2;
|
||||
}
|
||||
|
||||
message ReloadSiteReplicationConfigRequest {}
|
||||
|
||||
message ReloadSiteReplicationConfigResponse {
|
||||
bool success = 1;
|
||||
optional string error_info = 2;
|
||||
}
|
||||
|
||||
message SignalServiceRequest {
|
||||
Mss vars = 1;
|
||||
}
|
||||
|
||||
message SignalServiceResponse {
|
||||
bool success = 1;
|
||||
optional string error_info = 2;
|
||||
}
|
||||
|
||||
message BackgroundHealStatusRequest {}
|
||||
|
||||
message BackgroundHealStatusResponse {
|
||||
bool success = 1;
|
||||
bytes bg_heal_state = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message GetMetacacheListingRequest {
|
||||
bytes opts = 1;
|
||||
}
|
||||
|
||||
message GetMetacacheListingResponse {
|
||||
bool success = 1;
|
||||
bytes metacache = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message UpdateMetacacheListingRequest {
|
||||
bytes metacache = 1;
|
||||
}
|
||||
|
||||
message UpdateMetacacheListingResponse {
|
||||
bool success = 1;
|
||||
bytes metacache = 2;
|
||||
optional string error_info = 3;
|
||||
}
|
||||
|
||||
message ReloadPoolMetaRequest {}
|
||||
|
||||
message ReloadPoolMetaResponse {
|
||||
bool success = 1;
|
||||
optional string error_info = 2;
|
||||
}
|
||||
|
||||
message StopRebalanceRequest {}
|
||||
|
||||
message StopRebalanceResponse {
|
||||
bool success = 1;
|
||||
optional string error_info = 2;
|
||||
}
|
||||
|
||||
message LoadRebalanceMetaRequest {
|
||||
bool start_rebalance = 1;
|
||||
}
|
||||
|
||||
message LoadRebalanceMetaResponse {
|
||||
bool success = 1;
|
||||
optional string error_info = 2;
|
||||
}
|
||||
|
||||
message LoadTransitionTierConfigRequest {}
|
||||
|
||||
message LoadTransitionTierConfigResponse {
|
||||
bool success = 1;
|
||||
optional string error_info = 2;
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
|
||||
// NodeService is the single inter-node RPC surface. It aggregates four
// logical groups of operations (bucket metadata, raw disk access, distributed
// locking, and peer administration) so that each node exposes one service.
// The trailing `;` after each `{}` is a legal empty statement in proto grammar.
service NodeService {
  /* -------------------------------meta service-------------------------- */
  // Liveness probe between nodes.
  rpc Ping(PingRequest) returns (PingResponse) {};
  rpc HealBucket(HealBucketRequest) returns (HealBucketResponse) {};
  rpc ListBucket(ListBucketRequest) returns (ListBucketResponse) {};
  rpc MakeBucket(MakeBucketRequest) returns (MakeBucketResponse) {};
  rpc GetBucketInfo(GetBucketInfoRequest) returns (GetBucketInfoResponse) {};
  rpc DeleteBucket(DeleteBucketRequest) returns (DeleteBucketResponse) {};

  /* -------------------------------disk service-------------------------- */
  // Whole-object read/write primitives.
  rpc ReadAll(ReadAllRequest) returns (ReadAllResponse) {};
  rpc WriteAll(WriteAllRequest) returns (WriteAllResponse) {};
  rpc Delete(DeleteRequest) returns (DeleteResponse) {};
  rpc VerifyFile(VerifyFileRequest) returns (VerifyFileResponse) {};
  rpc CheckParts(CheckPartsRequest) returns (CheckPartsResponse) {};
  // NOTE(review): "Requst" is misspelled in the message-type names below; the
  // types are declared with that spelling elsewhere in this file, so renaming
  // them must be done in both places at once — left as-is here.
  rpc RenamePart(RenamePartRequst) returns (RenamePartResponse) {};
  rpc RenameFile(RenameFileRequst) returns (RenameFileResponse) {};
  rpc Write(WriteRequest) returns (WriteResponse) {};
  // Bidirectional streaming variant of Write for large payloads.
  rpc WriteStream(stream WriteRequest) returns (stream WriteResponse) {};
  // rpc Append(AppendRequest) returns (AppendResponse) {};
  rpc ReadAt(stream ReadAtRequest) returns (stream ReadAtResponse) {};
  rpc ListDir(ListDirRequest) returns (ListDirResponse) {};
  // Server-streamed directory walk: one response message per entry batch.
  rpc WalkDir(WalkDirRequest) returns (stream WalkDirResponse) {};
  rpc RenameData(RenameDataRequest) returns (RenameDataResponse) {};
  // Volume (disk-level bucket directory) management.
  rpc MakeVolumes(MakeVolumesRequest) returns (MakeVolumesResponse) {};
  rpc MakeVolume(MakeVolumeRequest) returns (MakeVolumeResponse) {};
  rpc ListVolumes(ListVolumesRequest) returns (ListVolumesResponse) {};
  rpc StatVolume(StatVolumeRequest) returns (StatVolumeResponse) {};
  rpc DeletePaths(DeletePathsRequest) returns (DeletePathsResponse) {};
  // Object-version metadata operations (xl.meta level).
  rpc UpdateMetadata(UpdateMetadataRequest) returns (UpdateMetadataResponse) {};
  rpc WriteMetadata(WriteMetadataRequest) returns (WriteMetadataResponse) {};
  rpc ReadVersion(ReadVersionRequest) returns (ReadVersionResponse) {};
  rpc ReadXL(ReadXLRequest) returns (ReadXLResponse) {};
  rpc DeleteVersion(DeleteVersionRequest) returns (DeleteVersionResponse) {};
  rpc DeleteVersions(DeleteVersionsRequest) returns (DeleteVersionsResponse) {};
  rpc ReadMultiple(ReadMultipleRequest) returns (ReadMultipleResponse) {};
  rpc DeleteVolume(DeleteVolumeRequest) returns (DeleteVolumeResponse) {};
  rpc DiskInfo(DiskInfoRequest) returns (DiskInfoResponse) {};
  // Bidirectional streaming namespace scanner.
  rpc NsScanner(stream NsScannerRequest) returns (stream NsScannerResponse) {};

  /* -------------------------------lock service-------------------------- */
  // All lock operations share one request/response shape; the rpc name
  // selects the operation (write lock, read lock, unlock, force, refresh).
  rpc Lock(GenerallyLockRequest) returns (GenerallyLockResponse) {};
  rpc UnLock(GenerallyLockRequest) returns (GenerallyLockResponse) {};
  rpc RLock(GenerallyLockRequest) returns (GenerallyLockResponse) {};
  rpc RUnLock(GenerallyLockRequest) returns (GenerallyLockResponse) {};
  rpc ForceUnLock(GenerallyLockRequest) returns (GenerallyLockResponse) {};
  rpc Refresh(GenerallyLockRequest) returns (GenerallyLockResponse) {};

  /* -------------------------------peer rest service-------------------------- */
  // Host introspection and metrics.
  rpc LocalStorageInfo(LocalStorageInfoRequest) returns (LocalStorageInfoResponse) {};
  rpc ServerInfo(ServerInfoRequest) returns (ServerInfoResponse) {};
  rpc GetCpus(GetCpusRequest) returns (GetCpusResponse) {};
  rpc GetNetInfo(GetNetInfoRequest) returns (GetNetInfoResponse) {};
  rpc GetPartitions(GetPartitionsRequest) returns (GetPartitionsResponse) {};
  rpc GetOsInfo(GetOsInfoRequest) returns (GetOsInfoResponse) {};
  rpc GetSELinuxInfo(GetSELinuxInfoRequest) returns (GetSELinuxInfoResponse) {};
  rpc GetSysConfig(GetSysConfigRequest) returns (GetSysConfigResponse) {};
  rpc GetSysErrors(GetSysErrorsRequest) returns (GetSysErrorsResponse) {};
  rpc GetMemInfo(GetMemInfoRequest) returns (GetMemInfoResponse) {};
  rpc GetMetrics(GetMetricsRequest) returns (GetMetricsResponse) {};
  rpc GetProcInfo(GetProcInfoRequest) returns (GetProcInfoResponse) {};
  rpc StartProfiling(StartProfilingRequest) returns (StartProfilingResponse) {};
  rpc DownloadProfileData(DownloadProfileDataRequest) returns (DownloadProfileDataResponse) {};
  rpc GetBucketStats(GetBucketStatsDataRequest) returns (GetBucketStatsDataResponse) {};
  rpc GetSRMetrics(GetSRMetricsDataRequest) returns (GetSRMetricsDataResponse) {};
  rpc GetAllBucketStats(GetAllBucketStatsRequest) returns (GetAllBucketStatsResponse) {};
  // IAM / bucket-metadata cache invalidation between peers.
  rpc LoadBucketMetadata(LoadBucketMetadataRequest) returns (LoadBucketMetadataResponse) {};
  rpc DeleteBucketMetadata(DeleteBucketMetadataRequest) returns (DeleteBucketMetadataResponse) {};
  rpc DeletePolicy(DeletePolicyRequest) returns (DeletePolicyResponse) {};
  rpc LoadPolicy(LoadPolicyRequest) returns (LoadPolicyResponse) {};
  rpc LoadPolicyMapping(LoadPolicyMappingRequest) returns (LoadPolicyMappingResponse) {};
  rpc DeleteUser(DeleteUserRequest) returns (DeleteUserResponse) {};
  rpc DeleteServiceAccount(DeleteServiceAccountRequest) returns (DeleteServiceAccountResponse) {};
  rpc LoadUser(LoadUserRequest) returns (LoadUserResponse) {};
  rpc LoadServiceAccount(LoadServiceAccountRequest) returns (LoadServiceAccountResponse) {};
  rpc LoadGroup(LoadGroupRequest) returns (LoadGroupResponse) {};
  rpc ReloadSiteReplicationConfig(ReloadSiteReplicationConfigRequest) returns (ReloadSiteReplicationConfigResponse) {};
  // rpc VerifyBinary() returns () {};
  // rpc CommitBinary() returns () {};
  rpc SignalService(SignalServiceRequest) returns (SignalServiceResponse) {};
  rpc BackgroundHealStatus(BackgroundHealStatusRequest) returns (BackgroundHealStatusResponse) {};
  rpc GetMetacacheListing(GetMetacacheListingRequest) returns (GetMetacacheListingResponse) {};
  rpc UpdateMetacacheListing(UpdateMetacacheListingRequest) returns (UpdateMetacacheListingResponse) {};
  // Pool rebalance and tiering control (messages defined above).
  rpc ReloadPoolMeta(ReloadPoolMetaRequest) returns (ReloadPoolMetaResponse) {};
  rpc StopRebalance(StopRebalanceRequest) returns (StopRebalanceResponse) {};
  rpc LoadRebalanceMeta(LoadRebalanceMetaRequest) returns (LoadRebalanceMetaResponse) {};
  rpc LoadTransitionTierConfig(LoadTransitionTierConfigRequest) returns (LoadTransitionTierConfigResponse) {};
}
|
||||
Some files were not shown because too many files have changed in this diff. Show More
Reference in New Issue
Block a user