Compare commits
427 Commits
.gitea/workflows/build.yaml (new file, 26 lines)

@@ -0,0 +1,26 @@
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: true
+      - uses: actions/cache@v4
+        with:
+          path: resources
+          key: ${{ runner.os }}-resources
+      - uses: peaceiris/actions-hugo@v3
+        with:
+          extended: true
+      - name: Build
+        run: hugo --minify
+      - uses: https://gitea.okami101.io/okami101/actions/docker@main
+        with:
+          password: ${{ secrets.CONTAINER_REGISTRY_PASSWORD }}
+          gitea-token: ${{ secrets.RELEASE_TOKEN }}
+          release: true
.gitignore (vendored)

@@ -1,2 +1,3 @@
 node_modules
 resources
+public
Dockerfile

@@ -1,3 +1,7 @@
 FROM nginx:alpine
 
+RUN sed -i 's/^\(.*\)http {/\1http {\n map_hash_bucket_size 128;\n/' /etc/nginx/nginx.conf
+
+COPY nginx/ /etc/nginx/conf.d/
+
 COPY public /usr/share/nginx/html
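The workflow and Dockerfile above can be smoke-tested locally before pushing; a minimal sketch, assuming Docker and the Hugo extended binary are installed (the `blog` image tag is arbitrary):

```sh
# build the site, then bake it into the nginx image defined by the Dockerfile
hugo --minify
docker build -t blog .
docker run --rm -p 8080:80 blog
# the site should now answer on http://localhost:8080
```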
(File diff suppressed because it is too large.)
@@ -6,10 +6,11 @@ baseURL = "https://blog.okami101.io"
 languageCode = "en"
 defaultContentLanguage = "en"
 theme = "congo"
 
 title = "Okami101 Blog"
 # copyright = "Copy, _right?_ :thinking_face:"
 
+timeout = "120s"
 
 enableEmoji = true
 enableRobotsTXT = true

@@ -20,16 +21,5 @@ ignoreFiles = ['_data/*']
 [outputs]
 home = ["HTML", "RSS", "JSON"]
 
-[author]
-name = "Adrien Beaudouin"
-image = "author.jpg"
-bio = "A senior web developer @janze"
-links = [
-  { email = "mailto:adrien@okami101.io" },
-  { github = "https://github.com/adr1enbe4udou1n" },
-  { linkedin = "https://linkedin.com/in/adr1enbe4udou1n" },
-  { twitter = "https://twitter.com/adr1enbe4udou1n" },
-]
-
 [permalinks]
 posts = "/:year/:month/:title/"

@@ -61,3 +61,14 @@ excludedKinds = ["taxonomy", "term"]
 # bing = ""
 # pinterest = ""
 # yandex = ""
+
+[author]
+name = "Adrien Beaudouin"
+image = "author.jpg"
+bio = "A senior web developer @janze"
+links = [
+  { email = "mailto:adrien@okami101.io" },
+  { github = "https://github.com/adr1enbe4udou1n" },
+  { linkedin = "https://linkedin.com/in/adr1enbe4udou1n" },
+  { bluesky = "https://bsky.app/profile/adr1enbe4udou1n.bsky.social" },
+]
@@ -4,5 +4,5 @@ description: "This is adr1enbe4udou1n blog."
 ---
 
 {{< lead >}}
-A 🧔🌍💻 aka senior web developer @Bretagne 🇫🇷
+A 🧔🌍💻 aka senior test web developer @Bretagne 🇫🇷
 {{< /lead >}}
@@ -18,7 +18,7 @@ I can develop proper API design following [**DDD / Hexa**](https://en.wikipedia.
 I encourage `TDD` or at least proper **integration tests** on any backend frameworks, following the **AAA** aka *Arrange Act Assert* principle:
 
 * `PHPUnit` or [`Pest`](https://pestphp.com/) for *PHP*
-* [`NUnit.net`](https://nunit.org/) or [`xUnit.net`](https://xunit.net/) with [`Fluent Assertions`](https://github.com/fluentassertions/fluentassertions) for *.NET Core*
+* [`NUnit.net`](https://nunit.org/) or [`xUnit.net`](https://xunit.net/) for *.NET Core*
 * `JUnit` with [`REST Assured`](https://rest-assured.io/) for *Spring Boot*
 * `Jest` and `pytest` on respective *NodeJS* and *Python* stacks
@@ -52,9 +52,9 @@ Some notes of this blog :
 * Kubernetes infrastructure completely managed with [`Terraform`](https://github.com/adr1enbe4udou1n/terraform-kube-okami) 🌴
 * **HA** setup using **Hetzner LB**, targeting 2 worker nodes, with **Postgres cluster** (managed on same Kubernetes cluster)
 * `Traefik` as reverse proxy, configured for HA 🛣️
-* Source code on my own [`Gitea`](https://gitea.okami101.io/adr1enbe4udou1n/blog)
-* Compiled by my own [`Concourse`](https://concourse.okami101.io) instance as a final docker container image into self-hosted private registry (**CI** 🏗️)
-* Automatically deployed by `Flux CD v2` to the Kubernetes cluster from [central Git source](https://gitea.okami101.io/okami101/flux-source/src/branch/main/okami/deploy-blog.yaml) (**CD** 🚀)
+* Source code on my own [`Gitea`](https://about.gitea.com/)
+* Compiled by my own [`Concourse`](https://concourse-ci.org/) instance as a final docker container image into self-hosted private registry (**CI** 🏗️)
+* Automatically deployed by `Flux CD v2` to the Kubernetes cluster (**CD** 🚀)
 * Tracked with [`Umami`](https://umami.is/) 📈
 
 All above tools are 💯% self-hosted! Just sadly missing my own Homelab with Proxmox because no fiber 😿
@@ -1,13 +1,12 @@
-[`ASP.NET Core 7`](https://docs.microsoft.com/aspnet/core/) implementation, following `DDD` principle, implemented with `Hexa architecture` and `CQRS` pattern. [Swashbuckle](https://github.com/domaindrivendev/Swashbuckle.AspNetCore) is used as default main OpenAPI generator that perfectly integrates into the code.
+[`ASP.NET Core 8`](https://docs.microsoft.com/aspnet/core/) implementation, using minimal APIs, mature since 8.0, following `DDD` principle, implemented with `Hexa architecture` and `CQRS` pattern. [Swashbuckle](https://github.com/domaindrivendev/Swashbuckle.AspNetCore) is used as default main OpenAPI generator.
 
 Main packages involved :
 
+* [Carter](https://github.com/CarterCommunity/Carter/) for seamless endpoints grouping
 * [EF Core](https://docs.microsoft.com/ef/) as strongly typed ORM
 * [MediatR](https://github.com/jbogard/MediatR) for easy mediator implementation. It allows strong decoupling between all ASP.NET controllers and the final application, which is cut into small queries and commands
 * [Fluent Validation](https://fluentvalidation.net/) for strongly typed validation
 * [dotnet-format](https://github.com/dotnet/format) as official formatter
 * [xUnit.net](https://xunit.net/) as test framework
 * [Fluent Assertions](https://fluentassertions.com/) for strongly typed assertions within the API
 * [Respawn](https://github.com/jbogard/Respawn) for optimal integration tests isolation
 * [Bogus](https://github.com/bchavez/Bogus) for strongly typed fake data generator
 * [Bullseye](https://github.com/adamralph/bullseye) as a nice CLI publisher tool with dependency graph
@@ -1,4 +1,4 @@
-[`FastAPI`](https://fastapi.tiangolo.com/) implementation under last `Python 3.11` with [Pipenv](https://pypi.org/project/pipenv/) as package manager.
+[`FastAPI`](https://fastapi.tiangolo.com/) implementation under last `Python 3.12` with [Poetry](https://python-poetry.org/) as package manager.
 
 It's based on [pydantic](https://pydantic-docs.helpmanual.io/), an essential component that allows proper OpenAPI generation and data validation while bringing advanced type hints.
 

@@ -8,7 +8,6 @@ Main packages involved :
 * [SQLAlchemy 2](https://www.sqlalchemy.org/) with [Alembic](https://alembic.sqlalchemy.org/en/latest/) for schema migration
 * [python-jose](https://github.com/mpdavis/python-jose) as JWT implementation
 * [Faker](https://faker.readthedocs.io/en/master/) as dummy data generator
-* [autoflake](https://pypi.org/project/autoflake/) and [isort](https://pycqa.github.io/isort/) for clean imports
-* [Flake8](https://flake8.pycqa.org/en/latest/) and [Black](https://black.readthedocs.io/en/stable/) as respective code linter and powerful code formatter
+* [Ruff](https://docs.astral.sh/ruff/) as extremely fast linter and code formatter written in Rust, a perfect drop-in replacement for flake8, isort and black
 * [mypy](http://mypy-lang.org/) as advanced static analyzer
 * [pytest](https://docs.pytest.org) as main test framework
@@ -1,4 +1,4 @@
-[`Laravel 10`](https://laravel.com/) implementation on `PHP 8.2` with extensive usage of last attributes support. The particularity of this framework is to give you almost all you need to quickly develop any complex application, so minimal external packages are needed.
+[`Laravel 11`](https://laravel.com/) implementation on `PHP 8.3` with extensive usage of last attributes support. The particularity of this framework is to give you almost all you need to quickly develop any complex application, so minimal external packages are needed.
 
 I obviously made usage of **Eloquent** as a very expressive **Active Record** ORM, and the Laravel factories system based on [PHP Faker](https://fakerphp.github.io/) is already perfect as a dummy data generator.
 

@@ -8,7 +8,7 @@ Main packages involved :
 
 * [PHP JWT](https://github.com/lcobucci/jwt) as JWT implementation, with proper integration to Laravel using a custom guard
 * [Laravel Routes Attribute](https://github.com/spatie/laravel-route-attributes) for Laravel routing that leverages the last PHP 8 attributes feature
-* [Laravel OpenAPI](https://github.com/vyuldashev/laravel-openapi) that also uses PHP 8 attributes for API documentation
+* [Laravel OpenAPI](https://github.com/DarkaOnLine/L5-Swagger) that also uses PHP 8 attributes for API documentation
 * [Laravel IDE Helper](https://github.com/barryvdh/laravel-ide-helper) for proper IDE integration, perfectly suited for **VS Code** with the [Intelephense](https://marketplace.visualstudio.com/items?itemName=bmewburn.vscode-intelephense-client) extension
 * [PHP CS Fixer](https://github.com/FriendsOfPHP/PHP-CS-Fixer) as formatter with Laravel style guide
 * [Larastan](https://github.com/nunomaduro/larastan), a Laravel wrapper of [PHPStan](https://phpstan.org/), as advanced code static analyzer
@@ -1,4 +1,4 @@
-[`NestJS 9`](https://nestjs.com/) implementation under `NodeJS` using [`Typescript`](https://www.typescriptlang.org/) and [`pnpm`](https://pnpm.io/) as fast package manager. It relies by default on [`express`](https://github.com/expressjs/express) as NodeJS HTTP server implementation. NestJS offers a nice OpenAPI documentation generator thanks to Typescript which provides strong typing.
+[`NestJS 10`](https://nestjs.com/) implementation under `Node.js 20` using [`Typescript 5`](https://www.typescriptlang.org/) and [`pnpm`](https://pnpm.io/) as fast package manager. It relies by default on [`express`](https://github.com/expressjs/express) as NodeJS HTTP server implementation. NestJS offers a nice OpenAPI documentation generator thanks to Typescript which provides strong typing.
 
 Main packages involved :
 
@@ -12,7 +12,5 @@ Main purpose of this projects is to have personal extensive API training on mult
 * Proper seeder / faker for quick starting with filled DB
 * Separated RW / RO database connections for maximizing performance between these 2 contexts
 * Proper suited QA + production Dockerfile
-* Complete CI on Kubernetes with [Concourse](https://concourse.okami101.io/)
+* Complete CI on Kubernetes with [Concourse CI](https://concourse-ci.org/)
 * Automatic CD on Kubernetes using [Flux](https://fluxcd.io/)
-
-See complete production deployment manifests [here](https://gitea.okami101.io/okami101/flux-source/src/branch/main/conduit), allowing **GitOps** management.
@@ -1,4 +1,4 @@
-[`Spring Boot 3`](https://spring.io/projects/spring-boot) implementation using `Gradle 8` & `Java 17+`. Similar to the [official Spring Boot implementation](https://github.com/gothinkster/spring-boot-realworld-example-app) but with usage of `Spring Data JPA` instead of `MyBatis`. [Here is another nice one](https://github.com/raeperd/realworld-springboot-java) that explicitly follows `DDD`.
+[`Spring Boot 3.2`](https://spring.io/projects/spring-boot) implementation using `Gradle 8` & `Java 21`. Similar to the [official Spring Boot implementation](https://github.com/gothinkster/spring-boot-realworld-example-app) but with usage of `Spring Data JPA` instead of `MyBatis`. [Here is another nice one](https://github.com/raeperd/realworld-springboot-java) that explicitly follows `DDD`.
 
 Main packages involved :
 
@@ -1,9 +1,10 @@
-[`Symfony 6.3`](https://symfony.com/) implementation on `PHP 8.2` that supports PHP 8 attributes. I excluded the usage of [API Platform](https://api-platform.com/) here, which is a very powerful API crud generator but really not well suited for real customized API in my taste.
+[`Symfony 7`](https://symfony.com/) implementation on `PHP 8.3` that supports PHP 8 attributes, using [API Platform](https://api-platform.com/).
 
 Contrary to Laravel, the usage of **DataMapper** pattern ORM involves classic POPO models. The additional usage of plain PHP DTO classes facilitates the OpenAPI spec models generation without writing all schemas by hand. On the downside the Nelmio package is far more verbose than the Laravel OpenAPI version.
 
 Main packages involved :
 
+* [API Platform](https://api-platform.com/) as API framework
 * [Doctrine](https://www.doctrine-project.org/) as **DataMapper** ORM
-* [SensioFrameworkExtraBundle](https://github.com/sensiolabs/SensioFrameworkExtraBundle) for ParamConverter helper with Doctrine
-* [FOSRestBundle](https://github.com/FriendsOfSymfony/FOSRestBundle) only for some helpers as DTO automatic converters and validation
@@ -345,7 +345,7 @@ Set proper `GF_DATABASE_PASSWORD` and deploy. Database migration should be autom
 
 ### Docker Swarm dashboard
 
-For best show-case scenario of Grafana, let's import an [existing dashboard](https://grafana.com/grafana/dashboards/11939) suited for complete Swarm monitor overview.
+For best show-case scenario of Grafana, let's import an [existing dashboard](https://grafana.com/dashboards/11939) suited for complete Swarm monitor overview.
 
 First we need to add Prometheus as main metrics data source. Go to *Configuration > Data source* menu and click on *Add data source*. Select Prometheus and set the internal docker prometheus URL, which should be `http://prometheus:9090`. A successful message should appear when saving.
 
@@ -39,7 +39,7 @@ For better fluidity, here is the expected list of variables you'll need to prepa
 | `s3_bucket`     | kuberocks            | |
 | `s3_access_key` | xxx                  | |
 | `s3_secret_key` | xxx                  | |
-| `smtp_host`     | smtp-relay.brevo.com | |
+| `smtp_host`     | smtp.tem.scw.cloud   | |
 | `smtp_port`     | 587                  | |
 | `smtp_user`     | <me@kube.rocks>      | |
 | `smtp_password` | xxx                  | |
@@ -77,6 +77,7 @@ Here are the pros and cons of each module:
 |                          | [Kube Hetzner](https://registry.terraform.io/modules/kube-hetzner/kube-hetzner/hcloud/latest) | [Okami101 K3s](https://registry.terraform.io/modules/okami101/k3s) |
 | ------------------------ | --- | --- |
 | **Support**              | Strong community | Just intended as a reusable starter-kit |
+| **CNI support**          | Choice between Flannel, Cilium, Calico | Flannel only, while supporting network encryption with `enable_wireguard` variable, set `flannel-backend` to `none` if installing other CNI |
 | **Included helms**       | Traefik, Longhorn, Cert Manager, Kured | None, just the K3s initial setup, as it's generally preferable to manage these helm dependencies on a separated terraform project, allowing easier upgrading |
 | **Hetzner integration**  | Complete, use [Hcloud Controller](https://github.com/hetznercloud/hcloud-cloud-controller-manager) internally, allowing dynamic Load Balancing, autoscaling, cleaner node deletion | Basic, public Load Balancer is statically managed by the nodepool configuration, no autoscaling support |
 | **OS**                   | openSUSE MicroOS, optimized for container workloads | Debian 11 or Ubuntu 22.04 |

@@ -86,6 +87,7 @@ Here are the pros and cons of each module:
 | **Upgrade**     | You may need to follow new versions regularly | As a simple starter-kit, no need to support all community problems, so very few updates |
 | **Quality**     | Use many hacks to satisfy all community needs, plenty of remote-exec and file provisioners, which is not recommended by HashiCorp themselves | Use standard **cloud-config** for initial provisioning, then **Salt** for cluster OS management |
 | **Security**    | Needs an SSH private key because of local provisioners, and SSH port opened to every node | Require only public SSH key, minimized opened SSH ports to only controllers, use SSH jump from a controller to access any internal worker node |
+| **Bastion**     | No real bastion support | Dedicated bastion host support with preinstalled WireGuard VPN, ideal for internal access to critical services like Kube API, longhorn, etc. |
 | **Reusability** | Vendor locked to Hetzner Cloud | Easy to adapt for a different cloud provider as long as it supports **cloud-config** (as 99% of them) |
 
 So to summarize, choose the Kube Hetzner module if:
@@ -171,14 +173,14 @@ module "hcloud_kube" {
 
   k3s_channel = "stable"
 
-  tls_sans = ["cp.kube.rocks"]
-
-  disabled_components = ["traefik"]
   kubelet_args = [
     "eviction-hard=memory.available<250Mi"
   ]
 
-  etcd_s3_backup = {
+  control_planes_custom_config = {
+    tls-san = ["cp.kube.rocks"]
+    disable = ["traefik"]
     etcd-s3            = true
     etcd-s3-endpoint   = "s3.fr-par.scw.cloud"
     etcd-s3-access-key = var.s3_access_key
     etcd-s3-secret-key = var.s3_secret_key

@@ -216,7 +218,7 @@ output "ssh_config" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 #### Explanation
 
@@ -240,7 +242,7 @@ At any case, consider any leak of writeable Hetzner Cloud token as a **Game Over
 3. Sniff any data from the cluster that comes to the compromised server, including secrets, thanks to the new agent.
 4. Get access to remote S3 backups.
 
-In order to mitigate any risk of critical data leak, you may use data encryption whenever possible. K3s offers it [natively for etcd](https://docs.k3s.io/security/secrets-encryption). Longhorn also offers it [natively for volumes](https://longhorn.io/docs/latest/advanced-resources/security/volume-encryption/) (including backups).
+In order to mitigate any risk of critical data leak, you may use data encryption whenever possible. K3s offers it natively [for etcd](https://docs.k3s.io/security/secrets-encryption) and [for networking using WireGuard flannel option](https://docs.k3s.io/installation/network-options). Longhorn also offers it [natively for volumes](https://longhorn.io/docs/latest/advanced-resources/security/volume-encryption/) (including backups).
 
 {{</ tab >}}
 {{< tab tabName="Global" >}}
@@ -266,7 +268,7 @@ Why not `debian-12` ? Because it's sadly not yet supported by [Salt project](htt
 
 {{< alert >}}
 `nfs-common` package is required for Longhorn in order to support RWX volumes.
-{{</ alert >}}
+{{< /alert >}}
 
 `cluster_name` is the node's name prefix and will have the format `{cluster_name}-{pool_name}-{index}`, for example `kube-storage-01`. `cluster_user` is the username UID 1000 for SSH access with sudo rights. `root` user is disabled for remote access security reasons.
 
@@ -276,17 +278,12 @@ Why not `debian-12` ? Because it's sadly not yet supported by [Salt project](htt
 ```tf
 k3s_channel = "stable"
 
-tls_sans = ["cp.kube.rocks"]
-
-disabled_components = ["traefik"]
 kubelet_args = [
   "eviction-hard=memory.available<250Mi"
 ]
 ```
 
-This is the K3s specific configuration, where you can choose the channel (stable or latest), the TLS SANs, and the kubelet arguments.
-
-I'm disabling included Traefik because we'll use a more flexible official Helm later.
+This is the K3s specific configuration, where you can choose the channel (stable or latest) and the kubelet arguments.
 
 I also prefer to increase the eviction threshold to 250Mi, in order to avoid the OS OOM killer.
 
@@ -294,7 +291,10 @@ I also prefer increase the eviction threshold to 250Mi, in order to avoid OS OOM
 {{< tab tabName="Backup" >}}
 
 ```tf
-etcd_s3_backup = {
+control_planes_custom_config = {
+  tls-san = ["cp.kube.rocks"]
+  disable = ["traefik"]
   etcd-s3            = true
   etcd-s3-endpoint   = "s3.fr-par.scw.cloud"
   etcd-s3-access-key = var.s3_access_key
   etcd-s3-secret-key = var.s3_secret_key

@@ -304,7 +304,11 @@ etcd_s3_backup = {
 }
 ```
 
-This will enable automatic daily backup of etcd database on S3 bucket, which is useful for faster disaster recovery. See the official guide [here](https://docs.k3s.io/datastore/backup-restore).
+Here is some specific additional configuration for K3s servers.
+
+I'm disabling included Traefik because we'll use a more flexible official Helm later.
+
+We're adding automatic daily backup of etcd database on S3 bucket, which is useful for faster disaster recovery. See the official guide [here](https://docs.k3s.io/datastore/backup-restore).
 
 {{</ tab >}}
 {{< tab tabName="Cluster" >}}
@@ -354,6 +358,42 @@ Will print the SSH config access after cluster creation.
 {{</ tab >}}
 {{</ tabs >}}
 
+#### ETCD and network encryption by default
+
+You may need to enable etcd and network encryption in order to prevent any data leak in case a server is compromised. You can easily do so by adding the following variables:
+
+{{< highlight host="demo-kube-hcloud" file="kube.tf" >}}
+
+```tf
+module "hcloud_kube" {
+  //...
+  # You need to install WireGuard package on all nodes
+  server_packages = ["wireguard"]
+
+  control_planes_custom_config = {
+    //...
+    flannel-backend    = "wireguard-native"
+    secrets-encryption = true
+  }
+
+  //...
+}
+```
+
+{{< /highlight >}}
+
+You can check the ETCD encryption status with `sudo k3s secrets-encrypt status`:
+
+```txt
+Encryption Status: Enabled
+Current Rotation Stage: start
+Server Encryption Hashes: All hashes match
+
+Active  Key Type  Name
+------  --------  ----
+ *      AES-CBC   aescbckey
+```
+
 #### Inputs
 
 As input variables, you have the choice to use environment variables or a separated `terraform.tfvars` file.
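As a hedged aside, the same `k3s secrets-encrypt` CLI also covers key rotation; a sketch of the classic three-step flow (verify the exact subcommands against the K3s docs for your version):

```sh
sudo k3s secrets-encrypt prepare   # stage a new encryption key
sudo k3s secrets-encrypt rotate    # swap it in as the active key
sudo k3s secrets-encrypt reencrypt # rewrite all stored secrets with the new key
```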
@@ -365,17 +405,17 @@ As input variables, you have the choice to use environment variables or separate
 
 ```tf
 hcloud_token = "xxx"
-my_public_ssh_keys = [
+my_ip_addresses = [
   "82.82.82.82/32"
 ]
-my_ip_addresses = [
+my_public_ssh_keys = [
   "ssh-ed25519 xxx"
 ]
 s3_access_key = "xxx"
 s3_secret_key = "xxx"
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 {{</ tab >}}
 {{< tab tabName="Environment variables" >}}
@@ -440,7 +480,7 @@ Merge above SSH config into your `~/.ssh/config` file, then test the connection
 
 {{< alert >}}
 If you get "Connection refused", it's probably because the server is still in the cloud-init phase. Wait a few minutes and try again. Be sure to have the same public IPs as the ones you whitelisted in the Terraform variables. You can edit them and reapply the Terraform configuration at any moment.
-{{</ alert >}}
+{{< /alert >}}
 
 Before using K3s, let's enable Salt for OS management by typing `sudo salt-key -A -y`. This will accept all pending keys, and allow Salt to connect to all nodes. To upgrade all nodes at once, just type `sudo salt '*' pkg.upgrade`.
 
@@ -455,7 +495,7 @@ From the controller, copy `/etc/rancher/k3s/k3s.yaml` on your machine located ou
 {{< alert >}}
 If `~/.kube/config` already exists, you have to properly [merge the config into it](https://able8.medium.com/how-to-merge-multiple-kubeconfig-files-into-one-36fc987c2e2f). You can use `kubectl config view --flatten` for that.
 Then use `kubectl config use-context kube` for switching to your new cluster.
-{{</ alert >}}
+{{< /alert >}}
 
 Type `kubectl get nodes` and you should see the 2 nodes of your cluster in **Ready** state.
 
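A minimal merge sketch for the alert above, assuming the downloaded kubeconfig was saved as `~/k3s.yaml` (a hypothetical path):

```sh
# flatten both configs into one, then switch to the new context
KUBECONFIG=~/.kube/config:~/k3s.yaml kubectl config view --flatten > /tmp/config
mv /tmp/config ~/.kube/config
kubectl config use-context kube
```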
@@ -491,7 +531,7 @@ agent_nodepools = [
 ]
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 Then apply the Terraform configuration again. After a few minutes, you should see 2 new nodes in **Ready** state.
 
@@ -505,7 +545,7 @@ kube-worker-03 Ready <none> 25s v1.27.4+k3s1
 
 {{< alert >}}
 You'll have to use `sudo salt-key -A -y` each time you add a new node to the cluster for global OS management.
-{{</ alert >}}
+{{< /alert >}}
 
#### Deleting workers
 
@@ -515,7 +555,7 @@ To finalize the deletion, delete the node from the cluster with `krm no kube-wor
 
 {{< alert >}}
 If the node has some workloads running, you'll have to consider a proper [draining](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) before deleting it.
-{{</ alert >}}
+{{< /alert >}}
 
## 1st check ✅
 
@@ -25,7 +25,7 @@ terraform {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 Let's begin with automatic upgrades management.
 
@@ -34,8 +34,8 @@ Let's begin with automatic upgrades management.
 Before we go to next steps, we need to install critical monitoring CRDs that will be used by many components for monitoring, a subject that will be covered later.
 
 ```sh
-kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
-kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
+ka https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --server-side
+ka https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml --server-side
 ```
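The short commands used throughout this diff (`k`, `ka`, `kak`, `kg`, `kgpo`, `kpf`, `krm`) are kubectl aliases presumably defined earlier in the guide; a plausible sketch of their definitions (an assumption, not the author's exact dotfile):

```sh
alias k='kubectl'
alias ka='kubectl apply -f'    # apply a manifest file or URL
alias kak='kubectl apply -k'   # kustomize apply (assumption)
alias kg='kubectl get'
alias kgpo='kubectl get pods'
alias kpf='kubectl port-forward'
alias krm='kubectl delete'
```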
### Automatic reboot
@@ -47,7 +47,7 @@ When OS kernel is upgraded, the system needs to be rebooted to apply it. This is
 
 ```tf
 resource "helm_release" "kubereboot" {
   chart      = "kured"
-  version    = "5.1.0"
+  version    = "5.4.5"
   repository = "https://kubereboot.github.io/charts"
 
   name = "kured"

@@ -75,7 +75,7 @@ resource "helm_release" "kubereboot" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 For all `helm_release` resources you'll see in this guide, you may check the latest chart version available. Example for `kured`:
 
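The example command itself falls outside this hunk; a hedged sketch of one way to list available chart versions for kured:

```sh
helm repo add kubereboot https://kubereboot.github.io/charts
helm search repo kubereboot/kured --versions | head
```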
@@ -100,11 +100,13 @@ However, as Terraform doesn't offer a proper way to apply a remote multi-documen
 
 {{< alert >}}
 Don't push yourself to get fully 100% GitOps everywhere if the remedy gives far more code complexity. Sometimes simple documentation of manual steps in a README is better.
-{{</ alert >}}
+{{< /alert >}}
 
 ```sh
 k create ns system-upgrade
 # installing system-upgrade-controller
 ka https://github.com/rancher/system-upgrade-controller/releases/latest/download/system-upgrade-controller.yaml
+ka https://github.com/rancher/system-upgrade-controller/releases/latest/download/crd.yaml
 # checking system-upgrade-controller deployment status
 kg deploy -n system-upgrade
 ```
@@ -187,19 +189,25 @@ resource "kubernetes_manifest" "agent_plan" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 {{< alert >}}
 You may set the same channel as in the previous step for hcloud cluster creation.
-{{</ alert >}}
+{{< /alert >}}
 
## External access
 
-Now it's time to expose our cluster to the outside world. We'll use Traefik as ingress controller and cert-manager for SSL certificates management.
+Now it's time to expose our cluster to the outside world. We'll use Traefik v3 as ingress controller and cert-manager for SSL certificates management.
 
### Traefik
 
-Apply following file:
+Apply CRDs:
+
+```sh
+kak https://github.com/traefik/traefik-helm-chart/traefik/crds/ --server-side
+```
+
+Then apply the following:
 
 {{< highlight host="demo-kube-k3s" file="traefik.tf" >}}
 
@@ -216,25 +224,31 @@ resource "kubernetes_namespace_v1" "traefik" {
 
 resource "helm_release" "traefik" {
   chart      = "traefik"
-  version    = "24.0.0"
+  version    = "28.0.0"
   repository = "https://traefik.github.io/charts"
 
   name      = "traefik"
   namespace = kubernetes_namespace_v1.traefik.metadata[0].name
 
   set {
-    name  = "ports.web.redirectTo"
+    name  = "ports.web.redirectTo.port"
     value = "websecure"
   }
 
-  set {
+  set_list {
     name  = "ports.websecure.forwardedHeaders.trustedIPs"
-    value = "{127.0.0.1/32,10.0.0.0/8}"
+    value = [
+      "127.0.0.1/32",
+      "10.0.0.0/8"
+    ]
   }
 
-  set {
+  set_list {
     name  = "ports.websecure.proxyProtocol.trustedIPs"
-    value = "{127.0.0.1/32,10.0.0.0/8}"
+    value = [
+      "127.0.0.1/32",
+      "10.0.0.0/8"
+    ]
   }
 
   set {
@@ -259,9 +273,9 @@ resource "helm_release" "traefik" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
-`ports.web.redirectTo` will redirect all HTTP traffic to HTTPS.
+`ports.web.redirectTo.port` will redirect all HTTP traffic to HTTPS.
 
 `forwardedHeaders` and `proxyProtocol` will allow Traefik to get the real IP of clients.
 
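A hedged way to verify the real client IP actually comes through: deploy any header-echo service (e.g. the `traefik/whoami` image, an assumption) behind an ingress and inspect the forwarded headers:

```sh
# whoami.kube.rocks is a hypothetical test host backed by a traefik/whoami pod
curl -s https://whoami.kube.rocks | grep -i 'x-forwarded-for\|x-real-ip'
```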
@@ -317,14 +331,14 @@ resource "hcloud_load_balancer_service" "https_service" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 Use `hcloud load-balancer-type list` to get the list of available load balancer types.
 
 {{< alert >}}
 Don't forget to add a `hcloud_load_balancer_service` resource for each service (aka port) you want to serve.
 We use `tcp` protocol as Traefik will handle SSL termination. Set `proxyprotocol` to true to allow Traefik to get the real IP of clients.
-{{</ alert >}}
+{{< /alert >}}
 
 Once applied, use `hcloud load-balancer list` to get the public IP of the load balancer and try to curl it. You should be properly redirected to HTTPS and get a certificate error. It's time to get SSL certificates.
 
@@ -333,7 +347,7 @@ One applied, use `hcloud load-balancer list` to get the public IP of the load ba
 
 First we need to install cert-manager for proper distributed SSL management. Start by installing the CRDs manually.
 
 ```sh
-ka https://github.com/cert-manager/cert-manager/releases/download/v1.12.3/cert-manager.crds.yaml
+ka https://github.com/cert-manager/cert-manager/releases/download/v1.15.0/cert-manager.crds.yaml
 ```
 
 Then apply the following Terraform code.

@@ -349,7 +363,7 @@ resource "kubernetes_namespace_v1" "cert_manager" {
 
 resource "helm_release" "cert_manager" {
   chart      = "cert-manager"
-  version    = "v1.12.3"
+  version    = "v1.15.0"
   repository = "https://charts.jetstack.io"
 
   name = "cert-manager"
@@ -362,12 +376,12 @@ resource "helm_release" "cert_manager" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 {{< alert >}}
-You can use `installCRDs` option to install CRDs automatically. But uninstalling cert-manager will delete all associated resources including generated certificates. That's why I generally prefer to install CRDs manually.
-{{</ alert >}}
+As always we enable `prometheus.servicemonitor.enabled` to allow Prometheus to scrape cert-manager metrics.
+{{< /alert >}}
 
 All should be ok with `kg deploy -n cert-manager`.
 
||||
@ -377,7 +391,7 @@ We'll use [DNS01 challenge](https://cert-manager.io/docs/configuration/acme/dns0
|
||||
|
||||
{{< alert >}}
|
||||
You may use a DNS provider supported by cert-manager. Check the [list of supported providers](https://cert-manager.io/docs/configuration/acme/dns01/#supported-dns01-providers). As cert-manager is highly extensible, you can easily create your own provider with some efforts. Check [available contrib webhooks](https://cert-manager.io/docs/configuration/acme/dns01/#webhook).
|
||||
{{</ alert >}}
|
||||
{{< /alert >}}
|
||||
|
||||
First prepare variables and set them accordingly:
|
||||
|
||||
@@ -398,7 +412,7 @@ variable "dns_api_token" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 {{< highlight host="demo-kube-k3s" file="terraform.tfvars" >}}
 
 ```tf
 domain        = "kube.rocks"
 dns_api_token = "xxx"
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 Then we need to create a default `Certificate` k8s resource associated to a valid `ClusterIssuer` resource that will manage its generation. Apply the following Terraform code for issuing the new wildcard certificate for your domain.
 
@@ -484,12 +498,12 @@ resource "kubernetes_manifest" "tls_certificate" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 {{< alert >}}
-You can set `acme.privateKeySecretRef.name` to **letsencrypt-staging** for testing purpose and avoid wasting LE quota limit.
-{{</ alert >}}
+Set `privateKey.rotationPolicy` to `Always` to ensure that the certificate will be [renewed automatically](https://cert-manager.io/docs/usage/certificate/) 30 days before it expires, without downtime.
+{{< /alert >}}
 
 In the meantime, go to your DNS provider and add a new `*.kube.rocks` entry pointing to the load balancer IP.
 
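While waiting for DNS propagation, you can follow the issuance from the cert-manager side; a minimal hedged sketch, assuming the certificate lives in the `traefik` namespace as above:

```sh
kg clusterissuer               # should report Ready
kg certificate -n traefik      # READY becomes True once issued
k describe certificaterequest -n traefik
```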
||||
@ -530,7 +544,7 @@ resource "null_resource" "encrypted_admin_password" {
|
||||
}
|
||||
```
|
||||
|
||||
{{</ highlight >}}
|
||||
{{< /highlight >}}
|
||||
|
||||
{{< highlight host="demo-kube-k3s" file="terraform.tfvars" >}}
|
||||
|
||||
@ -540,11 +554,11 @@ http_password = "xxx"
|
||||
whitelisted_ips = ["82.82.82.82"]
|
||||
```
|
||||
|
||||
{{</ highlight >}}
|
||||
{{< /highlight >}}
|
||||
|
||||
{{< alert >}}
|
||||
Note on `encrypted_admin_password`, we generate a bcrypt hash of the password compatible for HTTP basic auth and keep the original to avoid to regenerate it each time.
|
||||
{{</ alert >}}
|
||||
{{< /alert >}}
|
||||
|
||||
Then apply the following Terraform code:
|
||||
|
||||
@@ -554,9 +568,9 @@ Then apply the following Terraform code:
 resource "helm_release" "traefik" {
   //...
 
-  set {
+  set_list {
     name  = "ingressRoute.dashboard.entryPoints"
-    value = "{websecure}"
+    value = ["websecure"]
   }
 
   set {

@@ -611,7 +625,7 @@ resource "kubernetes_manifest" "traefik_middleware_ip" {
     namespace = kubernetes_namespace_v1.traefik.metadata[0].name
   }
   spec = {
-    ipWhiteList = {
+    ipAllowList = {
       sourceRange = var.whitelisted_ips
     }
   }
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 Now go to `https://traefik.kube.rocks` and you should be asked for credentials. After login, you should see the dashboard.
 
||||
@ -640,7 +654,7 @@ resource "kubernetes_manifest" "traefik_middleware_ip" {
|
||||
manifest = {
|
||||
//...
|
||||
spec = {
|
||||
ipWhiteList = {
|
||||
ipAllowList = {
|
||||
sourceRange = var.whitelisted_ips
|
||||
ipStrategy = {
|
||||
depth = 1
|
||||
@ -651,7 +665,7 @@ resource "kubernetes_manifest" "traefik_middleware_ip" {
|
||||
}
|
||||
```
|
||||
|
||||
{{</ highlight >}}
|
||||
{{< /highlight >}}
|
||||
|
||||
In the case of Cloudflare, you may need also to trust the [Cloudflare IP ranges](https://www.cloudflare.com/ips-v4) in addition to Hetzner load balancer. Just set `ports.websecure.forwardedHeaders.trustedIPs` and `ports.websecure.proxyProtocol.trustedIPs` accordingly.
|
||||
 variable "cloudflare_ips" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 {{< highlight host="demo-kube-k3s" file="traefik.tf" >}}
 
 locals {
 
 resource "helm_release" "traefik" {
   //...
 
-  set {
+  set_list {
     name  = "ports.websecure.forwardedHeaders.trustedIPs"
-    value = "{${join(",", local.trusted_ips)}}"
+    value = local.trusted_ips
   }
 
-  set {
+  set_list {
     name  = "ports.websecure.proxyProtocol.trustedIPs"
-    value = "{${join(",", local.trusted_ips)}}"
+    value = local.trusted_ips
   }
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 Or for testing purpose set `ports.websecure.forwardedHeaders.insecure` and `ports.websecure.proxyProtocol.insecure` to true.
 
@@ -19,7 +19,7 @@ In Kubernetes world, the most difficult while essential part is probably the sto
 If you are not familiar with Kubernetes storage, you must at least be aware of the pros and cons of `RWO` and `RWX` volumes when creating `PVC`.
-In general `RWO` is more performant, but only one pod can mount it, while `RWX` is slower, but allows sharing between multiple pods.
+`RWO` is a single node volume, and `RWX` is a shared volume between multiple nodes.
-{{</ alert >}}
+{{< /alert >}}
 
 `K3s` comes with a built-in `local-path` provisioner, which is the most performant `RWO` solution by directly using local NVMe SSD. But it's neither resilient nor scalable. I think it's a good solution for anything you consider non-critical data.
 
@@ -126,7 +126,7 @@ The volume is of course automatically mounted on each node reboot, it's done via
 
 {{< alert >}}
 Note that if you set the volume at the same time as node pool creation, Hetzner doesn't seem to automatically mount the volume. So it's preferable to create the node pool first, then add the volume as soon as the node is in ready state. You can always detach / re-attach volumes manually through the UI, which will force a proper remount.
-{{</ alert >}}
+{{< /alert >}}
 
### Longhorn variables
 
@@ -200,7 +200,7 @@ resource "kubernetes_secret_v1" "longhorn_backup_credential" {
 
 resource "helm_release" "longhorn" {
   chart      = "longhorn"
-  version    = "1.5.1"
+  version    = "1.6.1"
   repository = "https://charts.longhorn.io"
 
   name = "longhorn"

@@ -254,7 +254,7 @@ Set both `persistence.defaultClassReplicaCount` (used for Kubernetes configurati
 The toleration is required to allow Longhorn pods (managers and drivers) to be scheduled on storage nodes in addition to workers.
+Note that we need to have Longhorn deployed on workers too, otherwise pods scheduled on these nodes can't be attached to Longhorn volumes.
-{{</ alert >}}
+{{< /alert >}}
 
 Use `kgpo -n longhorn-system -o wide` to check that Longhorn pods are correctly running on storage nodes as well as worker nodes. You should have `instance-manager` deployed on each node.
 
@@ -342,7 +342,7 @@ resource "kubernetes_manifest" "longhorn_ingress" {
 {{< alert >}}
 It's vital that you have at least IP and AUTH middlewares with a strong password for Longhorn UI access, as it concerns the most critical part of the cluster.
 Of course, you can skip this ingress and directly use `kpf svc/longhorn-frontend -n longhorn-system 8000:80` to access Longhorn UI securely.
-{{</ alert >}}
+{{< /alert >}}
 
### Nodes and volumes configuration
 
@@ -358,7 +358,7 @@ Type this commands for both storage nodes or use Longhorn UI from **Node** tab:
 
 ```sh
 # get the default-disk-xxx identifier
-kg nodes.longhorn.io okami-storage-01 -n longhorn-system -o yaml
+kg nodes.longhorn.io kube-storage-0x -n longhorn-system -o yaml
 # patch main default-disk-xxx as fast storage
 k patch nodes.longhorn.io kube-storage-0x -n longhorn-system --type=merge --patch '{"spec": {"disks": {"default-disk-xxx": {"tags": ["fast"]}}}}'
 # add a new schedulable disk by adding HC_Volume_XXXXXXXX path

@@ -386,6 +386,7 @@ resource "kubernetes_storage_class_v1" "longhorn_fast" {
     fromBackup   = ""
     fsType       = "ext4"
     diskSelector = "fast"
+    dataLocality = "strict-local"
   }
 }
 ```
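A quick hedged sanity check that the new class and tagged disks are visible, using the same kubectl aliases:

```sh
kg storageclass                          # longhorn-fast should be listed
kg nodes.longhorn.io -n longhorn-system  # both storage nodes should be schedulable
```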
@@ -476,7 +477,7 @@ resource "kubernetes_secret_v1" "postgresql_auth" {
 
 resource "helm_release" "postgresql" {
   chart      = "postgresql"
-  version    = var.chart_postgresql_version
+  version    = "15.2.5"
   repository = "https://charts.bitnami.com/bitnami"
 
   name = "postgresql"

@@ -507,11 +508,6 @@ resource "helm_release" "postgresql" {
     value = "replication"
   }
 
-  set {
-    name  = "architecture"
-    value = "replication"
-  }
-
   set {
     name  = "metrics.enabled"
     value = "true"

@@ -576,25 +572,25 @@ resource "helm_release" "postgresql" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 {{< alert >}}
 Don't forget to use fast storage by setting `primary.persistence.storageClass` and `readReplicas.persistence.storageClass` accordingly.
-{{</ alert >}}
+{{< /alert >}}
 
 Now check that PostgreSQL pods are correctly running on storage nodes with `kgpo -n postgres -o wide`.
 
 ```txt
 NAME                   READY   STATUS    RESTARTS   AGE    IP            NODE               NOMINATED NODE   READINESS GATES
-postgresql-primary-0   2/2     Running   0          151m   10.42.5.253   okami-storage-01   <none>           <none>
-postgresql-read-0      2/2     Running   0          152m   10.42.2.216   okami-storage-02   <none>           <none>
+postgresql-primary-0   2/2     Running   0          151m   10.42.5.253   kube-storage-01    <none>           <none>
+postgresql-read-0      2/2     Running   0          152m   10.42.2.216   kube-storage-02    <none>           <none>
 ```
 
 And that's it, we have a replicated PostgreSQL cluster ready to use! Go to the Longhorn UI and make sure that 2 volumes are created on fast disk under the **Volume** menu.
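A hedged way to confirm streaming replication from inside the primary, mirroring the exec style used for dumps later (the `okami` user comes from this guide's setup):

```sh
k exec -it sts/postgresql-primary -n postgres -- sh -c \
  'PGUSER="okami" PGPASSWORD="$POSTGRES_PASSWORD" psql -c "SELECT client_addr, state FROM pg_stat_replication;"'
```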
## Redis cluster
 
-After PostgreSQL, setting up a master/slave redis is a piece of cake. You may prefer [redis cluster](https://redis.io/docs/management/scaling/) by using [Bitnami redis cluster](https://artifacthub.io/packages/helm/bitnami/redis-cluster), but it [doesn't work](https://github.com/bitnami/charts/issues/12901) at the time of writing this guide.
+After PostgreSQL, setting up a redis cluster is a piece of cake. Let's use [Bitnami redis](https://artifacthub.io/packages/helm/bitnami/redis) with [Sentinel](https://redis.io/docs/management/sentinel/).
 
### Redis variables
 
@@ -640,17 +636,12 @@ resource "kubernetes_secret_v1" "redis_auth" {
 
 resource "helm_release" "redis" {
   chart      = "redis"
-  version    = "17.15.6"
+  version    = "19.1.0"
   repository = "https://charts.bitnami.com/bitnami"
 
   name      = "redis"
   namespace = kubernetes_namespace_v1.redis.metadata[0].name
 
-  set {
-    name  = "architecture"
-    value = "standalone"
-  }
-
   set {
     name  = "auth.existingSecret"
     value = kubernetes_secret_v1.redis_auth.metadata[0].name
@@ -672,67 +663,25 @@ resource "helm_release" "redis" {
 }
 
-  set {
-    name  = "master.tolerations[0].key"
-    value = "node-role.kubernetes.io/storage"
-  }
-
-  set {
-    name  = "master.tolerations[0].effect"
-    value = "NoSchedule"
-  }
-
-  set {
-    name  = "master.nodeSelector.node-role\\.kubernetes\\.io/primary"
-    type  = "string"
+  set {
+    name  = "sentinel.enabled"
     value = "true"
   }
 
-  set {
-    name  = "master.persistence.size"
-    value = "10Gi"
-  }
-
   set {
-    name  = "master.persistence.storageClass"
-    value = "longhorn-fast"
+    name  = "replica.persistence.enabled"
+    value = "false"
   }
 
   set {
     name  = "replica.replicaCount"
-    value = "1"
-  }
-
-  set {
-    name  = "replica.tolerations[0].key"
-    value = "node-role.kubernetes.io/storage"
-  }
-
-  set {
-    name  = "replica.tolerations[0].effect"
-    value = "NoSchedule"
-  }
-
-  set {
-    name  = "replica.nodeSelector.node-role\\.kubernetes\\.io/read"
-    type  = "string"
-    value = "true"
-  }
-
-  set {
-    name  = "replica.persistence.size"
-    value = "10Gi"
-  }
-
-  set {
-    name  = "replica.persistence.storageClass"
-    value = "longhorn-fast"
+    value = "3"
   }
 }
 ```
 
 {{< /highlight >}}
 
-And that's it, job done! Always check that Redis pods are correctly running on storage nodes with `kgpo -n redis -o wide` and volumes are ready on Longhorn.
+And that's it, job done! Check that all 3 Redis nodes are correctly running on worker nodes with `kgpo -n redis -o wide`. Thanks to Sentinel, Redis is highly available and resilient.
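A hedged sanity check through Sentinel, assuming the Bitnami chart defaults (`redis-node-0` pod name, `mymaster` master set, Sentinel port 26379 — all assumptions to verify against the chart values):

```sh
k exec -it redis-node-0 -n redis -- redis-cli -p 26379 \
  sentinel get-master-addr-by-name mymaster
```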
## Backups
 
@@ -805,7 +754,7 @@ Configure this variable according to your needs.
 
 If you need some regular dump of your database without requiring a dedicated Kubernetes `CronJob`, you can simply use the following crontab line on the control plane node:
 
 ```sh
-0 */8 * * * root /usr/local/bin/k3s kubectl exec sts/postgresql-primary -n postgres -- /bin/sh -c 'PGUSER="okami" PGPASSWORD="$POSTGRES_PASSWORD" pg_dumpall -c | gzip > /bitnami/postgresql/dump_$(date "+\%H")h.sql.gz'
+0 */8 * * * root /usr/local/bin/k3s kubectl exec sts/postgresql-primary -n postgres -- /bin/sh -c 'PGUSER="okami" PGPASSWORD="$POSTGRES_PASSWORD" pg_dumpall -c --if-exists | gzip > /bitnami/postgresql/dump_$(date "+\%H")h.sql.gz'
 ```
 
 It will generate 3 daily dumps, one every 8 hours, on the same primary db volume, allowing easy `psql` restore from the same container.
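And a hedged restore sketch for one of those dumps (the `dump_16h` filename is hypothetical; pick whichever hour exists on the volume):

```sh
k exec -it sts/postgresql-primary -n postgres -- sh -c \
  'gunzip -c /bitnami/postgresql/dump_16h.sql.gz | PGUSER="okami" PGPASSWORD="$POSTGRES_PASSWORD" psql'
```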
@@ -125,7 +125,8 @@ provider "flux" {
 }
 
 resource "flux_bootstrap_git" "this" {
-  path = "clusters/demo"
+  path               = "clusters/demo"
+  embedded_manifests = true
 
   components_extra = [
     "image-reflector-controller",

@@ -152,7 +153,7 @@ Open `demo-kube-flux` project and create helm deployment for sealed secret.
 
 ```yaml
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta2
+apiVersion: source.toolkit.fluxcd.io/v1
 kind: HelmRepository
 metadata:
   name: sealed-secrets

@@ -161,7 +162,7 @@ spec:
   interval: 1h0m0s
   url: https://bitnami-labs.github.io/sealed-secrets
 ---
-apiVersion: helm.toolkit.fluxcd.io/v2beta1
+apiVersion: helm.toolkit.fluxcd.io/v2
 kind: HelmRelease
 metadata:
   name: sealed-secrets
|
||||
{{< highlight host="demo-kube-flux" file="clusters/demo/n8n/deploy-n8n.yaml" >}}
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: n8n
|
||||
@ -423,10 +424,14 @@ spec:
|
||||
volumeMounts:
|
||||
- name: n8n-data
|
||||
mountPath: /home/node/.n8n
|
||||
- name: n8n-cache
|
||||
mountPath: /home/node/.cache
|
||||
volumes:
|
||||
- name: n8n-data
|
||||
persistentVolumeClaim:
|
||||
claimName: n8n-data
|
||||
- name: n8n-cache
|
||||
emptyDir: {}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
@ -517,7 +522,7 @@ Let's try a final candidate with NocoDB, an Airtable-like generator for Postgres
|
||||
{{< highlight host="demo-kube-flux" file="clusters/demo/nocodb/deploy-nocodb.yaml" >}}
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: nocodb
|
||||
|
@ -69,7 +69,7 @@ resource "kubernetes_namespace_v1" "monitoring" {
|
||||
|
||||
resource "helm_release" "kube_prometheus_stack" {
|
||||
chart = "kube-prometheus-stack"
|
||||
version = "49.2.0"
|
||||
version = "58.1.0"
|
||||
repository = "https://prometheus-community.github.io/helm-charts"
|
||||
|
||||
name = "kube-prometheus-stack"
|
||||
@ -112,12 +112,12 @@ resource "helm_release" "kube_prometheus_stack" {
|
||||
|
||||
set {
|
||||
name = "prometheus.prometheusSpec.tolerations[0].key"
|
||||
value = "node-role.kubernetes.io/storage"
|
||||
value = "node-role.kubernetes.io/monitor"
|
||||
}
|
||||
|
||||
set {
|
||||
name = "prometheus.prometheusSpec.tolerations[0].operator"
|
||||
value = "Exists"
|
||||
name = "prometheus.prometheusSpec.tolerations[0].effect"
|
||||
value = "NoSchedule"
|
||||
}
|
||||
|
||||
set {
|
||||
@ -159,6 +159,10 @@ Important notes:
|
||||
* As we don't set any storage class, the default one will be used, which is `local-path` when using K3s. If you want to use longhorn instead and benefit of automatic monitoring backup, you can set it with `...volumeClaimTemplate.spec.storageClassName`. But don't forget to deploy Longhorn manager by adding monitor toleration.
|
||||
* As it's a huge chart, I want to minimize dependencies by disabling Grafana, as I prefer manage it separately. However, in this case we may set `grafana.forceDeployDatasources` and `grafana.forceDeployDashboards` to `true` in order to benefit of all included Kubernetes dashboards and automatic Prometheus datasource injection, and deploy them to config maps that can be used for next Grafana install by provisioning.
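
For reference, a minimal sketch of those Grafana flags as additional `set` blocks (value names come from the kube-prometheus-stack chart, adapt to your setup):

```tf
set {
  name  = "grafana.enabled"
  value = "false"
}

set {
  name  = "grafana.forceDeployDatasources"
  value = "true"
}

set {
  name  = "grafana.forceDeployDashboards"
  value = "true"
}
```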

{{< alert >}}
As Terraform plans become slower and slower, you can apply one single resource directly by using the `-target` option. For example, for applying only the Prometheus stack, use `terraform apply -target=helm_release.kube_prometheus_stack`. It will save you a lot of time for testing.
{{< /alert >}}

And finally the ingress for external access:

{{< highlight host="demo-kube-k3s" file="monitoring.tf" >}}

@@ -292,7 +296,7 @@ Create `grafana` database through pgAdmin with same user and according `grafana_

{{< highlight host="demo-kube-k3s" file="terraform.tfvars" >}}

```tf
- smtp_host     = "smtp.mailgun.org"
+ smtp_host     = "smtp.tem.scw.cloud"
smtp_port     = "587"
smtp_user     = "xxx"
smtp_password = "xxx"
@@ -307,7 +311,7 @@ Apply next configuration to Terraform project:

```tf
resource "helm_release" "grafana" {
  chart      = "grafana"
- version    = "6.58.9"
+ version    = "7.3.8"
  repository = "https://grafana.github.io/helm-charts"

  name = "grafana"
@@ -432,10 +436,6 @@ If you go to `https://grafana.kube.rocks/dashboards`, you should see a many dash

* Prometheus and Grafana itself stats
* Flux stats
-
- {{< alert >}}
- Some other core components like etcd, scheduler, proxy, and controller manager need to have metrics enabled to be scraped. See K3s docs or [this issue](https://github.com/k3s-io/k3s/issues/3619)
- {{< /alert >}}

#### Prometheus

[![prometheus](dashboards-prometheus.png)](dashboards-prometheus.png)
@@ -466,7 +466,7 @@ You can easily import some additional dashboards by importing them from Grafana

#### Traefik

- [Link](https://grafana.com/grafana/17346)
+ [Link](https://grafana.com/dashboards/17346)

[![traefik](dashboards-traefik.png)](dashboards-traefik.png)

@@ -478,31 +478,61 @@ You can easily import some additional dashboards by importing them from Grafana

#### Longhorn

- [Link](https://grafana.com/grafana/16888)
+ [Link](https://grafana.com/dashboards/16888)

[![longhorn](dashboards-longhorn.png)](dashboards-longhorn.png)

#### PostgreSQL

- [Link](https://grafana.com/grafana/9628)
+ [Link](https://grafana.com/dashboards/9628)

[![postgresql](dashboards-postgresql.png)](dashboards-postgresql.png)

#### Redis

- [Link](https://grafana.com/grafana/dashboards/763)
+ [Link](https://grafana.com/dashboards/763)

[![redis](dashboards-redis.png)](dashboards-redis.png)

#### Other core components

Some other core components like etcd, scheduler, proxy, and controller manager need to have metrics enabled to be scraped. See K3s docs or [this issue](https://github.com/k3s-io/k3s/issues/3619).

From the Terraform Hcloud project, use `control_planes_custom_config` to expose all remaining metrics endpoints:

{{< highlight host="demo-kube-hcloud" file="kube.tf" >}}

```tf
module "hcloud_kube" {
  //...

  control_planes_custom_config = {
    //...
    etcd-expose-metrics         = true,
    kube-scheduler-arg          = "bind-address=0.0.0.0",
    kube-controller-manager-arg = "bind-address=0.0.0.0",
    kube-proxy-arg              = "metrics-bind-address=0.0.0.0",
  }

  //...
}
```

{{< /highlight >}}

{{< alert >}}
As the above config applies only at cluster initialization, you may instead change `/etc/rancher/k3s/config.yaml` directly and restart the K3s server (see the sketch below).
{{< /alert >}}
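
For an already bootstrapped cluster, a sketch of the manual equivalent, to run on each control plane node (keys mirror the Terraform values above):

```sh
# Append the metrics-related keys to the K3s config, then restart the server
cat >> /etc/rancher/k3s/config.yaml <<'EOF'
etcd-expose-metrics: true
kube-scheduler-arg: "bind-address=0.0.0.0"
kube-controller-manager-arg: "bind-address=0.0.0.0"
kube-proxy-arg: "metrics-bind-address=0.0.0.0"
EOF
systemctl restart k3s
```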

## Logging

Last but not least, we need to add a logging stack. The most popular one is [Elastic Stack](https://www.elastic.co/elastic-stack), but it's very resource intensive. A more lightweight option is to use [Loki](https://grafana.com/oss/loki/), also part of Grafana Labs.

- In order to work on scalable mode, we need to have a S3 storage backend. We will reuse same S3 compatible storage as longhorn backup here, but it's recommended to use a separate bucket and credentials.
+ We need an S3 storage backend for long-term storage. We'll reuse the same S3-compatible storage as for Longhorn backups here, but it's recommended to use a separate bucket and credentials.
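
For orientation, the S3 connection generally boils down to a handful of chart values like the following sketch (value names from the Loki chart docs; bucket name, endpoint and variables are placeholder assumptions):

```tf
set {
  name  = "loki.storage.type"
  value = "s3"
}

set {
  name  = "loki.storage.bucketNames.chunks"
  value = "loki-chunks"
}

set {
  name  = "loki.storage.s3.endpoint"
  value = "s3.fr-par.scw.cloud"
}

set {
  name  = "loki.storage.s3.accessKeyId"
  value = var.s3_access_key
}

set_sensitive {
  name  = "loki.storage.s3.secretAccessKey"
  value = var.s3_secret_key
}
```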

### Loki

- Let's install it now:
+ Let's install it in single binary mode:

{{< highlight host="demo-kube-k3s" file="logging.tf" >}}

@@ -515,7 +545,7 @@ resource "kubernetes_namespace_v1" "logging" {

resource "helm_release" "loki" {
  chart      = "loki"
- version    = "5.15.0"
+ version    = "6.2.0"
  repository = "https://grafana.github.io/helm-charts"

  name = "loki"
@@ -531,6 +561,11 @@ resource "helm_release" "loki" {
    value = "true"
  }

+  set {
+    name  = "loki.compactor.delete_request_store"
+    value = "s3"
+  }

  set {
    name  = "loki.limits_config.retention_period"
    value = "24h"
@@ -572,34 +607,78 @@ resource "helm_release" "loki" {
  }

  set {
-   name  = "read.replicas"
+   name  = "loki.commonConfig.replication_factor"
    value = "1"
  }

+  set {
+    name  = "loki.schemaConfig.configs[0].from"
+    value = "2024-01-01"
+  }
+
+  set {
+    name  = "loki.schemaConfig.configs[0].store"
+    value = "tsdb"
+  }
+
+  set {
+    name  = "loki.schemaConfig.configs[0].object_store"
+    value = "s3"
+  }
+
+  set {
+    name  = "loki.schemaConfig.configs[0].schema"
+    value = "v13"
+  }
+
+  set {
+    name  = "loki.schemaConfig.configs[0].index.prefix"
+    value = "index_"
+  }
+
+  set {
+    name  = "loki.schemaConfig.configs[0].index.period"
+    value = "24h"
+  }
+
+  set {
+    name  = "deploymentMode"
+    value = "SingleBinary"
+  }
+
+  set {
+    name  = "read.replicas"
+    value = "0"
+  }

  set {
    name  = "backend.replicas"
-   value = "1"
+   value = "0"
  }

  set {
    name  = "write.replicas"
-   value = "2"
+   value = "0"
  }

  set {
-   name  = "write.tolerations[0].key"
-   value = "node-role.kubernetes.io/storage"
+   name  = "singleBinary.replicas"
+   value = "1"
  }

  set {
-   name  = "write.tolerations[0].effect"
+   name  = "singleBinary.tolerations[0].key"
+   value = "node-role.kubernetes.io/monitor"
+  }
+
+  set {
+    name  = "singleBinary.tolerations[0].effect"
    value = "NoSchedule"
  }

  set {
-   name  = "write.nodeSelector.node-role\\.kubernetes\\.io/storage"
-   type  = "string"
-   value = "true"
+   name  = "singleBinary.nodeSelector.node\\.kubernetes\\.io/server-usage"
+   value = "monitor"
  }

  set {
@@ -626,6 +705,21 @@ resource "helm_release" "loki" {
    name  = "test.enabled"
    value = "false"
  }

+  set {
+    name  = "chunksCache.enabled"
+    value = "false"
+  }
+
+  set {
+    name  = "resultsCache.enabled"
+    value = "false"
+  }
+
+  set {
+    name  = "lokiCanary.enabled"
+    value = "false"
+  }
}
```

@@ -642,7 +736,7 @@ Okay so Loki is running but not fed, for that we'll deploy [Promtail](https://gr

```tf
resource "helm_release" "promtail" {
  chart      = "promtail"
- version    = "6.15.0"
+ version    = "6.15.5"
  repository = "https://grafana.github.io/helm-charts"

  name = "promtail"
@@ -715,107 +809,6 @@ We have nothing more to do, all dashboards are already provided by Loki Helm cha

[![loki](dashboards-loki.png)](dashboards-loki.png)

- ## Helm Exporter
-
- We have installed many Helm Charts so far, but how we manage upgrading plans ? We may need to be aware of new versions and security fixes. For that, we can use Helm Exporter:
-
- {{< highlight host="demo-kube-k3s" file="monitoring.tf" >}}
-
- ```tf
- resource "helm_release" "helm_exporter" {
-   chart      = "helm-exporter"
-   version    = "1.2.5+1cbc9c5"
-   repository = "https://shanestarcher.com/helm-charts"
-
-   name      = "helm-exporter"
-   namespace = kubernetes_namespace_v1.monitoring.metadata[0].name
-
-   set {
-     name  = "serviceMonitor.create"
-     value = "true"
-   }
-
-   set {
-     name  = "grafanaDashboard.enabled"
-     value = "true"
-   }
-
-   set {
-     name  = "grafanaDashboard.grafanaDashboard.namespace"
-     value = kubernetes_namespace_v1.monitoring.metadata[0].name
-   }
-
-   values = [
-     file("values/helm-exporter-values.yaml")
-   ]
- }
- ```
-
- {{< /highlight >}}
-
- As the helm exporter config is a bit tedious, it's more straightforward to use a separate helm values file. Here is a sample configuration for Helm Exporter for scraping all charts that we'll need:
-
- {{< highlight host="demo-kube-k3s" file="values/helm-exporter-values.tf" >}}
-
- ```yaml
- config:
-   helmRegistries:
-     registryNames:
-       - bitnami
-     override:
-       - registry:
-           url: "https://concourse-charts.storage.googleapis.com"
-         charts:
-           - concourse
-       - registry:
-           url: "https://dl.gitea.io/charts"
-         charts:
-           - gitea
-       - registry:
-           url: "https://grafana.github.io/helm-charts"
-         charts:
-           - grafana
-           - loki
-           - promtail
-           - tempo
-       - registry:
-           url: "https://charts.longhorn.io"
-         charts:
-           - longhorn
-       - registry:
-           url: "https://charts.jetstack.io"
-         charts:
-           - cert-manager
-       - registry:
-           url: "https://traefik.github.io/charts"
-         charts:
-           - traefik
-       - registry:
-           url: "https://bitnami-labs.github.io/sealed-secrets"
-         charts:
-           - sealed-secrets
-       - registry:
-           url: "https://prometheus-community.github.io/helm-charts"
-         charts:
-           - kube-prometheus-stack
-       - registry:
-           url: "https://SonarSource.github.io/helm-chart-sonarqube"
-         charts:
-           - sonarqube
-       - registry:
-           url: "https://kubereboot.github.io/charts"
-         charts:
-           - kured
-       - registry:
-           url: "https://shanestarcher.com/helm-charts"
-         charts:
-           - helm-exporter
- ```
-
- {{< /highlight >}}
-
- You can easily start from provisioned dashboard and customize it for using `helm_chart_outdated` instead of `helm_chart_info` to list all outdated helms.

## 5th check ✅

We now have a full monitoring suite with a performant logging collector! That was a pretty massive subject. At this stage, you have a good starting point to run many apps on your cluster with high scalability and observability. We are done with the pure **operational** part. It's finally time to tackle the **building** part for a complete development stack. Go to the [next part]({{< ref "/posts/16-a-beautiful-gitops-day-6" >}}) to begin with continuous integration.

@@ -62,7 +62,7 @@ Then the Helm chart itself:

```tf
locals {
- redis_connection = "redis://:${urlencode(var.redis_password)}@redis-master.redis:6379/0"
+ redis_connection = "redis://:${urlencode(var.redis_password)}@redis.redis:6379/0"
}

resource "kubernetes_namespace_v1" "gitea" {
@@ -73,7 +73,7 @@ resource "kubernetes_namespace_v1" "gitea" {

resource "helm_release" "gitea" {
  chart      = "gitea"
- version    = "9.2.0"
+ version    = "10.1.4"
  repository = "https://dl.gitea.io/charts"

  name = "gitea"
@@ -303,7 +303,7 @@ You should be able to log in `https://gitea.kube.rocks` with chosen admin creden

### Push a basic Web API project

- Let's generate a basic .NET Web API project. Create a new dotnet project like following (you may install [last .NET SDK](https://dotnet.microsoft.com/en-us/download)):
+ Let's generate a basic .NET Web API project. Create a new .NET 8 project like the following (you may install the [.NET 8 SDK](https://dotnet.microsoft.com/en-us/download)):

```sh
mkdir kuberocks-demo
@@ -311,7 +311,7 @@ cd kuberocks-demo
dotnet new sln
dotnet new gitignore
dotnet new editorconfig
- dotnet new webapi -o src/KubeRocks.WebApi
+ dotnet new webapi -o src/KubeRocks.WebApi --use-controllers
dotnet sln add src/KubeRocks.WebApi
git init
git add .
@@ -359,7 +359,7 @@ resource "helm_release" "traefik" {
}

  set {
-   name  = "ports.ssh.expose"
+   name  = "ports.ssh.expose.default"
    value = "true"
  }

@@ -414,7 +414,7 @@ Now retry pull again and it should work seamlessly !

### Gitea monitoring

- [Link](https://grafana.com/grafana/dashboards/17802)
+ [Link](https://grafana.com/dashboards/17802)

[![gitea monitoring](gitea-monitoring.png)](gitea-monitoring.png)

@@ -510,7 +510,7 @@ resource "kubernetes_namespace_v1" "concourse" {

resource "helm_release" "concourse" {
  chart      = "concourse"
- version    = "17.2.0"
+ version    = "17.3.1"
  repository = "https://concourse-charts.storage.googleapis.com"

  name = "concourse"

@@ -193,7 +193,7 @@ Firstly create following files in root of your repo that we'll use for building

{{< highlight host="kuberocks-demo" file="Dockerfile" >}}

```Dockerfile
- FROM mcr.microsoft.com/dotnet/aspnet:7.0
+ FROM mcr.microsoft.com/dotnet/aspnet:8.0

WORKDIR /publish
COPY /publish .
@@ -253,7 +253,7 @@ jobs:
      type: registry-image
      source:
        repository: mcr.microsoft.com/dotnet/sdk
-       tag: "7.0"
+       tag: "8.0"
      inputs:
        - name: source-code
          path: .
@@ -432,7 +432,7 @@ Let's define the image update automation task for main Flux repository:

{{< highlight host="demo-kube-flux" file="clusters/demo/flux-add-ons/image-update-automation.yaml" >}}

```yaml
- apiVersion: image.toolkit.fluxcd.io/v1beta1
+ apiVersion: image.toolkit.fluxcd.io/v1beta2
kind: ImageUpdateAutomation
metadata:
  name: flux-system
@@ -465,7 +465,7 @@ Now we need to tell Image Reflector how to scan the repository, as well as the a

{{< highlight host="demo-kube-flux" file="clusters/demo/kuberocks/images-demo.yaml" >}}

```yaml
- apiVersion: image.toolkit.fluxcd.io/v1beta1
+ apiVersion: image.toolkit.fluxcd.io/v1beta2
kind: ImageRepository
metadata:
  name: demo
@@ -476,7 +476,7 @@ spec:
  secretRef:
    name: dockerconfigjson
---
- apiVersion: image.toolkit.fluxcd.io/v1beta1
+ apiVersion: image.toolkit.fluxcd.io/v1beta2
kind: ImagePolicy
metadata:
  name: demo

@@ -120,8 +120,9 @@ The last step but not least for a total integration with our monitored Kubernete

Installing minimal ASP.NET Core metrics is really a no-brainer:

```sh
dotnet add src/KubeRocks.WebApi package OpenTelemetry.AutoInstrumentation --prerelease
dotnet add src/KubeRocks.WebApi package OpenTelemetry.Extensions.Hosting --prerelease
dotnet add src/KubeRocks.WebApi package OpenTelemetry.Instrumentation.AspNetCore --prerelease
dotnet add src/KubeRocks.WebApi package OpenTelemetry.Instrumentation.EntityFrameworkCore --prerelease
dotnet add src/KubeRocks.WebApi package OpenTelemetry.Exporter.Prometheus.AspNetCore --prerelease
```

@@ -135,7 +136,11 @@ builder.Services.AddOpenTelemetry()
    {
        b
            .AddAspNetCoreInstrumentation()
-           .AddPrometheusExporter();
+           .AddPrometheusExporter()
+           .AddMeter(
+               "Microsoft.AspNetCore.Hosting",
+               "Microsoft.AspNetCore.Server.Kestrel"
+           );
    });

var app = builder.Build();
@@ -149,9 +154,7 @@ app.UseOpenTelemetryPrometheusScrapingEndpoint();

Relaunch the app and go to `https://demo.kube.rocks/metrics` to confirm it's working. It should show metrics after each endpoint call; simply try `https://demo.kube.rocks/Articles`.
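
From a terminal, a quick check could look like this (the grep pattern assumes the standard ASP.NET Core meters exported in Prometheus naming convention):

```sh
curl -s https://demo.kube.rocks/metrics | grep http_server_request_duration
# Histogram series such as http_server_request_duration_seconds_bucket{...} should appear
```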

- {{< alert >}}
- .NET metrics are currently pretty basic, but the next .NET 8 version will provide far better metrics from internal components allowing some [useful dashboard](https://github.com/JamesNK/aspnetcore-grafana).
- {{< /alert >}}
+ Now you can easily import the ASP.NET [specific Grafana dashboards](https://github.com/dotnet/aspire/tree/main/src/Grafana) for visualization.

#### Hide internal endpoints

@@ -270,7 +273,7 @@ resource "kubernetes_namespace_v1" "tracing" {

resource "helm_release" "tempo" {
  chart      = "tempo"
- version    = "1.5.1"
+ version    = "1.7.2"
  repository = "https://grafana.github.io/helm-charts"

  name = "tempo"
@@ -344,6 +347,7 @@ Use the *Test* button on `https://grafana.kube.rocks/connections/datasources/edi

Let's first add another instrumentation package specialized for the Npgsql driver, used by EF Core to translate queries to PostgreSQL:

```sh
+ dotnet add src/KubeRocks.WebApi package OpenTelemetry.Exporter.OpenTelemetryProtocol --prerelease
dotnet add src/KubeRocks.WebApi package Npgsql.OpenTelemetry
```
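
Wired into `Program.cs`, the tracing side then typically looks like this sketch (the Tempo OTLP endpoint is an assumption to adapt to your namespace):

```csharp
builder.Services.AddOpenTelemetry()
    .WithTracing(b => b
        .AddAspNetCoreInstrumentation()
        // Npgsql.OpenTelemetry provides AddNpgsql() for SQL spans
        .AddNpgsql()
        // Push spans to Tempo through OTLP (endpoint assumed)
        .AddOtlpExporter(o => o.Endpoint = new Uri("http://tempo.tracing:4317")));
```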

@@ -19,105 +19,109 @@ SonarQube is leading the code metrics industry for a long time, embracing full O

SonarQube has its dedicated Helm chart, which is perfect for us. However, it's the most resource-hungry component of our development stack so far (because it's built with Java? End of troll), so be sure to deploy it on an almost empty node (which should be OK with 3 workers), maybe a dedicated one. In fact, it's the last Helm chart for this tutorial, I promise!

- Create dedicated database for SonarQube same as usual.
+ Create a dedicated database for SonarQube as usual, then we can use Flux for the deployment.

- {{< highlight host="demo-kube-k3s" file="main.tf" >}}
+ {{< highlight host="demo-kube-flux" file="clusters/demo/sonarqube/deploy-sonarqube.yaml" >}}

- ```tf
- variable "sonarqube_db_password" {
-   type      = string
-   sensitive = true
- }

+ ```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: sonarqube
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: sonarqube
  namespace: sonarqube
spec:
  interval: 1h0m0s
  url: https://SonarSource.github.io/helm-chart-sonarqube
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: sonarqube
  namespace: sonarqube
spec:
  chart:
    spec:
      chart: sonarqube
      reconcileStrategy: ChartVersion
      sourceRef:
        kind: HelmRepository
        name: sonarqube
      version: ">=10.0.0"
  interval: 1m
  releaseName: sonarqube
  targetNamespace: sonarqube
  values:
    resources:
      limits:
        cpu: 1000m
        memory: 2Gi
      requests:
        cpu: 500m
        memory: 2Gi

    prometheusMonitoring:
      podMonitor:
        enabled: true
        namespace: sonarqube

    monitoringPasscode: null
    monitoringPasscodeSecretName: sonarqube-secret
    monitoringPasscodeSecretKey: monitoring-passcode

    jdbcOverwrite:
      enable: true
      jdbcUrl: jdbc:postgresql://postgresql-primary.postgres/sonarqube
      jdbcUsername: sonarqube
      jdbcSecretName: sonarqube-secret
      jdbcSecretPasswordKey: db-password

    postgresql:
      enabled: false
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: sonarqube
  namespace: sonarqube
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`sonarqube.kube.rocks`)
      kind: Rule
      services:
        - name: sonarqube-sonarqube
          port: http
```

{{< /highlight >}}

- {{< highlight host="demo-kube-k3s" file="terraform.tfvars" >}}
+ Here are the secrets to adapt to your needs:

- ```tf
- sonarqube_db_password = "xxx"

{{< highlight host="demo-kube-flux" file="clusters/demo/sonarqube/secret-sonarqube.yaml" >}}

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: sonarqube-secret
  namespace: sonarqube
type: Opaque
data:
  db-password: YWRtaW4=
  monitoring-passcode: YWRtaW4=
```

{{< /highlight >}}

- {{< highlight host="demo-kube-k3s" file="sonarqube.tf" >}}
+ As seen in part 4 of this guide, seal these secrets with `kubeseal` under `sealed-secret-sonarqube.yaml` and delete the original secret file.
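
A sketch of the sealing step (the public cert filename is illustrative, fetched as in part 4):

```sh
kubeseal --format=yaml --cert=pub-sealed-secrets.pem \
  < clusters/demo/sonarqube/secret-sonarqube.yaml \
  > clusters/demo/sonarqube/sealed-secret-sonarqube.yaml
rm clusters/demo/sonarqube/secret-sonarqube.yaml
```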

- ```tf
- resource "kubernetes_namespace_v1" "sonarqube" {
-   metadata {
-     name = "sonarqube"
-   }
- }
-
- resource "helm_release" "sonarqube" {
-   chart      = "sonarqube"
-   version    = "10.1.0+628"
-   repository = "https://SonarSource.github.io/helm-chart-sonarqube"
-
-   name      = "sonarqube"
-   namespace = kubernetes_namespace_v1.sonarqube.metadata[0].name
-
-   set {
-     name  = "prometheusMonitoring.podMonitor.enabled"
-     value = "true"
-   }
-
-   set {
-     name  = "postgresql.enabled"
-     value = "false"
-   }
-
-   set {
-     name  = "jdbcOverwrite.enabled"
-     value = "true"
-   }
-
-   set {
-     name  = "jdbcOverwrite.jdbcUrl"
-     value = "jdbc:postgresql://postgresql-primary.postgres/sonarqube"
-   }
-
-   set {
-     name  = "jdbcOverwrite.jdbcUsername"
-     value = "sonarqube"
-   }
-
-   set {
-     name  = "jdbcOverwrite.jdbcPassword"
-     value = var.sonarqube_db_password
-   }
- }
-
- resource "kubernetes_manifest" "sonarqube_ingress" {
-   manifest = {
-     apiVersion = "traefik.io/v1alpha1"
-     kind       = "IngressRoute"
-     metadata = {
-       name      = "sonarqube"
-       namespace = kubernetes_namespace_v1.sonarqube.metadata[0].name
-     }
-     spec = {
-       entryPoints = ["websecure"]
-       routes = [
-         {
-           match = "Host(`sonarqube.${var.domain}`)"
-           kind  = "Rule"
-           services = [
-             {
-               name = "sonarqube-sonarqube"
-               port = "http"
-             }
-           ]
-         }
-       ]
-     }
-   }
- }
- ```

{{< /highlight >}}

- Be sure to disable the PostgreSQL sub chart and use our self-hosted cluster with both `postgresql.enabled` and `jdbcOverwrite.enabled`. If needed, set proper `tolerations` and `nodeSelector` for deploying on a dedicated node.
+ Inside Helm values, be sure to disable the PostgreSQL sub-chart and use our self-hosted cluster with both `postgresql.enabled` and `jdbcOverwrite.enabled`. If needed, set proper `tolerations` and `nodeSelector` for deploying on a dedicated node.

The installation takes several minutes, be patient. Once done, you can access SonarQube at `https://sonarqube.kube.rocks` and log in with `admin` / `admin`.

@@ -584,6 +588,10 @@ public class ArticlesListTests : TestBase

Ensure all tests pass with `dotnet test`.

+ {{< alert >}}
+ You may be interested in [Testcontainers](https://testcontainers.com/) for native support of containers inside code, including parallelism (see the sketch below).
+ {{< /alert >}}
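
To give a taste, a minimal sketch with the `Testcontainers.PostgreSql` package (not used in this guide's code, just an illustration):

```csharp
using Testcontainers.PostgreSql;

// Throwaway PostgreSQL container for integration tests
var db = new PostgreSqlBuilder().WithImage("postgres:15").Build();
await db.StartAsync();
var connectionString = db.GetConnectionString(); // feed this to the EF Core DbContext
// ... run tests ...
await db.DisposeAsync();
```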

### CI tests & code coverage

Now we need to integrate the tests in our CI pipeline. As we are testing with a real database, create a new `demo_test` database through pgAdmin with basic `test` / `test` credentials.
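
If you prefer the CLI over pgAdmin, the equivalent SQL is simply:

```sql
-- Create the test role and database (same credentials as above)
CREATE USER test WITH PASSWORD 'test';
CREATE DATABASE demo_test OWNER test;
```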

@@ -152,7 +152,7 @@ vus............................: 7 min=7 max=30
vus_max........................: 30 min=30 max=30
```

- As we use Prometheus for outputting the result, we can visualize it easily with Grafana. You just have to import [this dashboard](https://grafana.com/grafana/dashboards/18030-official-k6-test-result/):
+ As we use Prometheus for outputting the results, we can visualize them easily with Grafana. You just have to import [this dashboard](https://grafana.com/dashboards/18030):

[![grafana k6](grafana-k6.png)](grafana-k6.png)

@@ -880,7 +880,7 @@ After push all CI should build correctly. Then the image policy for auto update:

{{< highlight host="demo-kube-flux" file="clusters/demo/kuberocks/images-demo-ui.yaml" >}}

```yml
- apiVersion: image.toolkit.fluxcd.io/v1beta1
+ apiVersion: image.toolkit.fluxcd.io/v1beta2
kind: ImageRepository
metadata:
  name: demo-ui
@@ -891,7 +891,7 @@ spec:
  secretRef:
    name: dockerconfigjson
---
- apiVersion: image.toolkit.fluxcd.io/v1beta1
+ apiVersion: image.toolkit.fluxcd.io/v1beta2
kind: ImagePolicy
metadata:
  name: demo-ui
@@ -931,7 +931,7 @@ spec:
        - name: dockerconfigjson
      containers:
        - name: front
-         image: gitea.okami101.io/kuberocks/demo-ui:latest # {"$imagepolicy": "flux-system:image-demo-ui"}
+         image: gitea.kube.rocks/kuberocks/demo-ui:latest # {"$imagepolicy": "flux-system:image-demo-ui"}
          ports:
            - containerPort: 80
---

content/posts/22-web-api-benchmarks-2024/index.md (new file, 2566 lines; diff too large to display)
content/posts/22-web-api-benchmarks-2024/thumb.svg (new file, 1 line too long to display; 7.7 KiB)
content/posts/23-web-api-benchmarks-2025/index.md (new file, 2449 lines; diff too large to display)
content/posts/23-web-api-benchmarks-2025/thumb.svg (new file, 1 line too long to display; 7.7 KiB)

@@ -4,8 +4,64 @@ description: "Some boring stuf."
layout: "simple"
---

- ## What We Collect and Receive
+ ## Introduction

- In order for us to provide you the best possible experience on our websites, we need to collect and process certain information. Depending on your use of the Services, that may include:
+ Welcome to **Okami101**. We are committed to protecting your privacy. This Privacy Policy explains how we handle any personal data that may be collected when you visit our blog site. While we do not collect user information for tracking or marketing purposes, we use certain third-party services to ensure the security, functionality, and analytics of our site. This policy outlines our approach to privacy and how we ensure compliance with the General Data Protection Regulation (GDPR).

- * **Usage data** — when you visit our site, we will store: the website from which you visited us from, the parts of our site you visit, the date and duration of your visit, your anonymized IP address, information from the device (device type, operating system, screen resolution, language, country you are located in, and web browser type) you used during your visit, and more. We process this usage data in Umami for statistical purposes, to improve our site and to recognize and stop any misuse.

## Data Collection

### Personal Data

We do not collect, store, or process any personal data from our users for marketing or tracking purposes. However, we do process user IP addresses strictly for security purposes and use anonymized analytics data.

### IP Addresses

We use IP addresses solely for the purpose of preventing attacks and ensuring the security of our site. This is done through CrowdSec, a participative security solution that offers crowdsourced protection against malicious IPs. Your IP address may be processed to identify and mitigate potential security threats.

### Cookies

Our blog does not use cookies to track or identify visitors for our purposes. However, Cloudflare may use cookies to deliver its services effectively. These cookies are essential for security purposes and to improve site performance. Additionally, Umami, our analytics provider, does not use cookies and ensures user privacy.

### Log Files

We do not maintain log files of visitors to our site. However, Cloudflare and CrowdSec may collect log data for security and operational purposes, including IP addresses, browser types, and other technical information.

## Third-Party Services

### Cloudflare

We use Cloudflare for web security and performance optimization. Cloudflare may collect and process certain data as part of its service. This data processing is governed by Cloudflare's Privacy Policy, which can be found [here](https://www.cloudflare.com/privacypolicy/).

### CrowdSec

We use CrowdSec to enhance our site's security by protecting against malicious IP addresses. CrowdSec processes IP addresses to identify and mitigate security threats. The data handling practices of CrowdSec are governed by CrowdSec's Privacy Policy, which can be found [here](https://crowdsec.net/privacy-policy).

### Umami

We use Umami, a fully GDPR-compliant Google Analytics alternative, to gather anonymized analytics data about our site's usage. Umami does not use cookies or collect personally identifiable information. The data collected by Umami helps us understand site traffic and usage patterns without compromising user privacy. For more information, you can refer to Umami's privacy policy [here](https://umami.is/docs/).

### giscus

We use giscus, a GitHub-based commenting system, to manage comments on our blog posts. When you post a comment using giscus, you are interacting with GitHub's platform. This means your comment data, including your GitHub username and any other information you choose to share, is processed by GitHub. The data handling practices for giscus are governed by GitHub's Privacy Policy, which can be found [here](https://docs.github.com/en/site-policy/privacy-policies/github-privacy-statement).

## Third-Party Links

Our blog may contain links to other websites. Please be aware that we are not responsible for the privacy practices of other sites. We encourage you to read the privacy statements of each website that collects personal information.

## Data Protection Rights

Since we only process personal data (IP addresses) for security purposes and use anonymized analytics, your data protection rights are limited in this context. However, for any concerns or questions about data processed by Cloudflare, CrowdSec, giscus (GitHub), or Umami, please refer to their respective privacy policies.

## Contact Us

If you have any questions or concerns about our privacy practices or this policy, please contact us at <adrien@okami101.io>.

## Changes to This Privacy Policy

We may update our Privacy Policy from time to time. Any changes will be posted on this page with an updated effective date. We encourage you to review this policy periodically for any changes.

Effective Date: **19/05/2024**

---

By using our blog, you agree to the terms of this Privacy Policy. Thank you for visiting **Okami101**!

@@ -70,11 +70,8 @@

  title: Vuetify Admin
  date: 11/2020
  repo: okami101/vuetify-admin
  demo: https://va-demo.okami101.io/
  docs: https://www.okami101.io/vuetify-admin

- name: laravel-rad-stack
  title: Laravel RAD Stack
  date: 10/2021
  repo: adr1enbe4udou1n/laravel-rad-stack
  demo: https://laravel-rad-stack.okami101.io/

@@ -74,12 +74,8 @@

{{ end }}
<div class="flex items-center justify-between">
  <div class="flex items-center gap-4">
-   <img src="/kube.png" width="30" height="30" alt="Kubernetes"
-     title="Run on K3s over Hetzner Cloud" />
-
-   <a href="https://concourse.okami101.io/teams/main/pipelines/okami-blog" target="_blank">
-     <img src="https://concourse.okami101.io/api/v1/teams/main/pipelines/okami-blog/badge" />
-   </a>
+   <img src="/talos-logo.svg" width="30" height="30" alt="Talos Linux"
+     title="Run on Talos Linux over Hetzner Cloud" />
  </div>
  <div class="hidden lg:block">
    {{/* Copyright */}}
@@ -90,7 +86,7 @@
{{- else }}
©
{{ now.Format "2006" }}
- {{ .Site.Author.name | markdownify | emojify }}
+ {{ .Site.Params.Author.name | markdownify | emojify }}
{{- end }}
</p>
{{ end }}

@@ -6,9 +6,9 @@
>
<header class="flex flex-col items-center mb-3">
  <h1 class="text-4xl font-extrabold">
-   {{ .Site.Author.name | default .Site.Title }}
+   {{ .Site.Params.Author.name | default .Site.Title }}
  </h1>
- {{ with .Site.Author.headline }}
+ {{ with .Site.Params.Author.headline }}
  <h2 class="text-xl text-neutral-500 dark:text-neutral-400">
    {{ . | markdownify | emojify }}
  </h2>

layouts/shortcodes/chart.html (new file, 58 lines)
@@ -0,0 +1,58 @@
<div class="chart">
  {{ $id := delimit (shuffle (seq 1 9)) "" }}
  <canvas id="{{ $id }}" height="350"></canvas>
  <script type="text/javascript">
    window.addEventListener("DOMContentLoaded", (event) => {
      const ctx = document.getElementById("{{ $id }}");
      const chart = new Chart(ctx, {
        {{ if eq (.Get "type") "timeseries" }}
        type: 'line',
        options: {
          maintainAspectRatio: false,
          plugins: {
            title: {
              display: true,
              text: {{ .Get "title" }},
            },
          },
          scales: {
            x: {
              ticks: {
                autoSkip: true,
                callback: function(val, index) {
                  return this.getLabelForValue(val) + 's'
                },
              }
            },
            y: {
              {{ if .Get "stacked" }}
              stacked: {{ .Get "stacked" }},
              {{ end }}
              beginAtZero: true,
              {{ if .Get "max" }}
              suggestedMax: {{ .Get "max" }},
              {{ end }}
            }
          },
        },
        data: {
          labels: [
            {{ if .Get "step" }}
            {{ range seq 0 (.Get "step") 90 }}
            {{ . }},
            {{ end }}
            {{ else }}
            {{ range seq 0 90 }}
            {{ . }},
            {{ end }}
            {{ end }}
          ],
          datasets: {{ .Inner | safeJS }}
        }
        {{ else }}
        {{ .Inner | safeJS }}
        {{ end }}
      });
    });
  </script>
</div>
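
A hypothetical call of this shortcode from a post, to make the wiring concrete (dataset values and labels are made up):

```md
{{</* chart type="timeseries" title="Requests per second" */>}}
[
  { label: "api", data: [/* one point per second, 91 values */] }
]
{{</* /chart */>}}
```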

@@ -49,12 +49,7 @@

{{ readFile (print "data/works/" .name ".md") | markdownify }}
</div>
<div class="flex justify-center gap-4">
- {{ partial "button.html" (dict "text" (partial "icon.html" "github") "href" (print
-   "https://github.com/" .repo) "color" .color) }}
- {{ if .ci }}
-   {{ partial "button.html" (dict "text" (partial "icon.html" "bug") "href" (print
-     "https://concourse.okami101.io/teams/main/pipelines/" .ci) "color" .color) }}
- {{ end }}
+ {{ partial "button.html" (dict "text" (partial "icon.html" "github") "href" (print "https://github.com/" .repo) "color" .color) }}
{{ if .demo }}
  {{ partial "button.html" (dict "text" "Demo" "href" .demo "color" .color) }}
{{ end }}

nginx/default.conf (new file, 16 lines)
@@ -0,0 +1,16 @@
server {
    listen 80;
    listen [::]:80;
    server_name localhost;

    location / {
        root /usr/share/nginx/html;
        index index.html index.htm;

        if ($new_uri != "") {
            rewrite ^(.*)$ $new_uri permanent;
        }
    }

    error_page 404 /404.html;
}

nginx/redirects.conf (new file, 3 lines)
@@ -0,0 +1,3 @@
map $request_uri $new_uri {
    /2023/12/a-2024-benchmark-of-main-web-apis-frameworks/ /2023/12/a-2024-benchmark-of-main-web-api-frameworks/;
}

static/kube.png (binary file removed, was 15 KiB; not shown)

static/talos-logo.svg (new file, 3 lines; 2.7 KiB)
@@ -0,0 +1,3 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Generator: Adobe Illustrator 23.0.3, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 203.74 226.05"><defs><style>.cls-1{fill:url(#linear-gradient);}.cls-2{fill:url(#linear-gradient-2);}.cls-3{fill:url(#linear-gradient-3);}.cls-4{fill:url(#linear-gradient-4);}.cls-5{fill:url(#linear-gradient-5);}</style><linearGradient id="linear-gradient" x1="101.85" y1="-15.19" x2="101.85" y2="237.81" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ffd200"/><stop offset="0.06" stop-color="#ffb500"/><stop offset="0.14" stop-color="#ff8c00"/><stop offset="0.21" stop-color="#ff7300"/><stop offset="0.26" stop-color="#ff6a00"/><stop offset="0.33" stop-color="#fc4f0e"/><stop offset="0.43" stop-color="#f92f1e"/><stop offset="0.51" stop-color="#f81b27"/><stop offset="0.57" stop-color="#f7142b"/><stop offset="0.68" stop-color="#df162e"/><stop offset="0.79" stop-color="#af1a38"/><stop offset="1" stop-color="#4b214c"/></linearGradient><linearGradient id="linear-gradient-2" x1="24.84" y1="-15.19" x2="24.84" y2="237.81" xlink:href="#linear-gradient"/><linearGradient id="linear-gradient-3" x1="178.9" y1="-15.19" x2="178.9" y2="237.81" xlink:href="#linear-gradient"/><linearGradient id="linear-gradient-4" x1="145.06" y1="-15.19" x2="145.06" y2="237.81" xlink:href="#linear-gradient"/><linearGradient id="linear-gradient-5" x1="58.64" y1="-15.19" x2="58.64" y2="237.81" xlink:href="#linear-gradient"/></defs><g id="Layer_2" data-name="Layer 2"><g id="Layer_1-2" data-name="Layer 1"><path class="cls-1" d="M101.89,226.05c2.85,0,5.67-.15,8.46-.35V.35c-2.8-.21-5.62-.35-8.48-.35s-5.7.14-8.52.35V225.69c2.81.21,5.64.35,8.5.36Z"/><path class="cls-2" d="M11.56,50.9,9.12,48.47A112.82,112.82,0,0,0,.2,63.61c29.42,29.89,32.52,44.31,32.48,49.14C32.57,125,17.58,144.21,0,162a113.69,113.69,0,0,0,8.84,15.15c1-1,1.95-1.92,2.92-2.9,25.37-25.54,37.77-45.61,37.92-61.38S37.36,77,11.56,50.9Z"/><path class="cls-3" d="M192,174.29l2.92,2.9A113.69,113.69,0,0,0,203.74,162c-17.57-17.83-32.56-37.09-32.68-49.29-.11-11.9,14.79-31.15,32.46-49.18a112.88,112.88,0,0,0-8.9-15.1l-2.44,2.43c-25.8,26.05-38.27,46.34-38.12,62S166.61,148.75,192,174.29Z"/><path class="cls-4" d="M140.68,112.83c0-22,9.81-58.58,24.92-93.15A113,113,0,0,0,150.45,11c-16.54,37.27-26.78,76.91-26.78,101.87,0,24.15,11.09,64.23,27.93,101.7a113,113,0,0,0,14.84-8.77C150.85,170.73,140.68,134.07,140.68,112.83Z"/><path class="cls-5" d="M80,112.83C80,87.74,69.35,47.88,53,11.07a112.76,112.76,0,0,0-14.93,8.64C53.21,54.26,63,90.85,63,112.83c0,21.23-10.17,57.88-25.76,92.91a113.66,113.66,0,0,0,14.84,8.77C68.94,177.05,80,137,80,112.83Z"/></g></g></svg>
Submodule themes/congo updated: c114943009...15b06a8615