Compare commits
131 Commits
68e74f3e05
cacb6f78bb
aad3615542
30116c360d
548a84ebe3
db7655c4f8
e7e5ec9586
d040374dbc
1a5f7aea75
a241e91d8c
a8e608070f
15bf34c299
f5f3b033bb
a9dacbb3a2
f93bf3c26f
cca4ebc90e
df7d31290f
7df54a5b33
a251cc4df2
b5753b8681
32ef3c0fec
47c58dfdd3
79bba89e68
67923abd1f
48769dac97
5426360d04
f4cd8d1123
ebc4d0b6f9
f6c10850ac
32e1d9d53d
e4438ece84
d9b143cdec
536e262226
1b1449ce51
d9ddee9fd8
e3022703d3
51b3f5bc94
fbe01c5de3
93c1206041
34ac768d17
239d5ac202
55d0b867a9
75b7684337
f6a31fb75d
2a8db86536
33aa481c87
a971aabc95
097b6169c2
fd75702fb7
f1a175a7b7
9a28d870f7
0a3cd51090
210cb3102d
b5599be1eb
4ac61e1294
3a57bbf6f1
1a295cc401
52ba6d9ea4
0b18290797
1cfa1b4cb7
ad6a31b71b
71ffe8531b
f6bacfa5d6
598c34f9fe
009cc3d5eb
0243b9f26e
e12fdfb3f7
b447e476f1
92df3cbaf1
aa7b5d6c14
67f047b1e4
5882a96ff3
cbf3a88b83
f78d791730
cf23988636
1e6795ae27
318b03d1eb
7050abbed0
1787b4a2ac
d6d236f143
91cbf70f40
8b0efa3b60
0c4ba0a562
50f21191f2
bdc6ba81cd
12405f9ac4
3669b8afde
f3990d2de6
84b703efa0
8c5497b92b
d168ca0414
94b521c4ff
767a9c7b52
2a8446c72d
c54908dbe6
0570f3610b
0e68a34e6d
78a62ea7f1
0ffe508858
24ef84162f
db03e71f2f
2ef95db920
5631c459c8
ef68fb6854
0252b1186e
aca9cde58e
1a661ada20
17394a99b7
6b190f1a33
de5d063ca9
7e88dda273
5984a9e1cf
e09cbb2cd1
2b7ad1304d
9a13ade068
a0cc73a7e9
52d5591f17
bc30cbb870
ae41c9409b
de974c8d32
122c054f20
37a4f9d00d
161d16242c
a77fc3e9d8
c0585a7f05
d0f5c1eddd
b02b6a2b6c
70c60216c2
ff3b57126a
78d8b640a4
2a47eb58b0
@@ -1,4 +1,4 @@
-/*! Congo v2.6.1 | MIT License | https://github.com/jpanther/congo */
+/*! Congo v2.7.6 | MIT License | https://github.com/jpanther/congo */
 
 /*! tailwindcss v3.3.2 | MIT License | https://tailwindcss.com */
 
@@ -1733,6 +1733,10 @@ body:has(#menu-controller:checked) {
   bottom: 0px;
 }
 
+.bottom-7 {
+  bottom: 1.75rem;
+}
+
 .end-0 {
   inset-inline-end: 0px;
 }
@@ -1749,6 +1753,10 @@ body:has(#menu-controller:checked) {
   top: 5rem;
 }
 
+.top-8 {
+  top: 2rem;
+}
+
 .top-\[100vh\] {
   top: 100vh;
 }
@@ -1781,6 +1789,10 @@ body:has(#menu-controller:checked) {
   margin: 0.25rem;
 }
 
+.m-3 {
+  margin: 0.75rem;
+}
+
 .m-auto {
   margin: auto;
 }
@@ -1925,6 +1937,10 @@ body:has(#menu-controller:checked) {
   margin-top: 3rem;
 }
 
+.mt-4 {
+  margin-top: 1rem;
+}
+
 .mt-6 {
   margin-top: 1.5rem;
 }
@@ -1953,6 +1969,10 @@ body:has(#menu-controller:checked) {
   display: flex;
 }
 
+.inline-flex {
+  display: inline-flex;
+}
+
 .grid {
   display: grid;
 }
@@ -1969,6 +1989,10 @@ body:has(#menu-controller:checked) {
   height: 3rem;
 }
 
+.h-2 {
+  height: 0.5rem;
+}
+
 .h-24 {
   height: 6rem;
 }
@@ -2136,6 +2160,10 @@ body:has(#menu-controller:checked) {
   justify-content: space-between;
 }
 
+.gap-1 {
+  gap: 0.25rem;
+}
+
 .gap-4 {
   gap: 1rem;
 }
@@ -2170,6 +2198,10 @@ body:has(#menu-controller:checked) {
   scroll-behavior: smooth;
 }
 
+.whitespace-nowrap {
+  white-space: nowrap;
+}
+
 .\!rounded-md {
   border-radius: 0.375rem !important;
 }
@@ -2251,6 +2283,11 @@ body:has(#menu-controller:checked) {
   border-color: rgb(59 130 246 / var(--tw-border-opacity));
 }
 
+.border-gray-500 {
+  --tw-border-opacity: 1;
+  border-color: rgb(107 114 128 / var(--tw-border-opacity));
+}
+
 .border-green-500 {
   --tw-border-opacity: 1;
   border-color: rgb(34 197 94 / var(--tw-border-opacity));
@@ -2362,6 +2399,11 @@ body:has(#menu-controller:checked) {
   background-color: rgba(var(--color-primary-600), var(--tw-bg-opacity));
 }
 
+.bg-purple-500 {
+  --tw-bg-opacity: 1;
+  background-color: rgba(var(--color-primary-500), var(--tw-bg-opacity));
+}
+
 .bg-red-600 {
   --tw-bg-opacity: 1;
   background-color: rgb(220 38 38 / var(--tw-bg-opacity));
@@ -2476,6 +2518,11 @@ body:has(#menu-controller:checked) {
   padding-bottom: 1rem;
 }
 
+.pe-10 {
+  -webkit-padding-end: 2.5rem;
+  padding-inline-end: 2.5rem;
+}
+
 .pe-2 {
   -webkit-padding-end: 0.5rem;
   padding-inline-end: 0.5rem;
@@ -2526,6 +2573,14 @@ body:has(#menu-controller:checked) {
   text-align: right;
 }
 
+.text-start {
+  text-align: start;
+}
+
+.text-end {
+  text-align: end;
+}
+
 .align-top {
   vertical-align: top;
 }
@@ -2534,6 +2589,10 @@ body:has(#menu-controller:checked) {
   vertical-align: text-bottom;
 }
 
+.font-sans {
+  font-family: ui-sans-serif, system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
+}
+
 .text-2xl {
   font-size: 1.5rem;
   line-height: 2rem;
@@ -2622,6 +2681,11 @@ body:has(#menu-controller:checked) {
   color: rgba(var(--color-neutral), var(--tw-text-opacity)) !important;
 }
 
+.text-gray-300 {
+  --tw-text-opacity: 1;
+  color: rgb(209 213 219 / var(--tw-text-opacity));
+}
+
 .text-neutral-400 {
   --tw-text-opacity: 1;
   color: rgba(var(--color-neutral-400), var(--tw-text-opacity));
@@ -2667,6 +2731,15 @@ body:has(#menu-controller:checked) {
   color: rgba(var(--color-primary-700), var(--tw-text-opacity));
 }
 
+.text-white {
+  --tw-text-opacity: 1;
+  color: rgb(255 255 255 / var(--tw-text-opacity));
+}
+
 .underline {
   text-decoration-line: underline;
 }
+
+.\!no-underline {
+  text-decoration-line: none !important;
+}
@@ -2683,6 +2756,10 @@ body:has(#menu-controller:checked) {
   opacity: 0;
 }
 
+.opacity-50 {
+  opacity: 0.5;
+}
+
 .shadow {
   --tw-shadow: 0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1);
   --tw-shadow-colored: 0 1px 3px 0 var(--tw-shadow-color), 0 1px 2px -1px var(--tw-shadow-color);
@@ -2695,6 +2772,12 @@ body:has(#menu-controller:checked) {
   box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow);
 }
 
+.ring {
+  --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
+  --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
+  box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
+}
+
 .backdrop-blur {
   --tw-backdrop-blur: blur(8px);
   -webkit-backdrop-filter: var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) var(--tw-backdrop-sepia);
@@ -2894,6 +2977,10 @@ body:has(#menu-controller:checked) {
   outline-color: transparent;
 }
 
+.group:hover .group-hover\:visible {
+  visibility: visible;
+}
+
 .group:hover .group-hover\:-translate-x-\[2px\] {
   --tw-translate-x: -2px;
   transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));
@@ -2929,6 +3016,10 @@ body:has(#menu-controller:checked) {
   opacity: 1;
 }
 
+:is([dir="ltr"] .ltr\:right-0) {
+  right: 0px;
+}
+
 :is([dir="ltr"] .ltr\:mr-14) {
   margin-right: 3.5rem;
 }
@@ -2949,6 +3040,10 @@ body:has(#menu-controller:checked) {
   text-align: right;
 }
 
+:is([dir="rtl"] .rtl\:left-0) {
+  left: 0px;
+}
+
 :is([dir="rtl"] .rtl\:ml-14) {
   margin-left: 3.5rem;
 }
@@ -6,10 +6,11 @@ baseURL = "https://blog.okami101.io"
 languageCode = "en"
 defaultContentLanguage = "en"
 theme = "congo"
 
 title = "Okami101 Blog"
 # copyright = "Copy, _right?_ :thinking_face:"
 
+timeout = "120s"
+
 enableEmoji = true
 enableRobotsTXT = true
 
@@ -1,7 +1,8 @@
-[`ASP.NET Core 7`](https://docs.microsoft.com/aspnet/core/) implementation, following `DDD` principle, implemented with `Hexa architecture` and `CQRS` pattern. [Swashbuckle](https://github.com/domaindrivendev/Swashbuckle.AspNetCore) is used as default main OpenAPI generator that's perfectly integrates into the code.
+[`ASP.NET Core 8`](https://docs.microsoft.com/aspnet/core/) implementation, using minimal APIs, mature since 8.0, following `DDD` principles, implemented with `Hexa architecture` and the `CQRS` pattern. [Swashbuckle](https://github.com/domaindrivendev/Swashbuckle.AspNetCore) is used as the default main OpenAPI generator.
 
 Main packages involved:
 
+* [Carter](https://github.com/CarterCommunity/Carter/) for seamless endpoints grouping
 * [EF Core](https://docs.microsoft.com/ef/) as strongly typed ORM
 * [MediatR](https://github.com/jbogard/MediatR) for easy mediator implementation. It allows strong decoupling between all ASP.NET controllers and the final application, which is cut into small queries and commands
 * [Fluent Validation](https://fluentvalidation.net/) for strongly typed validation
 
@@ -1,4 +1,4 @@
-[`FastAPI`](https://fastapi.tiangolo.com/) implementation under last `Python 3.11` with [Pipenv](https://pypi.org/project/pipenv/) as package manager.
+[`FastAPI`](https://fastapi.tiangolo.com/) implementation under the latest `Python 3.12` with [Poetry](https://python-poetry.org/) as package manager.
 
 It's based on [pydantic](https://pydantic-docs.helpmanual.io/), an essential component that allows proper OpenAPI generation and data validation while bringing advanced type hints.
 
@@ -8,7 +8,6 @@ Main packages involved:
 * [SQLAlchemy 2](https://www.sqlalchemy.org/) with [Alembic](https://alembic.sqlalchemy.org/en/latest/) for schema migration
 * [python-jose](https://github.com/mpdavis/python-jose) as JWT implementation
 * [Faker](https://faker.readthedocs.io/en/master/) as dummy data generator
-* [autoflake](https://pypi.org/project/autoflake/) and [isort](https://pycqa.github.io/isort/) for clean imports
-* [Flake8](https://flake8.pycqa.org/en/latest/) and [Black](https://black.readthedocs.io/en/stable/) as respective code linter and powerful code formatter
+* [Ruff](https://docs.astral.sh/ruff/) as an extremely fast linter and code formatter written in Rust, a perfect drop-in replacement for flake8, isort and black
 * [mypy](http://mypy-lang.org/) as advanced static analyzer
 * [pytest](https://docs.pytest.org) as main test framework
 
@@ -1,4 +1,4 @@
-[`Laravel 10`](https://laravel.com/) implementation on `PHP 8.2` with extensive usage of last attributes support. The particularity of this framework is to give you almost of all you need for quickly develop any complex application. So minimal external packages need.
+[`Laravel 10`](https://laravel.com/) implementation on `PHP 8.3` with extensive usage of the latest attributes support. The particularity of this framework is to give you almost all you need to quickly develop any complex application, so minimal external packages are needed.
 
 I obviously made usage of **Eloquent** as a very expressive **Active Record** ORM, and the Laravel factories system based on [PHP Faker](https://fakerphp.github.io/) is already perfect for dummy data generation.
 
@@ -1,4 +1,4 @@
-[`NestJS 9`](https://nestjs.com/) implementation under `NodeJS` using [`Typescript`](https://www.typescriptlang.org/) and [`pnpm`](https://pnpm.io/) as fast package manager. It relies by default on [`express`](https://github.com/expressjs/express) as NodeJS HTTP server implementation. NestJS offers a nice OpenAPI documentation generator thanks to Typescript which provides strong typing.
+[`NestJS 10`](https://nestjs.com/) implementation under `Node.js 20` using [`Typescript 5`](https://www.typescriptlang.org/) and [`pnpm`](https://pnpm.io/) as fast package manager. It relies by default on [`express`](https://github.com/expressjs/express) as the Node.js HTTP server implementation. NestJS offers a nice OpenAPI documentation generator thanks to Typescript, which provides strong typing.
 
 Main packages involved:
 
@@ -1,4 +1,4 @@
-[`Spring Boot 3`](https://spring.io/projects/spring-boot) implementation using `Gradle 8` & `Java 17+`. Similar to the [official Spring Boot implementation](https://github.com/gothinkster/spring-boot-realworld-example-app) but with usage of `Spring Data JPA` instead of `MyBatis`. [Here is another nice one](https://github.com/raeperd/realworld-springboot-java) that explicitly follows `DDD`.
+[`Spring Boot 3.2`](https://spring.io/projects/spring-boot) implementation using `Gradle 8` & `Java 21`. Similar to the [official Spring Boot implementation](https://github.com/gothinkster/spring-boot-realworld-example-app) but with usage of `Spring Data JPA` instead of `MyBatis`. [Here is another nice one](https://github.com/raeperd/realworld-springboot-java) that explicitly follows `DDD`.
 
 Main packages involved:
 
@@ -1,9 +1,10 @@
-[`Symfony 6.3`](https://symfony.com/) implementation on `PHP 8.2` that supports PHP 8 attributes. I excluded the usage of [API Platform](https://api-platform.com/) here, which is a very powerful API crud generator but really not well suited for real customized API in my taste.
+[`Symfony 7`](https://symfony.com/) implementation on `PHP 8.3` that supports PHP 8 attributes, using [API Platform](https://api-platform.com/).
 
 Contrary to Laravel, the usage of the **DataMapper** pattern ORM involves classic POPO models. The additional usage of plain PHP DTO classes facilitates the OpenAPI spec models generation without writing all schemas by hand. On the downside, the Nelmio package is far more verbose than the Laravel OpenAPI version.
 
 Main packages involved:
 
+* [API Platform](https://api-platform.com/) as API framework
 * [Doctrine](https://www.doctrine-project.org/) as **DataMapper** ORM
 * [SensioFrameworkExtraBundle](https://github.com/sensiolabs/SensioFrameworkExtraBundle) for ParamConverter helper with Doctrine
 * [FOSRestBundle](https://github.com/FriendsOfSymfony/FOSRestBundle) only for some helpers as DTO automatic converters and validation
 
@@ -345,7 +345,7 @@ Set proper `GF_DATABASE_PASSWORD` and deploy. Database migration should be autom
 
 ### Docker Swarm dashboard
 
-For best show-case scenario of Grafana, let's import an [existing dashboard](https://grafana.com/grafana/dashboards/11939) suited for complete Swarm monitor overview.
+For the best show-case scenario of Grafana, let's import an [existing dashboard](https://grafana.com/dashboards/11939) suited for a complete Swarm monitoring overview.
 
 First we need to add Prometheus as the main metrics data source. Go to the *Configuration > Data source* menu and click on *Add data source*. Select Prometheus and set the internal docker prometheus URL, which should be `http://prometheus:9090`. A successful message should appear when saving.
 
@@ -77,6 +77,7 @@ Here are the pros and cons of each module:
 
 |                          | [Kube Hetzner](https://registry.terraform.io/modules/kube-hetzner/kube-hetzner/hcloud/latest) | [Okami101 K3s](https://registry.terraform.io/modules/okami101/k3s) |
 | ------------------------ | ---------------------------------------------------------------------------------------------- | -------------------------------------------------------------------- |
 | **Support**              | Strong community | Just intended as a reusable starter-kit |
+| **CNI support**          | Choice between Flannel, Cilium, Calico | Flannel only, while supporting network encryption with the `enable_wireguard` variable; set `flannel-backend` to `none` if installing another CNI |
 | **Included helms**       | Traefik, Longhorn, Cert Manager, Kured | None, just the K3s initial setup, as it's generally preferable to manage these Helm dependencies in a separate Terraform project, allowing easier upgrades |
 | **Hetzner integration**  | Complete, uses [Hcloud Controller](https://github.com/hetznercloud/hcloud-cloud-controller-manager) internally, allowing dynamic load balancing, autoscaling, cleaner node deletion | Basic, public load balancer is statically managed by the nodepool configuration, no autoscaling support |
 | **OS**                   | openSUSE MicroOS, optimized for container workloads | Debian 11 or Ubuntu 22.04 |
@@ -86,6 +87,7 @@ Here are the pros and cons of each module:
 | **Upgrade**              | You may need to follow new versions regularly | As a simple starter-kit, no need to support all community problems, so very few updates |
 | **Quality**              | Uses many hacks to satisfy all community needs, plenty of remote-exec and file provisioners, which is not recommended by HashiCorp themselves | Uses standard **cloud-config** for initial provisioning, then **Salt** for cluster OS management |
 | **Security**             | Needs an SSH private key because of local provisioners, and SSH port opened on every node | Requires only a public SSH key, minimizes opened SSH ports to only controllers, use SSH jump from a controller to access any internal worker node |
+| **Bastion**              | No real bastion support | Dedicated bastion host support with preinstalled WireGuard VPN, ideal for internal access to critical services like Kube API, Longhorn, etc. |
 | **Reusability**          | Vendor locked to Hetzner Cloud | Easy to adapt for a different cloud provider as long as it supports **cloud-config** (as 99% of them do) |
 
 So to summarize, choose the Kube Hetzner module if:
 
@@ -171,14 +173,14 @@ module "hcloud_kube" {
 
   k3s_channel = "stable"
 
-  tls_sans = ["cp.kube.rocks"]
-
-  disabled_components = ["traefik"]
   kubelet_args = [
     "eviction-hard=memory.available<250Mi"
   ]
 
-  etcd_s3_backup = {
+  control_planes_custom_config = {
+    tls-sans = ["cp.kube.rocks"]
+    disable  = ["traefik"]
+    etcd-s3 = true
     etcd-s3-endpoint   = "s3.fr-par.scw.cloud"
     etcd-s3-access-key = var.s3_access_key
     etcd-s3-secret-key = var.s3_secret_key
@@ -216,7 +218,7 @@ output "ssh_config" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 #### Explanation
 
@@ -240,7 +242,7 @@ At any case, consider any leak of writeable Hetzner Cloud token as a **Game Over
 3. Sniff any data from the cluster that comes to the compromised server, including secrets, thanks to the new agent.
 4. Get access to remote S3 backups.
 
-In order to mitigate any risk of critical data leak, you may use data encryption whenever is possible. K3s offer it [natively for etcd](https://docs.k3s.io/security/secrets-encryption). Longhorn also offer it [natively for volumes](https://longhorn.io/docs/latest/advanced-resources/security/volume-encryption/) (including backups).
+In order to mitigate any risk of critical data leak, you may use data encryption whenever possible. K3s offers it natively [for etcd](https://docs.k3s.io/security/secrets-encryption) and [for networking using the WireGuard flannel option](https://docs.k3s.io/installation/network-options). Longhorn also offers it [natively for volumes](https://longhorn.io/docs/latest/advanced-resources/security/volume-encryption/) (including backups).
 
 {{</ tab >}}
 {{< tab tabName="Global" >}}
@@ -266,7 +268,7 @@ Why not `debian-12` ? Because it's sadly not yet supported by [Salt project](htt
 
 {{< alert >}}
 The `nfs-common` package is required for Longhorn in order to support RWX volumes.
-{{</ alert >}}
+{{< /alert >}}
 
 `cluster_name` is the node's name prefix and will have the format `{cluster_name}-{pool_name}-{index}`, for example `kube-storage-01`. `cluster_user` is the username UID 1000 for SSH access with sudo rights. The `root` user is disabled for remote access security reasons.
 
@@ -276,17 +278,12 @@ Why not `debian-12` ? Because it's sadly not yet supported by [Salt project](htt
 ```tf
 k3s_channel = "stable"
 
-tls_sans = ["cp.kube.rocks"]
-
-disabled_components = ["traefik"]
 kubelet_args = [
   "eviction-hard=memory.available<250Mi"
 ]
 ```
 
-This is the K3s specific configuration, where you can choose the channel (stable or latest), the TLS SANs, and the kubelet arguments.
-
-I'm disabling included Traefik because we'll use a more flexible official Helm later.
+This is the K3s specific configuration, where you can choose the channel (stable or latest) and the kubelet arguments.
 
 I also prefer to increase the eviction threshold to 250Mi, in order to avoid the OS OOM killer.
 
@@ -294,7 +291,10 @@ I also prefer to increase the eviction threshold to 250Mi, in order to avoid the
 {{< tab tabName="Backup" >}}
 
 ```tf
-etcd_s3_backup = {
+control_planes_custom_config = {
+  tls-sans = ["cp.kube.rocks"]
+  disable  = ["traefik"]
+  etcd-s3 = true
   etcd-s3-endpoint   = "s3.fr-par.scw.cloud"
   etcd-s3-access-key = var.s3_access_key
   etcd-s3-secret-key = var.s3_secret_key
@@ -304,7 +304,11 @@
 }
 ```
 
-This will enable automatic daily backup of etcd database on S3 bucket, which is useful for faster disaster recovery. See the official guide [here](https://docs.k3s.io/datastore/backup-restore).
+Here is some additional configuration specific to K3s servers.
+
+I'm disabling the included Traefik because we'll use the more flexible official Helm chart later.
+
+We're adding automatic daily backup of the etcd database to an S3 bucket, which is useful for faster disaster recovery. See the official guide [here](https://docs.k3s.io/datastore/backup-restore).
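If you want to verify the setup, or take an on-demand snapshot before a risky operation, a minimal sketch from a control plane node (assuming the standard k3s CLI; the S3 settings are picked up from the server configuration above):

```sh
# Sketch: trigger an on-demand etcd snapshot
sudo k3s etcd-snapshot save
```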
 
 {{</ tab >}}
 {{< tab tabName="Cluster" >}}
@@ -354,6 +358,42 @@ Will print the SSH config access after cluster creation.
 {{</ tab >}}
 {{</ tabs >}}
 
+#### ETCD and network encryption by default
+
+You may need to enable etcd and network encryption in order to prevent any data leak in case a server is compromised. You can easily do so by adding the following variables:
+
+{{< highlight host="demo-kube-hcloud" file="kube.tf" >}}
+
+```tf
+module "hcloud_kube" {
+  //...
+  # You need to install the WireGuard package on all nodes
+  server_packages = ["wireguard"]
+
+  control_planes_custom_config = {
+    //...
+    flannel-backend    = "wireguard-native"
+    secrets-encryption = true,
+  }
+
+  //...
+}
+```
+
+{{< /highlight >}}
+
+You can check the etcd encryption status with `sudo k3s secrets-encrypt status`:
+
+```txt
+Encryption Status: Enabled
+Current Rotation Stage: start
+Server Encryption Hashes: All hashes match
+
+Active  Key Type  Name
+------  --------  ----
+ *      AES-CBC   aescbckey
+```
+
 #### Inputs
 
 As input variables, you have the choice to use environment variables or a separate `terraform.tfvars` file.
 
@@ -375,7 +415,7 @@ s3_access_key = "xxx"
 s3_secret_key = "xxx"
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 {{</ tab >}}
 {{< tab tabName="Environment variables" >}}
@@ -440,7 +480,7 @@ Merge above SSH config into your `~/.ssh/config` file, then test the connection
 
 {{< alert >}}
 If you get "Connection refused", it's probably because the server is still in its cloud-init phase. Wait a few minutes and try again. Be sure to have the same public IPs as the ones you whitelisted in the Terraform variables. You can edit them and reapply the Terraform configuration at any moment.
-{{</ alert >}}
+{{< /alert >}}
 
 Before using K3s, let's enable Salt for OS management by typing `sudo salt-key -A -y`. This will accept all pending keys and allow Salt to connect to all nodes. To upgrade all nodes at once, just type `sudo salt '*' pkg.upgrade`.
 
@@ -455,7 +495,7 @@ From the controller, copy `/etc/rancher/k3s/k3s.yaml` on your machine located ou
 {{< alert >}}
 If `~/.kube/config` already exists, you have to properly [merge the config inside it](https://able8.medium.com/how-to-merge-multiple-kubeconfig-files-into-one-36fc987c2e2f). You can use `kubectl config view --flatten` for that.
 Then use `kubectl config use-context kube` to switch to your new cluster.
-{{</ alert >}}
+{{< /alert >}}
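A minimal sketch of that merge, assuming the copied file was saved as `~/.kube/k3s.yaml`:

```sh
# Merge the copied kubeconfig into the main one, then switch context
KUBECONFIG=~/.kube/config:~/.kube/k3s.yaml kubectl config view --flatten > /tmp/config.merged
mv /tmp/config.merged ~/.kube/config
kubectl config use-context kube
```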
 
 Type `kubectl get nodes` and you should see the 2 nodes of your cluster in **Ready** state.
 
@@ -491,7 +531,7 @@ agent_nodepools = [
 ]
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 Then apply the Terraform configuration again. After a few minutes, you should see 2 new nodes in **Ready** state.
 
@@ -505,7 +545,7 @@ kube-worker-03 Ready <none> 25s v1.27.4+k3s1
 
 {{< alert >}}
 You'll have to use `sudo salt-key -A -y` each time you add a new node to the cluster for global OS management.
-{{</ alert >}}
+{{< /alert >}}
 
 #### Deleting workers
 
@@ -515,7 +555,7 @@ To finalize the deletion, delete the node from the cluster with `krm no kube-wor
 
 {{< alert >}}
 If the node has workloads running, you'll have to consider a proper [draining](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) before deleting it.
-{{</ alert >}}
+{{< /alert >}}
 
 ## 1st check ✅
 
@@ -25,7 +25,7 @@ terraform {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 Let's begin with automatic upgrades management.
 
@@ -75,7 +75,7 @@ resource "helm_release" "kubereboot" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 For every `helm_release` resource you'll see in this guide, you may check the latest chart version available. Example for `kured`:
 
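A sketch of that check with the Helm CLI, assuming the kured chart repository URL:

```sh
# List available chart versions for kured
helm repo add kubereboot https://kubereboot.github.io/charts
helm search repo kubereboot/kured --versions | head
```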
@@ -100,7 +100,7 @@ However, as Terraform doesn't offer a proper way to apply a remote multi-documen
 
 {{< alert >}}
 Don't push yourself to be fully 100% GitOps everywhere if the remedy gives far more code complexity. Sometimes simple documentation of manual steps in a README is better.
-{{</ alert >}}
+{{< /alert >}}
 
 ```sh
 # installing system-upgrade-controller
@@ -187,11 +187,11 @@ resource "kubernetes_manifest" "agent_plan" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 {{< alert >}}
 You may set the same channel as the previous step for hcloud cluster creation.
-{{</ alert >}}
+{{< /alert >}}
 
 ## External access
 
@@ -259,7 +259,7 @@ resource "helm_release" "traefik" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 `ports.web.redirectTo` will redirect all HTTP traffic to HTTPS.
 
@@ -317,14 +317,14 @@ resource "hcloud_load_balancer_service" "https_service" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 Use `hcloud load-balancer-type list` to get the list of available load balancer types.
 
 {{< alert >}}
 Don't forget to add a `hcloud_load_balancer_service` resource for each service (aka port) you want to serve.
 We use the `tcp` protocol as Traefik will handle SSL termination. Set `proxyprotocol` to true to allow Traefik to get the real IP of clients.
-{{</ alert >}}
+{{< /alert >}}
 
 Once applied, use `hcloud load-balancer list` to get the public IP of the load balancer and try to curl it. You should be properly redirected to HTTPS and get a certificate error. It's time to get SSL certificates.
 
@@ -362,12 +362,12 @@ resource "helm_release" "cert_manager" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 {{< alert >}}
 You can use the `installCRDs` option to install CRDs automatically. But uninstalling cert-manager will delete all associated resources, including generated certificates. That's why I generally prefer to install CRDs manually.
 As always we enable `prometheus.servicemonitor.enabled` to allow Prometheus to scrape cert-manager metrics.
-{{</ alert >}}
+{{< /alert >}}
 
 All should be ok with `kg deploy -n cert-manager`.
 
@@ -377,7 +377,7 @@ We'll use [DNS01 challenge](https://cert-manager.io/docs/configuration/acme/dns0
 
 {{< alert >}}
 You may use any DNS provider supported by cert-manager. Check the [list of supported providers](https://cert-manager.io/docs/configuration/acme/dns01/#supported-dns01-providers). As cert-manager is highly extensible, you can easily create your own provider with some effort. Check [available contrib webhooks](https://cert-manager.io/docs/configuration/acme/dns01/#webhook).
-{{</ alert >}}
+{{< /alert >}}
 
 First prepare variables and set them accordingly:
 
@@ -398,7 +398,7 @@ variable "dns_api_token" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 {{< highlight host="demo-kube-k3s" file="terraform.tfvars" >}}
 
@@ -408,7 +408,7 @@ domain = "kube.rocks"
 dns_api_token = "xxx"
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 Then we need to create a default `Certificate` k8s resource associated to a valid `ClusterIssuer` resource that will manage its generation. Apply the following Terraform code for issuing the new wildcard certificate for your domain.
 
@@ -484,12 +484,12 @@ resource "kubernetes_manifest" "tls_certificate" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 {{< alert >}}
 You can set `acme.privateKeySecretRef.name` to **letsencrypt-staging** for testing purposes and avoid wasting LE quota.
 Set `privateKey.rotationPolicy` to `Always` to ensure that the certificate will be [renewed automatically](https://cert-manager.io/docs/usage/certificate/) 30 days before expiry, without downtime.
-{{</ alert >}}
+{{< /alert >}}
 
 In the meantime, go to your DNS provider and add a new `*.kube.rocks` entry pointing to the load balancer IP.
 
@@ -530,7 +530,7 @@ resource "null_resource" "encrypted_admin_password" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 {{< highlight host="demo-kube-k3s" file="terraform.tfvars" >}}
 
@@ -540,11 +540,11 @@ http_password = "xxx"
 whitelisted_ips = ["82.82.82.82"]
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 {{< alert >}}
 Note on `encrypted_admin_password`: we generate a bcrypt hash of the password, compatible with HTTP basic auth, and keep the original to avoid regenerating it each time.
-{{</ alert >}}
+{{< /alert >}}
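For intuition, a sketch of what that hash looks like, generated here with `htpasswd` from apache2-utils (Terraform's built-in `bcrypt()` would also work, but it produces a new salt, and thus a new hash, on every run, which is exactly why the original is kept):

```sh
# Generate a bcrypt hash suitable for HTTP basic auth
htpasswd -nbB admin 'xxx'
# admin:$2y$05$...  (fresh salt, so a different hash, on each invocation)
```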
 
 Then apply the following Terraform code:
 
@@ -619,7 +619,7 @@ resource "kubernetes_manifest" "traefik_middleware_ip" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 Now go to `https://traefik.kube.rocks` and you should be asked for credentials. After login, you should see the dashboard.
 
@@ -651,7 +651,7 @@ resource "kubernetes_manifest" "traefik_middleware_ip" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 In the case of Cloudflare, you may also need to trust the [Cloudflare IP ranges](https://www.cloudflare.com/ips-v4) in addition to the Hetzner load balancer. Just set `ports.websecure.forwardedHeaders.trustedIPs` and `ports.websecure.proxyProtocol.trustedIPs` accordingly.
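A quick way to get the current ranges for that variable:

```sh
# Fetch Cloudflare's published IPv4 ranges
curl -s https://www.cloudflare.com/ips-v4
```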
 
@@ -664,7 +664,7 @@ variable "cloudflare_ips" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 {{< highlight host="demo-kube-k3s" file="traefik.tf" >}}
 
@@ -688,7 +688,7 @@ resource "helm_release" "traefik" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 Or for testing purposes set `ports.websecure.forwardedHeaders.insecure` and `ports.websecure.proxyProtocol.insecure` to true.
 
@@ -19,7 +19,7 @@ In Kubernetes world, the most difficult while essential part is probably the sto
 If you are not familiar with Kubernetes storage, you must at least be aware of the pros and cons of `RWO` and `RWX` volumes when creating `PVC`.
-In general `RWO` is more performant, but only one pod can mount it, while `RWX` is slower, but allow sharing between multiple pods.
+`RWO` is a single-node volume, and `RWX` is a shared volume between multiple nodes.
-{{</ alert >}}
+{{< /alert >}}
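To make the distinction concrete, a minimal sketch of where this choice lives on a `PVC` (resource names are illustrative):

```sh
# accessModes is where RWO vs RWX is declared
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-claim
spec:
  accessModes: ["ReadWriteOnce"]  # RWO; use ["ReadWriteMany"] for RWX
  resources:
    requests:
      storage: 1Gi
EOF
```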
 `K3s` comes with a built-in `local-path` provisioner, which is the most performant `RWO` solution by directly using local NVMe SSD. But it's neither resilient nor scalable. I think it's a good solution for anything you consider non-critical data.
 
@@ -126,7 +126,7 @@ The volume is of course automatically mounted on each node reboot, it's done via
 
 {{< alert >}}
 Note that if you create the volume at the same time as the node pool, Hetzner doesn't seem to mount it automatically. So it's preferable to create the node pool first, then add the volume as soon as the node is in ready state. You can always detach / re-attach volumes manually through the UI, which will force a proper remount.
-{{</ alert >}}
+{{< /alert >}}
 
 ### Longhorn variables
 
@@ -254,7 +254,7 @@ resource "helm_release" "longhorn" {
 Set both `persistence.defaultClassReplicaCount` (used for the Kubernetes longhorn storage class) and `defaultSettings.defaultReplicaCount` (for volumes created from the UI) to 2, as we have 2 storage nodes.
 The toleration is required to allow Longhorn pods (managers and drivers) to be scheduled on storage nodes in addition to workers.
 Note that we need to have Longhorn deployed on workers too; otherwise pods scheduled on these nodes can't be attached to Longhorn volumes.
-{{</ alert >}}
+{{< /alert >}}
 
 Use `kgpo -n longhorn-system -o wide` to check that Longhorn pods are correctly running on storage nodes as well as worker nodes. You should have `instance-manager` deployed on each node.
 
@@ -342,7 +342,7 @@ resource "kubernetes_manifest" "longhorn_ingress" {
 {{< alert >}}
 It's vital that you have at least IP and AUTH middlewares with a strong password for Longhorn UI access, as it concerns the most critical part of the cluster.
 Of course, you can skip this ingress and directly use `kpf svc/longhorn-frontend -n longhorn-system 8000:80` to access the Longhorn UI securely.
-{{</ alert >}}
+{{< /alert >}}
 
 ### Nodes and volumes configuration
 
@@ -386,6 +386,7 @@ resource "kubernetes_storage_class_v1" "longhorn_fast" {
     fromBackup   = ""
     fsType       = "ext4"
     diskSelector = "fast"
+    dataLocality = "strict-local"
   }
 }
 ```
@@ -576,11 +577,11 @@ resource "helm_release" "postgresql" {
 }
 ```
 
-{{</ highlight >}}
+{{< /highlight >}}
 
 {{< alert >}}
 Don't forget to use fast storage by setting `primary.persistence.storageClass` and `readReplicas.persistence.storageClass` accordingly.
-{{</ alert >}}
+{{< /alert >}}
 
 Now check that PostgreSQL pods are correctly running on storage nodes with `kgpo -n postgres -o wide`.
 
@@ -594,7 +595,7 @@ And that's it, we have replicated PostgreSQL cluster ready to use ! Go to longho
 
 ## Redis cluster
 
-After PostgreSQL, set up a master/slave redis is a piece of cake. You may prefer [redis cluster](https://redis.io/docs/management/scaling/) by using [Bitnami redis cluster](https://artifacthub.io/packages/helm/bitnami/redis-cluster), but it [doesn't work](https://github.com/bitnami/charts/issues/12901) at the time of writing this guide.
+After PostgreSQL, setting up a Redis cluster is a piece of cake. Let's use [Bitnami redis](https://artifacthub.io/packages/helm/bitnami/redis) with [Sentinel](https://redis.io/docs/management/sentinel/).
 
 ### Redis variables
 
@@ -640,17 +641,12 @@ resource "kubernetes_secret_v1" "redis_auth" {
 
 resource "helm_release" "redis" {
   chart      = "redis"
-  version    = "17.15.6"
+  version    = "18.0.2"
   repository = "https://charts.bitnami.com/bitnami"
 
   name      = "redis"
   namespace = kubernetes_namespace_v1.redis.metadata[0].name
 
-  set {
-    name  = "architecture"
-    value = "standalone"
-  }
-
   set {
     name  = "auth.existingSecret"
     value = kubernetes_secret_v1.redis_auth.metadata[0].name
@@ -672,67 +668,25 @@ resource "helm_release" "redis" {
   }
 
   set {
-    name  = "master.tolerations[0].key"
-    value = "node-role.kubernetes.io/storage"
-  }
-
-  set {
-    name  = "master.tolerations[0].effect"
-    value = "NoSchedule"
-  }
-
-  set {
-    name  = "master.nodeSelector.node-role\\.kubernetes\\.io/primary"
-    type  = "string"
+    name  = "sentinel.enabled"
     value = "true"
   }
 
   set {
-    name  = "master.persistence.size"
-    value = "10Gi"
-  }
-
-  set {
-    name  = "master.persistence.storageClass"
-    value = "longhorn-fast"
+    name  = "replica.persistence.enabled"
+    value = "false"
   }
 
   set {
     name  = "replica.replicaCount"
-    value = "1"
-  }
-
-  set {
-    name  = "replica.tolerations[0].key"
-    value = "node-role.kubernetes.io/storage"
-  }
-
-  set {
-    name  = "replica.tolerations[0].effect"
-    value = "NoSchedule"
-  }
-
-  set {
-    name  = "replica.nodeSelector.node-role\\.kubernetes\\.io/read"
-    type  = "string"
-    value = "true"
-  }
-
-  set {
-    name  = "replica.persistence.size"
-    value = "10Gi"
-  }
-
-  set {
-    name  = "replica.persistence.storageClass"
-    value = "longhorn-fast"
+    value = "3"
   }
 }
 ```
 
 {{< /highlight >}}
 
-And that's it, job done ! Always check that Redis pods are correctly running on storage nodes with `kgpo -n redis -o wide` and volumes are ready on Longhorn.
+And that's it, job done ! Check that all 3 Redis nodes are correctly running on worker nodes with `kgpo -n redis -o wide`. Thanks to Sentinel, Redis is highly available and resilient.
 
 ## Backups
 
@@ -805,7 +759,7 @@ Configure this variable according to your needs.
 If you need some regular dump of your database without requiring a dedicated Kubernetes `CronJob`, you can simply use the following crontab line on the control plane node:
 
 ```sh
-0 */8 * * * root /usr/local/bin/k3s kubectl exec sts/postgresql-primary -n postgres -- /bin/sh -c 'PGUSER="okami" PGPASSWORD="$POSTGRES_PASSWORD" pg_dumpall -c | gzip > /bitnami/postgresql/dump_$(date "+\%H")h.sql.gz'
+0 */8 * * * root /usr/local/bin/k3s kubectl exec sts/postgresql-primary -n postgres -- /bin/sh -c 'PGUSER="okami" PGPASSWORD="$POSTGRES_PASSWORD" pg_dumpall -c --if-exists | gzip > /bitnami/postgresql/dump_$(date "+\%H")h.sql.gz'
 ```
 
 It will generate 3 daily dumps, one every 8 hours, on the same primary db volume, allowing easy `psql` restore from the same container.
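A sketch of such a restore, assuming one of the generated dump files (here the 08h one):

```sh
# Restore a dump from inside the same container
kubectl exec -it sts/postgresql-primary -n postgres -- /bin/sh -c \
  'gunzip -c /bitnami/postgresql/dump_08h.sql.gz | PGUSER="okami" PGPASSWORD="$POSTGRES_PASSWORD" psql'
```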
@@ -159,6 +159,10 @@ Important notes:
 
 * As we don't set any storage class, the default one will be used, which is `local-path` when using K3s. If you want to use Longhorn instead and benefit from automatic monitoring backup, you can set it with `...volumeClaimTemplate.spec.storageClassName`. But don't forget to deploy the Longhorn manager by adding the monitor toleration.
 * As it's a huge chart, I want to minimize dependencies by disabling Grafana, as I prefer to manage it separately. However, in this case we may set `grafana.forceDeployDatasources` and `grafana.forceDeployDashboards` to `true` in order to benefit from all included Kubernetes dashboards and automatic Prometheus datasource injection, and deploy them to config maps that can be used for the next Grafana install by provisioning.
 
+{{< alert >}}
+As Terraform plans become slower and slower, you can directly apply one single resource by using the `-target` option. For example, to apply only the Prometheus stack, use `terraform apply -target=helm_release.kube_prometheus_stack`. It will save you a lot of time for testing.
+{{< /alert >}}
+
 And finally the ingress for external access:
 
 {{< highlight host="demo-kube-k3s" file="monitoring.tf" >}}
@@ -432,10 +436,6 @@ If you go to `https://grafana.kube.rocks/dashboards`, you should see a many dash
 * Prometheus and Grafana itself stats
 * Flux stats
 
-{{< alert >}}
-Some other core components like etcd, scheduler, proxy, and controller manager need to have metrics enabled to be scraped. See K3s docs or [this issue](https://github.com/k3s-io/k3s/issues/3619)
-{{< /alert >}}
-
 #### Prometheus
 
 [](dashboards-prometheus.png)
@@ -466,7 +466,7 @@ You can easily import some additional dashboards by importing them from Grafana
 
 #### Traefik
 
-[Link](https://grafana.com/grafana/17346)
+[Link](https://grafana.com/dashboards/17346)
 
 [](dashboards-traefik.png)
 
@@ -478,22 +478,52 @@ You can easily import some additional dashboards by importing them from Grafana
 
 #### Longhorn
 
-[Link](https://grafana.com/grafana/16888)
+[Link](https://grafana.com/dashboards/16888)
 
 [](dashboards-longhorn.png)
 
 #### PostgreSQL
 
-[Link](https://grafana.com/grafana/9628)
+[Link](https://grafana.com/dashboards/9628)
 
 [](dashboards-postgresql.png)
 
 #### Redis
 
-[Link](https://grafana.com/grafana/dashboards/763)
+[Link](https://grafana.com/dashboards/763)
 
 [](dashboards-redis.png)
 
+#### Other core components
+
+Some other core components like etcd, scheduler, proxy, and controller manager need to have metrics enabled to be scraped. See K3s docs or [this issue](https://github.com/k3s-io/k3s/issues/3619).
+
+From the Terraform Hcloud project, use `control_planes_custom_config` to expose all remaining metrics endpoints:
+
+{{< highlight host="demo-kube-hcloud" file="kube.tf" >}}
+
+```tf
+module "hcloud_kube" {
+  //...
+
+  control_planes_custom_config = {
+    //...
+    etcd-expose-metrics         = true,
+    kube-scheduler-arg          = "bind-address=0.0.0.0",
+    kube-controller-manager-arg = "bind-address=0.0.0.0",
+    kube-proxy-arg              = "metrics-bind-address=0.0.0.0",
+  }
+
+  //...
+}
```
+
+{{< /highlight >}}
+
+{{< alert >}}
+As the above config applies only at cluster initialization, you may instead edit `/etc/rancher/k3s/config.yaml` directly and restart the K3s server.
+{{< /alert >}}
+
 ## Logging
 
 Last but not least, we need to add a logging stack. The most popular one is [Elastic Stack](https://www.elastic.co/elastic-stack), but it's very resource intensive. A more lightweight option is to use [Loki](https://grafana.com/oss/loki/), also part of Grafana Labs.
@@ -571,6 +601,11 @@ resource "helm_release" "loki" {
     value = var.s3_secret_key
   }
 
+  set {
+    name  = "loki.commonConfig.replication_factor"
+    value = "1"
+  }
+
   set {
     name  = "read.replicas"
     value = "1"
@@ -715,107 +750,6 @@ We have nothing more to do, all dashboards are already provided by Loki Helm cha
 
 [](dashboards-loki.png)
 
-## Helm Exporter
-
-We have installed many Helm Charts so far, but how do we manage upgrade plans ? We may need to be aware of new versions and security fixes. For that, we can use Helm Exporter:
-
-{{< highlight host="demo-kube-k3s" file="monitoring.tf" >}}
-
-```tf
-resource "helm_release" "helm_exporter" {
-  chart      = "helm-exporter"
-  version    = "1.2.5+1cbc9c5"
-  repository = "https://shanestarcher.com/helm-charts"
-
-  name      = "helm-exporter"
-  namespace = kubernetes_namespace_v1.monitoring.metadata[0].name
-
-  set {
-    name  = "serviceMonitor.create"
-    value = "true"
-  }
-
-  set {
-    name  = "grafanaDashboard.enabled"
-    value = "true"
-  }
-
-  set {
-    name  = "grafanaDashboard.grafanaDashboard.namespace"
-    value = kubernetes_namespace_v1.monitoring.metadata[0].name
-  }
-
-  values = [
-    file("values/helm-exporter-values.yaml")
-  ]
-}
-```
-
-{{< /highlight >}}
-
-As the helm exporter config is a bit tedious, it's more straightforward to use a separate helm values file. Here is a sample configuration for Helm Exporter for scraping all charts that we'll need:
-
-{{< highlight host="demo-kube-k3s" file="values/helm-exporter-values.yaml" >}}
-
-```yaml
-config:
-  helmRegistries:
-    registryNames:
-      - bitnami
-    override:
-      - registry:
-          url: "https://concourse-charts.storage.googleapis.com"
-        charts:
-          - concourse
-      - registry:
-          url: "https://dl.gitea.io/charts"
-        charts:
-          - gitea
-      - registry:
-          url: "https://grafana.github.io/helm-charts"
-        charts:
-          - grafana
-          - loki
-          - promtail
-          - tempo
-      - registry:
-          url: "https://charts.longhorn.io"
-        charts:
-          - longhorn
-      - registry:
-          url: "https://charts.jetstack.io"
-        charts:
-          - cert-manager
-      - registry:
-          url: "https://traefik.github.io/charts"
-        charts:
-          - traefik
-      - registry:
-          url: "https://bitnami-labs.github.io/sealed-secrets"
-        charts:
-          - sealed-secrets
-      - registry:
-          url: "https://prometheus-community.github.io/helm-charts"
-        charts:
-          - kube-prometheus-stack
-      - registry:
-          url: "https://SonarSource.github.io/helm-chart-sonarqube"
-        charts:
-          - sonarqube
-      - registry:
-          url: "https://kubereboot.github.io/charts"
-        charts:
-          - kured
-      - registry:
-          url: "https://shanestarcher.com/helm-charts"
-        charts:
-          - helm-exporter
-```
-
-{{< /highlight >}}
-
-You can easily start from the provisioned dashboard and customize it to use `helm_chart_outdated` instead of `helm_chart_info` to list all outdated helms.
-
 ## 5th check ✅
 
 We now have a full monitoring suite with a performant logging collector! That was a pretty massive subject. At this stage, you have a good starting point to run many apps on your cluster with high scalability and observability. We are done for the pure **operational** part. It's finally time to tackle the **building** part for a complete development stack. Go [next part]({{< ref "/posts/16-a-beautiful-gitops-day-6" >}}) to begin with continuous integration.
 
@@ -62,7 +62,7 @@ Then the Helm chart itself:
 
 ```tf
 locals {
-  redis_connection = "redis://:${urlencode(var.redis_password)}@redis-master.redis:6379/0"
+  redis_connection = "redis://:${urlencode(var.redis_password)}@redis.redis:6379/0"
 }
 
 resource "kubernetes_namespace_v1" "gitea" {
@@ -303,7 +303,7 @@ You should be able to log in `https://gitea.kube.rocks` with chosen admin creden
 
 ### Push a basic Web API project
 
-Let's generate a basic .NET Web API project. Create a new dotnet project like following (you may install [last .NET SDK](https://dotnet.microsoft.com/en-us/download)):
+Let's generate a basic .NET Web API project. Create a new .NET 8 project like the following (you may need to install the [.NET 8 SDK](https://dotnet.microsoft.com/en-us/download)):
 
 ```sh
 mkdir kuberocks-demo
@@ -311,7 +311,7 @@ cd kuberocks-demo
 dotnet new sln
 dotnet new gitignore
 dotnet new editorconfig
-dotnet new webapi -o src/KubeRocks.WebApi
+dotnet new webapi -o src/KubeRocks.WebApi --use-controllers
 dotnet sln add src/KubeRocks.WebApi
 git init
 git add .
@@ -414,7 +414,7 @@ Now retry pull again and it should work seamlessly !
 
 ### Gitea monitoring
 
-[Link](https://grafana.com/grafana/dashboards/17802)
+[Link](https://grafana.com/dashboards/17802)
 
 [](gitea-monitoring.png)
 
@@ -193,7 +193,7 @@ Firstly create following files in root of your repo that we'll use for building
 {{< highlight host="kuberocks-demo" file="Dockerfile" >}}
 
 ```Dockerfile
-FROM mcr.microsoft.com/dotnet/aspnet:7.0
+FROM mcr.microsoft.com/dotnet/aspnet:8.0
 
 WORKDIR /publish
 COPY /publish .
@@ -253,7 +253,7 @@ jobs:
         type: registry-image
         source:
           repository: mcr.microsoft.com/dotnet/sdk
-          tag: "7.0"
+          tag: "8.0"
       inputs:
         - name: source-code
           path: .
@@ -120,8 +120,9 @@ The last step but not least for a total integration with our monitored Kubernete
 Installing minimal ASP.NET Core metrics is really a no-brainer:
 
 ```sh
-dotnet add src/KubeRocks.WebApi package OpenTelemetry.AutoInstrumentation --prerelease
+dotnet add src/KubeRocks.WebApi package OpenTelemetry.Extensions.Hosting --prerelease
+dotnet add src/KubeRocks.WebApi package OpenTelemetry.Instrumentation.AspNetCore --prerelease
 dotnet add src/KubeRocks.WebApi package OpenTelemetry.Instrumentation.EntityFrameworkCore --prerelease
 dotnet add src/KubeRocks.WebApi package OpenTelemetry.Exporter.Prometheus.AspNetCore --prerelease
 ```
 
@@ -135,7 +136,11 @@ builder.Services.AddOpenTelemetry()
     {
         b
             .AddAspNetCoreInstrumentation()
-            .AddPrometheusExporter();
+            .AddPrometheusExporter()
+            .AddMeter(
+                "Microsoft.AspNetCore.Hosting",
+                "Microsoft.AspNetCore.Server.Kestrel"
+            );
     });
 
 var app = builder.Build();
@@ -149,9 +154,7 @@ app.UseOpenTelemetryPrometheusScrapingEndpoint();
 
 Relaunch the app and go to `https://demo.kube.rocks/metrics` to confirm it's working. It should show metrics after each endpoint call; simply try `https://demo.kube.rocks/Articles`.
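A quick check from a terminal:

```sh
# Verify the Prometheus scraping endpoint responds
curl -s https://demo.kube.rocks/metrics | head -n 20
```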
 
-{{< alert >}}
-.NET metrics are currently pretty basic, but the next .NET 8 version will provide far better metrics from internal components allowing some [useful dashboard](https://github.com/JamesNK/aspnetcore-grafana).
-{{< /alert >}}
+Now you can easily import ASP.NET [specific grafana dashboards](https://github.com/dotnet/aspire/tree/main/src/Grafana) for visualizing.
 
 #### Hide internal endpoints
 
@@ -344,6 +347,7 @@ Use the *Test* button on `https://grafana.kube.rocks/connections/datasources/edi
 First, let's add another instrumentation package, specialized for the Npgsql driver used by EF Core, to translate queries to PostgreSQL:
 
 ```sh
 dotnet add src/KubeRocks.WebApi package OpenTelemetry.Exporter.OpenTelemetryProtocol --prerelease
+dotnet add src/KubeRocks.WebApi package Npgsql.OpenTelemetry
 ```
 
@@ -19,105 +19,109 @@ SonarQube is leading the code metrics industry for a long time, embracing full O
 
 SonarQube has its dedicated Helm chart, which is perfect for us. However, it's the most resource-hungry component of our development stack so far (because it's built with Java ? End of troll), so be sure to deploy it on an almost empty free node (which should be ok with 3 workers), maybe a dedicated one. In fact, it's the last Helm chart for this tutorial, I promise!
 
-Create dedicated database for SonarQube same as usual.
+Create a dedicated database for SonarQube same as usual, then we can use Flux for deployment.
 
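A sketch of that "usual" database step, following the psql pattern from earlier parts (user name and password are placeholders):

```sh
# Create the dedicated SonarQube user and database on the primary
kubectl exec -it sts/postgresql-primary -n postgres -- /bin/sh -c \
  "PGUSER=okami PGPASSWORD=\$POSTGRES_PASSWORD psql \
    -c \"CREATE USER sonarqube WITH PASSWORD 'xxx';\" \
    -c 'CREATE DATABASE sonarqube OWNER sonarqube;'"
```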
{{< highlight host="demo-kube-k3s" file="main.tf" >}}
|
||||
{{< highlight host="demo-kube-flux" file="clusters/demo/sonarqube/deploy-sonarqube.yaml" >}}
|
||||
|
||||
```tf
|
||||
variable "sonarqube_db_password" {
|
||||
type = string
|
||||
sensitive = true
|
||||
}
|
||||
```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: sonarqube
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmRepository
metadata:
  name: sonarqube
  namespace: sonarqube
spec:
  interval: 1h0m0s
  url: https://SonarSource.github.io/helm-chart-sonarqube
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: sonarqube
  namespace: sonarqube
spec:
  chart:
    spec:
      chart: sonarqube
      reconcileStrategy: ChartVersion
      sourceRef:
        kind: HelmRepository
        name: sonarqube
      version: ">=10.0.0"
  interval: 1m
  releaseName: sonarqube
  targetNamespace: sonarqube
  values:
    resources:
      limits:
        cpu: 1000m
        memory: 2Gi
      requests:
        cpu: 500m
        memory: 2Gi

    prometheusMonitoring:
      podMonitor:
        enabled: true
        namespace: sonarqube

    monitoringPasscode: null
    monitoringPasscodeSecretName: sonarqube-secret
    monitoringPasscodeSecretKey: monitoring-passcode

    jdbcOverwrite:
      enable: true
      jdbcUrl: jdbc:postgresql://postgresql-primary.postgres/sonarqube
      jdbcUsername: sonarqube
      jdbcSecretName: sonarqube-secret
      jdbcSecretPasswordKey: db-password

    postgresql:
      enabled: false
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: sonarqube
  namespace: sonarqube
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`sonarqube.kube.rocks`)
      kind: Rule
      services:
        - name: sonarqube-sonarqube
          port: http
```
{{< /highlight >}}
{{< highlight host="demo-kube-k3s" file="terraform.tfvars" >}}
Here are the secrets to adapt to your needs:
```tf
sonarqube_db_password = "xxx"
{{< highlight host="demo-kube-flux" file="clusters/demo/sonarqube/secret-sonarqube.yaml" >}}
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: sonarqube-secret
  namespace: sonarqube
type: Opaque
data:
  db-password: YWRtaW4=
  monitoring-passcode: YWRtaW4=
```
{{< /highlight >}}
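The `YWRtaW4=` values above are simply `admin` encoded in base64. Generate your own values with:

```sh
echo -n 'my-password' | base64
```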
{{< highlight host="demo-kube-k3s" file="sonarqube.tf" >}}
As seen in part 4 of this guide, seal these secrets with `kubeseal` under `sealed-secret-sonarqube.yaml` and delete the original secret file.
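A minimal sketch of the sealing step, assuming `kubeseal` can reach the sealed-secrets controller in your cluster with its default settings:

```sh
# Seal the plain secret, then remove it so only the sealed version is committed
kubeseal --format=yaml \
  < clusters/demo/sonarqube/secret-sonarqube.yaml \
  > clusters/demo/sonarqube/sealed-secret-sonarqube.yaml
rm clusters/demo/sonarqube/secret-sonarqube.yaml
```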
```tf
resource "kubernetes_namespace_v1" "sonarqube" {
  metadata {
    name = "sonarqube"
  }
}

resource "helm_release" "sonarqube" {
  chart      = "sonarqube"
  version    = "10.1.0+628"
  repository = "https://SonarSource.github.io/helm-chart-sonarqube"

  name      = "sonarqube"
  namespace = kubernetes_namespace_v1.sonarqube.metadata[0].name

  set {
    name  = "prometheusMonitoring.podMonitor.enabled"
    value = "true"
  }

  set {
    name  = "postgresql.enabled"
    value = "false"
  }

  set {
    name  = "jdbcOverwrite.enabled"
    value = "true"
  }

  set {
    name  = "jdbcOverwrite.jdbcUrl"
    value = "jdbc:postgresql://postgresql-primary.postgres/sonarqube"
  }

  set {
    name  = "jdbcOverwrite.jdbcUsername"
    value = "sonarqube"
  }

  set {
    name  = "jdbcOverwrite.jdbcPassword"
    value = var.sonarqube_db_password
  }
}

resource "kubernetes_manifest" "sonarqube_ingress" {
  manifest = {
    apiVersion = "traefik.io/v1alpha1"
    kind       = "IngressRoute"
    metadata = {
      name      = "sonarqube"
      namespace = kubernetes_namespace_v1.sonarqube.metadata[0].name
    }
    spec = {
      entryPoints = ["websecure"]
      routes = [
        {
          match = "Host(`sonarqube.${var.domain}`)"
          kind  = "Rule"
          services = [
            {
              name = "sonarqube-sonarqube"
              port = "http"
            }
          ]
        }
      ]
    }
  }
}
```
{{< /highlight >}}
Be sure to disable the PostgreSQL sub-chart and use our self-hosted cluster with both `postgresql.enabled` and `jdbcOverwrite.enabled`. If needed, set proper `tolerations` and `nodeSelector` for deploying on a dedicated node.
Inside the Helm values, be sure to disable the PostgreSQL sub-chart and point to our self-hosted cluster via `postgresql.enabled: false` and `jdbcOverwrite.enable: true`. If needed, set proper `tolerations` and `nodeSelector` for deploying on a dedicated node, as sketched below.
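A hypothetical sketch of such placement values, assuming a node tainted and labeled for SonarQube (the taint and label names are illustrative, not part of this tutorial's setup):

```yaml
tolerations:
  - key: node-role.kubernetes.io/sonarqube # hypothetical taint on the dedicated node
    operator: Exists
    effect: NoSchedule
nodeSelector:
  node-role.kubernetes.io/sonarqube: "true" # hypothetical label
```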
The installation takes several minutes, so be patient. Once done, you can access SonarQube at `https://sonarqube.kube.rocks` and log in with `admin` / `admin`.
@@ -584,6 +588,10 @@ public class ArticlesListTests : TestBase
Ensure all tests pass with `dotnet test`.
{{< alert >}}
You may be interested in [Testcontainers](https://testcontainers.com/) for native container support directly in test code, including parallelism; see the sketch just after this note.
{{< /alert >}}
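A minimal sketch with the `Testcontainers.PostgreSql` package (the image tag is an assumption, not part of this tutorial's setup):

```cs
// Spin up a throwaway PostgreSQL container for the test run
var postgres = new PostgreSqlBuilder()
    .WithImage("postgres:15-alpine") // illustrative image tag
    .Build();

await postgres.StartAsync();

// Point the test DbContext at the container instead of a shared database
var connectionString = postgres.GetConnectionString();
```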
### CI tests & code coverage
Now we need to integrate the tests into our CI pipeline. As we're testing against a real database, create a new `demo_test` database through pgAdmin with basic `test` / `test` credentials.
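If you prefer the CLI over pgAdmin, the equivalent would be something like (connection parameters depend on your own setup):

```sh
# Create the test role and database via psql
psql -h localhost -U postgres -c "CREATE USER test WITH PASSWORD 'test';"
psql -h localhost -U postgres -c "CREATE DATABASE demo_test OWNER test;"
```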
@@ -152,7 +152,7 @@ vus............................: 7 min=7 max=30
vus_max........................: 30 min=30 max=30
```
As we use Prometheus to output the results, we can easily visualize them with Grafana. You just have to import [this dashboard](https://grafana.com/grafana/dashboards/18030-official-k6-test-result/):
As we use Prometheus to output the results, we can easily visualize them with Grafana. You just have to import [this dashboard](https://grafana.com/dashboards/18030):
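For context, this is roughly how the results end up in Prometheus, assuming a recent k6 with the experimental Prometheus remote write output enabled (the server URL is illustrative):

```sh
# Stream k6 metrics to Prometheus via remote write while the test runs
K6_PROMETHEUS_RW_SERVER_URL=http://prometheus.monitoring:9090/api/v1/write \
  k6 run -o experimental-prometheus-rw test.js
```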
[](grafana-k6.png)
1793 content/posts/22-web-api-benchmarks-2024/index.md (new file; diff suppressed because it is too large)
1 content/posts/22-web-api-benchmarks-2024/thumb.svg (new file, 7.7 KiB; diff suppressed because one or more lines are too long)
@@ -6,13 +6,13 @@
apps:
  - name: vue-ts
    title: Vue 3 TS Realworld
    repo: adr1enbe4udou1n/vue-ts-realworld-example-app
    repo: https://gitea.okami101.io/adr1enbe4udou1n/vue-ts-realworld-example-app
    ci: conduit-vue-ts
    demo: https://vuetsrealworld.okami101.io
    color: green
  - name: react-ts
    title: React TS Realworld
    repo: adr1enbe4udou1n/react-ts-realworld-example-app
    repo: https://gitea.okami101.io/adr1enbe4udou1n/react-ts-realworld-example-app
    ci: conduit-react-ts
    demo: https://reacttsrealworld.okami101.io
    color: blue
@@ -22,41 +22,41 @@
apps:
  - name: aspnet-core
    title: ASP.NET Core Realworld
    repo: adr1enbe4udou1n/aspnetcore-realworld-example-app
    repo: https://gitea.okami101.io/adr1enbe4udou1n/aspnetcore-realworld-example-app
    ci: conduit-aspnet-core
    demo: https://aspnetcorerealworld.okami101.io/api

  - name: spring-boot
    title: Spring Boot Realworld
    repo: adr1enbe4udou1n/spring-boot-realworld-example-app
    repo: https://gitea.okami101.io/adr1enbe4udou1n/spring-boot-realworld-example-app
    ci: conduit-spring-boot
    demo: https://springbootrealworld.okami101.io/api
    color: green

  - name: symfony
    title: Symfony Realworld
    repo: adr1enbe4udou1n/symfony-realworld-example-app
    repo: https://gitea.okami101.io/adr1enbe4udou1n/symfony-realworld-example-app
    ci: conduit-symfony
    demo: https://symfonyrealworld.okami101.io/api
    color: black

  - name: laravel
    title: Laravel Realworld
    repo: adr1enbe4udou1n/laravel-realworld-example-app
    repo: https://gitea.okami101.io/adr1enbe4udou1n/laravel-realworld-example-app
    ci: conduit-laravel
    demo: https://laravelrealworld.okami101.io/api
    color: orange

  - name: nestjs
    title: NestJS Realworld
    repo: adr1enbe4udou1n/nestjs-realworld-example-app
    repo: https://gitea.okami101.io/adr1enbe4udou1n/nestjs-realworld-example-app
    ci: conduit-nestjs
    demo: https://nestjsrealworld.okami101.io/api
    color: red

  - name: fastapi
    title: FastAPI Realworld
    repo: adr1enbe4udou1n/fastapi-realworld-example-app
    repo: https://gitea.okami101.io/adr1enbe4udou1n/fastapi-realworld-example-app
    ci: conduit-fastapi
    demo: https://fastapirealworld.okami101.io/api
    color: teal
@@ -69,12 +69,12 @@
  - name: vuetify-admin
    title: Vuetify Admin
    date: 11/2020
    repo: okami101/vuetify-admin
    repo: https://github.com/okami101/vuetify-admin
    demo: https://va-demo.okami101.io/
    docs: https://www.okami101.io/vuetify-admin

  - name: laravel-rad-stack
    title: Laravel RAD Stack
    date: 10/2021
    repo: adr1enbe4udou1n/laravel-rad-stack
    repo: https://github.com/adr1enbe4udou1n/laravel-rad-stack
    demo: https://laravel-rad-stack.okami101.io/
@@ -78,7 +78,7 @@
title="Run on K3s over Hetzner Cloud" />
|
||||
|
||||
<a href="https://concourse.okami101.io/teams/main/pipelines/okami-blog" target="_blank">
|
||||
<img src="https://concourse.okami101.io/api/v1/teams/main/pipelines/okami-blog/badge" />
|
||||
<img src="https://concourse.okami101.io/api/v1/teams/main/pipelines/okami-blog/badge" alt="Blog build status" />
|
||||
</a>
|
||||
</div>
|
||||
<div class="hidden lg:block">
|
||||
|
55 layouts/shortcodes/chart.html (new file)
@@ -0,0 +1,55 @@
<div class="chart">
|
||||
{{ $id := delimit (shuffle (seq 1 9)) "" }}
|
||||
<canvas id="{{ $id }}"></canvas>
|
||||
<script type="text/javascript">
|
||||
window.addEventListener("DOMContentLoaded", (event) => {
|
||||
const ctx = document.getElementById("{{ $id }}");
|
||||
const chart = new Chart(ctx, {
|
||||
{{ if eq (.Get "type") "timeseries" }}
|
||||
type: 'line',
|
||||
options: {
|
||||
plugins: {
|
||||
title: {
|
||||
display: true,
|
||||
text: {{ .Get "title" }},
|
||||
},
|
||||
},
|
||||
scales: {
|
||||
x: {
|
||||
ticks: {
|
||||
autoSkip: true,
|
||||
callback: function(val, index) {
|
||||
return this.getLabelForValue(val) + 's'
|
||||
},
|
||||
}
|
||||
},
|
||||
{{ if .Get "max" }}
|
||||
y: {
|
||||
stacked: {{ .Get "stacked" }},
|
||||
beginAtZero: true,
|
||||
suggestedMax: {{ .Get "max" }},
|
||||
}
|
||||
{{ end }}
|
||||
},
|
||||
},
|
||||
data: {
|
||||
labels: [
|
||||
{{ if .Get "step" }}
|
||||
{{ range seq 0 (.Get "step") 90 }}
|
||||
{{ . }},
|
||||
{{ end }}
|
||||
{{ else }}
|
||||
{{ range seq 0 90 }}
|
||||
{{ . }},
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
],
|
||||
datasets: {{ .Inner | safeJS }}
|
||||
}
|
||||
{{ else }}
|
||||
{{ .Inner | safeJS }}
|
||||
{{ end }}
|
||||
});
|
||||
});
|
||||
</script>
|
||||
</div>
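A hypothetical invocation of this shortcode from a post, with the inner content supplying the Chart.js datasets (all parameter values and data points are illustrative):

```md
{{< chart type="timeseries" title="Req/s" max="4000" stacked="true" step="15" >}}
[
  { label: 'scenario-1', data: [0, 1200, 3400, 3900, 4000, 3800, 600] },
]
{{< /chart >}}
```

With `step="15"`, the template generates x labels `0, 15, …, 90`, so each dataset should supply one point per label.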
@@ -49,8 +49,7 @@
{{ readFile (print "data/works/" .name ".md") | markdownify }}
</div>
<div class="flex justify-center gap-4">
  {{ partial "button.html" (dict "text" (partial "icon.html" "github") "href" (print
  "https://github.com/" .repo) "color" .color) }}
  {{ partial "button.html" (dict "text" (partial "icon.html" "github") "href" .repo "color" .color) }}
  {{ if .ci }}
  {{ partial "button.html" (dict "text" (partial "icon.html" "bug") "href" (print
  "https://concourse.okami101.io/teams/main/pipelines/" .ci) "color" .color) }}
Submodule themes/congo updated: c114943009...110bc3414f