Compare commits
220 Commits
.gitea/workflows/build.yaml (new file, 36 lines)
@@ -0,0 +1,36 @@
+on:
+  push:
+    tags:
+      - "v*"
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: true
+      - uses: peaceiris/actions-hugo@v3
+        with:
+          extended: true
+      - uses: actions/cache@v4
+        with:
+          path: resources
+          key: ${{ runner.os }}-resources
+      - name: Build test
+        run: hugo --minify
+      - uses: docker/metadata-action@v5
+        id: meta
+        with:
+          images: ${{ vars.CONTAINER_REGISTRY_URL }}/${{ gitea.repository }}
+      - uses: docker/login-action@v3
+        with:
+          registry: ${{ vars.CONTAINER_REGISTRY_URL }}
+          username: ${{ vars.CONTAINER_REGISTRY_USERNAME }}
+          password: ${{ secrets.CONTAINER_REGISTRY_PASSWORD }}
+      - uses: docker/build-push-action@v6
+        with:
+          context: .
+          push: true
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
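The workflow above only fires on tags matching `v*`. As a minimal usage sketch (the remote name and tag value are assumptions, not taken from this diff), cutting a release that triggers the build looks like:

```sh
# Assumes the Gitea remote is called "origin"; any tag matching "v*" starts the pipeline.
git tag v1.0.0
git push origin v1.0.0
```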
.gitignore (vendored, 3 lines)
@@ -1,2 +1,3 @@
 node_modules
-resources
+resources
+public
Dockerfile
@@ -1,3 +1,7 @@
 FROM nginx:alpine
 
+RUN sed -i 's/^\(.*\)http {/\1http {\n map_hash_bucket_size 128;\n/' /etc/nginx/nginx.conf
+
+COPY nginx/ /etc/nginx/conf.d/
+
 COPY public /usr/share/nginx/html
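The added `sed` line injects `map_hash_bucket_size 128;` into the `http {}` block so that the long URIs used in the redirect map added later (`nginx/redirects.conf`) fit nginx's map hash table. A hedged local check (the image tag is an illustrative name, and the site must be built into `./public` first):

```sh
# Build the site, build the image, then ask nginx to validate the generated configuration.
hugo --minify
docker build -t blog-nginx .
docker run --rm blog-nginx nginx -t
```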
@@ -52,9 +52,9 @@ Some notes of this blog :
 * Kubernetes infrastructure completely managed with [`Terraform`](https://github.com/adr1enbe4udou1n/terraform-kube-okami) 🌴
 * **HA** setup using **Hetzner LB**, targeting 2 worker nodes, with **Postgres cluster** (managed on same Kubernetes cluster)
 * `Traefik` as reverse proxy, configured for HA 🛣️
-* Source code on my own [`Gitea`](https://gitea.okami101.io/adr1enbe4udou1n/blog)
-* Compiled by my own [`Concourse`](https://concourse.okami101.io) instance as a final docker container image into self-hosted private registry (**CI** 🏗️)
-* Automatically deployed by `Flux CD v2` to the Kubernetes cluster from [central Git source](https://gitea.okami101.io/okami101/flux-source/src/branch/main/okami/deploy-blog.yaml) (**CD** 🚀)
+* Source code on my own [`Gitea`](https://about.gitea.com/)
+* Compiled by my own [`Concourse`](https://concourse-ci.org/) instance as a final docker container image into self-hosted private registry (**CI** 🏗️)
+* Automatically deployed by `Flux CD v2` to the Kubernetes cluster (**CD** 🚀)
 * Tracked with [`Umami`](https://umami.is/) 📈
 
 All above tools are 💯% self-hosted ! Just sadly missing my own Homelab with Proxmox because no fiber 😿
@@ -1,4 +1,4 @@
-[`Laravel 10`](https://laravel.com/) implementation on `PHP 8.3` with extensive usage of last attributes support. The particularity of this framework is to give you almost of all you need for quickly develop any complex application. So minimal external packages need.
+[`Laravel 11`](https://laravel.com/) implementation on `PHP 8.3` with extensive usage of last attributes support. The particularity of this framework is to give you almost of all you need for quickly develop any complex application. So minimal external packages need.
 
 I obviously made usage of **Eloquent** as a very expressive **Active Record** ORM, and the Laravel factories system based on [PHP Faker](https://fakerphp.github.io/) is already perfect for dummy data generator.
 
@@ -8,7 +8,7 @@ Main packages involved :
 
 * [PHP JWT](https://github.com/lcobucci/jwt) as JWT implementation, with proper integration to Laravel using custom guard
 * [Laravel Routes Attribute](https://github.com/spatie/laravel-route-attributes) for Laravel routing that leverage on last PHP 8 attributes feature
-* [Laravel OpenAPI](https://github.com/vyuldashev/laravel-openapi) that also use PHP 8 attributes for API documentation
+* [Laravel OpenAPI](https://github.com/DarkaOnLine/L5-Swagger) that also use PHP 8 attributes for API documentation
 * [Laravel IDE Helper](https://github.com/barryvdh/laravel-ide-helper) for proper IDE integration, perfectly suited for **VS Code** with [Intelephense](https://marketplace.visualstudio.com/items?itemName=bmewburn.vscode-intelephense-client) extension
 * [PHP CS Fixer](https://github.com/FriendsOfPHP/PHP-CS-Fixer) as formatter with Laravel style guide
 * [Larastan](https://github.com/nunomaduro/larastan), a Laravel wrapper of [PHPStan](https://phpstan.org/), as advanced code static analyzer
@@ -12,7 +12,5 @@ Main purpose of this projects is to have personal extensive API training on mult
 * Proper seeder / faker for quick starting with filled DB
 * Separated RW / RO database connections for maximizing performance between these 2 contexts
 * Proper suited QA + production Dockerfile
-* Complete CI on Kubernetes with [Concourse](https://concourse.okami101.io/)
+* Complete CI on Kubernetes with [Concourse CI](https://concourse-ci.org/)
 * Automatic CD on Kubernetes using [Flux](https://fluxcd.io/)
-
-See complete production deployment manifests [here](https://gitea.okami101.io/okami101/flux-source/src/branch/main/conduit), allowing **GitOps** management.
@@ -39,7 +39,7 @@ For better fluidity, here is the expected list of variables you'll need to prepa
 | `s3_bucket` | kuberocks | |
 | `s3_access_key` | xxx | |
 | `s3_secret_key` | xxx | |
-| `smtp_host` | smtp-relay.brevo.com | |
+| `smtp_host` | smtp.tem.scw.cloud | |
 | `smtp_port` | 587 | |
 | `smtp_user` | <me@kube.rocks> | |
 | `smtp_password` | xxx | |
@@ -178,7 +178,7 @@ module "hcloud_kube" {
   ]
 
   control_planes_custom_config = {
-    tls-sans = ["cp.kube.rocks"]
+    tls-san = ["cp.kube.rocks"]
     disable = ["traefik"]
     etcd-s3 = true
     etcd-s3-endpoint = "s3.fr-par.scw.cloud"
@@ -292,7 +292,7 @@ I also prefer increase the eviction threshold to 250Mi, in order to avoid OS OOM
 
 ```tf
 control_planes_custom_config = {
-  tls-sans = ["cp.kube.rocks"]
+  tls-san = ["cp.kube.rocks"]
   disable = ["traefik"]
   etcd-s3 = true
   etcd-s3-endpoint = "s3.fr-par.scw.cloud"
@@ -405,10 +405,10 @@ As input variables, you have the choice to use environment variables or separate
 
 ```tf
 hcloud_token = "xxx"
-my_public_ssh_keys = [
+my_ip_addresses = [
   "82.82.82.82/32"
 ]
-my_ip_addresses = [
+my_public_ssh_keys = [
   "ssh-ed25519 xxx"
 ]
 s3_access_key = "xxx"
@@ -34,8 +34,8 @@ Let's begin with automatic upgrades management.
 Before we go next steps, we need to install critical monitoring CRDs that will be used by many components for monitoring, a subject that will be covered later.
 
 ```sh
-kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
-kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
+ka https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --server-side
+ka https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml --server-side
 ```
 
 ### Automatic reboot
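The new commands lean on short kubectl helpers (`k`, `ka`, `kak`, `kg`, `kgpo`) that this compare never defines. A hedged sketch of the aliases they appear to stand for, inferred purely from how they are used in the hunks:

```sh
# Assumed alias definitions; not part of the original changes.
alias k="kubectl"
alias ka="kubectl apply -f"    # apply a manifest file or URL
alias kak="kubectl apply -k"   # apply a kustomization directory or remote base
alias kg="kubectl get"
alias kgpo="kubectl get pods"
```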
@@ -47,7 +47,7 @@ When OS kernel is upgraded, the system needs to be rebooted to apply it. This is
 ```tf
 resource "helm_release" "kubereboot" {
   chart = "kured"
-  version = "5.1.0"
+  version = "5.4.5"
   repository = "https://kubereboot.github.io/charts"
 
   name = "kured"
@@ -103,8 +103,10 @@ Don't push yourself get fully 100% GitOps everywhere if the remedy give far more
 {{< /alert >}}
 
 ```sh
 k create ns system-upgrade
+# installing system-upgrade-controller
 ka https://github.com/rancher/system-upgrade-controller/releases/latest/download/system-upgrade-controller.yaml
 ka https://github.com/rancher/system-upgrade-controller/releases/latest/download/crd.yaml
+# checking system-upgrade-controller deployment status
 kg deploy -n system-upgrade
 ```
@@ -195,11 +197,17 @@ You may set the same channel as previous step for hcloud cluster creation.
 
 ## External access
 
-Now it's time to expose our cluster to the outside world. We'll use Traefik as ingress controller and cert-manager for SSL certificates management.
+Now it's time to expose our cluster to the outside world. We'll use Traefik v3 as ingress controller and cert-manager for SSL certificates management.
 
 ### Traefik
 
-Apply following file:
+Apply CRDs:
 
+```sh
+kak https://github.com/traefik/traefik-helm-chart/traefik/crds/ --server-side
+```
+
+Then apply following:
+
 {{< highlight host="demo-kube-k3s" file="traefik.tf" >}}
 
@@ -216,25 +224,31 @@ resource "kubernetes_namespace_v1" "traefik" {
 
 resource "helm_release" "traefik" {
   chart = "traefik"
-  version = "24.0.0"
+  version = "28.0.0"
   repository = "https://traefik.github.io/charts"
 
   name = "traefik"
   namespace = kubernetes_namespace_v1.traefik.metadata[0].name
 
   set {
-    name = "ports.web.redirectTo"
+    name = "ports.web.redirectTo.port"
     value = "websecure"
   }
 
-  set {
+  set_list {
     name = "ports.websecure.forwardedHeaders.trustedIPs"
-    value = "{127.0.0.1/32,10.0.0.0/8}"
+    value = [
+      "127.0.0.1/32",
+      "10.0.0.0/8"
+    ]
   }
 
-  set {
+  set_list {
     name = "ports.websecure.proxyProtocol.trustedIPs"
-    value = "{127.0.0.1/32,10.0.0.0/8}"
+    value = [
+      "127.0.0.1/32",
+      "10.0.0.0/8"
+    ]
   }
 
   set {
@@ -261,7 +275,7 @@ resource "helm_release" "traefik" {
 
 {{< /highlight >}}
 
-`ports.web.redirectTo` will redirect all HTTP traffic to HTTPS.
+`ports.web.redirectTo.port` will redirect all HTTP traffic to HTTPS.
 
 `forwardedHeaders` and `proxyProtocol` will allow Traefik to get real IP of clients.
 
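The move from `set` blocks holding `"{...}"` strings to `set_list` blocks mirrors plain Helm list values. As a hedged reference only (the chart repo alias and release name are assumptions, not taken from this diff), the same settings expressed on the Helm CLI would look roughly like:

```sh
# Rough CLI equivalent of the Terraform set / set_list blocks above.
# Assumes: helm repo add traefik https://traefik.github.io/charts
helm template traefik traefik/traefik \
  --set "ports.web.redirectTo.port=websecure" \
  --set "ports.websecure.forwardedHeaders.trustedIPs={127.0.0.1/32,10.0.0.0/8}" \
  --set "ports.websecure.proxyProtocol.trustedIPs={127.0.0.1/32,10.0.0.0/8}"
```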
@@ -333,7 +347,7 @@ One applied, use `hcloud load-balancer list` to get the public IP of the load ba
 First we need to install cert-manager for proper distributed SSL management. First install CRDs manually.
 
 ```sh
-ka https://github.com/cert-manager/cert-manager/releases/download/v1.12.3/cert-manager.crds.yaml
+ka https://github.com/cert-manager/cert-manager/releases/download/v1.15.0/cert-manager.crds.yaml
 ```
 
 Then apply the following Terraform code.
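Before moving on to the Helm release, it can be worth confirming the CRDs actually registered. A hedged verification step, not part of the original diff:

```sh
# List the cert-manager CRDs just applied.
kubectl get crds | grep cert-manager.io
```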
@@ -349,7 +363,7 @@ resource "kubernetes_namespace_v1" "cert_manager" {
 
 resource "helm_release" "cert_manager" {
   chart = "cert-manager"
-  version = "v1.12.3"
+  version = "v1.15.0"
   repository = "https://charts.jetstack.io"
 
   name = "cert-manager"
@@ -554,9 +568,9 @@ Then apply the following Terraform code:
 resource "helm_release" "traefik" {
   //...
 
-  set {
+  set_list {
     name = "ingressRoute.dashboard.entryPoints"
-    value = "{websecure}"
+    value = ["websecure"]
   }
 
   set {
@@ -611,7 +625,7 @@ resource "kubernetes_manifest" "traefik_middleware_ip" {
     namespace = kubernetes_namespace_v1.traefik.metadata[0].name
   }
   spec = {
-    ipWhiteList = {
+    ipAllowList = {
       sourceRange = var.whitelisted_ips
     }
   }
@@ -640,7 +654,7 @@ resource "kubernetes_manifest" "traefik_middleware_ip" {
   manifest = {
     //...
     spec = {
-      ipWhiteList = {
+      ipAllowList = {
        sourceRange = var.whitelisted_ips
        ipStrategy = {
          depth = 1
@@ -676,14 +690,14 @@ locals {
 resource "helm_release" "traefik" {
   //...
 
-  set {
+  set_list {
     name = "ports.websecure.forwardedHeaders.trustedIPs"
-    value = "{${join(",", local.trusted_ips)}}"
+    value = local.trusted_ips
   }
 
-  set {
+  set_list {
     name = "ports.websecure.proxyProtocol.trustedIPs"
-    value = "{${join(",", local.trusted_ips)}}"
+    value = local.trusted_ips
   }
 }
 ```
@@ -200,7 +200,7 @@ resource "kubernetes_secret_v1" "longhorn_backup_credential" {
 
 resource "helm_release" "longhorn" {
   chart = "longhorn"
-  version = "1.5.1"
+  version = "1.6.1"
   repository = "https://charts.longhorn.io"
 
   name = "longhorn"
@@ -358,7 +358,7 @@ Type this commands for both storage nodes or use Longhorn UI from **Node** tab:
 
 ```sh
 # get the default-disk-xxx identifier
-kg nodes.longhorn.io okami-storage-01 -n longhorn-system -o yaml
+kg nodes.longhorn.io kube-storage-0x -n longhorn-system -o yaml
 # patch main default-disk-xxx as fast storage
 k patch nodes.longhorn.io kube-storage-0x -n longhorn-system --type=merge --patch '{"spec": {"disks": {"default-disk-xxx": {"tags": ["fast"]}}}}'
 # add a new schedulable disk by adding HC_Volume_XXXXXXXX path
@@ -477,7 +477,7 @@ resource "kubernetes_secret_v1" "postgresql_auth" {
 
 resource "helm_release" "postgresql" {
   chart = "postgresql"
-  version = var.chart_postgresql_version
+  version = "15.2.5"
   repository = "https://charts.bitnami.com/bitnami"
 
   name = "postgresql"
@@ -508,11 +508,6 @@ resource "helm_release" "postgresql" {
     value = "replication"
   }
 
-  set {
-    name = "architecture"
-    value = "replication"
-  }
-
   set {
     name = "metrics.enabled"
     value = "true"
@@ -587,8 +582,8 @@ Now check that PostgreSQL pods are correctly running on storage nodes with `kgpo
 
 ```txt
 NAME                   READY   STATUS    RESTARTS   AGE    IP            NODE              NOMINATED NODE   READINESS GATES
-postgresql-primary-0   2/2     Running   0          151m   10.42.5.253   okami-storage-01   <none>          <none>
-postgresql-read-0      2/2     Running   0          152m   10.42.2.216   okami-storage-02   <none>          <none>
+postgresql-primary-0   2/2     Running   0          151m   10.42.5.253   kube-storage-01   <none>           <none>
+postgresql-read-0      2/2     Running   0          152m   10.42.2.216   kube-storage-02   <none>           <none>
 ```
 
 And that's it, we have replicated PostgreSQL cluster ready to use ! Go to longhorn UI and be sure that 2 volumes are created on fast disk under **Volume** menu.
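For the volume check mentioned above, a hedged CLI alternative to the Longhorn UI (the CRD and namespace names come from the Longhorn commands earlier in this compare):

```sh
# List Longhorn volumes and confirm two of them exist for the PostgreSQL data.
kubectl get volumes.longhorn.io -n longhorn-system
```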
@@ -641,7 +636,7 @@ resource "kubernetes_secret_v1" "redis_auth" {
 
 resource "helm_release" "redis" {
   chart = "redis"
-  version = "18.0.2"
+  version = "19.1.0"
   repository = "https://charts.bitnami.com/bitnami"
 
   name = "redis"
@@ -125,7 +125,8 @@ provider "flux" {
 }
 
 resource "flux_bootstrap_git" "this" {
-  path = "clusters/demo"
+  path               = "clusters/demo"
+  embedded_manifests = true
 
   components_extra = [
     "image-reflector-controller",
@@ -152,7 +153,7 @@ Open `demo-kube-flux` project and create helm deployment for sealed secret.
 
 ```yaml
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta2
+apiVersion: source.toolkit.fluxcd.io/v1
 kind: HelmRepository
 metadata:
   name: sealed-secrets
@@ -161,7 +162,7 @@ spec:
   interval: 1h0m0s
   url: https://bitnami-labs.github.io/sealed-secrets
 ---
-apiVersion: helm.toolkit.fluxcd.io/v2beta1
+apiVersion: helm.toolkit.fluxcd.io/v2
 kind: HelmRelease
 metadata:
   name: sealed-secrets
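These apiVersion bumps (`source.toolkit.fluxcd.io/v1`, `helm.toolkit.fluxcd.io/v2`, and the `image.toolkit.fluxcd.io/v1beta2` ones further down) only work once the cluster runs a Flux release that serves them. A hedged way to confirm, not taken from the diff itself:

```sh
# Check which Flux API groups and versions the cluster currently serves.
kubectl api-resources --api-group=source.toolkit.fluxcd.io
kubectl api-resources --api-group=helm.toolkit.fluxcd.io
flux check
```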
@@ -352,7 +353,7 @@ Let's try some app that require a bit more configuration and real database conne
 {{< highlight host="demo-kube-flux" file="clusters/demo/n8n/deploy-n8n.yaml" >}}
 
 ```yaml
-apiVersion: apps/v1
+apiVersion: v1
 kind: Namespace
 metadata:
   name: n8n
@@ -423,10 +424,14 @@ spec:
         volumeMounts:
           - name: n8n-data
             mountPath: /home/node/.n8n
+          - name: n8n-cache
+            mountPath: /home/node/.cache
       volumes:
         - name: n8n-data
          persistentVolumeClaim:
            claimName: n8n-data
+        - name: n8n-cache
+          emptyDir: {}
 ---
 apiVersion: v1
 kind: PersistentVolumeClaim
|
||||
{{< highlight host="demo-kube-flux" file="clusters/demo/nocodb/deploy-nocodb.yaml" >}}
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: nocodb
|
||||
|
@@ -69,7 +69,7 @@ resource "kubernetes_namespace_v1" "monitoring" {
 
 resource "helm_release" "kube_prometheus_stack" {
   chart = "kube-prometheus-stack"
-  version = "49.2.0"
+  version = "58.1.0"
   repository = "https://prometheus-community.github.io/helm-charts"
 
   name = "kube-prometheus-stack"
@@ -112,12 +112,12 @@ resource "helm_release" "kube_prometheus_stack" {
 
   set {
     name = "prometheus.prometheusSpec.tolerations[0].key"
-    value = "node-role.kubernetes.io/storage"
+    value = "node-role.kubernetes.io/monitor"
   }
 
   set {
-    name = "prometheus.prometheusSpec.tolerations[0].operator"
-    value = "Exists"
+    name = "prometheus.prometheusSpec.tolerations[0].effect"
+    value = "NoSchedule"
   }
 
   set {
@@ -296,7 +296,7 @@ Create `grafana` database through pgAdmin with same user and according `grafana_
 {{< highlight host="demo-kube-k3s" file="terraform.tfvars" >}}
 
 ```tf
-smtp_host = "smtp.mailgun.org"
+smtp_host = "smtp.tem.scw.cloud"
 smtp_port = "587"
 smtp_user = "xxx"
 smtp_password = "xxx"
@@ -311,7 +311,7 @@ Apply next configuration to Terraform project:
 ```tf
 resource "helm_release" "grafana" {
   chart = "grafana"
-  version = "6.58.9"
+  version = "7.3.8"
   repository = "https://grafana.github.io/helm-charts"
 
   name = "grafana"
@@ -528,11 +528,11 @@ As above config applies only at cluster initialization, you may change directly
 
 Last but not least, we need to add a logging stack. The most popular one is [Elastic Stack](https://www.elastic.co/elastic-stack), but it's very resource intensive. A more lightweight option is to use [Loki](https://grafana.com/oss/loki/), also part of Grafana Labs.
 
-In order to work on scalable mode, we need to have a S3 storage backend. We will reuse same S3 compatible storage as longhorn backup here, but it's recommended to use a separate bucket and credentials.
+We need to have a S3 storage backend for long term storage. We will reuse same S3 compatible storage as longhorn backup here, but it's recommended to use a separate bucket and credentials.
 
 ### Loki
 
-Let's install it now:
+Let's install it on single binary mode:
 
 {{< highlight host="demo-kube-k3s" file="logging.tf" >}}
 
@@ -545,7 +545,7 @@ resource "kubernetes_namespace_v1" "logging" {
 
 resource "helm_release" "loki" {
   chart = "loki"
-  version = "5.15.0"
+  version = "6.2.0"
   repository = "https://grafana.github.io/helm-charts"
 
   name = "loki"
@@ -561,6 +561,11 @@ resource "helm_release" "loki" {
     value = "true"
   }
 
+  set {
+    name = "loki.compactor.delete_request_store"
+    value = "s3"
+  }
+
   set {
     name = "loki.limits_config.retention_period"
     value = "24h"
@@ -606,35 +611,74 @@ resource "helm_release" "loki" {
     value = "1"
   }
 
+  set {
+    name = "loki.schemaConfig.configs[0].from"
+    value = "2024-01-01"
+  }
+
+  set {
+    name = "loki.schemaConfig.configs[0].store"
+    value = "tsdb"
+  }
+
+  set {
+    name = "loki.schemaConfig.configs[0].object_store"
+    value = "s3"
+  }
+
+  set {
+    name = "loki.schemaConfig.configs[0].schema"
+    value = "v13"
+  }
+
+  set {
+    name = "loki.schemaConfig.configs[0].index.prefix"
+    value = "index_"
+  }
+
+  set {
+    name = "loki.schemaConfig.configs[0].index.period"
+    value = "24h"
+  }
+
+  set {
+    name = "deploymentMode"
+    value = "SingleBinary"
+  }
+
   set {
     name = "read.replicas"
-    value = "1"
+    value = "0"
   }
 
   set {
     name = "backend.replicas"
-    value = "1"
+    value = "0"
   }
 
   set {
     name = "write.replicas"
-    value = "2"
+    value = "0"
   }
 
   set {
-    name = "write.tolerations[0].key"
-    value = "node-role.kubernetes.io/storage"
+    name = "singleBinary.replicas"
+    value = "1"
   }
 
   set {
-    name = "write.tolerations[0].effect"
+    name = "singleBinary.tolerations[0].key"
+    value = "node-role.kubernetes.io/monitor"
+  }
+
+  set {
+    name = "singleBinary.tolerations[0].effect"
     value = "NoSchedule"
   }
 
   set {
-    name = "write.nodeSelector.node-role\\.kubernetes\\.io/storage"
-    type = "string"
-    value = "true"
+    name = "singleBinary.nodeSelector.node\\.kubernetes\\.io/server-usage"
+    value = "monitor"
   }
 
   set {
@@ -661,6 +705,21 @@ resource "helm_release" "loki" {
     name = "test.enabled"
     value = "false"
   }
+
+  set {
+    name = "chunksCache.enabled"
+    value = "false"
+  }
+
+  set {
+    name = "resultsCache.enabled"
+    value = "false"
+  }
+
+  set {
+    name = "lokiCanary.enabled"
+    value = "false"
+  }
 }
 ```
 
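With `deploymentMode` set to `SingleBinary` and the read, write and backend replicas dropped to zero, only the single-binary workload should come up. A hedged sanity check (the `logging` namespace name comes from the Terraform namespace resource above; the label selector is an assumption based on the chart's usual labels):

```sh
# Confirm only the single-binary Loki pod is scheduled after the switch.
kubectl get pods -n logging -l app.kubernetes.io/name=loki -o wide
```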
@@ -677,7 +736,7 @@ Okay so Loki is running but not fed, for that we'll deploy [Promtail](https://gr
 ```tf
 resource "helm_release" "promtail" {
   chart = "promtail"
-  version = "6.15.0"
+  version = "6.15.5"
   repository = "https://grafana.github.io/helm-charts"
 
   name = "promtail"
@@ -73,7 +73,7 @@ resource "kubernetes_namespace_v1" "gitea" {
 
 resource "helm_release" "gitea" {
   chart = "gitea"
-  version = "9.2.0"
+  version = "10.1.4"
   repository = "https://dl.gitea.io/charts"
 
   name = "gitea"
@@ -359,7 +359,7 @@ resource "helm_release" "traefik" {
   }
 
   set {
-    name = "ports.ssh.expose"
+    name = "ports.ssh.expose.default"
     value = "true"
   }
 
@@ -510,7 +510,7 @@ resource "kubernetes_namespace_v1" "concourse" {
 
 resource "helm_release" "concourse" {
   chart = "concourse"
-  version = "17.2.0"
+  version = "17.3.1"
   repository = "https://concourse-charts.storage.googleapis.com"
 
   name = "concourse"
@@ -432,7 +432,7 @@ Let's define the image update automation task for main Flux repository:
 {{< highlight host="demo-kube-flux" file="clusters/demo/flux-add-ons/image-update-automation.yaml" >}}
 
 ```yaml
-apiVersion: image.toolkit.fluxcd.io/v1beta1
+apiVersion: image.toolkit.fluxcd.io/v1beta2
 kind: ImageUpdateAutomation
 metadata:
   name: flux-system
@@ -465,7 +465,7 @@ Now we need to tell Image Reflector how to scan the repository, as well as the a
 {{< highlight host="demo-kube-flux" file="clusters/demo/kuberocks/images-demo.yaml" >}}
 
 ```yaml
-apiVersion: image.toolkit.fluxcd.io/v1beta1
+apiVersion: image.toolkit.fluxcd.io/v1beta2
 kind: ImageRepository
 metadata:
   name: demo
@@ -476,7 +476,7 @@ spec:
   secretRef:
     name: dockerconfigjson
 ---
-apiVersion: image.toolkit.fluxcd.io/v1beta1
+apiVersion: image.toolkit.fluxcd.io/v1beta2
 kind: ImagePolicy
 metadata:
   name: demo
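After the image automation objects move to `v1beta2`, the Flux CLI can confirm they still reconcile. A hedged check, with object names taken from the manifests above:

```sh
# Inspect the image automation objects touched by this compare.
flux get image repository demo
flux get image policy demo
flux get image update flux-system
```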
@@ -273,7 +273,7 @@ resource "kubernetes_namespace_v1" "tracing" {
 
 resource "helm_release" "tempo" {
   chart = "tempo"
-  version = "1.5.1"
+  version = "1.7.2"
   repository = "https://grafana.github.io/helm-charts"
 
   name = "tempo"
@@ -24,12 +24,12 @@ Create dedicated database for SonarQube same as usual, then we can use flux for
 {{< highlight host="demo-kube-flux" file="clusters/demo/sonarqube/deploy-sonarqube.yaml" >}}
 
 ```yaml
-apiVersion: apps/v1
+apiVersion: v1
 kind: Namespace
 metadata:
   name: sonarqube
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta2
+apiVersion: source.toolkit.fluxcd.io/v1
 kind: HelmRepository
 metadata:
   name: sonarqube
@@ -38,7 +38,7 @@ spec:
   interval: 1h0m0s
   url: https://SonarSource.github.io/helm-chart-sonarqube
 ---
-apiVersion: helm.toolkit.fluxcd.io/v2beta1
+apiVersion: helm.toolkit.fluxcd.io/v2
 kind: HelmRelease
 metadata:
   name: sonarqube
@@ -880,7 +880,7 @@ After push all CI should build correctly. Then the image policy for auto update:
 {{< highlight host="demo-kube-flux" file="clusters/demo/kuberocks/images-demo-ui.yaml" >}}
 
 ```yml
-apiVersion: image.toolkit.fluxcd.io/v1beta1
+apiVersion: image.toolkit.fluxcd.io/v1beta2
 kind: ImageRepository
 metadata:
   name: demo-ui
@@ -891,7 +891,7 @@ spec:
   secretRef:
     name: dockerconfigjson
 ---
-apiVersion: image.toolkit.fluxcd.io/v1beta1
+apiVersion: image.toolkit.fluxcd.io/v1beta2
 kind: ImagePolicy
 metadata:
   name: demo-ui
@@ -931,7 +931,7 @@ spec:
         - name: dockerconfigjson
       containers:
         - name: front
-          image: gitea.okami101.io/kuberocks/demo-ui:latest # {"$imagepolicy": "flux-system:image-demo-ui"}
+          image: gitea.kube.rocks/kuberocks/demo-ui:latest # {"$imagepolicy": "flux-system:image-demo-ui"}
          ports:
            - containerPort: 80
 ---
File diff suppressed because it is too large.
@@ -4,8 +4,64 @@ description: "Some boring stuf."
 layout: "simple"
 ---
 
-## What We Collect and Receive
+## Introduction
 
-In order for us to provide you the best possible experience on our websites, we need to collect and process certain information. Depending on your use of the Services, that may include:
+Welcome to **Okami101**. We are committed to protecting your privacy. This Privacy Policy explains how we handle any personal data that may be collected when you visit our blog site. While we do not collect user information for tracking or marketing purposes, we use certain third-party services to ensure the security, functionality, and analytics of our site. This policy outlines our approach to privacy and how we ensure compliance with the General Data Protection Regulation (GDPR).
 
-* **Usage data** — when you visit our site, we will store: the website from which you visited us from, the parts of our site you visit, the date and duration of your visit, your anonymized IP address, information from the device (device type, operating system, screen resolution, language, country you are located in, and web browser type) you used during your visit, and more. We process this usage data in Umami for statistical purposes, to improve our site and to recognize and stop any misuse.
+## Data Collection
+
+### Personal Data
+
+We do not collect, store, or process any personal data from our users for marketing or tracking purposes. However, we do process user IP addresses strictly for security purposes and use anonymized analytics data.
+
+### IP Addresses
+
+We use IP addresses solely for the purpose of preventing attacks and ensuring the security of our site. This is done through CrowdSec, a participative security solution that offers crowdsourced protection against malicious IPs. Your IP address may be processed to identify and mitigate potential security threats.
+
+### Cookies
+
+Our blog does not use cookies to track or identify visitors for our purposes. However, Cloudflare may use cookies to deliver its services effectively. These cookies are essential for security purposes and to improve site performance. Additionally, Umami, our analytics provider, does not use cookies and ensures user privacy.
+
+### Log Files
+
+We do not maintain log files of visitors to our site. However, Cloudflare and CrowdSec may collect log data for security and operational purposes, including IP addresses, browser types, and other technical information.
+
+## Third-Party Services
+
+### Cloudflare
+
+We use Cloudflare for web security and performance optimization. Cloudflare may collect and process certain data as part of its service. This data processing is governed by Cloudflare's Privacy Policy, which can be found [here](https://www.cloudflare.com/privacypolicy/).
+
+### Crowdsec
+
+We use CrowdSec to enhance our site's security by protecting against malicious IP addresses. CrowdSec processes IP addresses to identify and mitigate security threats. The data handling practices of CrowdSec are governed by CrowdSec's Privacy Policy, which can be found [here](https://crowdsec.net/privacy-policy).
+
+### Umami
+
+We use Umami, a fully GDPR-compliant Google Analytics alternative, to gather anonymized analytics data about our site's usage. Umami does not use cookies or collect personally identifiable information. The data collected by Umami helps us understand site traffic and usage patterns without compromising user privacy. For more information, you can refer to Umami's privacy policy [here](https://umami.is/docs/).
+
+### giscus
+
+We use giscus, a GitHub-based commenting system, to manage comments on our blog posts. When you post a comment using giscus, you are interacting with GitHub's platform. This means your comment data, including your GitHub username and any other information you choose to share, is processed by GitHub. The data handling practices for giscus are governed by GitHub's Privacy Policy, which can be found [here](https://docs.github.com/en/site-policy/privacy-policies/github-privacy-statement).
+
+## Third-Party Links
+
+Our blog may contain links to other websites. Please be aware that we are not responsible for the privacy practices of other sites. We encourage you to read the privacy statements of each website that collects personal information.
+
+## Data Protection Rights
+
+Since we only process personal data (IP addresses) for security purposes and use anonymized analytics, your data protection rights are limited in this context. However, for any concerns or questions about data processed by Cloudflare, Crowdsec, giscus (GitHub), or Umami, please refer to their respective privacy policies.
+
+## Contact Us
+
+If you have any questions or concerns about our privacy practices or this policy, please contact us at <adrien@okami101.io>.
+
+## Changes to This Privacy Policy
+
+We may update our Privacy Policy from time to time. Any changes will be posted on this page with an updated effective date. We encourage you to review this policy periodically for any changes.
+
+Effective Date: **19/05/2024**
+
+---
+
+By using our blog, you agree to the terms of this Privacy Policy. Thank you for visiting **Okami101**!
@@ -6,13 +6,13 @@
 apps:
   - name: vue-ts
     title: Vue 3 TS Realworld
-    repo: https://gitea.okami101.io/adr1enbe4udou1n/vue-ts-realworld-example-app
+    repo: adr1enbe4udou1n/vue-ts-realworld-example-app
     ci: conduit-vue-ts
     demo: https://vuetsrealworld.okami101.io
     color: green
   - name: react-ts
     title: React TS Realworld
-    repo: https://gitea.okami101.io/adr1enbe4udou1n/react-ts-realworld-example-app
+    repo: adr1enbe4udou1n/react-ts-realworld-example-app
     ci: conduit-react-ts
     demo: https://reacttsrealworld.okami101.io
     color: blue
@@ -22,41 +22,41 @@
 apps:
   - name: aspnet-core
     title: ASP.NET Core Realworld
-    repo: https://gitea.okami101.io/adr1enbe4udou1n/aspnetcore-realworld-example-app
+    repo: adr1enbe4udou1n/aspnetcore-realworld-example-app
     ci: conduit-aspnet-core
     demo: https://aspnetcorerealworld.okami101.io/api
 
   - name: spring-boot
     title: Spring Boot Realworld
-    repo: https://gitea.okami101.io/adr1enbe4udou1n/spring-boot-realworld-example-app
+    repo: adr1enbe4udou1n/spring-boot-realworld-example-app
     ci: conduit-spring-boot
     demo: https://springbootrealworld.okami101.io/api
     color: green
 
   - name: symfony
     title: Symfony Realworld
-    repo: https://gitea.okami101.io/adr1enbe4udou1n/symfony-realworld-example-app
+    repo: adr1enbe4udou1n/symfony-realworld-example-app
     ci: conduit-symfony
     demo: https://symfonyrealworld.okami101.io/api
     color: black
 
   - name: laravel
     title: Laravel Realworld
-    repo: https://gitea.okami101.io/adr1enbe4udou1n/laravel-realworld-example-app
+    repo: adr1enbe4udou1n/laravel-realworld-example-app
     ci: conduit-laravel
     demo: https://laravelrealworld.okami101.io/api
     color: orange
 
   - name: nestjs
     title: NestJS Realworld
-    repo: https://gitea.okami101.io/adr1enbe4udou1n/nestjs-realworld-example-app
+    repo: adr1enbe4udou1n/nestjs-realworld-example-app
     ci: conduit-nestjs
     demo: https://nestjsrealworld.okami101.io/api
     color: red
 
   - name: fastapi
     title: FastAPI Realworld
-    repo: https://gitea.okami101.io/adr1enbe4udou1n/fastapi-realworld-example-app
+    repo: adr1enbe4udou1n/fastapi-realworld-example-app
     ci: conduit-fastapi
     demo: https://fastapirealworld.okami101.io/api
     color: teal
@@ -69,12 +69,12 @@
   - name: vuetify-admin
     title: Vuetify Admin
     date: 11/2020
-    repo: https://github.com/okami101/vuetify-admin
+    repo: okami101/vuetify-admin
     demo: https://va-demo.okami101.io/
     docs: https://www.okami101.io/vuetify-admin
 
   - name: laravel-rad-stack
     title: Laravel RAD Stack
     date: 10/2021
-    repo: https://github.com/adr1enbe4udou1n/laravel-rad-stack
+    repo: adr1enbe4udou1n/laravel-rad-stack
     demo: https://laravel-rad-stack.okami101.io/
@@ -76,9 +76,8 @@
 <div class="flex items-center gap-4">
   <img src="/kube.png" width="30" height="30" alt="Kubernetes"
     title="Run on K3s over Hetzner Cloud" />
-
   <a href="https://concourse.okami101.io/teams/main/pipelines/okami-blog" target="_blank">
-    <img src="https://concourse.okami101.io/api/v1/teams/main/pipelines/okami-blog/badge" alt="Blog build status" />
+    <img src="https://concourse.okami101.io/api/v1/teams/main/pipelines/okami-blog/badge" alt="build" />
   </a>
 </div>
 <div class="hidden lg:block">
@@ -1,6 +1,6 @@
 <div class="chart">
   {{ $id := delimit (shuffle (seq 1 9)) "" }}
-  <canvas id="{{ $id }}"></canvas>
+  <canvas id="{{ $id }}" height="350"></canvas>
   <script type="text/javascript">
     window.addEventListener("DOMContentLoaded", (event) => {
       const ctx = document.getElementById("{{ $id }}");
@@ -8,6 +8,7 @@
     {{ if eq (.Get "type") "timeseries" }}
     type: 'line',
     options: {
+      maintainAspectRatio: false,
       plugins: {
         title: {
           display: true,
@@ -23,13 +24,15 @@
         },
       }
     },
-    {{ if .Get "max" }}
     y: {
-      stacked: {{ .Get "stacked" }},
+      {{ if .Get "stacked" }}
+      stacked: {{ .Get "stacked" }},
+      {{ end }}
+      beginAtZero: true,
+      {{ if .Get "max" }}
       suggestedMax: {{ .Get "max" }},
       {{ end }}
     }
-    {{ end }}
   },
 },
 data: {
@@ -49,11 +49,7 @@
   {{ readFile (print "data/works/" .name ".md") | markdownify }}
 </div>
 <div class="flex justify-center gap-4">
-  {{ partial "button.html" (dict "text" (partial "icon.html" "github") "href" .repo "color" .color) }}
-  {{ if .ci }}
-  {{ partial "button.html" (dict "text" (partial "icon.html" "bug") "href" (print
-  "https://concourse.okami101.io/teams/main/pipelines/" .ci) "color" .color) }}
-  {{ end }}
+  {{ partial "button.html" (dict "text" (partial "icon.html" "github") "href" (print "https://github.com/" .repo) "color" .color) }}
   {{ if .demo }}
   {{ partial "button.html" (dict "text" "Demo" "href" .demo "color" .color) }}
   {{ end }}
nginx/default.conf (new file, 16 lines)
@@ -0,0 +1,16 @@
+server {
+    listen 80;
+    listen [::]:80;
+    server_name localhost;
+
+    location / {
+        root /usr/share/nginx/html;
+        index index.html index.htm;
+
+        if ($new_uri != "") {
+            rewrite ^(.*)$ $new_uri permanent;
+        }
+    }
+
+    error_page 404 /404.html;
+}
nginx/redirects.conf (new file, 3 lines)
@@ -0,0 +1,3 @@
+map $request_uri $new_uri {
+    /2023/12/a-2024-benchmark-of-main-web-apis-frameworks/ /2023/12/a-2024-benchmark-of-main-web-api-frameworks/;
+}
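`default.conf` checks `$new_uri`, which `redirects.conf` fills from the request URI, so the old permalink 301s to the corrected one. A hedged local smoke test against the image built earlier (the image and container names are illustrative, reusing the `blog-nginx` tag assumed above):

```sh
# Start the container, check the redirect target, then stop it.
docker run --rm -d -p 8080:80 --name blog blog-nginx
curl -sI http://localhost:8080/2023/12/a-2024-benchmark-of-main-web-apis-frameworks/ | grep -i '^location'
# Expect a 301 pointing at /2023/12/a-2024-benchmark-of-main-web-api-frameworks/
docker stop blog
```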
Submodule themes/congo updated: 110bc3414f...5c8ab32b5b