Remove docs (#1415)

* remove docsy as submodule

Signed-off-by: Mauro Morales <mauro.morales@spectrocloud.com>

* remove docs

Signed-off-by: Mauro Morales <mauro.morales@spectrocloud.com>

---------

Signed-off-by: Mauro Morales <mauro.morales@spectrocloud.com>
Mauro Morales 2023-05-17 09:58:30 +02:00 committed by GitHub
parent 38da073118
commit fdea673d54
136 changed files with 0 additions and 15880 deletions

View File

@@ -7,7 +7,6 @@ on:
  pull_request:
    paths:
      - '**'
      - '!docs/**'
concurrency:
  group: ci-arm-${{ github.head_ref || github.ref }}-${{ github.repository }}

View File

@@ -5,11 +5,9 @@ on:
      - master
    paths:
      - '**'
      - '!docs/**'
  pull_request:
    paths:
      - '**'
      - '!docs/**'
  workflow_dispatch:
    inputs:
      immucore_dev:

View File

@@ -3,12 +3,9 @@ on:
  push:
    branches:
      - master
    paths:
      - '!docs/**'
  pull_request:
    paths:
      - '**'
      - '!docs/**'
env:
  FORCE_COLOR: 1
jobs:

View File

@@ -1,42 +0,0 @@
name: Cloud Config Schema
on:
  # To test with push events, it's easy to use a test branch e.g. schematest, you will also need to update the checkout ref and the create PR base
  # push:
  #   branches:
  #     - schematest
  push:
    tags:
      - v*
jobs:
  generate-schema:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          ref: master
          fetch-depth: 0
      - name: setup-docker
        uses: docker-practice/actions-setup-docker@master
      - name: Install earthly
        uses: Luet-lab/luet-install-action@v1
        with:
          repository: quay.io/kairos/packages
          packages: utils/earthly
      - name: Generate cloud-config Schema
        run: |
          earthly +generate-schema
      - name: Change ownership
        # Chown files generated by earthly since they are owned by root
        run: sudo chown -R runner:docker docs/static/*
      - name: Create Pull Request
        uses: peter-evans/create-pull-request@v5
        with:
          token: ${{ secrets.PAT_TOKEN }}
          push-to-fork: ci-robbot/c3os
          base: master
          branch: cloud-config-schema-updates
          commit-message: ':book: Update Schema'
          title: ':book: Update Schema'
          body: Update latest cloud config schema release
          signoff: true

.gitignore vendored (13 changes)
View File

@@ -4,22 +4,9 @@
/cli/cli
/kairos
/dist
/docs/bin
/docs/.hugo_build.lock
/build
docs/node_modules
docs/public
!docs/public/favicon
!docs/public/CNAME
docs/dist
docs/.vscode
coverage.out
.DS_Store
/docs/bin/
/docs/public/
/docs/resources/
/docs/node_modules/
/docs/tech-doc-hugo
internal/webui/public/cypress/videos/
node_modules/

.gitmodules vendored (7 changes)
View File

@@ -1,7 +0,0 @@
[submodule "themes/docsy"]
	path = themes/docsy
	url = https://github.com/google/docsy
[submodule "docs/themes/docsy"]
	path = docs/themes/docsy
	url = https://github.com/google/docsy.git

View File

@@ -1 +0,0 @@
kairos.io

View File

@@ -1,18 +0,0 @@
export HUGO_VERSION?=0.105.0
export HUGO_PLATFORM?=Linux-64bit
export ROOT_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))

.DEFAULT_GOAL := build

.PHONY: build
build:
	scripts/build.sh

.PHONY: serve
serve:
	scripts/serve.sh

.PHONY: publish
publish:
	scripts/publish.sh

View File

@@ -1,35 +0,0 @@
# :book: Kairos documentation
The Kairos documentation uses [docsy](https://docsy.dev).
## Prerequisites
The following software is needed to preview the documentation changes locally.
* Hugo [v0.105.0+](https://gohugo.io/installation/)
* Node.js [v16+](https://nodejs.org/en/download/)
## Test your changes
After cloning the repo (with submodules), install the Node dependencies and run `make serve` to test the website locally.
```
$> git clone --recurse-submodule https://github.com/kairos-io/kairos
$> cd kairos/docs
$> npm run prepare
$> make serve
```
If you have a local copy already checked out, sync the submodules:
```
$> git submodule update --init --recursive --depth 1
```
To run the website locally on other platforms, e.g. macOS:
```
$> HUGO_PLATFORM=macOS-64bit make serve
```
**Note**: If the `make serve` command does not work for you, try starting Hugo directly with `hugo server -D`.

View File

@@ -1,21 +0,0 @@
<svg width="347" height="125" viewBox="0 0 347 125" fill="none" xmlns="http://www.w3.org/2000/svg">
<rect width="24.8171" height="24.8171" transform="matrix(-0.86601 0.500028 -3.18907e-05 1 64.6367 12.8008)" fill="#AEAEAE"/>
<rect width="24.8171" height="24.8171" transform="matrix(-0.86601 -0.500028 3.18907e-05 1 43.1436 25.2109)" fill="#D6D6D6"/>
<rect width="24.8171" height="24.8171" transform="matrix(-0.866041 0.499972 0.866041 0.499972 43.1436 0.394531)" fill="white"/>
<rect width="24.8171" height="24.8171" transform="matrix(3.19859e-05 1 0.86601 0.500028 21.6487 37.6211)" fill="#D6D6D6"/>
<rect width="24.8171" height="24.8171" transform="matrix(-0.866041 0.499973 0.866041 0.499972 21.6487 62.4336)" fill="white"/>
<rect width="24.8171" height="24.8171" transform="matrix(-3.17834e-05 1 0.866009 -0.500028 0.157593 50.0312)" fill="#AEAEAE"/>
<rect width="24.8171" height="24.8171" transform="matrix(-0.86601 0.500028 -3.19781e-05 1 64.6389 87.2539)" fill="#AEAEAE"/>
<rect width="24.8171" height="24.8171" transform="matrix(-0.866009 -0.500028 3.18033e-05 1 43.1458 99.6641)" fill="#D6D6D6"/>
<rect width="24.8171" height="24.8171" transform="matrix(-0.866041 0.499972 0.866041 0.499973 43.1458 74.8477)" fill="white"/>
<rect width="24.8171" height="24.8171" transform="matrix(0.86601 -0.500028 3.18907e-05 -1 64.6334 112.082)" fill="#AEAEAE"/>
<rect width="24.8171" height="24.8171" transform="matrix(0.86601 0.500028 -3.18907e-05 -1 86.1266 99.6719)" fill="#D6D6D6"/>
<rect width="24.8171" height="24.8171" transform="matrix(0.866041 -0.499972 -0.866041 -0.499972 86.1266 124.488)" fill="white"/>
<rect width="24.8171" height="24.8171" transform="matrix(-0.86601 0.500028 -3.19781e-05 1 129.114 50.0273)" fill="#AEAEAE"/>
<rect width="24.8171" height="24.8171" transform="matrix(-0.866009 -0.500028 3.18033e-05 1 107.62 62.4375)" fill="#D6D6D6"/>
<rect width="24.8171" height="24.8171" transform="matrix(-0.866041 0.499972 0.866041 0.499973 107.62 37.6211)" fill="white"/>
<rect width="24.8171" height="24.8171" transform="matrix(0.86601 -0.500028 3.18907e-05 -1 64.635 37.6289)" fill="#AEAEAE"/>
<rect width="24.8171" height="24.8171" transform="matrix(0.86601 0.500028 -3.18907e-05 -1 86.1282 25.2188)" fill="#D6D6D6"/>
<rect width="24.8171" height="24.8171" transform="matrix(0.866041 -0.499972 -0.866041 -0.499972 86.1282 50.0352)" fill="white"/>
<path d="M189.374 82.9414L174.851 65.159V82.9414H166.846V43.031H174.851V60.9278L189.374 43.031H199.037L182.57 62.8147L199.495 82.9414H189.374ZM202.702 66.9887C202.702 63.7867 203.331 60.9468 204.589 58.4691C205.885 55.9914 207.619 54.0855 209.792 52.7513C212.003 51.4171 214.461 50.7501 217.168 50.7501C219.531 50.7501 221.59 51.2265 223.343 52.1795C225.135 53.1325 226.564 54.3332 227.631 55.7817V51.2647H235.694V82.9414H227.631V78.31C226.602 79.7966 225.173 81.0355 223.343 82.0266C221.551 82.9795 219.474 83.456 217.111 83.456C214.442 83.456 212.003 82.7699 209.792 81.3976C207.619 80.0253 205.885 78.1003 204.589 75.6226C203.331 73.1068 202.702 70.2288 202.702 66.9887ZM227.631 67.103C227.631 65.159 227.25 63.5008 226.488 62.1285C225.725 60.7181 224.696 59.6508 223.4 58.9265C222.104 58.1642 220.713 57.783 219.226 57.783C217.74 57.783 216.367 58.1451 215.109 58.8694C213.851 59.5936 212.822 60.661 212.022 62.0714C211.259 63.4436 210.878 65.0827 210.878 66.9887C210.878 68.8946 211.259 70.5718 212.022 72.0204C212.822 73.4308 213.851 74.5171 215.109 75.2795C216.405 76.0419 217.778 76.4231 219.226 76.4231C220.713 76.4231 222.104 76.061 223.4 75.3367C224.696 74.5743 225.725 73.507 226.488 72.1347C227.25 70.7243 227.631 69.0471 227.631 67.103ZM247.571 47.4909C246.161 47.4909 244.979 47.0525 244.026 46.1758C243.112 45.261 242.654 44.1364 242.654 42.8023C242.654 41.4681 243.112 40.3627 244.026 39.4859C244.979 38.5711 246.161 38.1137 247.571 38.1137C248.982 38.1137 250.144 38.5711 251.059 39.4859C252.012 40.3627 252.489 41.4681 252.489 42.8023C252.489 44.1364 252.012 45.261 251.059 46.1758C250.144 47.0525 248.982 47.4909 247.571 47.4909ZM251.517 51.2647V82.9414H243.512V51.2647H251.517ZM267.431 56.182C268.46 54.5048 269.794 53.1897 271.433 52.2367C273.11 51.2837 275.016 50.8072 277.151 50.8072V59.2124H275.035C272.519 59.2124 270.614 59.8033 269.317 60.985C268.06 62.1666 267.431 64.2251 267.431 67.1602V82.9414H259.426V51.2647H267.431V56.182ZM296.608 83.456C293.559 83.456 290.814 82.7889 288.375 81.4548C285.935 80.0825 284.01 78.1575 282.6 75.6798C281.228 73.202 280.541 70.3431 280.541 67.103C280.541 63.8629 281.247 61.004 282.657 58.5263C284.106 56.0486 286.069 54.1426 288.546 52.8085C291.024 51.4362 293.788 50.7501 296.837 50.7501C299.887 50.7501 302.65 51.4362 305.128 52.8085C307.606 54.1426 309.55 56.0486 310.96 58.5263C312.409 61.004 313.133 63.8629 313.133 67.103C313.133 70.3431 312.39 73.202 310.903 75.6798C309.455 78.1575 307.472 80.0825 304.957 81.4548C302.479 82.7889 299.696 83.456 296.608 83.456ZM296.608 76.4803C298.057 76.4803 299.41 76.1372 300.668 75.4511C301.964 74.7268 302.993 73.6595 303.756 72.2491C304.518 70.8387 304.899 69.1233 304.899 67.103C304.899 64.0916 304.099 61.7855 302.498 60.1845C300.935 58.5454 299.01 57.7258 296.723 57.7258C294.436 57.7258 292.511 58.5454 290.948 60.1845C289.423 61.7855 288.661 64.0916 288.661 67.103C288.661 70.1144 289.404 72.4397 290.891 74.0788C292.415 75.6798 294.321 76.4803 296.608 76.4803ZM331.127 83.456C328.535 83.456 326.209 82.9986 324.151 82.0837C322.093 81.1308 320.453 79.8538 319.234 78.2528C318.052 76.6518 317.404 74.8793 317.29 72.9352H325.352C325.504 74.155 326.095 75.1652 327.124 75.9657C328.192 76.7662 329.507 77.1664 331.07 77.1664C332.594 77.1664 333.776 76.8615 334.615 76.2516C335.491 75.6417 335.93 74.8602 335.93 73.9072C335.93 72.878 335.396 72.1157 334.329 71.6201C333.299 71.0865 331.641 70.5147 329.354 69.9048C326.991 69.333 325.047 68.7421 323.522 68.1322C322.035 67.5223 320.739 66.5884 319.634 65.3305C318.567 64.0726 318.033 62.3763 
318.033 60.2416C318.033 58.4882 318.528 56.8872 319.52 55.4387C320.549 53.9902 321.997 52.8466 323.865 52.008C325.771 51.1694 328.001 50.7501 330.555 50.7501C334.329 50.7501 337.34 51.703 339.589 53.609C341.838 55.4768 343.077 58.0117 343.306 61.2137H335.644C335.529 59.9558 334.996 58.9647 334.043 58.2404C333.128 57.478 331.889 57.0968 330.326 57.0968C328.878 57.0968 327.753 57.3637 326.953 57.8973C326.19 58.431 325.809 59.1743 325.809 60.1273C325.809 61.1946 326.343 62.0142 327.41 62.586C328.477 63.1196 330.136 63.6723 332.385 64.2441C334.672 64.8159 336.559 65.4067 338.045 66.0166C339.532 66.6266 340.809 67.5795 341.876 68.8756C342.982 70.1335 343.553 71.8107 343.592 73.9072C343.592 75.7369 343.077 77.3761 342.048 78.8246C341.057 80.2731 339.608 81.4167 337.702 82.2553C335.834 83.0558 333.643 83.456 331.127 83.456Z" fill="white"/>
</svg>


View File

@@ -1,376 +0,0 @@
.wrapper:where(.astro-7XAARZHW) {
display: flex;
align-items: center;
justify-content: center;
}
body {
margin: 0;
}
:where(.astro-7XAARZHW) {
font-family: Noto Sans;
box-sizing: border-box;
}
.hero:where(.astro-7XAARZHW) {
position: relative;
overflow: hidden;
}
@media screen and (min-width: 1024px) {
.hero:where(.astro-7XAARZHW) {
height: 45vw;
}
}
.hero:where(.astro-7XAARZHW) {
display: block;
width: 100%;
height: auto;
}
.hero-logo:where(.astro-7XAARZHW) {
margin-bottom: 58px;
}
@media screen and (min-width: 1024px) {
.hero-logo:where(.astro-7XAARZHW) {
margin-right: 260px;
margin-bottom: 0;
}
}
.overlay:where(.astro-7XAARZHW) {
width: 100%;
height: 100%;
min-height: 708px;
display: flex;
flex-direction: row;
align-items: center;
justify-content: center;
padding: 0 24px;
background-color: #03153a;
}
.constraint:where(.astro-7XAARZHW) {
width: 100%;
max-width: 1300px;
display: flex;
flex-direction: row;
align-items: center;
justify-content: center;
}
.constraint:where(.astro-7XAARZHW).vertical {
flex-direction: column;
}
.constraint:where(.astro-7XAARZHW).vertical>.header:where(.astro-7XAARZHW) {
display: flex;
flex-direction: column;
align-items: center;
width: 100%;
max-width: 1200px;
margin-top: 60px;
}
@media screen and (min-width: 1024px) {
.constraint:where(.astro-7XAARZHW).vertical>.header:where(.astro-7XAARZHW) {
flex-direction: row;
margin-top: 0;
}
}
.title:where(.astro-7XAARZHW) {
font-weight: 700;
font-size: 58px;
line-height: 80px;
letter-spacing: .02em;
color: #fff;
margin-top: 0;
}
.subtitle:where(.astro-7XAARZHW) {
font-weight: 500;
font-size: 31px;
line-height: 130%;
letter-spacing: .01em;
color: #fff;
}
.title:where(.astro-7XAARZHW)>span:where(.astro-7XAARZHW) {
color: #e06948;
}
.grid:where(.astro-7XAARZHW) {
display: grid;
grid-gap: 2rem;
}
.sectionTitle:where(.astro-7XAARZHW) {
font-weight: 700;
font-size: var(--f-u8);
margin-top: 4rem;
margin-bottom: 2rem;
}
.roles:where(.astro-7XAARZHW) {
display: flex;
flex-wrap: wrap;
gap: .5em;
font-size: var(--f-d1);
}
.role:where(.astro-7XAARZHW) {
position: relative;
display: inline-block;
white-space: nowrap;
font-weight: 900;
color: var(--t-bg);
background-color: var(--t-fg);
padding: .25em .5em;
z-index: 2;
}
.invert:where(.astro-7XAARZHW) {
position: absolute;
color: var(--t-fg);
display: flex;
align-items: center;
justify-content: center;
width: 100%;
height: 100%;
top: 0;
left: 0;
pointer-events: none;
clip-path: polygon(0% 100%, 100% 100%, 100% 200%, 0% 200%);
transition: clip-path cubic-bezier(.4, 0, .5, 1) .15s;
}
.desc:where(.astro-7XAARZHW) {
font-weight: 400;
font-size: 24px;
line-height: 170%;
color: #fff;
max-width: 748px;
margin: 64px 0;
max-width: 600px;
}
@media screen and (min-width: 1024px) {
.desc:where(.astro-7XAARZHW) {
max-width: 1199px;
font-size: 18px;
}
}
.desc:where(.astro-7XAARZHW) span:where(.astro-7XAARZHW) {
color: #ee5007;
font-weight: 700;
}
.bio:where(.astro-7XAARZHW) {
line-height: 2;
margin-bottom: 2rem;
}
.bio:where(.astro-7XAARZHW)>span:where(.astro-7XAARZHW):first-of-type {
line-height: 1;
margin-bottom: .5em;
display: block;
font-weight: 700;
font-size: var(--f-u4);
}
.orange-line:where(.astro-7XAARZHW) {
height: 16px;
width: 100%;
background: #ee5007;
}
.buttons:where(.astro-7XAARZHW) {
display: flex;
flex-direction: column;
align-items: center;
}
@media screen and (min-width: 420px) {
.buttons:where(.astro-7XAARZHW) {
flex-direction: row;
align-items: flex-start;
}
}
.buttons:where(.astro-7XAARZHW)>a:where(.astro-7XAARZHW) {
display: flex;
flex-direction: row;
justify-content: center;
align-items: center;
padding: 8px 16px;
width: 210px;
height: 50px;
font-weight: 600;
font-size: 22px;
line-height: 100%;
color: #fff;
text-decoration: none;
border-radius: 4px;
margin-top: 53px;
}
@media screen and (min-width: 420px) {
.buttons:where(.astro-7XAARZHW)>a:where(.astro-7XAARZHW) {
margin-right: 40px;
}
}
#quickstart:where(.astro-7XAARZHW) {
background: #ee5007;
box-shadow: 0 0 2px #0003, 0 1px 2px #0000001a, inset 0 -1px #0003;
border-radius: 4px;
}
#download:where(.astro-7XAARZHW) {
border: 1px solid #8facd4;
box-shadow: inset 0 -1px #0003;
filter: drop-shadow(0px 0px 2px rgba(0, 0, 0, .2)) drop-shadow(0px 1px 2px rgba(0, 0, 0, .1));
color: #8facd4;
}
footer:where(.astro-7XAARZHW) {
background: #e06948;
height: 250px;
display: flex;
flex-direction: row;
justify-content: center;
align-items: center;
}
.sections-title:where(.astro-7XAARZHW) {
font-weight: 600;
font-size: 32px;
line-height: 50px;
color: #0a2239;
margin-top: 123px;
text-align: center;
}
.section-wrapper {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
}
.sections {
display: flex;
flex-direction: column;
align-items: center;
max-width: 1200px;
justify-content: center;
}
.sections[aria-current=horizontal] {
flex-direction: column;
width: 100%;
}
@media screen and (min-width: 1024px) {
.sections[aria-current=horizontal] {
flex-direction: row;
}
}
.main-section {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
padding: 64px 48px;
box-sizing: border-box;
width: 100%;
}
.main-section>.sections {
padding-bottom: 0;
}
.main-section>.title {
font-weight: 600;
font-size: 42px;
line-height: 52px;
text-align: center;
color: #161b33;
margin: 43px 0;
}
.secondary-section {
display: flex;
flex-direction: column;
border-bottom: 1px solid rgba(0, 0, 0, .1);
padding: 32px 0;
}
@media screen and (min-width: 1024px) {
.secondary-section {
flex-direction: row;
}
}
.secondary-section img {
margin-top: 80px;
margin-bottom: 40px;
}
.secondary-section:first-child {
padding-top: 0;
}
.secondary-section:last-child {
border-bottom: 0;
padding-bottom: 0;
}
.secondary-section>.title {
min-width: 0px;
width: 100%;
display: flex;
flex-direction: column;
align-items: center;
justify-content: flex-start;
font-family: Noto Sans Mono;
font-weight: 700;
font-size: 24px;
line-height: 38px;
color: #161b33;
text-align: center;
margin-bottom: 32px;
}
@media screen and (min-width: 1024px) {
.secondary-section>.title {
min-width: 480px;
}
}
.sections[aria-current=horizontal] p {
text-align: center;
}
.sections[aria-current=horizontal]>.secondary-section {
flex-direction: column;
align-items: center;
border-bottom: 0;
padding: 0;
width: 100%;
margin: 0;
}
@media screen and (min-width: 1024px) {
.sections[aria-current=horizontal]>.secondary-section {
margin: 0 24px;
}
}
.sections[aria-current=horizontal]>.secondary-section .image-container {
height: 100px;
display: flex;
justify-content: flex-end;
}
.sections[aria-current=horizontal]>.secondary-section .image-container-removeHeight {
display: flex;
justify-content: flex-end;
}
.sections[aria-current=horizontal]>.secondary-section img {
margin: 0;
}
.sections[aria-current=horizontal]>.secondary-section:first-child {
margin-left: 0;
}
.sections[aria-current=horizontal]>.secondary-section:last-child {
margin-right: 0;
}
.sections[aria-current=horizontal]>.secondary-section>.title {
padding: 32px 12px;
flex-grow: 1;
width: 100%;
min-width: auto;
max-width: 340px;
}
.sections[aria-current=horizontal]>.secondary-section>.title>div {
margin: 32px 0;
}
.tertiary-section {
margin-bottom: 24px;
border-bottom: 1px solid rgba(0, 0, 0, .1);
width: 100%;
}
.tertiary-section:last-child {
border-bottom: 0px;
margin-bottom: 0;
}
.tertiary-section>.title {
font-family: Noto Sans Mono;
font-style: normal;
font-weight: 700;
font-size: 24px;
line-height: 33px;
color: #161b33;
margin-bottom: 16px;
}
.sections p {
font-family: Noto Sans;
font-weight: 400;
font-size: 16px;
line-height: 160%;
letter-spacing: .01em;
color: #344079;
margin-top: 0;
margin-bottom: 24px;
}

View File

@@ -1,29 +0,0 @@
/*
Add styles or override variables from the theme here.
*/
$primary: #ee5007;
$secondary: #e1e5ee;
$enable-gradients: true;
$enable-rounded: false;
$enable-shadows: true;
$google_font_name: "Noto Sans";
$google_font_family: "Noto+Sans:0,300;0,400;0,500;0,600;0,700;1,300;1,400;1,500;1,600;1,700";
$td-enable-google-fonts: false;
a {
color: #009ffd
}
a:hover {
color: #03153a
}
// Disables text "Kairos" in the menu. Logo contains it already
.td-navbar .navbar-brand__name {
display: none;
}

View File

@@ -1,172 +0,0 @@
baseURL = "https://kairos.io"
title = "Kairos"
enableRobotsTXT = true
# Hugo allows theme composition (and inheritance). The precedence is from left to right.
theme = ["docsy"]
# Will give values to .Lastmod etc.
enableGitInfo = true
# Language settings
contentDir = "content/en"
defaultContentLanguage = "en"
defaultContentLanguageInSubdir = false
# Useful when translating.
enableMissingTranslationPlaceholders = true
enableEmoji = true
disableKinds = ["taxonomy", "taxonomyTerm"]
# Highlighting config
pygmentsCodeFences = true
pygmentsUseClasses = false
# Use the new Chroma Go highlighter in Hugo.
pygmentsUseClassic = false
#pygmentsOptions = "linenos=table"
# See https://help.farbox.com/pygments.html
pygmentsStyle = "tango"
# Configure how URLs look like per section.
[permalinks]
blog = "/:section/:year/:month/:day/:slug/"
## Configuration for BlackFriday markdown parser: https://github.com/russross/blackfriday
[blackfriday]
plainIDAnchors = true
hrefTargetBlank = true
angledQuotes = false
latexDashes = true
# Image processing configuration.
[imaging]
resampleFilter = "CatmullRom"
quality = 75
anchor = "smart"
[services]
[services.googleAnalytics]
# Comment out the next line to disable GA tracking. Also disables the feature described in [params.ui.feedback].
id = "UA-00000000-0"
# Language configuration
[languages]
[languages.en]
title = "Kairos - The immutable Linux meta-distribution for edge Kubernetes"
description = "The immutable edge Kubernetes"
languageName = "English"
# Weight used for sorting.
weight = 1
[[menu.main]]
name = "Contribute"
weight = 50
url = "https://github.com/kairos-io/kairos/contribute"
pre = "<i class='fab fa-github'></i>"
post = ""
[[menu.main]]
name = "Contribution guidelines"
weight = 50
url = "https://github.com/kairos-io/kairos/blob/master/CONTRIBUTING.md"
pre = "<i class='fab fa-github'></i>"
post = ""
[markup]
[markup.goldmark]
[markup.goldmark.renderer]
unsafe = true
[markup.highlight]
# See a complete list of available styles at https://xyproto.github.io/splash/docs/all.html
style = "tango"
# Everything below this are Site Params
[params]
copyright = "Kairos authors"
privacy_policy = "https://policies.google.com/privacy"
prism_syntax_highlighting = true
# First one is picked as the Twitter card image if not set on page.
# images = ["images/project-illustration.png"]
# Menu title if your navbar has a versions selector to access old versions of your site.
# This menu appears only if you have at least one [params.versions] set.
version_menu = "Releases"
# Repository configuration (URLs for in-page links to opening issues and suggesting changes)
github_repo = "https://github.com/kairos-io/kairos"
# An optional link to a related project repo. For example, the sibling repository where your product code lives.
github_project_repo = "https://github.com/kairos-io/kairos"
# Specify a value here if your content directory is not in your repo's root directory
github_subdir = "docs"
github_branch = "master"
# Google Custom Search Engine ID. Remove or comment out to disable search.
#gcs_engine_id = "011737558837375720776:fsdu1nryfng"
# Enable Algolia DocSearch
algolia_docsearch = false
# Enable Lunr.js offline search
offlineSearch = true
[params.softwareVersions]
k3s = "k3sv1.26.3+k3s1"
kairos = "v2.0.0"
flavor = "debian"
armFlavor = "alpine"
registryURL = "quay.io/kairos"
# User interface configuration
[params.ui]
# Enable to show the side bar menu in its compact state.
sidebar_menu_compact = false
# Set to true to disable breadcrumb navigation.
breadcrumb_disable = true
# Set to true to hide the sidebar search box (the top nav search box will still be displayed if search is enabled)
sidebar_search_disable = true
# Set to false if you don't want to display a logo (/assets/icons/logo.svg) in the top nav bar
navbar_logo = true
# Set to true to disable the About link in the site footer
footer_about_disable = false
# Adds a H2 section titled "Feedback" to the bottom of each doc. The responses are sent to Google Analytics as events.
# This feature depends on [services.googleAnalytics] and will be disabled if "services.googleAnalytics.id" is not set.
# If you want this feature, but occasionally need to remove the "Feedback" section from a single page,
# add "hide_feedback: true" to the page's front matter.
[params.ui.feedback]
enable = true
# The responses that the user sees after clicking "yes" (the page was helpful) or "no" (the page was not helpful).
yes = 'Awesome! Glad to hear it! Please <a href="https://github.com/kairos-io/kairos/issues/new">tell us how we can improve</a>.'
no = 'Oh snap! Sorry to hear that. Please <a href="https://github.com/kairos-io/kairos/issues/new">tell us how we can improve</a>.'
[params.links]
[[params.links.developer]]
name = "GitHub"
url = "https://github.com/kairos-io/kairos"
icon = "fab fa-github"
desc = "Development takes place here!"
[[params.links.user]]
name = "Matrix"
url = "https://matrix.to/#/#kairos-io:matrix.org"
icon = "fa fa-message"
desc = "Join us on Matrix!"
[[params.links.user]]
name = "GitHub discussions"
url = "https://github.com/kairos-io/kairos/discussions"
icon = "fa fa-comments"
desc = "Questions?"
[[params.links.user]]
name = "Office hours calendar"
url = "https://calendar.google.com/calendar/embed?src=c_6d65f26502a5a67c9570bb4c16b622e38d609430bce6ce7fc1d8064f2df09c11%40group.calendar.google.com&ctz=Europe%2FRome"
icon = "fa fa-calendar"
desc = "Join us in our Office hours!"
[[params.links.user]]
name = "Slack"
url = "https://join.slack.com/t/spectrocloudcommunity/shared_invite/zt-1k7wsz840-ugSsPKzZCP5gkasJ0kNpqw"
icon = "fab fa-slack"
desc = "Join us on Slack!"
[[params.links.user]]
name = "Newsletter"
url = "https://kairoslinux.substack.com/"
icon = "fa fa-envelope"
desc = "Subscribe to the newsletter!"

View File

@@ -1,31 +0,0 @@
---
title: "Kairos at FOSDEM 2023"
date: 2023-02-07T10:53:13+01:00
author: Mauro Morales ([Twitter](https://twitter.com/mauromrls)) ([GitHub](https://github.com/mauromorales))
---
I recently had the opportunity to attend FOSDEM 2023 and share a bit about the Kairos project. In this post I want to summarize what I presented and share other interesting presentations I attended, which I believe are relevant for Kairos and our community.
## How we build and maintain Kairos
I had the opportunity to share about [How we build and maintain Kairos](https://fosdem.org/2023/schedule/event/kairos/). In the first half of the presentation, I introduce the different elements that make Kairos a great OS for edge Kubernetes. In the second half, you will get an overview of how the Kairos Factory works, starting from the different Linux distributions all the way up to producing Kairos core and standard images. Because my presentation took place in the Distributions Devroom, I put some extra emphasis on the challenges of being distribution agnostic.
The talk is intended for newcomers, so I made an effort to describe things in simple and welcoming language. However, I think it can also be interesting for those who already know about Kairos but wonder how to extend the core and standard images, or who simply want a better understanding of how all the pieces interconnect.
As I mentioned, the presentation took place in the Distributions Devroom, and we're very thankful to them for hosting us. While it was a great experience and the talk seemed to have a good reception, I now realize that the topic is probably more relevant to a different devroom, for example the [Image-based Linux and Secure Measured Boot devroom](https://fosdem.org/2023/schedule/track/image_based_linux_and_secure_measured_boot/), to which I'll make sure to send proposals next year.
## Other talks which are relevant to Kairos
There were other interesting presentations I had the opportunity to attend, which I think are also relevant to Kairos and our community. These would be my top picks:
If you're completely new to the concepts of Image-Based Linux, Unified Kernel Image or Discoverable Disk Image, I'd recommend checking out Luca Bocassi's talk [Introducing and decoding image-based Linux terminology and concepts](https://fosdem.org/2023/schedule/event/image_linux_secureboot_uki_ddi_ohmy/). As someone who very recently joined the Kairos project, I still get a bit lost with all the different technologies used in Image-Based Linux. The presenter did a good job of clarifying some of these technologies and how they work together.
One of the key presentations, in my opinion, was Lennart Poettering's [Measured Boot, Protecting Secrets and you](https://fosdem.org/2023/schedule/event/image_linux_secureboot_tpm/), where he talks about Trusted Platform Modules and upcoming functionality in systemd. I'm pretty sure some of these features will be relevant for Kairos sooner rather than later.
Last but not least, there was an interesting talk by Gabriel Kerneis about [User-friendly Lightweight TPM Remote Attestation over Bluetooth](https://fosdem.org/2023/schedule/event/image_linux_secureboot_ultrablue/). My guess is that we will continue to see different methods for performing and simplifying attestation, and because one of our goals at the Kairos project is to be as friendly as we can to our user base, I can only imagine we will end up introducing some sort of remote attestation technology like Ultrablue in the future.
## Conclusion
FOSDEM is a very important conference when it comes to free and open source software, and I'm very happy that Kairos was present. First of all because I think the work we're doing with Kairos helps solve some of the most challenging issues of running cloud-native applications on the edge, but also because, as an open source project, it was nice to introduce ourselves to the community there and start a conversation. Expect us to keep engaging with you in further editions of FOSDEM and other conferences!

View File

@@ -1,22 +0,0 @@
---
title: "Kairos at the KCD Amsterdam and Paris 2023"
date: 2023-03-09T10:53:13+01:00
author: Mauro Morales ([Twitter](https://twitter.com/mauromrls)) ([GitHub](https://github.com/mauromorales))
---
We recently had the opportunity to sponsor two Kubernetes events, [KCD Amsterdam][amsterdam] and [KCD Paris][paris]. This blog post is a summary of my personal experience attending them.
Let me start by saying that I'm fairly new to Kubernetes and its community :wave:. I know this project is big, and that there are many companies building products and services around it or with an interest in adopting it. So I was very curious to see what kind of people I was going to meet, and to understand how Kairos could help them.
Most attendees who approached us at the Kairos booths were hearing about Kairos for the first time and genuinely wanted to know what the project was about. I feel confident saying this because we didn't bring fancy prizes to give away, and yet most of them would happily stay with us for 5, 10, even 15 minutes, hearing about our features and engaging in conversation.
_If you're reading this and would like to know about those cool features, I'd recommend checking out the [Getting Started](/docs/getting-started/), [Web UI](/docs/installation/webui/), [P2P Network](/docs/architecture/network/) and [AuroraBoot](/docs/reference/auroraboot/) pages._
When you're in the trenches building a product, talking to users or potential users is super valuable because it lets you see first-hand what kind of issues they are trying to solve. I don't like building projects just because they are cool. To me, it's important that they make people's lives easier. Some of the folks who reached out to us had clear problems in mind, and they didn't shy away from asking hard questions about the internals of Kairos, our project's governance and beyond. I'm very pleased to say that some of them left the booth with a smile on their face, because they might have found a good fit.
While I didn't get to attend any of the talks, I saw some really interesting topics, some of them from fantastic organizations like CERN! What I did do, however, was speak to some of the folks at the other booths, just to see what they were up to :mag: and, most importantly, to see if there were chances our different projects could support each other :raised_hands:.
Last but not least, let me thank everyone :bow: who visited our booth for their valuable time and feedback. I think every one of my colleagues will agree that we're committed to building a great product that solves real-world problems, and we plan to use that feedback accordingly. We have a passion for open source, and we understand that this means much more than just great engineering and best practices. It also means being there for you, the community.
[amsterdam]: https://community.cncf.io/events/details/cncf-kcd-netherlands-presents-kubernetes-community-days-amsterdam-2023/
[paris]: https://community.cncf.io/events/details/cncf-kcd-france-presents-kubernetes-community-days-france-2023/

View File

@@ -1,4 +0,0 @@
---
title: Blog
menu: {main: {weight: 50}}
---

View File

@@ -1,296 +0,0 @@
---
title: "Access your home-lab Kairos cluster over a Wireguard VPN"
date: 2023-03-29T10:53:13+01:00
author: Dimitris Karakasilis([Personal page](https://dimitris.karakasilis.me)) ([GitHub](https://github.com/jimmykarily)) ([Codeberg](https://codeberg.org/dkarakasilis/))
---
## The problem
You got yourself a Raspberry Pi (or more), and you want to put them to good use.
You decide to make a Kubernetes cluster out of them, so that you can utilise the resources better, use familiar tools and implement infrastructure-as-code.
Up to this point, kudos to you for demanding no less than a real cloud from your home infra.
Being the smart person you are, you probably used [Kairos](https://kairos.io/) to create your cluster, and it's now up and running.
It's now time to run some workloads.
Here is my list if you need some ideas:
- A self-hosted Dropbox alternative (e.g. [Seafile](https://www.seafile.com/en/home/), [NextCloud](https://nextcloud.com/) or other)
- [Pihole](https://pi-hole.net/)
- An [mqtt](https://mqtt.org/) broker for your IoT projects
- Your own [Gitea](https://gitea.io/en-us/) instance
- Your own ChatGPT alternative (e.g. using [llama-cli](https://github.com/go-skynet/llama-cli) or [serge](https://github.com/nsarrazin/serge))
None of these workloads is intended for public access. There are ways to expose the cluster to the world (e.g. like I described [in another post](https://dimitris.karakasilis.me/2022/12/26/self-hosted-ci.html))
but it would be better if only devices within a VPN would have access to it.
Once again, there are many VPN solutions out there, but for this blog post, we'll go with [Wireguard](https://www.wireguard.com/).
So here is the problem in one sentence:
> "How do we expose our (possibly behind NAT) cluster, to machines inside the same Wireguard VPN?"
_"NAT" is the main part of the problem because otherwise this would simply be a blog post on how to create a Wireguard VPN. There are many nice tutorials already out there for that._
## A Solution
While trying to solve the problem, I learned 2 things about Wireguard that I didn't know:
1. Wireguard doesn't distinguish between a "server" and a "client". All peers are made equal.
2. Wireguard doesn't provide a solution for NAT traversal. How you access nodes behind NAT, is up to you.
So imagine you have your cluster behind your home router (NAT) and your mobile phone on another network (behind NAT too) trying to access a service on the cluster.
That's not possible, unless there is some public IP address that somehow forwards requests to the cluster.
And that's the idea this solution is based on.
### High level view
![Image describing the solution](/images/kairos-over-wireguard.svg)
The idea is similar to the one I described [in another post](https://dimitris.karakasilis.me/2022/12/26/self-hosted-ci.html).
The only difference is, that this time we expose the cluster only to machines inside the VPN.
Prerequisites:
- A VM with a public IP address and SSH access (as small as it gets, it's good enough)
- `kubectl` access to the cluster we want to expose (it doesn't have to be Kairos, even [`k3d`](https://k3d.io) and [`kind`](https://kind.sigs.k8s.io/) will do; see the sketch after this list)
- A machine to test the result (a smartphone where Wireguard can be installed is fine)
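If you don't have a cluster at hand, a throwaway local one is enough to follow along. A minimal sketch (assuming `k3d` is installed; the cluster name is hypothetical):

```bash
# Create a disposable local cluster to experiment with (any cluster works).
k3d cluster create vpn-test
# Confirm kubectl access before continuing.
kubectl get nodes
```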
### Step by step
From this point on, we will use the IP address `1.2.3.4` as the public IP address of the VM in the cloud.
Replace it with the one matching your VM. We also assume that the user with SSH access is `root`; replace if necessary.
#### Setup the cloud VM
SSH to the machine:
```bash
$ ssh root@1.2.3.4
```
Create Wireguard keys:
```bash
$ wg genkey | tee privatekey | wg pubkey > publickey
```
Create Wireguard config:
```bash
$ cat << EOF > /etc/wireguard/wg0.conf
[Interface]
Address = 192.168.6.1/24
PrivateKey = $(cat privatekey)
ListenPort = 41194
# Mobile client
[Peer]
PublicKey = <public key from next step>
AllowedIPs = 192.168.6.2/32
EOF
```
Start and enable the Wireguard service:
```
$ sudo systemctl enable --now wg-quick@wg0
```
Allow binding non-loopback interfaces when creating an SSH reverse tunnel
by setting `GatewayPorts clientspecified` in `/etc/ssh/sshd_config`.
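A minimal sketch of that change (assuming a systemd-managed SSH daemon; the service may be named `ssh` on Debian-based systems):

```bash
# Let reverse-tunnel clients bind non-loopback addresses on the VM.
echo "GatewayPorts clientspecified" | sudo tee -a /etc/ssh/sshd_config
sudo systemctl restart sshd
```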
#### Setup the test machine (mobile?)
On some computer with `wg` installed, generate the keys:
```bash
$ wg genkey | tee privatekey | wg pubkey > publickey
```
Create the Wireguard configuration. Follow the instructions for your favorite application.
For Android, you can use this: https://play.google.com/store/apps/details?id=com.wireguard.android
If setting up a Linux machine, you can create the configuration like this:
```bash
$ cat << EOF > /etc/wireguard/wg0.conf
[Interface]
Address = 192.168.6.2/24
PrivateKey = $(cat privatekey)
# The cloud VM
[Peer]
PublicKey = <public key from the previous step>
AllowedIPs = 192.168.6.1/32
Endpoint = 1.2.3.4:41194
EOF
```
Start and enable the Wireguard service. If on a Linux machine, something like this will do:
```
$ sudo systemctl enable --now wg-quick@wg0
```
On a mobile, follow the instructions of your application.
After a while, your client should be able to ping the IP address of the VM: `192.168.6.1`.
You may find the output of `wg show` useful while waiting for the peers to connect.
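For example, from the test machine:

```bash
# Verify the tunnel is up and traffic flows to the VM's VPN address.
ping -c 3 192.168.6.1
sudo wg show   # check that the peer shows a recent handshake
```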
#### Setup the cluster
Deploy the helper Pod. We will use an image created [with this Dockerfile](https://codeberg.org/dkarakasilis/self-hosted-ci/src/branch/main/image) and
published [here](https://quay.io/repository/jimmykarily/nginx-ssh-reverse-proxy). The image's entrypoint works with a config
described [here](https://codeberg.org/dkarakasilis/self-hosted-ci/src/commit/20d7c6cbf70cd5318309362b0897e6aeb9842b82/image/start.sh#L5-L27).
The image is not multi-arch, but there is one suitable for the Raspberry Pi 4 (see the comment in the file).
If you are going to create a fresh Kairos cluster, you can use a config like the following to automatically set up the helper Pod (make sure you replace the `id_rsa` and `id_rsa.pub` keys).
If you prefer not to have the keys stored on your Kairos host filesystem, you can simply create the same resources using `kubectl apply -f` after your cluster is up and running.
```
#cloud-config

users:
- name: kairos
  passwd: kairos

stages:
  after-install-chroot:
  - files:
    - path: /var/lib/rancher/k3s/server/manifests/rproxy-pod.yaml
      content: |
        ---
        apiVersion: v1
        data:
          id_rsa: the_vms_private_key_in_base64
          id_rsa.pub: the_vms_public_key_in_base64
        kind: Secret
        metadata:
          name: jumpbox-ssh-key
        type: Opaque
        ---
        apiVersion: v1
        kind: ConfigMap
        metadata:
          name: proxy-config
        data:
          config.json: |
            {
              "services": [
                {
                  "bindIP": "192.168.6.1",
                  "bindPort": "443",
                  "proxyAddress": "traefik.kube-system.svc",
                  "proxyPort": "443"
                },
                {
                  "bindIP": "192.168.6.1",
                  "bindPort": "80",
                  "proxyAddress": "traefik.kube-system.svc",
                  "proxyPort": "80"
                }
              ],
              "jumpbox": {
                "url": "1.2.3.4",
                "user": "root",
                "sshKeyFile": "/ssh/id_rsa"
              }
            }
        ---
        apiVersion: apps/v1
        kind: Deployment
        metadata:
          annotations:
          name: nginx-ssh-reverse-proxy
        spec:
          replicas: 1
          selector:
            matchLabels:
              app.kubernetes.io/instance: nginx-ssh-reverse-proxy
              app.kubernetes.io/name: nginx-ssh-reverse-proxy
          template:
            metadata:
              labels:
                app.kubernetes.io/instance: nginx-ssh-reverse-proxy
                app.kubernetes.io/name: nginx-ssh-reverse-proxy
            spec:
              containers:
              - name: proxy
                # Change to quay.io/jimmykarily/nginx-ssh-reverse-proxy-arm64:latest
                # if you are running on a Raspberry Pi 4
                image: quay.io/jimmykarily/nginx-ssh-reverse-proxy:latest
                command: ["/start.sh", "/proxy-config/config.json"]
                imagePullPolicy: Always
                volumeMounts:
                - name: ssh-key
                  mountPath: /ssh
                - name: config-volume
                  mountPath: /proxy-config/
              volumes:
              - name: ssh-key
                secret:
                  secretName: jumpbox-ssh-key
                  defaultMode: 0400
              - name: config-volume
                configMap:
                  name: proxy-config
```
In a nutshell, the config above creates a reverse SSH tunnel from the VM to the Pod. Inside the Pod, nginx redirects traffic to the Traefik load balancer running on the cluster. This has the effect that any request landing on the VM on ports 80 and 443 will eventually reach the Traefik instance inside the cluster on ports 80 and 443.
As a result, you can point any domain you want to the VM and it will reach the corresponding Ingress defined on your cluster.
{{% alert color="info" %}}
**NOTE:** The SSH tunnel will only bind the IP address `192.168.6.1` on the VM, which means, anyone trying to access the VM using its public IP address, will not be able to access the cluster. Only machines that can talk to `192.168.6.1` have access, in other words, machines inside the VPN.
{{% /alert %}}
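Conceptually, the tunnel the Pod maintains is equivalent to something like the following (an illustrative sketch, not the Pod's exact invocation; it assumes it runs in-cluster, where `traefik.kube-system.svc` resolves):

```bash
# Bind the VPN-only address on the VM and forward it back to Traefik in the cluster.
ssh -N \
  -R 192.168.6.1:80:traefik.kube-system.svc:80 \
  -R 192.168.6.1:443:traefik.kube-system.svc:443 \
  root@1.2.3.4
```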
#### Test the connection
- Try to access the cluster with the VPN IP address (should work).
From your test peer, open `http://192.168.6.1`. You should see a 404 message from Traefik.
You can also verify it is a response from Traefik in your cluster, by calling curl
on the `https` endpoint (on a "default" k3s installation):
```bash
$ curl -k -v https://192.168.6.1 2>&1 | grep TRAEFIK
* subject: CN=TRAEFIK DEFAULT CERT
* issuer: CN=TRAEFIK DEFAULT CERT
```
- Try to access the cluster with a domain pointing to the VPN IP address (should work)
You can create a wildcard DNS record and point it to the VPN IP address if
you want to make it easier for people to access the services you are running.
E.g. by creating an A record like this: `*.mydomainhere.org -> 192.168.6.1`
you will be able to create Ingresses for your applications like:
`app1.mydomainhere.org`, `app2.mydomainhere.org`.
- Try to access the cluster using the public IP address (should not work)
```bash
$ curl http://1.2.3.4
```
This command should fail to connect to your cluster.
### Conclusion
For non-critical workloads, when 100% uptime is not a hard requirement, the solution we described allows one to host services on their own hardware that would otherwise cost multiple times more. It does so without exposing the home network to the public.
If you liked this solution or if you have comments, questions or recommendations for improvements, please reach out!
### Useful links
- [Kairos documentation](https://kairos.io/docs/)
- [WireGuard documentation](https://www.wireguard.com/quickstart/)

View File

@@ -1,23 +0,0 @@
---
title: "Kairos, SpectroCloud, and Canonical Collaborate to Deliver Revolutionary Telco Radio Edge Solution"
date: 2023-03-13
linkTitle: "Kairos, SpectroCloud, and Canonical Collaboration at MWC"
description: |
  Kairos, the open-source distributed infrastructure platform, has collaborated with SpectroCloud and Canonical to develop a revolutionary Telco Radio Edge solution. The solution leverages the latest advances in OpenRAN automation and distributed compute management, and is set to take center stage at this year's Mobile World Congress.
author: Ettore Di Giacinto ([Twitter](https://twitter.com/mudler_it)) ([GitHub](https://github.com/mudler))
---
Hello Kairos community!
We are thrilled to announce that **Kairos** has been used as a key building block for a revolutionary Telco Radio Edge solution developed in collaboration with [Spectro Cloud](https://www.spectrocloud.com/) and [Canonical](https://canonical.com/). This cutting-edge solution showcases the latest advances in OpenRAN automation and distributed compute management, and took center stage at this year's Mobile World Congress.
The Telco Radio Edge solution leverages the power of Kairos, Ubuntu Pro 22.04 LTS with RT kernel, and MicroK8s CAPI provider to deliver highly distributed edge node onboarding, secure deployment, and substrate provisioning. With this innovative technology stack, we've enabled OpenRAN o-DU service orchestration at scale, while optimizing performance, scalability, reliability, and security.
This is an exciting collaboration between the Kairos project, Spectro Cloud and Canonical to develop a solution that is highly performant, efficient, and scalable. The demos that have been presented at MWC showcase the advanced capabilities of the MicroK8s CAPI provider, and highlight the power of Kairos as a building block for distributed infrastructure substrates that can host even the most demanding modern OpenRAN o-DU, 5G UPF or AI/ML use-cases at scale.
This is a true testament to the power of open-source technologies and community collaboration, and we can't wait to see what new possibilities this partnership will bring.
Thank you for your continued support and enthusiasm for Kairos!
**Details**: You can learn more about the Telco Radio Edge solution on the Ubuntu blog at https://ubuntu.com/blog/meet-canonical-at-mwc-barcelona-2023 or https://ubuntu.com/blog/canonical-at-mwc, and watch it in action here: https://www.youtube.com/watch?v=wUCSK0O8Ro4

View File

@@ -1,9 +0,0 @@
---
title: "Media Section"
date: 2023-01-04T17:17:11+01:00
author: Mauro Morales ([Twitter](https://twitter.com/mauromrls)) ([GitHub](https://github.com/mauromorales))
---
We've added a new media section so it's easy to find the different videos and articles about Kairos. To access it, go to the Documentation section; at the bottom of the left menu you will find a link called Media.
You can also [click here]({{< ref "/media" >}} "Media") to go check it out.

View File

@@ -1,41 +0,0 @@
---
title: "Kairos release v1.5"
date: 2023-01-27
linkTitle: "Announcing v1.5 Kairos release"
description: "Introducing Kairos 1.5: A Smarter, More Secure Way to Manage Your Infrastructure"
author: Ettore Di Giacinto ([Twitter](https://twitter.com/mudler_it)) ([GitHub](https://github.com/mudler))
---
<h1 align="center">
<br>
<img width="184" alt="kairos-white-column 5bc2fe34" src="https://user-images.githubusercontent.com/2420543/215073247-96988fd1-7fcf-4877-a28d-7c5802db43ab.png">
<br>
<br>
</h1>
We are thrilled to announce the release of Kairos version 1.5, a major update that brings significant improvements to user experience and security. With this release, we have made it even easier for you to install and set up Kairos, as well as better protect your user data. Our community has been an invaluable source of feedback, bug reports, and contributions, and we are grateful for their support.
You can find Kairos core images at https://github.com/kairos-io/kairos/releases/tag/v1.5.0 and images with k3s pre-bundled here: https://github.com/kairos-io/provider-kairos/releases/tag/v1.5.1.
## Effortless Installation with the WebUI Installer
![WebUI](https://user-images.githubusercontent.com/2420543/214573939-31f887b8-890c-4cce-a02a-0100198ea7d9.png)
Gone are the days of complicated command-line instructions. With the new [WebUI installer](/docs/installation/webui/), installation and setup are a breeze. Simply follow the steps on the web page, and you'll be up and running in no time. You can also use [our core images as an installer](/docs/examples/core/). Take a look at this gif to see the WebUI installer in action:
![Peek 2023-01-04 01-04](https://user-images.githubusercontent.com/2420543/210461794-fb80ad90-5d11-479d-945d-2e3ba3890435.gif)
## Protect Your Data with User Data Encryption at the Edge
Kairos 1.5 now allows you to encrypt your user data with ease, keeping it secure from prying eyes. Encryption is done via TPM and optionally with the Kairos KMS (Key Management Server) for external authentication and management of encrypted secrets. Check out our [documentation](/docs/advanced/partition_encryption) for more information on partition encryption.
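For reference, enabling it is mostly a matter of listing the partitions to encrypt in the cloud config. A minimal sketch (the `encrypted_partitions` key and the `COS_PERSISTENT` label follow the documentation of the time; treat the exact names as assumptions):

```bash
# Minimal cloud config enabling TPM-backed encryption of the persistent partition.
cat << 'EOF' > cloud-config.yaml
#cloud-config
install:
  encrypted_partitions:
  - COS_PERSISTENT
EOF
```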
## OS updates
We've added RockyLinux and Debian to our list of supported releases, giving you more options to run Kairos on both stable and feature-rich operating systems. We've also updated our Alpine support, so you can now run Kairos on the latest version of Alpine Linux.
## Extend Kairos with Custom Deployment Models (`bundles`)
Kairos 1.5 allows you to extend the configuration of your node with custom, container-based deployment models defined as `bundles`. Check out our [documentation](/docs/advanced/bundles) and [examples](/docs/examples/bundles) to see how to deploy `MetalLB`. `Kubevirt` and `MetalLB` bundles are also available in the [community-bundles](https://github.com/kairos-io/community-bundles) repository.
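As an illustration, pulling a bundle in via the cloud config can look roughly like this (the `run://` target and the community-bundles tag are assumptions based on the repository layout):

```bash
# Minimal cloud config deploying the MetalLB community bundle.
cat << 'EOF' > cloud-config.yaml
#cloud-config
bundles:
- targets:
  - run://quay.io/kairos/community-bundles:metallb_latest
EOF
```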
---
For a full list of changes, see the [Changelog](https://github.com/kairos-io/kairos/releases/tag/v1.5.0). We hope you find these updates useful and as always, let us know if you have any questions or feedback. Thanks for using Kairos!

View File

@@ -1,67 +0,0 @@
---
title: "Kairos release v1.6"
date: 2023-02-26
linkTitle: "Announcing v1.6 Kairos release"
description: "Introducing Kairos 1.6: Get ready to boot with AuroraBoot!"
author: Ettore Di Giacinto ([Twitter](https://twitter.com/mudler_it)) ([GitHub](https://github.com/mudler))
---
<h1 align="center">
<br>
<img width="184" alt="kairos-white-column 5bc2fe34" src="https://user-images.githubusercontent.com/2420543/215073247-96988fd1-7fcf-4877-a28d-7c5802db43ab.png">
<br>
<br>
</h1>
Kairos is a cloud-native meta-Linux distribution that brings the power of public cloud to your on-premises environment. With Kairos, you can build your own cloud with complete control and no vendor lock-in. It allows you to easily spin up a Kubernetes cluster with the Linux distribution of your choice, and manage the entire cluster lifecycle with Kubernetes.
#### Why you should try Kairos:
Kairos provides a wide range of use cases, from Kubernetes applications to appliances and more. You can provision nodes with your own image or use Kairos releases for added flexibility. Kairos also simplifies day-2 operations like node upgrades. It provides the benefits of a unified, cloud-native approach to OS management.
#### What you can do with Kairos:
With Kairos, you can create an immutable infrastructure that stays consistent and free of drift with atomic upgrades. You can manage your cluster's entire lifecycle with Kubernetes, from building to upgrading. Kairos also allows you to automatically create multi-node, single clusters that span across regions for maximum flexibility and scalability.
## Kairos 1.6.0 release
Kairos 1.6.0 has just been released, and we are thrilled to share the latest updates and improvements to the Kairos project. This release includes bug fixes, small improvements to the Kairos core codebase, and the introduction of AuroraBoot, a tool that simplifies bootstrapping of Kairos nodes. In this post, we will explore how AuroraBoot works and its benefits for users deploying Kairos.
### What is AuroraBoot?
[AuroraBoot](https://kairos.io/docs/reference/auroraboot/) is a tool designed to make the process of bootstrapping Kairos machines quick, simple, and efficient. It is specifically designed for the Kairos operating system and provides a comprehensive solution for downloading required artifacts and provisioning a machine, both from the network or manually by flashing to a USB stick.
AuroraBoot simplifies the bootstrapping process by automating several steps, such as downloading required files, verifying their authenticity, and providing a streamlined interface for customizing the installation media. With AuroraBoot, users can prepare the environment for network-based bootstrapping, download the necessary release assets, and also customize the installation media for USB-based mass-installations.
### The Benefits of AuroraBoot
With AuroraBoot, users can prepare multiple nodes in a lab before shipment, or deploy Kairos nodes in a network segment where workloads can already be sent (running AuroraBoot in an already-existing downstream cluster). Additionally, AuroraBoot offers a simple, intuitive, and streamlined way to deploy Kairos automatically and manually. It makes the deployment process faster, more efficient, and less error-prone. Besides, it leverages the DHCP server already present in the network for booting, requiring zero configuration.
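As an illustration, a netboot run looks roughly like the following (the image name and `--set` flags follow the AuroraBoot docs of the time and should be treated as assumptions):

```bash
# Serve Kairos release artifacts for network booting on the local segment.
docker run --rm -ti --net host quay.io/kairos/auroraboot \
  --set "artifact_version=v1.6.0" \
  --set "release_version=v1.6.0" \
  --set "flavor=opensuse" \
  --set "repository=kairos-io/kairos"
```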
You can see AuroraBoot in action here, with [a full e2e example](https://kairos.io/docs/examples/p2p_e2e/) on how to use it with p2p in Kairos, and in the video below:
{{< youtube id="7Vym18wz9Uw" title="Kairos and libp2p" >}}
## Improvements to the WebUI for a simplified user experience
The WebUI got several improvements: we have integrated the documentation into the web interface, and it can now also be accessed offline. The configuration schema is validated, and a message is displayed if the configuration is incorrect. You can see how it works here:
[Screencast from 2023-02-21 15-24-59.webm](https://user-images.githubusercontent.com/433958/221510120-ce43eb66-e8c0-4b91-885e-3a213fac896b.webm)
## Other Improvements in Kairos 1.6.0
Aside from AuroraBoot, Kairos 1.6.0 includes several improvements and bugfixes, including:
- Integration of documentation into the Web UI
- Initial support for schema validation in the WebUI and the installer
- Support for Rocky Linux in provider builds
- Renaming of kairos-agent and addition of SHA256 signatures
- Addition of custom mounts
- Fix for DHCP hostname issues
- Fix for encryption reset failures
- Fix for systemd-networkd hostname settings
- Fix for Tumbleweed ISO
You can check the full changelog at: https://github.com/kairos-io/kairos/releases/tag/v1.6.0
## Conclusion
Kairos 1.6.0 is a significant step forward in simplifying the deployment process of Kairos nodes. With AuroraBoot, users can deploy Kairos faster, more efficiently, and with less risk of error. Additionally, the bug fixes and improvements in this release demonstrate Kairos' commitment to providing a reliable and robust operating system for users. We invite you to download and try Kairos 1.6.0 and experience the benefits of AuroraBoot for yourself.
---
For a full list of changes, see the [Changelog](https://github.com/kairos-io/kairos/releases/tag/v1.6.0). We hope you find these updates useful and as always, let us know if you have any questions or feedback. Thanks for using Kairos!

View File

@@ -1,48 +0,0 @@
---
title: "Kairos release v2.0"
date: 2023-04-13
linkTitle: "Announcing v2.0 Kairos release"
description: "Introducing Kairos 2.0: long live UKI!"
author: Ettore Di Giacinto ([Twitter](https://twitter.com/mudler_it)) ([GitHub](https://github.com/mudler))
---
<h1 align="center">
<br>
<img width="184" alt="kairos-white-column 5bc2fe34" src="https://user-images.githubusercontent.com/2420543/215073247-96988fd1-7fcf-4877-a28d-7c5802db43ab.png">
<br>
<br>
</h1>
Kairos is a cloud-native meta-Linux distribution that brings the power of public cloud to your on-premises environment. With Kairos, you can build your own cloud with complete control and no vendor lock-in. It allows you to easily spin up a Kubernetes cluster with the Linux distribution of your choice, and manage the entire cluster lifecycle with Kubernetes.
#### Why you should try Kairos:
Kairos provides a wide range of use cases, from Kubernetes applications to appliances and more. You can provision nodes with your own image or use Kairos releases for added flexibility. Kairos also simplifies day-2 operations like node upgrades. It provides the benefits of a unified, cloud-native approach to OS management.
#### What you can do with Kairos:
With Kairos, you can create an immutable infrastructure that stays consistent and free of drift with atomic upgrades. You can manage your cluster's entire lifecycle with Kubernetes, from building to upgrading. Kairos also allows you to automatically create multi-node, single clusters that span across regions for maximum flexibility and scalability.
## Kairos 2.0.0 release
Kairos 2.0.0 has just been released, and we are thrilled to share the latest updates and improvements to the Kairos project. This is a major release, as it reflects changes to internal core components.
### What changed?
We replaced the former dracut modules (a set of bash scripts, dracut and systemd services), which were responsible for the immutability management of Kairos, with https://github.com/kairos-io/immucore, a self-contained binary which has no dependencies and can run without dracut and systemd. While the changes shouldn't be impactful for most of our users, as they affect only internal components, we suggest trying the upgrade in a lab environment before upgrading from earlier versions (v1.x).
The 2.0 release allows us to:
- no longer depend on systemd while setting up immutability on boot (allowing us to unblock several stories, for instance creating Alpine images with vanilla kernels)
- have hybrid images that boot both as a single-file [UKI](https://github.com/uapi-group/specifications/blob/main/specs/unified_kernel_image.md) image and by pivoting (as we do currently)
- pave the way for things like SecureBoot, Static Measured Boot and much more
- debug things more cleanly, have a better testbed, and ease maintenance of the codebase
- be a step closer to our Confidential Computing roadmap; indeed, you can now try running [Confidential Computing workloads](https://kairos.io/docs/advanced/coco/).
Besides, we now have a full SBOM list attached to images as part of the release process, along with `in-toto` attestation, allowing [you to verify the attestation of SBOM lists as well](https://docs.sigstore.dev/cosign/attestation/) and to have a full audit of images. We have also integrated `grype` and `trivy` in our pipelines, so releases now contain CVE reports too; finally, we upload the generated reports as SARIF files to GitHub to get notifications and see more easily the impact of CVEs on the images. See also our [documentation](https://kairos.io/docs/upgrade/kubernetes/#verify-images-attestation-during-upgrades) on how to gate upgrades and allow only verified images to be used during the process.
There were also fixes to the Debian flavor (thanks to the community for reporting issues!) and now manual upgrades with private registries are supported, too.
Finally, it is now also possible to specify custom bind-mount paths to overlay on top of the persistent partition, making it easy to specify paths that you want to be persistent in the system via the cloud config file: https://kairos.io/docs/advanced/customizing/#customizing-the-file-system-hierarchy-using-custom-mounts.
If you are curious about what's next, check out our [Roadmap](https://github.com/orgs/kairos-io/projects/2) and feel free to engage with our [community](https://kairos.io/community/)!
---
For a full list of changes, see the [Changelog](https://github.com/kairos-io/kairos/releases/tag/v2.0.0). We hope you find these updates useful and as always, let us know if you have any questions or feedback. Thanks for using Kairos!

View File

@ -1,50 +0,0 @@
---
title: "Kairos is now part of the Secure Edge-Native Architecture by Spectro Cloud and Intel"
date: 2023-04-18
linkTitle: "The Secure Edge-Native Architecture"
description: "Learn about how Kairos is now part of SENA, the Secure Edge-Native Architecture announced by Spectro Cloud and developed in collaboration with Intel, enabling organizations to securely deploy, provision, operate and manage at scale edge locations. Discover the benefits of SENA and what's coming up in the future roadmap of Kairos' secure edge computing solutions."
author: Ettore Di Giacinto ([Twitter](https://twitter.com/mudler_it)) ([GitHub](https://github.com/mudler))
---
The Kairos team is thrilled to announce the release of the Secure Edge-Native Architecture (SENA) whitepaper! You can download it [here](https://github.com/kairos-io/kairos/files/11250843/Secure-Edge-Native-Architecture-white-paper-20240417.3.pdf)
## What is SENA?
SENA stands for "Secure Edge-Native Architecture." It is a comprehensive solution architecture that outlines the tools and practices to address the modern requirements for deploying and managing Kubernetes-based edge applications at scale. SENA's objective is to establish a new industry standard in the form of a well-defined framework that leverages best-in-class security and other concepts, design principles and tools, bringing together the most innovative hardware and software security capabilities.
SENA covers considerations across the full lifecycle of edge hardware and software to enable teams to efficiently deploy, provision, operate and manage edge environments at scale.
## Kairos and SENA
Kairos is a core foundation of SENA, providing capabilities in combination with other components across the following areas:
### When deploying hardware edge devices
- Ease of deployment: Kairos enables zero-touch provisioning through our [Kubernetes Native API](https://kairos.io/docs/installation/automated/) and locally with [AuroraBoot](https://kairos.io/docs/reference/auroraboot/).
- Self-coordinated deployment: Enable self-coordinated, fully autonomous deployments with [Integrated Kairos P2P support](https://kairos.io/docs/installation/p2p/).
- Flexible deployments: Kairos can be fully customized to meet your Infrastructure needs. Extend [Kairos images easily](https://kairos.io/docs/advanced/customizing/), or [build your own using the Kairos framework](https://kairos.io/docs/reference/build-from-scratch/), even at scale [by leveraging the power of Kubernetes](https://kairos.io/docs/advanced/build/).
### When provisioning the complete edge stack
- Ensuring the provenance of the image attestation before deployments and during upgrades via the Kubernetes control plane with [kyverno](https://kyverno.io/docs/writing-policies/verify-images/). Instructions can be found [here](https://kairos.io/docs/upgrade/kubernetes/#verify-images-attestation-during-upgrades).
- Ensuring provenance of the artifacts and complying with SLSA: Kairos releases SBOM artifacts and builds on GitHub Actions, allowing you to identify and track components included in the released images with [cosign](https://github.com/sigstore/cosign).
### When operating the edge application
- Immutable, read-only OS stack: Kairos is a single [container image](https://kairos.io/docs/architecture/container/), [immutable system](https://kairos.io/docs/architecture/immutable/) which is read-only and cannot be modified during runtime.
- Ensuring the privacy of user data at rest and in use. You can [encrypt data at rest](https://kairos.io/docs/advanced/partition_encryption/#offline-mode) using the TPM chip and with the Kairos Key Management Server (KMS) 'kcrypt'. The KMS also accepts only hardware devices with a TPM chip, ensuring onboarding of trusted devices.
- Providing the ability for applications to execute in a Trusted Execution Environment (TEE) leveraging [Gramine](https://github.com/gramineproject/gramine). A TEE is an environment where hardware mechanisms are used to ensure the integrity and privacy of process execution, protecting against privileged (root) processes and physical snooping of electrical signals or devices in the system. You can already run workloads in a TEE with Kairos. For instructions check out [Confidential computing](https://kairos.io/docs/advanced/coco/)
## What's next
Here are some of the items in our roadmap:
- Static and Dynamic measured boot: We are planning to have UKI-flavored variants to boot the full OS in a single file. This will enable measurement, signing, and verification, simplifying maintenance and management, and leading to true immutability with a reduced attack surface.
- Ensuring the provenance and integrity of the OS during boot and runtime. We plan to integrate measured boot and SecureBoot on top of UKI images, integrating with Keylime, enabling remote attestation of system integrity after boot
- Ensuring the provenance and integrity of the application stack at runtime: integration with GSC and [MarbleRun](https://github.com/edgelesssys/marblerun), to seamlessly run confidential applications in your Kubernetes cluster and run attestation of confidential workloads.
- Management of hardware at scale: OpenAMT - Offering ways to automatically register Kairos boxes to an OpenAMT-supported management platform.
You can already benefit from the SENA Architecture today with Kairos and you can follow our roadmap to see what's coming up in the next releases [here](https://github.com/orgs/kairos-io/projects/2).
Stay tuned! More to come!

View File

@ -1,141 +0,0 @@
---
title: "Understanding Immutable Linux OS: Benefits, Architecture, and Challenges"
date: 2023-03-22
linkTitle: "Understanding Immutable Linux OS: Benefits, Architecture, and Challenges"
description: "In this post we are trying to answer some of the typical questions that help understanding Immutable OSes principles and we will dive a bit in what solutions are out there, and what are the challenges in the field"
author: Ettore Di Giacinto ([Twitter](https://twitter.com/mudler_it)) ([GitHub](https://github.com/mudler))
---
For years, the traditional Linux operating system has been a top pick for its flexibility and ability to be customized. But as great as it is, there are use cases in which stricter security rules and higher reliability standards are needed. That's where immutable Linux operating systems come in - offering a more secure and reliable option, especially in settings where security is paramount.
{{< card header="![castle](https://user-images.githubusercontent.com/2420543/226939911-6801ecab-d023-4357-92f2-b782ae086462.png)" subtitle="_An illustration of a fortress surrounded by a moat and guarded by armored knights, with a banner flying the Linux penguin logo, medieval, fortified, secure, trending on Artstation._ Author: _Midjourney AI_"
>}}
{{< /card >}}
In this post, we'll be addressing some common questions to help you understand the principles behind immutable operating systems. We'll also be exploring the various solutions available and the challenges faced in this field. So, get ready to dive in!
## What is an Immutable Linux OS?
Explaining the concept of an immutable Linux OS to a newcomer can often turn into a detailed discussion on system internals. However, we'll simplify it here as much as possible, even for those unfamiliar with the concepts.
Formally defined, an immutable Linux OS (also known as Immutable Infrastructure or Immutable Deployment) is an operating system designed to be unchangeable and read-only. This means that once the operating system has been installed, the system files and directories cannot be modified. Any changes made to the system are temporary and lost when the system is rebooted. Think of it as a snapshot of a standard Linux system that cannot be changed. Any updates or changes are made by creating a new instance of the OS, deploying it, and switching over to the new instance. You can also find a very good writeup by Adrian Hornsby [here](https://medium.com/the-cloud-architect/immutable-infrastructure-21f6613e7a23).
If you're already a Linux user, you'll know that as `root` (Administrator), you can write anywhere in the filesystem, potentially corrupting the OS portion responsible for booting or management. In an immutable OS, however, any command that attempts to modify the system files will fail, as those files are only accessible for reading.
Immutable systems are particularly useful in environments where security is a top priority, such as cloud computing, embedded systems, kiosks, and container execution. Essentially, any environment that needs to scale can benefit from the security and reliability of an immutable OS.
{{% alert color="info" %}}
_"But what does that really mean? And what problem are Immutable systems trying to solve?"_
{{% /alert %}}
There are several advantages to using immutable Linux systems over traditional Linux systems. Firstly, there is an additional layer of **security** as it's not possible to tamper with the runtime OS. Changes, if accepted, are discarded on the next reboot. This means that if a hacker or a malicious actor gains access to the system, they cannot make permanent changes that could compromise the system's security.
Secondly, **maintenance** of immutable systems is easier because they do not require regular updates or patches at the atomic package level. Instead, the entire OS is updated, similar to how updates are handled on Android phones.
Finally, because the system is read-only, it is more **reliable** and less prone to failure. A declarative configuration model is usually tied to it, simplifying the configuration of the OS when orchestrated with other tools such as Ansible, Terraform, or similar.
{{% alert color="info" %}}
_"Right, but how do I manage upgrades?"_
{{% /alert %}}
Instead of upgrading the system in place, upgrades are typically handled by creating a new, updated image of the operating system and replacing the existing image, in an atomic operation. This process is commonly referred to as "image-based upgrade". The image can also be delivered to the end system, but this differs depending on the implementation, and there is no building on the node side.
![Upgrade](https://user-images.githubusercontent.com/2420543/224147132-50d6808e-0a1c-48d0-8f44-627bd0dfa9f2.png)
In contrast, standard Linux systems typically use package managers such as `apt` or `yum` to upgrade software packages in place. This can be a more complex process because the package manager must ensure that all dependencies are satisfied and that there are no conflicts between different software packages. Additionally, upgrades in standard Linux systems can sometimes cause issues if there are conflicts between different versions of software packages or if the upgrade process is interrupted.
## Challenges at scale
In standard Linux systems, the package manager has a lot of responsibilities and interacts directly with the system to apply changes. It can install packages, upgrade packages, merge configurations, and generate additional data required for the package. This makes installing software, upgrading, and running a system as easy as a couple of interactions with the package manager.
When it comes to upgrading an installed system, the package manager should take care of many aspects, such as: correctly ordering dependencies (which may require a solver), verifying which packages are installed or not, which new packages will be installed, and handling file transmission securely. However, as the complexity of the stack grows, conflicts between packages can arise, and the package manager may prompt the user to solve them. This is not ideal for scaling out deployments, upgrades, and cutting operational costs since it exposes the infrastructure to drift.
{{< card header="![Screenshot from 2023-03-09 18-25-17](https://user-images.githubusercontent.com/2420543/224106950-7d652652-c8e0-4ee4-980d-b057e4af903f.png)"
footer="">}}
_Huh, didn't we get rid of package conflicts already? ([screenshot](https://www.reddit.com/r/openSUSE/comments/z4ld75/this_seems_to_be_common_in_opensuse_should_i_wait/))_
{{< /card >}}
Tools like Ansible, Salt, Puppet, or Chef can manage and control the upgrade mechanisms of standard systems without requiring any interaction with each system during high-scale upgrades. In the standard model, clients handle certain portions of upgrades and installations, such as updating configuration files or regenerating the initramfs. However, these actions can eventually raise the infrastructure drift level, causing a configuration merge to block everything, or damage your infrastructure and interrupt services. One way to approach such issues is to prepare fallbacks, or to switch service connections only after an upgrade has been rolled out.
Transactional upgrades are a step toward making standard mutable Linux systems act more similarly to image-based upgrades in immutable Linux systems. In a transactional upgrade, the new software packages are prepared, usually into a separate partition, and applied after the first boot, similar to how an image-based upgrade works. However, unlike in an immutable system, the existing system files can still be modified during the upgrade process.
On the other hand, immutable OSes simplify managing the OS stack by not exposing the node to complexities during upgrades or installation. The image is built ahead of time, using a well-tested, reproducible recipe that does not modify the system itself. The package manager is responsible for preparing a new, pristine environment that the real system will boot into afterward. For instance, immutable Linux OSes that use A/B partitioning create a new image of the operating system with the updated software packages or configuration changes. The new image is deployed to a transitive partition, which then becomes the new active partition. If the upgrade fails, the system can simply boot on the passive partition.
## Immutable OS: a look at the current landscape
Here are some popular immutable OS solutions, although this list is not exhaustive; more complete and up-to-date lists can be found [on GitHub](https://github.com/castrojo/awesome-immutable). Each of the solutions was created to tackle its own set of challenges, and they differ in their implementation details depending on their target environments.
The following are some of the most popular Immutable OS solutions:
- CoreOS: A Linux-based operating system designed for containers and cloud computing, which uses an immutable file system called "Container Linux". CoreOS has now merged with Red Hat Enterprise Linux.
- Project Atomic: A CentOS-based Linux distribution that focuses on container deployment and management, using a layered approach that allows for easy rollbacks.
- Ubuntu Core: Ubuntu Core is a version of the Ubuntu operating system designed and [engineered for IoT and embedded systems](https://ubuntu.com/core/services/guide/intro-ubuntu-core). It uses snap packages exclusively to create a confined and transaction-based system. It also updates itself and its applications automatically.
- RancherOS: A Linux-based operating system that is designed to be minimal, lightweight, and optimized for running containers. RancherOS uses Docker for all system processes, and its file system is mounted read-only, making it immutable.
- Talos: An open-source Linux distribution designed to run Kubernetes, K3s, or other container orchestration systems. It features a highly secure, API-managed infrastructure with automated and scalable operations and is suitable for cloud, containers, and general-purpose environments.
- K3OS (discontinued): A minimal Linux distribution designed specifically for running Kubernetes clusters. k3os is built around k3s, a lightweight Kubernetes distribution, and uses the immutable Container Linux file system with an A/B update model to ensure smooth and reliable updates. It is suitable for cloud and container environments.
- Flatcar Container Linux: A Linux-based operating system that is based on CoreOS and is designed for use in containerized environments. Like CoreOS, Flatcar Container Linux uses an immutable file system to provide stability and security.
- Fedora Silverblue: A Fedora-based Linux distribution that uses an immutable file system and a transactional update model to provide a stable and secure environment. Fedora Silverblue is designed for use in desktop and containerized environments. A nice overview can be found [here](https://www.lifeintech.com/2021/11/19/immutable-os/) or [here](https://www.redhat.com/sysadmin/immutability-silverblue)
- Photon OS: A Linux-based operating system developed by VMware, which is designed to run containerized workloads. Photon OS uses a minimal package set and an immutable file system for enhanced security and manageability.
To simplify the comparison between the different Immutable OS solutions, the following table highlights their key differences and the environments they are targeted for:
| Solution | Based on | Update Model | Target Environment |
|---|---|---|---|
| CoreOS | Gentoo | Transactional Updates | Cloud |
| Talos | Nothing | Container image update | Cloud, Containers, General purpose |
| K3OS | Alpine | A/B | Cloud, Containers |
| Project Atomic | CentOS | Layered Packages | Containers |
| Ubuntu Core | Ubuntu | Transactional Updates | IoT, Embedded Systems |
| RancherOS | Linux | Docker for System Processes | Containers |
| Flatcar Container Linux | CoreOS | Transactional Updates | Cloud |
| Red Hat Atomic Host | Red Hat | Transactional Updates | Cloud, optimized for running containers |
| Fedora Silverblue | Fedora | Transactional Updates | Desktop, Containers |
| Photon OS | Linux | Immutable File System | Cloud |
| Kairos | Any Linux distribution | Immutable File System | Cloud, Edge, General purpose |
{{% alert color="info" %}}
_"So, what's Kairos? What's the challenges that Kairos tries to overcome?"_
{{% /alert %}}
## How Kairos fits in the ecosystem
Kairos is a great fit when you want to deploy a Linux system on real hardware, whether at the Edge[^1] or in a datacenter, on-premises or in your cloud. Specifically, if you're looking for:
- Zero-touch configuration and high-scalable deployments. [See how to perform automated installs](/docs/installation/automated/) or [how to create custom appliances](/docs/advanced/build/)
- A single distribution center of upgrades across your infrastructure using container registries. [See docs](/docs/architecture/container/#benefits)
- Strong security posture, including [online data encryption at-rest via TPM](/docs/advanced/partition_encryption/), supply chain verification, and a software bill of materials (SBOM)
- Good hardware support
- Simplified Kubernetes deployment with [self-coordinated K3s](/docs/installation/p2p/)
- [Flexibility in customization](/docs/advanced/customizing/), including fine-grained control over the OS layer (packages installed, versions), and complete support maintenance level by [building images from scratch](/docs/reference/build-from-scratch)
- Complete control over your infrastructure
- A [community-driven](/community/), [open roadmap](https://github.com/orgs/kairos-io/projects/2), office hours, and the opportunity to get involved
**Maintenance** - One thing you may have noticed when comparing Kairos to other alternatives is that it doesn't tie you to a specific OS. Instead, Kairos is flexible and portable, supporting all the popular Linux distributions, such as Ubuntu, Debian, and Fedora, among others. This unties you from the typical vendor lock-in strategies that force you to choose a specific distribution only for the immutability aspect.
The design also shines for its support for **long-term maintenance**. Each framework image released by Kairos allows the conversion of any OS to the given Kairos version, which could potentially enable maintenance for as long as the base OS support model allows. [You can learn more about it here](/docs/reference/build-from-scratch).
**Container based** - Kairos treats every operating system (OS) as a set of packages and represents the OS with a standard container image that can be executed with tools such as `podman`, `docker`, and so on. [This container image](/docs/architecture/container/) includes all the necessary components for booting. Kairos components manage all node lifecycle operations, such as upgrading, installing, and resetting. These components are packaged within the [framework images](/docs/reference/image_matrix/#framework-images), which can be overlaid while creating a standard container image. Unlike traditional Linux distributions, the kairos-agent handles upgrades by pulling new container images as systems to boot, instead of relying on the OS package manager.
All installation and upgrades are delivered exclusively through container images, which are overlaid at boot time, eliminating the need for a container engine at runtime. The container image used for booting includes the kernel, initrd, and all other required pieces. This allows for customization directly within a Dockerfile. The container being booted is the image itself, and there is no actual container runtime running the image. The container is used to construct an image internally, which is then used to boot the system in an A/B fashion, without adding any overhead.
This approach offers several benefits, including the ability to verify the image with security scans and treat it similarly to a standard application that can be distributed via a container registry.
**Separation of concerns** - The separation of concerns between the OS and the management interface is clear in Kairos. The OS is responsible for providing the booting components and packages necessary for its operation, while Kairos provides the framework for managing the node's lifecycle and immutability interface. The relationship between the image and Kairos is governed by a [contract](/docs/reference/build-from-scratch), which enables package handling without vendor lock-in.
This separation of concerns simplifies the delegation of package maintenance, CVE monitoring, and security fixes to the OS layer. Upgrades to container images can be achieved by chaining Dockerfiles or manually committing changes to the image.
**Automatic deployments** - To further [automate](/docs/installation/automated/) custom deployment models, the Kairos Kubernetes Native Extensions can be used to create customized configurations either directly from Kubernetes or via the command line interface (CLI).
**Self-coordinated**: [Configuring multiple nodes](/docs/installation/p2p/) at the Edge to form a single cluster can present challenges at various levels, from the network stack (such as assigning IPs to machines) to the configuration of the cluster topology (such as determining which machine will be the master). However, Kairos enables completely self-coordinated deployments, including for high availability (HA), eliminating the need for any configuration templating mechanism or specific role assignments for nodes.
## Conclusion
In conclusion, an immutable Linux OS provides a more secure and reliable environment than a standard Linux system. However, it may not be suitable for all use cases, such as those that require frequent updates or modifications to the system. Upgrades in immutable systems are handled differently from standard Linux systems, using an image-based approach rather than package-based upgrades. While transactional upgrades in standard mutable Linux systems offer some benefits over traditional package-based upgrades, they still do not provide the same level of security and reliability as image-based upgrades in immutable Linux systems. Overall, the decision to use an immutable Linux system should be based on the specific requirements of the use case, and the benefits and limitations should be carefully considered, something that we can't just let ChatGPT decide :wink:
Immutable Linux OSes offer a higher degree of reliability, security, and fault tolerance compared to traditional Linux systems. By using read-only file systems, separate update partitions and A/B partitioning, Immutable Linux OSes provide a safe, reliable way to update the system without downtime or the risk of breaking the system. Immutable Linux OSes are particularly well-suited for critical systems such as cloud container platforms, embedded systems, or IoT devices, where stability, security and scalability are of the utmost importance.
## Footnotes
[^1]: (Author note) As I dislike marketing buzzwords, I prefer to describe the Edge as the last mile of computing. It involves dedicated hardware that needs to be controlled by the Cloud in some way, such as a small server running Kubernetes, performing measurements and communicating with the Cloud. The term "Edge" is a broad, generic term that encompasses various computing scenarios, such as near-edge and far-edge computing, each with its own specialized deployment solution.
To put it simply, Kairos can be deployed on bare-metal hardware, and it provides robust support for hardware deployment.

View File

@ -1,6 +0,0 @@
---
title: Community
menu:
main:
weight: 40
---

View File

@ -1,8 +0,0 @@
---
title: "Advanced"
linkTitle: "Advanced"
weight: 5
description: >
Advanced settings
---

View File

@ -1,117 +0,0 @@
---
title: "Pushing configuration to a node after installation"
linkTitle: "After install"
weight: 1
description: >
---
Kairos' configuration mechanism is based on the `cloud-config` file given during installation; however, it's possible to extend the configuration by providing additional cloud-configs in either `/oem` or `/usr/local/cloud-config`.
By default, `kairos` reads YAML cloud-config files in the directories above in lexicographic order; indeed, after installation you should be able to see the configuration generated by the interactive installer as `/oem/99_custom.yaml` in the system.
This mechanism can be used to set and enable persistent configuration on boot after node deployment.
We are going to see how to do that manually or with Kubernetes by using the `system-upgrade-controller`.
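As an illustration of the ordering, a node provisioned with the interactive installer might show something like the following (a hypothetical listing; file names depend on what you add, and a `10_`-prefixed file is applied before `99_custom.yaml`):

```bash
ls /oem
# 10_swapfile.yaml  99_custom.yaml
```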
## Manually
SSH into the node and copy the config file you want to add into `/oem`. For instance, to add zram on boot we can copy the following file in `/oem/100_zram.yaml` or `/usr/local/cloud-config/100_zram.yaml` and reboot:
```yaml
stages:
  boot:
    - name: "zram setup"
      commands:
        - modprobe zram
        - echo lzo > /sys/block/zram0/comp_algorithm
        - echo 1G > /sys/block/zram0/disksize
        - mkswap --label zram0 /dev/zram0
        - swapon --priority 100 /dev/zram0
```
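After the reboot, you can quickly verify that the stage ran (assuming the config above was picked up):

```bash
# The zram device should be listed as active swap with priority 100
swapon --show
# NAME       TYPE      SIZE USED PRIO
# /dev/zram0 partition   1G   0B  100
```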
## With Kubernetes
To push configurations to a node, [system-upgrade-controller](https://github.com/rancher/system-upgrade-controller) needs to be deployed in the target cluster; it executes plans on the cluster nodes. In the example below, we use a plan to push a 3 GB swapfile, enabled during boot, and restart the node afterward.
To install [system-upgrade-controller](https://github.com/rancher/system-upgrade-controller), use kubectl:
```bash
kubectl apply -f https://github.com/rancher/system-upgrade-controller/releases/download/v0.9.1/system-upgrade-controller.yaml
```
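Before applying any plans, it's worth confirming the controller rolled out; a quick check (the deployment name matches the upstream manifest referenced above):

```bash
kubectl -n system-upgrade rollout status deployment/system-upgrade-controller
```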
{{% alert title="Note" %}}
Several roll-out strategies can be used with `system-upgrade-controller` which are not illustrated in this example. For instance, you can specify the number of hosts running the upgrades, filter by labels, and more. [Refer to the project documentation](https://github.com/rancher/system-upgrade-controller) on how to create efficient strategies to roll out plans on the nodes. In the example below, the plan is applied to every host of the cluster, one by one in sequence.
{{% /alert %}}
The following pushes a new cloud config over the `/oem` directory and reboots the node:
```bash
cat <<'EOF' | kubectl apply -f -
---
apiVersion: v1
kind: Secret
metadata:
  name: custom-script
  namespace: system-upgrade
type: Opaque
stringData:
  swapfile.yaml: |
    stages:
      boot:
        - name: "Setup swapfile"
          if: "[ ! -e /usr/local/swapfile ]"
          commands:
            - dd if=/dev/zero of=/usr/local/swapfile bs=1M count=3K
            - mkswap /usr/local/swapfile
        - name: "Enable swapfile"
          if: "[ -e /usr/local/swapfile ]"
          commands:
            - swapon /usr/local/swapfile
  add-oem-file.sh: |
    #!/bin/sh
    set -e
    if diff /host/run/system-upgrade/secrets/custom-script/swapfile.yaml /host/oem/10_swapfile.yaml >/dev/null; then
        echo Swapfile present
        exit 0
    fi
    # Note: this is a symlink. We can also cp -L, but be aware that standard cp doesn't work.
    cat /host/run/system-upgrade/secrets/custom-script/swapfile.yaml > /host/oem/10_swapfile.yaml
    sync
    mount --rbind /host/dev /dev
    mount --rbind /host/run /run
    nsenter -i -m -t 1 -- reboot
    exit 1
---
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
  name: add-swapfile
  namespace: system-upgrade
spec:
  concurrency: 1
  # This is the version (tag) of the image.
  # The version refers to the Kairos version plus the k3s version.
  version: "v1.0.0-rc2-k3sv1.23.9-k3s1"
  nodeSelector:
    matchExpressions:
      - { key: kubernetes.io/hostname, operator: Exists }
  serviceAccountName: system-upgrade
  cordon: false
  upgrade:
    # Here goes the image, which is tied to the flavor being used.
    # Currently you can pick between opensuse and alpine.
    image: quay.io/kairos/kairos-opensuse-leap
    command:
      - "/bin/bash"
      - "-c"
    args:
      - bash /host/run/system-upgrade/secrets/custom-script/add-oem-file.sh
  secrets:
    - name: custom-script
      path: /host/run/system-upgrade/secrets/custom-script
EOF
```
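To follow the rollout and verify the result, a sketch (assuming the resources above; the last command runs on the node itself):

```bash
# Watch the jobs created by the plan; with concurrency: 1 they run one node at a time
kubectl -n system-upgrade get plans,jobs

# On a node, after it has rebooted:
swapon --show | grep /usr/local/swapfile
```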

View File

@ -1,288 +0,0 @@
---
title: "Build Kairos appliances"
linkTitle: "Build"
weight: 5
description: >
---
{{% alert title="Note" %}}
This page is a work in progress!
The feature is experimental and API is likely going to be subject to changes, don't rely on it yet!
{{% /alert %}}
This documentation section describes how the Kairos Kubernetes Native API extensions can be used to build custom appliances or boot media for Kairos.
While it's possible to just run Kairos from the artifacts provided by our release process, there are specific use cases which need extended customization, for example when additional kernel modules or custom, user-defined logic must be embedded in the media used for installations.
Note the same can be achieved by using advanced configuration and actually modifying the images during the installation phase by leveraging the `chroot` stages that take place in the image. This is discouraged, as it runs counter to the "Single Image", "No infrastructure drift" approach of Kairos: the idea here is to create a system from scratch and apply it to the nodes, not to run any specific logic on the node itself.
To achieve that, Kairos provides a set of Kubernetes Native Extensions that allow you to programmatically generate installable media, cloud images, and netboot artifacts. These provide on-demand customization and exploit Kubernetes patterns to automatically provision nodes using control-plane management clusters; however, the same toolset can be used to build appliances for local development and debugging.
The [automated](/docs/installation/automated) section already shows some examples of how to leverage the Kubernetes Native Extensions and use the Kairos images to build appliances; in this section we will cover in detail how to leverage the CRDs and the Kairos factory to build custom appliances.
## Prerequisites
When building locally, only `docker` is required to be installed on the system. To build with the Kubernetes Native extensions, a Kubernetes cluster is required and `helm` and `kubectl` installed locally. Note [kind](https://github.com/kubernetes-sigs/kind) can be used as well. The Native extensions don't require any special permission, and run completely unprivileged.
### Kubernetes
To build with Kubernetes we need to install the Kairos `osbuilder` controller.
The chart depends on cert-manager. You can install the latest version of cert-manager by running the following commands:
```bash
kubectl apply -f https://github.com/jetstack/cert-manager/releases/latest/download/cert-manager.yaml
kubectl wait --for=condition=Available deployment --timeout=2m -n cert-manager --all
```
Install the Kubernetes charts with `helm`:
```bash
helm repo add kairos https://kairos-io.github.io/helm-charts
helm repo update
helm install kairos-crd kairos/kairos-crds
helm install kairos-osbuilder kairos/osbuilder
```
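A quick sanity check that the CRDs and the controller are in place (exact resource names may vary across chart versions):

```bash
kubectl get crd | grep osartifacts
kubectl get pods -A | grep -i osbuilder
```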
Among the things deployed by the helm chart is an nginx server, which is used to
serve the artifact files after they are built. See below for more.
## Build an ISO
To build an ISO, consider the following spec, which provides a hybrid bootable ISO (UEFI/MBR), with the `core` kairos image, adding `helm`:
```yaml
kind: OSArtifact
apiVersion: build.kairos.io/v1alpha1
metadata:
  name: hello-kairos
spec:
  imageName: "quay.io/kairos/core-opensuse-leap:latest"
  iso: true
  bundles:
    # Bundles available at: https://packages.kairos.io/Kairos/
    - quay.io/kairos/packages:helm-utils-3.10.1
  cloudConfig: |
    #cloud-config
    users:
      - name: "kairos"
        passwd: "kairos"
    install:
      device: "auto"
      reboot: true
      poweroff: false
      auto: true # Required, for automated installations
```
Apply the manifest with `kubectl apply`.
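For example, assuming the spec above is saved as `hello-kairos.yaml` (the plural resource name below is an illustration derived from the CRD kind):

```bash
kubectl apply -f hello-kairos.yaml
kubectl get osartifacts
```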
Note: the CRD allows you to specify a custom cloud-config file; [check out the full configuration reference](/docs/reference/configuration).
As mentioned above, an nginx server serves the built artifacts as soon as they are ready; by default, it is exposed with a `NodePort` type of service.
The controller will create a pod that builds the ISO (we can follow the process by tailing the container's logs) and later makes the artifact accessible via the dedicated service. Use the following commands to get its URL and download the ISO:
```bash
$ PORT=$(kubectl get svc osartifactbuilder-operator-osbuilder-nginx -o json | jq '.spec.ports[0].nodePort')
$ curl http://<node-ip>:$PORT/hello-kairos.iso -o output.iso
```
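The `<node-ip>` placeholder can be the address of any cluster node reachable from your machine; one way to derive it (assuming the nodes' `InternalIP` is routable from where you run `curl`):

```bash
NODE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
curl "http://${NODE_IP}:${PORT}/hello-kairos.iso" -o output.iso
```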
## Netboot artifacts
It is possible to use the CRD to prepare the artifacts required for netbooting by enabling `netboot: true`, for instance:
```yaml
kind: OSArtifact
apiVersion: build.kairos.io/v1alpha1
metadata:
  name: hello-kairos
spec:
  imageName: "quay.io/kairos/core-opensuse-leap:latest"
  netboot: true
  netbootURL: ...
  bundles: ...
  cloudConfig: ...
```
## Build a Cloud Image
Cloud images are images that automatically boot into recovery mode and can be used to deploy whatever image you want to the VM.
Custom user-data from the cloud provider is automatically retrieved. Additionally, the CRD allows you to embed a custom cloudConfig, which can be used to make configuration permanent also for VM images running outside a cloud provider.
A cloud image boots in QEMU and also in AWS. Consider:
```yaml
apiVersion: build.kairos.io/v1alpha1
kind: OSArtifact
metadata:
  name: hello-kairos
spec:
  imageName: "quay.io/kairos/core-opensuse-leap:latest"
  cloudImage: true
  cloudConfig: |
    #cloud-config
    users:
      - name: "kairos"
        passwd: "kairos"
    name: "Default deployment"
    stages:
      boot:
        - name: "Repart image"
          layout:
            device:
              label: COS_RECOVERY
            add_partitions:
              - fsLabel: COS_STATE
                size: 16240 # At least 16gb
                pLabel: state
        - name: "Repart image"
          layout:
            device:
              label: COS_RECOVERY
            add_partitions:
              - fsLabel: COS_PERSISTENT
                pLabel: persistent
                size: 0 # all space
        - if: '[ -f "/run/cos/recovery_mode" ] && [ ! -e /usr/local/.deployed ]'
          name: "Deploy cos-system"
          commands:
            - |
              # Use `elemental reset --system.uri docker:<img-ref>` to deploy a custom image
              elemental reset && \
              touch /usr/local/.deployed && \
              reboot
```
Note: since the image comes with only the `recovery` system populated, we need to apply a cloud-config similar to the one above, which specifies which container image we want to deploy.
The first step when the machine boots is to actually create the partitions needed to boot the active and passive images; they are populated during the first boot.
After applying the spec, the controller will create a Kubernetes Job which runs the build process and
then copies the produced `hello-kairos.raw` file to the nginx server (see above). This file is an EFI-bootable raw disk, bootable in QEMU and compatible with AWS, which automatically provisions the node:
```bash
$ PORT=$(kubectl get svc osartifactbuilder-operator-osbuilder-nginx -o json | jq '.spec.ports[0].nodePort')
$ curl http://<node-ip>:$PORT/hello-kairos.raw -o output.raw
```
Note: in order to use the image with QEMU, we need to resize the disk to at least 32 GB. This can be done with the CRD by setting `diskSize: 32000`, or by truncating the file after downloading:
```bash
truncate -s "+$((32000*1024*1024))" hello-kairos.raw
```
This is not required if running the image in the cloud, as providers usually resize the disk during import or when creating new instances.
To run the image locally with QEMU we need `qemu` installed in the system, and we need to be able to run VMs with EFI, for example:
```bash
qemu-system-x86_64 -m 2048 -bios /usr/share/qemu/ovmf-x86_64.bin -drive if=virtio,media=disk,file=output.raw
```
### Use the Image in AWS
To consume the image, copy it into an s3 bucket:
```bash
aws s3 cp <cos-raw-image> s3://<your_s3_bucket>
```
Create a `container.json` file referring to it:
```json
{
  "Description": "Kairos custom image",
  "Format": "raw",
  "UserBucket": {
    "S3Bucket": "<your_s3_bucket>",
    "S3Key": "<cos-raw-image>"
  }
}
```
Import the image:
```bash
aws ec2 import-snapshot --description "Kairos custom image" --disk-container file://container.json
```
Follow the procedure described in [AWS docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html#creating-launching-ami-from-snapshot) to register an AMI from snapshot. Use all default settings except for the firmware, set to force to UEFI boot.
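If you prefer scripting the AMI registration over the console flow, a sketch with the `aws` CLI (the snapshot ID below is a placeholder; take the real one from the import task output):

```bash
# Wait for the import task to complete, then note the resulting SnapshotId
aws ec2 describe-import-snapshot-tasks

# Register an AMI from the snapshot, forcing UEFI boot as noted above
aws ec2 register-image --name kairos-custom \
  --architecture x86_64 --virtualization-type hvm --boot-mode uefi \
  --root-device-name /dev/xvda \
  --block-device-mappings 'DeviceName=/dev/xvda,Ebs={SnapshotId=snap-0123456789abcdef0}'
```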
## Build a Cloud Image for Azure
Similarly, we can build images for Azure. Consider:
```yaml
apiVersion: build.kairos.io/v1alpha1
kind: OSArtifact
metadata:
  name: hello-kairos
spec:
  imageName: "quay.io/kairos/core-opensuse-leap:latest"
  azureImage: true
  ...
```
This will generate a compressed disk `hello-kairos-azure.vhd`, ready to be used in Azure.
```bash
$ PORT=$(kubectl get svc osartifactbuilder-operator-osbuilder-nginx -o json | jq '.spec.ports[0].nodePort')
$ curl http://<node-ip>:$PORT/hello-kairos-azure.vhd -o output.vhd
```
### How to use the image in Azure
Upload the Azure Cloud VHD disk (in `.vhd` format) to your bucket:
```bash
az storage copy --source <cos-azure-image> --destination https://<account>.blob.core.windows.net/<container>/<destination-azure-image>
```
Import the disk:
```bash
az image create --resource-group <resource-group> --source https://<account>.blob.core.windows.net/<container>/<destination-azure-image> --os-type linux --hyper-v-generation v2 --name <image-name>
```
Note: there is currently no way of altering the boot disk of an Azure VM via the GUI; use the `az` CLI to launch the VM with an expanded OS disk if needed.
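A sketch of what that could look like (names and disk size are placeholders):

```bash
az vm create --resource-group <resource-group> --name kairos-vm \
  --image <image-name> --os-disk-size-gb 64 --generate-ssh-keys
```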
## Build a Cloud Image for GCE
Similarly, we can build images for GCE. Consider:
```yaml
apiVersion: build.kairos.io/v1alpha1
kind: OSArtifact
metadata:
  name: hello-kairos
spec:
  imageName: "quay.io/kairos/core-opensuse-leap:latest"
  gceImage: true
  ...
```
Will generate a compressed disk `hello-kairos.gce.raw.tar.gz` ready to be used in GCE.
```bash
$ PORT=$(kubectl get svc osartifactbuilder-operator-osbuilder-nginx -o json | jq '.spec.ports[0].nodePort')
$ curl http://<node-ip>:$PORT/hello-kairos.gce.raw.tar.gz -o output.gce.raw.tar.gz
```
### How to use the image in GCE
To upload the image in GCE (compressed):
```bash
gsutil cp <cos-gce-image> gs://<your_bucket>/
```
Import the disk:
```bash
gcloud compute images create <new_image_name> --source-uri=<your_bucket>/<cos-gce-image> --guest-os-features=UEFI_COMPATIBLE
```
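Once the image is imported, a sketch of booting an instance from it (machine type and zone are arbitrary examples):

```bash
gcloud compute instances create kairos-node \
  --image <new_image_name> --machine-type e2-standard-2 --zone us-central1-a
```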
See [here how to use a cloud-init with Google cloud](https://cloud.google.com/container-optimized-os/docs/how-to/create-configure-instance#using_cloud-init_with_the_cloud_config_format).

View File

@ -1,151 +0,0 @@
---
title: "Bundles"
linkTitle: "Bundles"
weight: 5
description: >
Bundles are a powerful feature of Kairos that allow you to customize and configure your operating system. This section explains how to use and build custom bundles.
---
Bundles are a powerful feature of Kairos that allow you to customize and configure your operating system, as well as your Kubernetes cluster. Whether you want to add custom logic, install additional packages, or make any other changes to your system, bundles make it easy to apply these changes after installation or before bootstrapping a node.
Bundles are container images containing only files (and not a full OS) that can be used to install new software or extend the cloud-init syntax. You can find community-supported bundles in the [community-bundles](https://github.com/kairos-io/community-bundles) repository.
## Consuming Bundles
To use a bundle in your Kairos configuration, you will need to specify the type of bundle and the target image in your cloud-config file.
To apply a bundle before Kubernetes starts, you can include it in your config like this:
```yaml
#cloud-config
bundles:
  - targets:
      - run://<image>
```
Replace `<image>` with the URL or path to the bundle image. The prefix (e.g. `run://`) indicates the type of bundle being used.
To install a bundle after installation instead (for those bundles that explicitly support that), use the following:
```yaml
#cloud-config
install:
  bundles:
    - targets:
        - run://<image>
```
One of the benefits of using bundles is that they can also extend the cloud-config keywords available during installation. This means that by adding bundles to your configuration file, you can add new blocks of configuration options and customize your system even further.
A full config using a bundle from [community-bundles](https://github.com/kairos-io/community-bundles) that configures `metalLB` might look like this:
```yaml
#cloud-config
hostname: kairoslab-{{ trunc 4 .MachineID }}
users:
  - name: kairos
    ssh_authorized_keys:
      # Add your github user here!
      - github:mudler
k3s:
  enable: true
  args:
    - --disable=servicelb
# Specify the bundle to use
bundles:
  - targets:
      - run://quay.io/kairos/community-bundles:metallb_latest
# Specify metallb settings
metallb:
  version: 0.13.7
  address_pool: 192.168.1.10-192.168.1.20
```
## Bundle types
Bundles can also carry binaries that can be overlaid in the rootfs, either while [building images](/docs/advanced/build) or with [Live layering](https://kairos.io/docs/advanced/livelayering/).
Kairos supports three types of bundles:
- **Container**: This type is a bare container that simply contains files that need to be copied to the system. It is useful for copying over configuration files, scripts, or any other static content that you want to include on your system (prefixed with `container:` or `docker:`).
- **Run**: This type is also a bare container, but it comes with a script that can be run during the installation phase to add custom logic. This is useful for performing any necessary setup or configuration tasks that need to be done before the cluster is fully deployed (prefixed with `run:`).
- **Package**: This type is a [luet](https://luet.io) package that will be installed in the system. It requires you to specify a `luet` repository in order to work. Luet packages are a powerful way to manage dependencies and install software on your system (prefixed with `luet:`).
{{% alert title="Note" %}}
In the future, Kairos will also support a local type for use in airgap situations, where you can pre-add bundles to the image before deployment.
{{% /alert %}}
It's important to note that bundles do not have any special meaning in terms of immutability. They install files over paths that are mutable in the system, as they are simply overlaid during the boot process. This means that you can use bundles to make changes to your system at any time, even after it has been deployed.
## Create bundles
To build your own bundle, you will need to create a Dockerfile and any necessary files and scripts. A bundle is simply a container image that includes all the necessary assets to perform a specific task.
To create a bundle, you will need to define a base image and copy over any necessary files and scripts to the image. For example, you might use the following Dockerfile to create a bundle image that deploys everything inside `assets` in the Kubernetes cluster:
```Dockerfile
FROM alpine
COPY ./run.sh /
COPY ./assets /assets
```
And the associated `run.sh` that installs the assets depending on a cloud-config keyword can be:
```bash
#!/bin/bash
K3S_MANIFEST_DIR="/var/lib/rancher/k3s/server/manifests/"
mkdir -p $K3S_MANIFEST_DIR
# If the user sets `example.enable` in the input cloud config, we install our assets
if [ "$(kairos-agent config get example.enable | tr -d '\n')" == "true" ]; then
  cp -rf assets/* $K3S_MANIFEST_DIR
fi
```
This Dockerfile creates an image based on the Alpine base image, and copies over a script file and some assets to the image.
You can then add any additional instructions to the Dockerfile to install necessary packages, set environment variables, or perform any other tasks required by your bundle.
Once you have created your Dockerfile and any necessary script files, you can build your bundle image by running `docker build` and specifying the path to your Dockerfile.
For example:
```bash
docker build -t <image> .
```
This command will build an image with the name you specify (replace `<image>` accordingly) based on the instructions in your Dockerfile.
After building your bundle image, you will need to push it to a registry so that it can be accessed by Kairos. You can use a public registry like Docker Hub. To push your image to a registry, use the `docker push` command. For example:
```bash
docker push <image>
```
This will push the `<image>` to your specified registry.
And use it with Kairos:
```yaml
#cloud-config
bundles:
- targets:
# e.g. run://quay.io/...:tag
- run://<image>
example:
enable: true
```
See the [community-bundles repository](https://github.com/kairos-io/community-bundles) for further examples.

View File

@ -1,99 +0,0 @@
---
title: "Confidential computing setup"
linkTitle: "Confidential Computing"
weight: 9
description: >
---
{{% alert title="Note" %}}
This page describes features that are still experimental in Kairos. There are a lot of things that can be improved and might be more streamlined in the future.
{{% /alert %}}
Confidential computing is a type of secure computing that allows users to encrypt and decrypt data in a secure, isolated computing environment.
It works by encrypting the data before it is sent to the cloud or other computing resources. This allows users to keep their data private and secure, even if it is accessed by unauthorized parties.
This makes it useful for sensitive data such as financial information, health records, and other confidential data.
One important aspect of confidential computing is the ability to encrypt data even in memory. This document describes how to set up Kairos to use [`enclave-cc`](https://github.com/confidential-containers/enclave-cc)
in order to run confidential workloads.
## Create a Kairos cluster
The [`coco community bundle`](https://github.com/kairos-io/community-bundles/tree/main/coco) is supported since Kairos version `v2.0.0-alpha3` ("coco" stands for "**Co**nfidential **Co**mputing").
A configuration file like the following should be used (see the `bundles` section):
```
#cloud-config
bundles:
  - targets:
      - run://quay.io/kairos/community-bundles:system-upgrade-controller_latest
      - run://quay.io/kairos/community-bundles:cert-manager_latest
      - run://quay.io/kairos/community-bundles:kairos_latest
      - run://quay.io/kairos/community-bundles:coco_latest
install:
  auto: true
  device: auto
  reboot: true
k3s:
  enabled: true
users:
  - name: kairos
    passwd: kairos
```
The bundle makes some changes to the host's filesystem (it installs a customized containerd binary, among other things), and a restart of the node is needed for the changes to be fully applied.
When the file `/etc/containerd/.sentinel` appears, reboot the node.
## Additional steps
- [Label our node](https://github.com/confidential-containers/documentation/blob/main/quickstart.md#prerequisites):
```
kubectl label --overwrite node $(kubectl get nodes -o jsonpath='{.items[].metadata.name}') node-role.kubernetes.io/worker=""
```
- [Deploy the operator](https://github.com/confidential-containers/documentation/blob/main/quickstart.md#deploy-the-the-operator)
```
kubectl apply -k github.com/confidential-containers/operator/config/release?ref=v0.4.0
```
- Deploy the `ccruntime` resource:
```
kubectl apply -k github.com/confidential-containers/operator/config/samples/ccruntime/ssh-demo?ref=v0.4.0
```
(wait until they are all running: `kubectl get pods -n confidential-containers-system --watch`)
- [Deploy a workload](https://github.com/confidential-containers/documentation/blob/main/quickstart.md#test-creating-a-workload-from-the-sample-encrypted-image)
The last part with the verification will only work from within a Pod because the IP address is internal:
`ssh -i ccv0-ssh root@$(kubectl get service ccv0-ssh -o jsonpath="{.spec.clusterIP}")`
You can create a Pod like this:
```
apiVersion: v1
kind: Pod
metadata:
  name: kubectl
spec:
  containers:
    - name: kubectl
      image: opensuse/leap
      command: ["/bin/sh", "-ec", "trap : TERM INT; sleep infinity & wait"]
```
Get a shell to it and run the verification commands (you will need to install `ssh` in the Pod first).
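A sketch of that verification (the package name is the openSUSE Leap one; the `ccv0-ssh` key from the upstream quickstart needs to be copied into the Pod first, e.g. with `kubectl cp`):

```bash
# From your workstation: resolve the service IP and open a shell in the Pod
CCV0_IP=$(kubectl get service ccv0-ssh -o jsonpath="{.spec.clusterIP}")
kubectl exec -it kubectl -- /bin/sh

# ...then, inside the Pod: install an SSH client and connect
zypper install -y openssh-clients
ssh -i ccv0-ssh root@<CCV0_IP>
```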
## Known limitations
The above solution has some known limitations that might be addressed in future releases of Kairos. Namely:
- After a Kairos upgrade, the above process has to be repeated in order to install the customized `containerd` and the relevant configuration.
- There is no simple way to upgrade to newer versions of the bundle ([this is a general bundles limitation](https://github.com/kairos-io/kairos/issues/974)).

View File

@ -1,159 +0,0 @@
---
title: "Customizing the system image"
linkTitle: "Customization"
weight: 2
description: >
---
Kairos is an open source, container-based operating system. To modify Kairos and add a package, you'll need to build a container image from the [Kairos images](/docs/reference/image_matrix/). Here's an example with Docker which adds `figlet`:
```docker
# Use images from docs/reference/image_matrix/
FROM quay.io/kairos/kairos:opensuse-latest
RUN zypper in -y figlet
# Append a custom version identifier to the os-release file
RUN echo 'VERSION="my-version"' >> /etc/os-release
```
After creating your Dockerfile, you can build your own image by running the following command:
```bash
$ docker build -t docker.io/<yourorg>/myos:0.1 .
Sending build context to Docker daemon 2.048kB
Step 1/3 : FROM quay.io/kairos/kairos-opensuse:latest
---> 897dc0cddf91
Step 2/3 : RUN zypper install -y figlet
---> Using cache
---> d57ff48546e7
Step 3/3 : RUN MY_VERSION="my-version" >> /etc/os-release
---> Running in b7bcb24969f5
Removing intermediate container b7bcb24969f5
---> ca21930a4585
Successfully built ca21930a4585
Successfully tagged <your-org>/myos:0.1
```
Once you have built your image, you can publish it to Docker Hub or another registry with the following command:
```bash
$ docker push <your-org>/myos:0.1
The push refers to repository [docker.io/<your-org>/myos]
c58930881bc4: Pushed
7111ee985500: Pushed
...
```
You can use your custom image when [upgrading nodes manually](/docs/upgrade/manual), [with Kubernetes](/docs/upgrade/kubernetes) or [specifying it in the cloud-config during installation](/docs/examples/core). Here's how to do it manually with the `kairos-agent` command:
```
node:/home/kairos # kairos-agent upgrade --image docker.io/<your-org>/myos:0.1
INFO[2022-12-01T13:49:41Z] Starting elemental version v0.0.1
INFO[2022-12-01T13:49:42Z] Upgrade called
INFO[2022-12-01T13:49:42Z] Applying 'before-upgrade' hook
INFO[2022-12-01T13:49:42Z] Running before-upgrade hook
INFO[2022-12-01T13:49:42Z] deploying image docker.io/oz123/myos:0.1 to /run/initramfs/cos-state/cOS/transition.img
INFO[2022-12-01T13:49:42Z] Creating file system image /run/initramfs/cos-state/cOS/transition.img
INFO[2022-12-01T13:49:42Z] Copying docker.io/oz123/myos:0.1 source...
INFO[0000] Unpacking a container image: docker.io/oz123/myos:0.1
INFO[0000] Pulling an image from remote repository
...
INFO[2022-12-01T13:52:33Z] Finished moving /run/initramfs/cos-state/cOS/transition.img to /run/initramfs/cos-state/cOS/active.img
INFO[2022-12-01T13:52:33Z] Upgrade completed
INFO[2022-12-01T13:52:33Z] Upgrade completed
node:/home/kairos # which figlet
which: no figlet in (/sbin:/usr/sbin:/usr/local/sbin:/root/bin:/usr/local/bin:/usr/bin:/bin)
node:/home/kairos # reboot
```
Now, reboot your OS and SSH into it again to use figlet:
```
$ ssh -l kairos node:
Welcome to Kairos!
Refer to https://kairos.io for documentation.
kairos@node2:~> figlet kairos rocks!
_ _ _ _
| | ____ _(_)_ __ ___ ___ _ __ ___ ___| | _____| |
| |/ / _` | | '__/ _ \/ __| | '__/ _ \ / __| |/ / __| |
| < (_| | | | | (_) \__ \ | | | (_) | (__| <\__ \_|
|_|\_\__,_|_|_| \___/|___/ |_| \___/ \___|_|\_\___(_)
```
## Customizing the Kernel
Kairos allows you to customize the kernel and initrd as part of your container-based operating system. If you are using a glibc-based distribution, such as openSUSE or Ubuntu, you can use the distribution's package manager to replace the kernel with the one you want, and then rebuild the initramfs with `dracut`.
Here's an example of how to do this:
```bash
# Replace the existing kernel with a new one, depending on the base image it can differ
apt-get install -y ...
# Create the kernel symlink
kernel=$(ls /boot/vmlinuz-* | head -n1)
ln -sf "${kernel#/boot/}" /boot/vmlinuz
# Regenerate the initrd, in openSUSE we could just use "mkinitrd"
kernel=$(ls /lib/modules | head -n1)
dracut -v -f "/boot/initrd-${kernel}" "${kernel}"
ln -sf "initrd-${kernel}" /boot/initrd
# Update the module dependencies
kernel=$(ls /lib/modules | head -n1)
depmod -a "${kernel}"
```
{{% alert title="Note" %}}
If you are using an Alpine-based distribution, modifying the kernel is only possible by rebuilding the kernel and initrd outside of the Dockerfile and then embedding it into the image. This is because dracut and systemd are not supported in musl-based distributions. We are currently exploring ways to provide initramfs that can be generated from musl systems as well.
{{% /alert %}}
After you have modified the kernel and initrd, you can use the `kairos-agent upgrade` command to update your nodes, or do it [within Kubernetes](/docs/upgrade/kubernetes).
## Customizing the file system hierarchy using custom mounts
### Bind mounts
For clusters that need to mount network block storage, you might want to add
custom mount points that are bind mounted to your system. For example, when using
the Ceph file system, the OS mounts drives to `/var/lib/ceph`.
To achieve this, add the key `bind_mounts` to the `install` section
of the configuration you pass at install time, and specify a list of one or more bind mount paths:
```
install:
  auto: true
  device: "auto"
  # changes persist across reboots - mounted as bind mounts
  bind_mounts:
    - /var/lib/ceph
  ...
```
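After installation, a quick check that the path is indeed a bind mount backed by the persistent partition (assuming the config above):

```bash
mount | grep /var/lib/ceph
```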
### Ephemeral mounts
One can also specify custom mounts which are ephemeral. These are writable;
however, changes are discarded at boot (as `/etc/` already does).
```
install:
auto: true
device: "auto"
# changes persist across reboots - mounted as bind mounts
bind_mounts:
- /var/lib/ceph
ephemeral_mounts:
- /opt/scratch/
...
```
Note that these paths must exist in the container file system used to create the ISO.
See [ISO customization](/docs/Advanced/customizing/) above.
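After booting the installed system, you can verify that the path is backed by the persistent partition, for example:
```bash
# Sketch: the mount table should show /var/lib/ceph bind mounted from the persistent partition
mount | grep /var/lib/ceph
```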
@ -1,96 +0,0 @@
---
title: "Live layering"
linkTitle: "Live layering"
weight: 4
description: >
---
Live layering allows activating and deactivating system extension images. System extension images can dynamically, at runtime, extend the `/usr/` and `/opt/` directory hierarchies with additional files.
Kairos supports live layering with `systemd-sysext`. Currently, it is supported only on the `openSUSE`, `Fedora`, and `Ubuntu` flavors, which ship `systemd-sysext`.
## Description
For general reference on how `systemd-sysext` works, please read the [official](https://www.freedesktop.org/software/systemd/man/systemd-sysext.html) documentation.
Systemd system extensions can be located in the directories `/etc/extensions/`, `/run/extensions/`, `/var/lib/extensions/`, `/usr/lib/extensions/` and `/usr/local/lib/extensions/`.
In order to install extensions at runtime, they need to be placed into `/usr/local/lib/extensions`, which is mounted over the `COS_PERSISTENT` partition. The other paths are reserved for the system image, which can ship extensions directly from the container image used for upgrade or deployment.
## Installing extensions
In order to install extensions, you can just place them into `/usr/local/lib/extensions`.
For example, on a running Kairos node to install an extension from a container image:
```bash
luet util unpack <image> /usr/local/lib/extensions/<extension_name>
```
To load an extension during installation of a Kairos node, it can be supplied as a bundle in the `install` block in the node configuration:
```yaml
#cloud-config
# Set username and password
stages:
initramfs:
- name: "Set user and password"
users:
kairos:
passwd: "kairos"
hostname: kairos-{{ trunc 4 .Random }}
# Install configuration block
install:
auto: true
reboot: true
device: auto
# Bundles to install
bundles:
- rootfs_path: /usr/local/lib/extensions/<name>
targets:
- container://<image>
```
## Building extensions
Systemd extensions can be images, directories, or files; quoting the systemd-sysext documentation:
- Plain directories or btrfs subvolumes containing the OS tree
- Disk images with a GPT disk label, following the Discoverable Partitions Specification
- Disk images lacking a partition table, with a naked Linux file system (e.g. squashfs or ext4)
All of those can be shipped as a container image and loaded as a bundle.
For example, a bundle can be defined as a naked container image containing only the files that we want to overlay in the system.
Consider the following Dockerfile to create an extension which adds `/usr/bin/ipfs` to the system:
{{% alert title="Note" %}}
Note that systemd extensions require an extension-release file, which can be used to validate different aspects of the system being run.
If you don't want to limit to a single OS, you can use the special key `_any` but keep in mind that this is only available in systemd versions 252+.
On the other hand if you do want to have a validation, or if you're running an older version of systemd, you will need to set at least the `ID` and the `VERSION_ID` of the OS.
These need to match with the values in the `/etc/os-release` file.
Read more about systemd-sysext [here](https://www.freedesktop.org/software/systemd/man/systemd-sysext.html)
{{% /alert %}}
```docker
FROM alpine as build
# Install a binary
RUN wget https://github.com/ipfs/kubo/releases/download/v0.15.0/kubo_v0.15.0_linux-amd64.tar.gz -O kubo.tar.gz
RUN tar xvf kubo.tar.gz
RUN mv kubo/ipfs /usr/bin/ipfs
RUN mkdir -p /usr/lib/extension-release.d/
RUN echo ID=_any > /usr/lib/extension-release.d/extension-release.kubo
FROM scratch
COPY --from=build /usr/bin/ipfs /usr/bin/ipfs
COPY --from=build /usr/lib/extension-release.d /usr/lib/extension-release.d
```
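Once built and pushed to a registry, the extension can be unpacked on a running node and activated with `systemd-sysext`. A sketch, where `<image>` is the image built from the Dockerfile above:
```bash
# Unpack the extension into the runtime extensions directory
luet util unpack <image> /usr/local/lib/extensions/kubo
# Re-scan and merge the available extensions
systemd-sysext refresh
# Verify that the extension is active
systemd-sysext status
```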
@ -1,164 +0,0 @@
---
title: "Networking"
linkTitle: "Networking"
weight: 3
description: >
---
By default, Kairos ISOs are configured to automatically get an IP from the network interface. However, depending on the base system you have chosen, there are different ways to configure networking. This section collects information on setting the network configuration depending on the base being used (openSUSE, Alpine, Ubuntu).
There are different network managers depending on the distro:
- `connman` is available on Alpine-based distributions. By default, it is enabled on Alpine-flavored Kairos variants.
- systemd-based flavors all use `systemd-networkd`
## Static IP
To get a static IP, you can additionally define the following in your configuration file, depending on the network-manager being used:
{{< tabpane text=true right=true >}}
{{% tab header="connman" %}}
```yaml
stages:
initramfs:
- files:
- path: /var/lib/connman/default.config
permissions: 0644
content: |
[service_eth0]
Type = ethernet
IPv4 = 10.1.1.1/16/10.1.0.1
Nameservers = 10.1.0.1
```
{{% /tab %}}
{{% tab header="systemd-networkd" %}}
```yaml
stages:
initramfs:
- files:
- path: /etc/systemd/network/01-man.network
permissions: 0644
content: |
[Match]
Name=ens18
[Network]
Address=10.1.1.1/16
Gateway=10.1.0.1
DNS=10.1.0.1
```
{{% /tab %}}
{{< /tabpane >}}
## Bonding
Bonding can be configured via systemd-networkd (on systemd-based images, such as Ubuntu) or connman (on Alpine-based images); consider the following examples:
{{< tabpane text=true right=true >}}
{{% tab header="systemd-networkd" %}}
```yaml
#cloud-config
name: "My Deployment"
stages:
boot:
- name: "Setup network"
commands:
- systemctl restart systemd-networkd
initramfs:
# Drop network config file
- name: "Setup hostname"
hostname: "hostname"
- name: "Setup network files"
files:
- path: /etc/systemd/network/10-bond0.network
content: |
[Match]
Name=bond0
[Network]
DHCP=yes
permissions: 0644
owner: 0
group: 0
- path: /etc/systemd/network/10-bond0.netdev
content: |
[NetDev]
Name=bond0
Kind=bond
[Bond]
Mode=802.3ad
permissions: 0644
owner: 0
group: 0
- path: /etc/systemd/network/15-enp.network
content: |
[Match]
Name=enp*
[Network]
Bond=bond0
permissions: 0644
owner: 0
group: 0
- path: /etc/systemd/network/05-bond0.link
content: |
[Match]
Driver=bonding
Name=bond0
[Link]
MACAddress=11:22:33:44:55:66
permissions: 0644
owner: 0
group: 0
network:
- name: "Setup user ssh-keys"
authorized_keys:
kairos:
- "ssh-rsa AAA..."
- "ssh-rsa AAA..."
# k3s settings
k3s-agent:
enabled: true
env:
K3S_TOKEN: "KubeSecret"
K3S_URL: https://hostname:6443
```
{{% /tab %}}
{{% tab header="connman" %}}
```yaml
stages:
boot:
- name: "Setup network"
commands:
- modprobe bonding mode=4 miimon=100
- ifenslave bond0 eno1
- ifenslave bond0 eno2
- ifenslave bond0 eno3
- ifenslave bond0 eno4
- ifconfig bond0 up hw ether 11:22:33:44:55:66
- ifup bond0
- sleep 5
- rc-service connman restart
initramfs:
- name: "Setup network files"
files:
- path: /var/lib/connman/default.config
content: |
[service_eth]
Type = ethernet
IPv4 = off
IPv6 = off
[service_bond0]
Type = ethernet
DeviceName = bond0
IPv4 = dhcp
MAC = 11:22:33:44:55:66
permissions: 0644
owner: 0
group: 0
```
{{% /tab %}}
{{< /tabpane >}}
### References
- https://kerlilow.me/blog/setting-up-systemd-networkd-with-bonding/
@ -1,301 +0,0 @@
---
title: "Encrypting User Data with Kairos"
linkTitle: "Encrypting User Data with Kairos"
weight: 5
description: >
This section describes how to encrypt partition with LUKS in Kairos.
---
{{% alert title="Note" color="warning" %}}
This feature will be available in Kairos version `1.5.0` and in all future releases.
{{% /alert %}}
Kairos offers the ability to encrypt user data partitions with `LUKS`. User-data partitions are dedicated to persisting data for a running system, and are stored separately from the OS images. This encryption mechanism can also be used to encrypt additional partitions created during the installation process.
Kairos supports the following encryption scenarios:
1. **Offline mode** - Encryption key for partitions is stored on the machine inside the TPM chip.
1. **Online mode (Automated)** - Keypair used to encrypt the partition passphrase is stored on the TPM chip, and an external server is used to store the encrypted passphrases.
1. **Online mode (Manually configured)** - Plaintext passphrase is stored in the KMS server and returned to the node after TPM challenging.
![encryption1_1674470732563_0](https://user-images.githubusercontent.com/2420543/214405291-97a30f2d-d70a-45ba-b842-5282c722c79e.png)
Kairos uses the TPM chip to encrypt partition passphrases, and for offline encryption, it stores the passphrase in the non-volatile memory of the chip.
To enable encryption, you will need to specify the labels of the partitions you want to encrypt, a minimum configuration for offline encryption can be seen below:
```yaml
#cloud-config
install:
# Label of partitions to encrypt
# COS_PERSISTENT is the OS partition
# dedicated to user-persistent data.
encrypted_partitions:
- COS_PERSISTENT
```
Please note that for online mode, you will also need to specify the key management server address that will be used to store the keys, a complete configuration reference is the following:
```yaml
#cloud-config
# Install block
install:
# Label of partitions to encrypt
# COS_PERSISTENT is the OS partition
# dedicated to user-persistent data.
encrypted_partitions:
- COS_PERSISTENT
# Kcrypt configuration block
kcrypt:
challenger:
# External KMS Server address. This must be reachable by the node
challenger_server: "http://192.168.68.109:30000"
# (optional) Custom Non-Volatile index to use to store encoded blobs
nv_index: ""
# (optional) Custom Index for the RSA Key pair
c_index: ""
# (optional) Custom TPM device
tpm_device: ""
```
| Option | Description |
| --- | --- |
| `install.encrypted_partitions` | Label of partitions to encrypt |
| `kcrypt.challenger.challenger_server` | External KMS Server address |
| `kcrypt.challenger.nv_index` | Custom Non-Volatile index to use to store encoded blobs |
| `kcrypt.challenger.c_index` | Custom Index for the RSA Key pair |
| `kcrypt.challenger.tpm_device` | Custom TPM device |
## Requirements
The host machine must have a TPM chip version 2.0 or higher to use encryption with Kairos. A list of TPM chips/HW can be found [in the list of certified products](https://trustedcomputinggroup.org/membership/certification/tpm-certified-products/); in practice, most modern machines ship with a TPM 2.0 chip.
## Components
The Kairos encryption design involves three components to manage partitions encryption and decryption lifecycle:
- [kcrypt](https://github.com/kairos-io/kcrypt) runs on the machine and attempts to unlock partitions by using plugins to delegate encryption/decryption business logic.
- [kcrypt-discovery-challenger](https://github.com/kairos-io/kcrypt-challenger) runs on the machine, it is called by `kcrypt` and uses the TPM chip to retrieve the passphrase as described below.
- [kcrypt-challenger](https://github.com/kairos-io/kcrypt-challenger) is the KMS (Key Management Server) component, deployed in Kubernetes, which manages secrets and partitions of the nodes.
## Offline mode
This scenario covers encryption of data at rest without any third party or KMS server. The keys used to encrypt the partitions are stored in the TPM chip.
### Scenario: Offline encryption
A high level overview of the interaction between the components can be observed here:
![offline](https://user-images.githubusercontent.com/2420543/214795800-f7d54309-2a3c-4d29-b6da-c74644424244.png)
A complete cloud config example for this scenario can be:
```yaml
#cloud-config
install:
encrypted_partitions:
- COS_PERSISTENT
hostname: metal-{{ trunc 4 .MachineID }}
users:
- name: kairos
# Change to your pass here
passwd: kairos
ssh_authorized_keys:
# Replace with your github user and un-comment the line below:
# - github:mudler
```
Note that we define a list of partition labels that we want to encrypt. In the example above we set `COS_PERSISTENT` to be encrypted, which in turn encrypts all the user data of the machine (this includes, for instance, container images pulled by Kubernetes, or any data persisted at runtime on the machine).
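After the first boot, you can verify from the node that the partition is indeed encrypted, for example:
```bash
# Sketch: the partition labeled COS_PERSISTENT should report a crypto_LUKS filesystem type
lsblk -o NAME,FSTYPE,LABEL
```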
## Online mode
Online mode involves an external service (the Key Management Server, **KMS**) to boot the machines. The KMS role is to enable machines to boot by providing the encrypted secrets, or passphrases, needed to unlock the encrypted drive. Authentication with the KMS is done via TPM challenging.
In this scenario, we need to first deploy the KMS server to an existing Kubernetes cluster, and associate the TPM hash of the nodes that we want to manage. During deployment, we specify the KMS server inside the cloud-config of the nodes to be provisioned.
### Requirements
- A Kubernetes cluster
- Kcrypt-challenger reachable by the nodes attempting to boot
### Install the KMS (`kcrypt-challenger`)
To install the KMS (`kcrypt-challenger`), you will first need to make sure that cert-manager is installed. You can do this by running the following command:
```
kubectl apply -f https://github.com/jetstack/cert-manager/releases/latest/download/cert-manager.yaml
kubectl wait --for=condition=Available deployment --timeout=2m -n cert-manager --all
```
To install `kcrypt-challenger` on a Kubernetes cluster with `helm`, you can use the commands below:
```
# Install the helm repository
helm repo add kairos https://kairos-io.github.io/helm-charts
helm repo update
# Install the Kairos CRDs
helm install kairos-crd kairos/kairos-crds
# Deploy the KMS challenger
helm install kairos-challenger kairos/kairos-challenger --set service.challenger.type="NodePort"
# we can also set up a specific port and a version:
# helm install kairos-challenger kairos/kairos-challenger --set image.tag="v0.2.2" --set service.challenger.type="NodePort" --set service.challenger.nodePort=30000
```
A service must be used to expose the challenger. If using the node port, we can retrieve the address with:
```bash
export EXTERNAL_IP=$(kubectl get nodes -o jsonpath='{.items[].status.addresses[?(@.type == "ExternalIP")].address}')
export PORT=$(kubectl get svc kairos-challenger-escrow-service -o json | jq '.spec.ports[0].nodePort')
```
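The resulting address is what the nodes must be able to reach; for instance, to print the value to use as `challenger_server` in the node configuration:
```bash
echo "challenger_server: http://${EXTERNAL_IP}:${PORT}"
```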
### Register a node
In order to register a node on the KMS, the TPM hash of the node needs to be retrieved first.
You can get a node TPM hash by running `/system/discovery/kcrypt-discovery-challenger` as root from the LiveCD:
```
kairos@localhost:~> ID=$(sudo /system/discovery/kcrypt-discovery-challenger)
kairos@localhost:~> echo $ID
7441c78f1976fb23e6a5c68f0be35be8375b135dcb36fb03cecc60f39c7660bd
```
This is the hash you should use in the definition of the `SealedVolume` in the
examples below.
### Scenario: Automatically generated keys
![encryption3_1674472162848_0](https://user-images.githubusercontent.com/2420543/214405310-78f7deec-b43e-4581-a99b-a358492cc7ac.png)
The TPM chip generates unique RSA keys for each machine during installation, which are used to encrypt a generated secret. These keys can only be accessed by the TPM and not by the KMS, thus ensuring that both the KMS and the TPM chip are required to boot the machine. As a result, even if the machine or its disks are stolen, the drive remains sealed and encrypted.
A deployment using this method will store the encrypted passphrase used to boot in the KMS, and the keypair used to encrypt it in the TPM chip of the machine during installation. This means that only the TPM chip can decode the passphrase, and the passphrase is stored in the KMS in a form the KMS itself cannot decrypt. As such, nodes can boot only with the KMS available, and the disk can be decrypted only by the node.
To register a node with the KMS, use the TPM hash retrieved before (see section ["Register a node"](#register-a-node))
and replace it in this example command:
```bash
cat <<EOF | kubectl apply -f -
apiVersion: keyserver.kairos.io/v1alpha1
kind: SealedVolume
metadata:
name: test2
namespace: default
spec:
TPMHash: "7441c78f1976fb23e6a5c68f0be35be8375b135dcb36fb03cecc60f39c7660bd"
partitions:
- label: COS_PERSISTENT
quarantined: false
EOF
```
This command will register the node on the KMS.
A node can use the following during deployment, specifying the address of the challenger server:
```yaml
#cloud-config
install:
encrypted_partitions:
- COS_PERSISTENT
grub_options:
extra_cmdline: "rd.neednet=1"
kcrypt:
challenger:
challenger_server: "http://192.168.68.109:30000"
nv_index: ""
c_index: ""
tpm_device: ""
hostname: metal-{{ trunc 4 .MachineID }}
users:
- name: kairos
# Change to your pass here
passwd: kairos
ssh_authorized_keys:
# Replace with your github user and un-comment the line below:
# - github:mudler
```
### Scenario: Static keys
![encryption4_1674472306435_0](https://user-images.githubusercontent.com/2420543/214405316-63882311-ca27-4b6e-9465-70d702ab6dc1.png)
In this scenario the Kubernetes administrator knows the passphrase of the nodes, and explicitly sets the passphrase for each partition of the nodes during configuration. This scenario is suitable for cases where the passphrase needs to be carried over, and not tied specifically to the TPM chip.
The TPM chip is still used for authenticating a machine. The challenger still needs to know the TPM hash of each of the nodes before installation.
To register a node with the KMS, replace the `TPMHash` in the following example with the TPM hash retrieved before, and specify a passphrase with a secret reference for the partition:
```bash
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
metadata:
name: mysecret
namespace: default
type: Opaque
stringData:
pass: "awesome-plaintext-passphrase"
---
apiVersion: keyserver.kairos.io/v1alpha1
kind: SealedVolume
metadata:
name: test2
namespace: default
spec:
TPMHash: "7441c78f1976fb23e6a5c68f0be35be8375b135dcb36fb03cecc60f39c7660bd"
partitions:
- label: COS_PERSISTENT
secret:
name: mysecret
path: pass
quarantined: false
EOF
```
The node doesn't need any specific configuration besides the kcrypt challenger, so for instance:
```yaml
#cloud-config
install:
encrypted_partitions:
- COS_PERSISTENT
grub_options:
extra_cmdline: "rd.neednet=1"
kcrypt:
challenger:
challenger_server: "http://192.168.68.109:30000"
nv_index: ""
c_index: ""
tpm_device: ""
hostname: metal-{{ trunc 4 .MachineID }}
users:
- name: kairos
# Change to your pass here
passwd: kairos
ssh_authorized_keys:
# Replace with your github user and un-comment the line below:
# - github:mudler
```
## Troubleshooting
- Invoking `/system/discovery/kcrypt-discovery-challenger` without arguments returns the TPM pubhash.
- Invoking `kcrypt-discovery-challenger` with the `discovery.password` argument triggers the logic to retrieve the passphrase; for instance, it can be used as follows:
```bash
echo '{ "data": "{ \"label\": \"LABEL\" }"}' | sudo -E WSS_SERVER="http://localhost:30000" /system/discovery/kcrypt-discovery-challenger "discovery.password"
```
## Notes
If encryption is enabled and `COS_PERSISTENT` is set to be encrypted, every cloud config file in `/usr/local/cloud-config` will be protected and can be used to store sensitive data. However, it's important to keep in mind that although the contents of `/usr/local` are retained between reboots and upgrades, they will not be preserved during a [reset](/docs/reference/reset).
@ -1,7 +0,0 @@
---
title: "Architecture"
linkTitle: "Architecture"
weight: 4
description: >
---
@ -1,73 +0,0 @@
---
title: "Cloud init based"
linkTitle: "Cloud init based"
weight: 3
date: 2022-11-13
description: >
---
Kairos supports the [standard cloud-init syntax](https://github.com/mudler/yip#compatibility-with-cloud-init-format) and [its own extended syntax](https://github.com/mudler/yip), which allow configuring a system declaratively with a cloud-config centric approach.
If you are not familiar with the concepts of cloud-init, the [official cloud-init documentation](https://cloud-init.io/) is a recommended read.
## Configuration persistency
Kairos is an Immutable OS and the only configuration that is persistent across reboots is the cloud-init configuration.
Multiple cloud-init files can be present in the system, and Kairos reads and processes them in sequence (lexicographic order), allowing the configuration to be extended with additional pieces also after deployment, or to manage logical configuration pieces separately.
In Kairos the `/oem` directory keeps track of all the configuration of the system and stores the configuration files. Multiple files are allowed and they are all executed during the various system stages. `/usr/local/cloud-config` can optionally be used as well to store cloud config files in the persistent partition instead. `/system/oem` is instead reserved for default cloud-init files shipped by the base OS image.
By using the standard cloud-config syntax, a subset of the functionality is available and the settings will be executed in the boot stage.
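For example, a standalone configuration fragment can be dropped into `/oem` after deployment; the filename below is hypothetical, and the lexicographic ordering of the filenames determines when it runs relative to the other files:
```yaml
# /oem/99_custom.yaml (hypothetical filename)
#cloud-config
stages:
  boot:
    - name: "Write a custom motd"
      commands:
        - echo "Welcome to my Kairos node" > /etc/motd
```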
## Boot stages
During boot, the stages are emitted in an event-based pattern until the system completes its boot process:
![Kairos-boot-events](https://user-images.githubusercontent.com/2420543/195111193-3167eab8-8058-4676-a1a0-f64aea745646.png)
The events can be used in the cloud-config extended syntax to hook into the various stages of a node's lifecycle.
For instance, to execute something before reset, it is sufficient to add the following to the config file used to bootstrap a node:
```yaml
name: "Run something before reset"
stages:
before-reset:
- name: "Setting"
commands:
- |
echo "Run a command before reset the node!"
```
Below there is a detailed list of the stages available that can be used in the cloud-init configuration files:
| **Stage** | **Description** |
|------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| _rootfs_ | This is the earliest stage, running before switching root, just right after the root is mounted in /sysroot and before applying the immutable rootfs configuration. This stage is executed over initrd root, no chroot is applied. |
| _initramfs_            | This is still an early stage, running before switching root. Here you can apply radical changes to the booting setup of Elemental. Although it is executed before switching root, this execution runs chrooted into the target root after the immutable rootfs is set up and ready. |
| _boot_ | This stage is executed after initramfs has switched root, during the systemd bootup process. |
| _fs_                   | This stage is executed when fs is mounted and is guaranteed to have access to the state and persistent partitions (`COS_STATE` and `COS_PERSISTENT` respectively). |
| _network_              | This stage is executed when the network is available |
| _reconcile_            | This stage is executed 5m after boot and periodically every 60m. |
| _after-install_        | This stage is executed after installation of the OS has ended. |
| _after-install-chroot_ | This stage is executed after installation of the OS has ended (chroot call). |
| _after-upgrade_ | This stage is executed after upgrade of the OS has ended. |
| _after-upgrade-chroot_ | This stage is executed after upgrade of the OS has ended (chroot call). |
| _after-reset_ | This stage is executed after reset of the OS has ended. |
| _after-reset-chroot_ | This stage is executed after reset of the OS has ended (chroot call). |
| _before-install_ | This stage is executed before installation |
| _before-upgrade_ | This stage is executed before upgrade |
| _before-reset_ | This stage is executed before reset |
Note: Steps executed at the `chroot` stages run inside the new OS as chroot, allowing to write persisting changes to the image, for example by downloading and installing additional software.
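For instance, a sketch of persisting a change into the new image during upgrades via a chroot stage (the package manager call is an assumption to match your flavor):
```yaml
name: "Install extra tooling during upgrades"
stages:
  after-upgrade-chroot:
    - name: "Install htop"
      commands:
        # Assumes a zypper-based flavor; use apt-get/apk on other bases
        - zypper install -y htop
```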
### Sentinels
When a Kairos node boots, it creates sentinel files to allow executing cloud-init steps programmatically.
- `/run/cos/recovery_mode` is created when booting from the recovery partition
- `/run/cos/live_mode` is created when booting from the LiveCD
To execute a block using the sentinel files you can specify: `if: '[ -f "/run/cos/..." ]'`, for instance:
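```yaml
# Sketch: this step only runs when the node has booted from the recovery partition
name: "Run something in recovery mode only"
stages:
  boot:
    - if: '[ -f "/run/cos/recovery_mode" ]'
      commands:
        - echo "Booting from recovery mode!"
```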
@ -1,120 +0,0 @@
---
title: "Container based"
linkTitle: "Container"
weight: 2
date: 2022-11-13
description: >
---
Kairos is a container-based operating system (OS).
A container-based operating system is an OS that is shipped via containers. If it is based on Linux (as is most likely), you can also run the container image on your Docker daemon. The image being booted is the container, which contains all the required pieces in order to boot (kernel, initrd, init system). There is no real container runtime running the image: the container is used to construct an image internally, which is then used to boot the system in an A/B fashion, so there is no overhead introduced. The system being booted is actually a snapshot of the container.
- **Single-image** The OS is a single container image which contains all the OS components, including Kernel and Initrd.
- **Tamper-proof upgrades** Upgrades are atomic, A/B swaps with fallback mechanisms and automatic boot assessment.
- **Distributed via container registries** Bootable images are standard OCI artifacts that can be hosted in any container registry
- **Platform Engineer-friendly** Adapt the infrastructure to your needs by plugging images into your already-existing workflow pipeline. Customizing an immutable OS becomes as easy as writing a Dockerfile.
## A/B Upgrades
![upgrade](https://user-images.githubusercontent.com/2420543/197806999-587632a1-0292-44df-bb8f-176ff702f62d.png)
Upgrades are atomic operations that can be triggered manually or via Kubernetes. The node will create a transition image that will be swapped for the Active system, and the Active system becomes Passive. This ensures tamper-proof upgrades, and automated fallback and boot assessment strategies are in place to automatically boot from the fallback system. The recovery image can furthermore be leveraged to completely automate node recovery.
## Benefits
- Container registries are already widely supported and in widespread use.
- Reduced infrastructure drift, by pushing upgrades as single images with atomic upgrades.
If you are operating a Kubernetes cluster and deploying applications on top, chances are that you already have a container registry deployed somewhere and configured to store them or manage your infrastructure stack. By using container images, you can reuse the same infrastructure to propagate upgrades to the nodes and handle customizations.
![kairos-factory](https://user-images.githubusercontent.com/2420543/197808767-e213709d-af21-4e32-9a78-818f34170077.png)
Container images can be extended after a build by using standard container building practices and seamlessly plug into your existing pipelines. Kairos allows seamless upgrades to container images that are derived from other versions.
We believe that rollbacks and incremental patch upgrades increase the exposure to infrastructure drift. By contrast, immutable, single images are deployed to the nodes as if they were apps - no more discrepancies in your nodes, and no need for configuration management tools like Chef, Ansible, or the like.
This means that to customize a Kairos version, all that is required is to build a standard container image with a plain Dockerfile containing just the bits that are actually needed; there is no need to touch a running system as we are typically used to.
If you are familiar with Dockerfiles, then you are good to go to roll your own custom OS version to provision on the nodes. That removes any friction from questions like "How do I add this package to my nodes?", or more complex ones such as "How can I replace the kernel with my own?".
## Container Image based OS
The image support matrix [here](/docs/reference/image_matrix) lists all the container images built by our CI on every release of Kairos.
To inspect an image and run it locally, you can use a container engine like Docker or Podman:
```
$ docker pull {{< registryURL >}}/core-{{< flavor >}}:{{< kairosVersion >}}
```
We can run it locally with Docker as a container to inspect it:
```
$ docker run -ti --rm {{< registryURL >}}/core-{{< flavor >}}:{{< kairosVersion >}}
/ # cat /etc/os-release
...
KAIROS_NAME="kairos-core-{{< flavor >}}"
KAIROS_VERSION="{{< kairosVersion >}}"
KAIROS_ID="kairos"
KAIROS_ID_LIKE="kairos-core-{{< flavor >}}"
KAIROS_VERSION_ID="{{< kairosVersion >}}"
KAIROS_PRETTY_NAME="kairos-core-{{< flavor >}} {{< kairosVersion >}}"
KAIROS_BUG_REPORT_URL="https://github.com/kairos-io/kairos/issues"
KAIROS_HOME_URL="https://github.com/kairos-io/kairos"
KAIROS_IMAGE_REPO="{{< registryURL >}}/core-{{< flavor >}}"
KAIROS_IMAGE_LABEL="latest"
KAIROS_GITHUB_REPO="kairos-io/kairos"
KAIROS_VARIANT="core"
KAIROS_FLAVOR="{{< flavor >}}"
```
And check out things like which kernel is inside:
```bash
/ $ ls -liah /boot/
total 102M
6692018 drwxr-xr-x 2 root root 4.0K Apr 16 2020 .
6817515 drwxr-xr-x 1 root root 4.0K Oct 10 16:11 ..
6692019 -rw-r--r-- 1 root root 65 Apr 16 2020 .vmlinuz-5.14.21-150400.24.21-default.hmac
6692020 -rw-r--r-- 1 root root 4.9M Apr 16 2020 System.map-5.14.21-150400.24.21-default
6692021 -rw-r--r-- 1 root root 1.7K Apr 16 2020 boot.readme
6692022 -rw-r--r-- 1 root root 245K Apr 16 2020 config-5.14.21-150400.24.21-default
6692023 lrwxrwxrwx 1 root root 35 Apr 16 2020 initrd -> initrd-5.14.21-150400.24.21-default
6692024 -rw------- 1 root root 69M Apr 16 2020 initrd-5.14.21-150400.24.21-default
6692025 -rw-r--r-- 1 root root 443K Apr 16 2020 symvers-5.14.21-150400.24.21-default.gz
6692026 -rw-r--r-- 1 root root 484 Apr 16 2020 sysctl.conf-5.14.21-150400.24.21-default
6692027 -rw-r--r-- 1 root root 17M Apr 16 2020 vmlinux-5.14.21-150400.24.21-default.gz
6692028 lrwxrwxrwx 1 root root 36 Apr 16 2020 vmlinuz -> vmlinuz-5.14.21-150400.24.21-default
6692029 -rw-r--r-- 1 root root 11M Apr 16 2020 vmlinuz-5.14.21-150400.24.21-default
```
The CI process generates bootable media from the container images. Similarly, we can modify this image to introduce our changes and remaster an ISO as described in [Automated installation](/docs/installation/automated); the process can be summarized in the following steps:
```bash
$ docker run -ti --name custom-container {{< registryURL >}}/core-{{< flavor >}}:{{< kairosVersion >}}
# # Do your changes inside the container..
# echo "foo" > /foo
# ...
# exit
$ docker commit custom-container custom-image
> sha256:37176f104a870480f9c3c318ab51f6c456571b6612b6a47b96af71b95a0a27c7
# Builds an ISO from it
$ docker run -v $PWD:/cOS -v /var/run/docker.sock:/var/run/docker.sock -i --rm quay.io/kairos/osbuilder-tools:v0.1.1 --name "custom-iso" --debug build-iso --date=false --local custom-image --output /cOS/
> ...
> ...
> xorriso : UPDATE : Writing: 147456s 84.0% fifo 100% buf 50% 60.5xD
> ISO image produced: 175441 sectors
> Written to medium : 175472 sectors at LBA 48
> Writing to '/cOS/custom-iso.iso' completed successfully.
$ ls
custom-iso.iso custom-iso.iso.sha256
```
In order to go further and upgrade nodes using this image, the only remaining requirement is to push it to a container registry and upgrade the nodes using that container image.
For upgrading to a container image see [manual upgrades](/docs/upgrade/manual) and [kubernetes upgrades](/docs/upgrade/kubernetes).
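For reference, here is a sketch of a `system-upgrade-controller` Plan that rolls the custom image out to all nodes; the image reference and version below are placeholders, see the Kubernetes upgrade page for the full procedure:
```yaml
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
  name: os-upgrade
  namespace: system-upgrade
spec:
  concurrency: 1
  # Placeholder version label for the rollout
  version: "latest"
  nodeSelector:
    matchExpressions:
      - { key: kubernetes.io/hostname, operator: Exists }
  serviceAccountName: system-upgrade
  upgrade:
    # Placeholder: the custom container image pushed in the previous step
    image: <your-registry>/custom-image
    command:
      - "/usr/sbin/suc-upgrade"
```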
## See also
- [ISO remastering](/docs/installation/automated#iso-remastering)
@ -1,61 +0,0 @@
---
title: "Immutable"
linkTitle: "Immutable"
weight: 1
date: 2022-11-13
description: >
---
Kairos adopts an immutable layout, and derivatives created with its toolkit inherit the same immutability attributes.
An immutable OS is a carefully engineered system which boots in a restricted, permissionless mode, where certain paths of the system are not writable. For instance, after installation it's not possible to add additional packages to the system, and any configuration change is discarded after reboot.
A running Linux-based OS system will have the following paths:
```
/usr/local - persistent (partition label COS_PERSISTENT)
/oem       - persistent (partition label COS_OEM)
/etc       - ephemeral
/usr       - read only
/          - immutable
```
`/usr/local` will contain all the persistent data which will be carried over between upgrades, unlike the changes made to `/etc`, which will be discarded.
## Benefits of using an Immutable System
There are many reasons why you would like to use an immutable system, in this article we'll present two of them.
1. From a security standpoint, it's far more secure than traditional systems. This is because most attack vectors rely on writing to the system, or installing persistent tools after a vector has been exploited.
2. From a maintenance perspective, configuration management tools like Chef, Puppet, or the likes aren't needed because immutable systems only have one configuration entry point. Every other configuration is cleaned up automatically after a reboot.
The benefits of rolling out the same system over a set of machines are the following:
- **No snowflakes** - All the machines are based on the same image, configuration settings, and behavior. This allows for a predictable infrastructure, predictable upgrades, and homogeneous configurations across your cluster.
- **Configuration is driven via cloud-init** - There is only one source of truth for the configuration, and that happens at bootstrap time. Anything else is handled afterwards, natively via Kubernetes, so no configuration management software is required.
- **Reduced attack surface** - Immutable systems cannot be modified or tampered with at runtime. This enhances the security of a running OS, as changes on the system are not allowed.
Tools like Chef, Puppet, and Ansible share the same underlying issues when it comes to configuration management. That is, nodes can have different version matrices of software and OS, which makes your set of nodes inhomogeneous and difficult to maintain and orchestrate from day 1 to day 2.
Kairos tackles the issue from a different angle, as it can turn _any_ distribution into an "immutable" system, distributed as a standard container image, which gets provisioned to the devices as declared. This allows treating OSes with the same repeatable portability as containers for apps, removing snowflakes in your cluster. Container registries can be used either internally or externally to the cluster to propagate upgrades with customized versions of the OS (kernel, packages, and so on).
## Design
Kairos after installation will create the following partitions:
- A state partition that stores the container images, which are going to be booted (active and passive, stored in `.img` format which are loopback mounted)
- A recovery partition that stores the container images, used for recovery (in `.squashfs` format)
- An OEM partition (optional) that stores user configuration and cloud-config files
- A persistent partition to keep the data across reboots
![Kairos-installation-partitioning](https://user-images.githubusercontent.com/2420543/195111190-3bdfb917-312a-40f4-b0bc-4a65a701c06b.png)
The persistent partition is mounted during boot on `/usr/local`, and additional paths are mount-bind to it. Those configuration aspects are defined in a [cloud-config](https://github.com/kairos-io/kairos/blob/a1a9bef4dff30e0718fa4d2697f075ce37c7ed90/overlay/files/system/oem/11_persistency.yaml#L11) file. It is possible to override such configuration, via a custom cloud-config, during installation.
The recovery system allows performing emergency tasks in case of failure of the active and passive images. Furthermore, a fallback mechanism is in place so that, in case of failures, the booting sequence is: "A -> B -> Recovery".
The upgrade happens in a transition image and takes place only after all the necessary steps are completed. An upgrade of the A/B partitions can be done [with Kubernetes](/docs/upgrade/kubernetes) or [manually](/docs/upgrade/manual). The upgrade will create a new pristine image that will be selected as active for the next reboot; the old one will be flagged as passive. If we are performing the upgrade from the passive system, only the active image is subject to changes.
### Kernel and Initrd
The kernel and initrd are loaded from the system images and are expected to be present in the container that is pulled down and used for upgrades. Unlike standard approaches, Kairos focuses on having static initrds, which are generated while building the images used for upgrades, as opposed to generating the initramfs locally on the node. A typical setup has kernels and initrds in a special boot partition dedicated to boot files; in Kairos, instead, the kernel and initrd are loaded from the images, which are chainloaded from the bootloader (GRUB). This is a design choice to keep the entire OS stack confined to a single layer which gets pulled and swapped atomically during upgrades.
@ -1,42 +0,0 @@
---
title: "Meta-Distribution"
linkTitle: "Meta-Distribution"
weight: 4
date: 2022-11-13
description: >
---
We like to define Kairos as a meta-Linux Distribution, as its goal is to convert other distros to an immutable layout with Kubernetes Native components.
## Kairos
The Kairos stack is composed of the following:
- A core OS image release for each flavor in ISO, qcow2, and other similar formats (see [the list of supported distributions](/docs/reference/image_matrix)) provided for user convenience
- A release with K3s embedded.
- A set of Kubernetes Native API components (CRDs) to install into the control-plane node, to manage deployment, artifacts creation, and lifecycle (WIP).
- A set of Kubernetes Native API components (CRDs) to install into the target nodes to manage and control the node after deployment (WIP).
- An agent installed into the nodes to be compliant with Kubernetes Native API components mentioned above.
Every component is extensible and modular such that it can be customized and replaced in the stack and built either locally or with Kubernetes.
### Internal components
Kairos encompasses several components, external and internal.
Internal:
- [kairos](https://github.com/kairos-io/kairos) is the main repository, building the `kairos-agent` and containing the image definitions which runs on our CI pipelines.
- [immucore](https://github.com/kairos-io/immucore) is the immutability management interface.
- [AuroraBoot](https://github.com/kairos-io/AuroraBoot) is the Kairos Node bootstrapper
- [elemental-cli](https://github.com/kairos-io/elemental-cli) manages the installation, reset, and upgrade of the Kairos node.
- [system packages](https://github.com/kairos-io/packages) contains additional packages, cross-distro, partly used in framework images
- [kcrypt](https://github.com/kairos-io/kcrypt) is the component responsible for encryption and decryption of data at rest
- [kcrypt-challenger](https://github.com/kairos-io/kcrypt-challenger) is the `kairos` plugin that works with the TPM chip to unlock LUKS partitions
- [osbuilder](https://github.com/kairos-io/osbuilder) is used to build bootable artifacts from container images
- [entangle](https://github.com/kairos-io/entangle) a CRD to interconnect Kubernetes clusters
- [entangle-proxy](https://github.com/kairos-io/entangle-proxy) a CRD to control interconnected clusters
External:
- [K3s](https://k3s.io) as a Kubernetes distribution
- [edgevpn](https://mudler.github.io/edgevpn) (optional) as fabric for the distributed network, node coordination and bootstrap. Provides also embedded DNS capabilities for the cluster. Internally uses [libp2p](https://github.com/libp2p/go-libp2p) for the P2P mesh capabilities.
- [nohang](https://github.com/hakavlad/nohang) A sophisticated low memory handler for Linux.
@ -1,167 +0,0 @@
---
title: "P2P Network"
linkTitle: "P2P Network"
weight: 5
date: 2023-02-15
description: >
How Kairos leverage Peer-to-peer (P2P) to self-coordinate clusters at the edge.
---
## Introduction
As more organizations seek to take advantage of the benefits of Kubernetes for their edge applications, the difficulties of managing large-scale clusters become apparent. Managing, configuring, and coordinating multiple clusters can be a complex and time-consuming process. We need solutions that offer zero-touch configuration and self-coordination.
To address these challenges, Kairos provides an easy and robust solution for deploying Kubernetes workloads at the edge. By utilizing peer-to-peer (p2p) technology, Kairos can automatically coordinate and create Kubernetes clusters without requiring a control management interface. This frees users up to concentrate on running and scaling their applications instead of spending time on cluster management.
In this document, we will examine the advantages of using Kairos to deploy Kubernetes clusters at the edge, and how p2p technology facilitates self-coordination for a zero-touch configuration experience. We will also explore how Kairos' highly adaptable and container-based approach, combined with an immutable OS and meta-distribution, makes it an excellent choice for edge deployments.
{{% alert title="Note" %}}
You can also watch our [Kairos and libp2p video]({{< ref "docs/media/#how-kairos-uses-libp2p" >}} "Media") in the [Media Section]({{< ref "docs/media" >}} "Media")
{{% /alert %}}
## Overview: P2P for self-coordination
<img align="right" width="200" src="https://user-images.githubusercontent.com/2420543/219048504-986da0e9-aca3-4c9e-b980-ba2a6dc03bf7.png">
Kairos creates self-coordinated, fully meshed clusters at the edge by using a combination of P2P technology, VPN, and Kubernetes.
This design is made up of several components:
- The Kairos base OS with support for different distribution flavors and k3s combinations (see our support matrix [here](/docs/reference/image_matrix)).
- A Virtual private network interface ([EdgeVPN](https://github.com/mudler/edgevpn) which leverages [libp2p](https://github.com/libp2p/go-libp2p)).
- K3s/CNI configured to work with the VPN interface.
- A shared ledger accessible to all nodes in the p2p private network.
By using libp2p as the transport layer, Kairos can abstract connections between the nodes and use it as a coordination mechanism. The shared ledger serves as a cache to store additional data, such as node tokens to join nodes to the cluster or the cluster topology, and is accessible to all nodes in the P2P private network. The VPN interface is automatically configured and self-coordinated, requiring zero-configuration and no user intervention.
Moreover, any application at the OS level can use P2P functionalities by using Virtual IPs within the VPN. The user only needs to provide a generated shared token containing OTP seeds for rendezvous points used during connection bootstrapping between the peers. It's worth noting that the VPN is optional, and the shared ledger can be used to coordinate and set up other forms of networking between the cluster nodes, such as KubeVIP. (See this example [here](/docs/examples/multi-node-p2p-ha-kubevip))
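For reference, such a shared token can be generated with the `edgevpn` image (the same command is used in the debugging station section later in this documentation):
```bash
# Generate a new network token containing the OTP seeds
docker run -ti --rm quay.io/mudler/edgevpn -b -g
```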
## Implementation
Peer-to-peer (P2P) networking is used to coordinate and bootstrap nodes. When this functionality is enabled, there is a distributed ledger accessible over the nodes that can be programmatically accessed and used to store metadata.
Kairos can automatically set up a VPN between nodes using a shared secret. This enables the nodes to automatically coordinate, discover, configure, and establish a network overlay spanning across multiple regions. [EdgeVPN](https://github.com/mudler/edgevpn) is used for this purpose.
The private network is bootstrapped in three phases, with discovery driven by a distributed hash table (DHT) and multicast DNS (mDNS), which can be selectively disabled or enabled. The three phases are:
1. Discovery
1. Gossip network
1. Full connectivity
During the discovery phase, which can occur via mDNS (for LAN) or DHT (for WAN), nodes discover each other by broadcasting their presence to the network.
In the second phase, rendezvous points are rotated by OTP (one-time password). A shared token containing OTP seeds is used to generate these rendezvous points, which serve as a secure way to bootstrap connections between nodes. This is essential for establishing a secure and self-coordinated P2P network.
In the third phase, a gossip network is formed among nodes, which shares shared ledger blocks symmetrically encrypted with AES. The key used to encrypt these blocks is rotated via OTP. This ensures that the shared ledger is secure and that each node has access to the most up-to-date version of the shared configuration. The ledger is used to store arbitrary metadata from the nodes of the network. On each update, a new block is created with the new information and propagated via gossip.
Optionally, full connectivity can be established by bringing up a TUN interface, which routes packets via the libp2p network. This enables any application at the OS level to leverage P2P functionalities by using VirtualIPs accessible within the VPN.
The coordination process in Kairos is designed to be resilient and self-coordinated, with no need for complex network configurations or control management interfaces. By using this approach, Kairos simplifies the process of deploying and managing Kubernetes clusters at the edge, making it easy for users to focus on running and scaling their applications.
<p align="center">
<img src="https://mudler.github.io/edgevpn/docs/concepts/architecture/edevpn_bootstrap_hu8e61a09dccbf3a67bf1fc604ae4924fd_64246_1200x550_fit_catmullrom_3.png">
</p>
### Why Peer-to-Peer?
Kairos has chosen Peer-to-Peer as an internal component to enable automatic coordination of Kairos nodes. To understand why [EdgeVPN](https://github.com/mudler/edgevpn) has been selected, see the comparison table below, which compares EdgeVPN with other popular VPN solutions:
| | Wireguard | OpenVPN | EdgeVPN |
|------|-----------|-------------|----------------------------------------------------|
| Memory Space | Kernel-module | Userspace | Userspace |
| Protocol | UDP | UDP, TCP | TCP, UDP/QUIC, UDP, ws, everything supported by libp2p |
| P2P | Yes | Yes | Yes |
| Fully meshed | No | No | Yes |
| Management Server (SPOF) | Yes | Yes | No |
| Self-coordinated | No | No | Yes |
Key factors, such as self-coordination and the ability to share metadata between nodes, have led to the selection of EdgeVPN. However, there are tradeoffs and considerations to note in the current architecture, such as:
- Routing all traffic to a VPN can introduce additional latency
- Gossip protocols can be chatty, especially if using DHT, creating VPNs that span across regions
- EdgeVPN is in user-space, which can be slower compared to kernel-space solutions such as Wireguard
- For highly trafficked environments, there will be an increase in CPU usage due to the additional encryption layers introduced by EdgeVPN
Nonetheless, these tradeoffs can be overcome, and new features can be added due to EdgeVPN's design. For example:
- There is no need for any server to handle traffic (no SPOF), and no additional configuration is necessary
- The p2p layer is decentralized and can span across different networks by using DHT and a bootstrap server
- Self-coordination simplifies the provisioning experience
- Internal cluster traffic can also be offloaded to other mechanisms if network performance is a prerequisite
- For instance, with [KubeVIP](/docs/examples/multi-node-p2p-ha-kubevip), new nodes can join the network and become cluster members even after the cluster provisioning phase, making EdgeVPN a scalable solution.
### Why a VPN?
A VPN allows for the configuration of a Kubernetes cluster without depending on the underlying network configuration. This design model is popular in certain use cases at the edge where fixed IPs are not a viable solution. We can summarize the implications as follows:
| | K8s Without VPN | K8s With VPN |
|----------|---------------------|----------------------------------------------------------|
| IP management | Needs to have static IP assigned by DHCP or manually configured (can be automated) | Automatically coordinated Virtual IPs for nodes. Or manually assign them |
| Network Configuration | `etcd` needs to be configured with IPs assigned by your network/fixed | Automatically assigned, fixed VirtualIPs for `etcd`. |
| Networking | Cluster IPs, and networking is handled by CNIs natively (no layers) | Kubernetes Network services will have Cluster IPs sitting below the VPN. <br> Every internal kubernetes communication goes through VPN. <br> The additional e2e encrypted network layer might add additional latency, 0-1ms in LAN.|
The use of a VPN for a Kubernetes cluster has significant implications. With a VPN, IP management is automatic and does not require static IP addresses assigned by DHCP or manually configured. Nodes can be assigned virtual IPs that are automatically coordinated or manually assigned, which eliminates the need for manual configuration of IP addresses. Additionally, EdgeVPN implements distributed DHCP, so there is no single point of failure.
Additionally, network configuration is simplified with a VPN. Without a VPN, `etcd` needs to be configured with IPs assigned by your network or fixed. With a VPN, virtual IPs are automatically assigned for `etcd`.
In terms of networking, a Kubernetes cluster without a VPN handles cluster IPs and networking natively without additional layers. However, with a VPN, Kubernetes network services will have Cluster IPs below the VPN. This means that all internal Kubernetes communication goes through the VPN. While the additional end-to-end encrypted network layer might add some latency, it is typically observed to be only 0-1ms on a LAN. However, due to the encryption layers, CPU usage might be high under demanding traffic.
It's also worth noting that while a VPN provides a unified network environment, it may not be necessary or appropriate for all use cases. Users can choose to opt out of using the VPN and leverage only the coordination aspect, for example with [KubeVIP](/docs/examples/multi-node-p2p-ha-kubevip). Ultimately, the decision to use a VPN should be based on the specific needs and requirements of your Kubernetes cluster.
### Packet flow
The Virtual Private Network used is [EdgeVPN](https://github.com/mudler/edgevpn), which leverages [libp2p](https://github.com/libp2p/go-libp2p) for the transport layer.
To explain how the packet flow works between two nodes, Node A and Node B, refer to the diagram below:
<p align="center">
<img src="https://user-images.githubusercontent.com/2420543/219048445-300de7e8-428f-4ded-848d-bf73c56acca1.png">
</p>
While actively participating in a network, each node keeps the shared ledger up to date with information about itself and how it can be reached, advertising its own IP and libp2p identity; this allows nodes to discover each other and how to route packets.
Assuming that we want to establish an SSH connection from Node A to Node B, which exposes the `sshd` service, through the VPN network, the process is as follows:
1. Node A (`10.1.0.1`) uses `ssh` to dial the VirtualIP of Node B (`10.1.0.2`) in the network.
2. EdgeVPN reads the frame from the TUN interface.
3. If EdgeVPN finds a match in the ledger between the VirtualIP and an associated Identity, it opens a p2p stream to Node B using the libp2p Identity.
4. Node B receives the incoming p2p stream from EdgeVPN.
5. Node B performs a lookup in the shared ledger.
6. If a match is found, Node B routes the packet back to the TUN interface, up to the application level.
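In practice, with the VirtualIPs from the example above, the connection from Node A is an ordinary SSH invocation:
```bash
# From Node A: dial Node B through the VPN using its VirtualIP
ssh kairos@10.1.0.2
```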
### Controller
A set of Kubernetes Native Extensions ([Entangle](/docs/reference/entangle)) provides peer-to-peer functionalities also to existing clusters by allowing to bridge connection with the same design architecture described above.
It can be used to:
- Bridge services between clusters
- Bridge external connections to cluster
- Setup EdgeVPN as a daemonset between cluster nodes
See also the Entangle [documentation](/docs/reference/entangle) to learn more about it.
## Benefits
<p align="center">
<img src="https://user-images.githubusercontent.com/2420543/195459436-236139cf-605d-4608-9018-ea80381d4e77.png">
</p>
The use of p2p technology to enable self-coordination of Kubernetes clusters in Kairos offers a number of benefits:
1. **Simplified deployment**: Deploying Kubernetes clusters at the edge is greatly simplified. Users don't need to specify any network settings or use a control management interface to set up and manage their clusters.
1. **Easy customization**: Kairos offers a highly customizable approach to deploying Kubernetes clusters at the edge. Users can choose from a range of meta distributions, including openSUSE, Ubuntu, Alpine and [many others](/docs/reference/image_matrix), and customize the configuration of their clusters as needed.
1. **Automatic coordination**: With Kairos, the coordination of Kubernetes clusters is completely automated. The p2p network is used as a coordination mechanism for the nodes, allowing them to communicate and coordinate with each other without the need for any external management interface. This means that users can set up and manage their Kubernetes clusters at the edge with minimal effort, freeing up their time to focus on other tasks.
1. **Secure and replicated**: The use of rendezvous points and a shared ledger, encrypted with AES and rotated via OTP, ensures that the p2p network is secure and resilient. This is especially important when deploying Kubernetes clusters at the edge, where network conditions can be unpredictable.
1. **Resilient**: Kairos ensures that the cluster remains resilient, even in the face of network disruptions or failures. By using VirtualIPs, nodes can communicate with each other without the need for static IPs, and the cluster's etcd database remains unaffected by any disruptions.
1. **Scalable**: Kairos is designed to be highly scalable. With the use of p2p technology, users can easily add or remove nodes from the cluster, without the need for any external management interface.
By leveraging p2p technology, Kairos makes it easy for users to deploy and manage their clusters without the need for complex network configurations or external management interfaces. The cluster remains secure, resilient, and scalable, ensuring that it can handle the challenges of deploying Kubernetes at the edge.
## Conclusions
In conclusion, Kairos offers an innovative approach to deploying and managing Kubernetes clusters at the edge. By leveraging peer-to-peer technology, Kairos eliminates the need for a control management interface and enables self-coordination of clusters. This makes it easier to deploy and manage Kubernetes clusters at the edge, saving users time and effort.
The use of libp2p, shared ledger, and OTP for bootstrapping and coordination thanks to [EdgeVPN](https://github.com/mudler/edgevpn) make the solution secure and resilient. Additionally, the use of VirtualIPs and the option to establish a TUN interface ensures that the solution is flexible and can be adapted to a variety of network configurations without requiring exotic configurations.
With Kairos, users can boost large-scale Kubernetes adoption at the edge, achieve zero-touch configuration, and have their cluster's lifecycle completely managed, all while enjoying the benefits of self-coordination and zero network configuration. This allows users to focus on running and scaling their applications, rather than worrying about the complexities of managing their Kubernetes clusters.
@ -1,6 +0,0 @@
---
title: "Development"
linkTitle: "Development"
weight: 7
description: >
---
@ -1,246 +0,0 @@
---
title: "Debugging station"
linkTitle: "Debugging station"
weight: 4
date: 2023-03-15
description: >
Debugging station
---
When developing or troubleshooting Kairos, it can be useful to share a local cluster with another peer. This section illustrates how to use [Entangle](/docs/reference/entangle) to achieve that. We call this setup _debugging-station_.
## Configuration
{{% alert title="Note" color="warning" %}}
This section describes the configuration step by step. If you are in a hurry, you can skip this section and directly go to **Deploy with AuroraBoot**.
{{% /alert %}}
When deploying a new cluster, we can use [Bundles](/docs/advanced/bundles) to install the `entangle` and `cert-manager` chart automatically. We specify the bundles in the cloud config file as shown below:
```yaml
bundles:
- targets:
- run://quay.io/kairos/community-bundles:cert-manager_latest
- run://quay.io/kairos/community-bundles:kairos_latest
```
We also need to enable entangle by setting `kairos.entangle.enable: true`.
Next, we generate a new token that we will use to connect to the cluster later.
```bash
docker run -ti --rm quay.io/mudler/edgevpn -b -g
```
In order for `entangle` to use the token, we can define an `Entanglement` that exposes SSH over the mesh network, like the following:
```yaml
apiVersion: v1
kind: Secret
metadata:
name: ssh-entanglement
namespace: kube-system
type: Opaque
stringData:
network_token: ___GENERATED TOKEN HERE___
---
apiVersion: entangle.kairos.io/v1alpha1
kind: Entanglement
metadata:
name: ssh-entanglement
namespace: kube-system
spec:
serviceUUID: "ssh"
secretRef: "ssh-entanglement"
host: "127.0.0.1"
port: "22"
hostNetwork: true
```
{{% alert title="Note" color="warning" %}}
If you already have a Kubernetes cluster, you can install the [Entangle](/docs/reference/entangle) chart and just apply the manifest.
{{% /alert %}}
This entanglement exposes port `22` of the node over the mesh network under the `ssh` service UUID, so we can later connect to it. Replace `___GENERATED TOKEN HERE___` with the token you previously generated with the `docker` command (check out the [documentation](/docs/reference/entangle) for advanced usage).
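To avoid copy-paste mistakes, you can also generate the token and substitute the placeholder in one go. The following is just a convenience sketch (the `expose-ssh.yaml` file name is our assumption for a local copy of the manifest above):
```bash
# Generate a fresh network token and capture it
TOKEN=$(docker run --rm quay.io/mudler/edgevpn -b -g)
# Replace the placeholder in a local copy of the manifest
sed -i "s|___GENERATED TOKEN HERE___|${TOKEN}|" expose-ssh.yaml
```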
In order to deploy the `Entanglement` automatically, we can add it to the `k3s` manifests folder in the cloud config file:
```yaml
write_files:
- path: /var/lib/rancher/k3s/server/manifests/expose-ssh.yaml
permissions: "0644"
owner: "root"
content: |
apiVersion: v1
kind: Secret
metadata:
name: ssh-entanglement
namespace: kube-system
type: Opaque
stringData:
network_token: ___GENERATED TOKEN HERE___
---
apiVersion: entangle.kairos.io/v1alpha1
kind: Entanglement
metadata:
name: ssh-entanglement
namespace: kube-system
spec:
serviceUUID: "ssh"
secretRef: "ssh-entanglement"
host: "127.0.0.1"
port: "22"
hostNetwork: true
```
Here's an example of a complete cloud configuration file which automatically installs a Kairos node on the largest disk and exposes SSH with `entangle`:
```yaml
#cloud-config
install:
device: "auto"
auto: true
reboot: true
hostname: debugging-station-{{ trunc 4 .MachineID }}
users:
- name: kairos
passwd: kairos
ssh_authorized_keys:
- github:mudler
k3s:
enabled: true
# Specify the bundle to use
bundles:
- targets:
- run://quay.io/kairos/community-bundles:system-upgrade-controller_latest
- run://quay.io/kairos/community-bundles:cert-manager_latest
- run://quay.io/kairos/community-bundles:kairos_latest
kairos:
entangle:
enable: true
write_files:
- path: /var/lib/rancher/k3s/server/manifests/expose-ssh.yaml
permissions: "0644"
owner: "root"
content: |
apiVersion: v1
kind: Secret
metadata:
name: ssh-entanglement
namespace: kube-system
type: Opaque
stringData:
network_token: ___GENERATED TOKEN HERE___
---
apiVersion: entangle.kairos.io/v1alpha1
kind: Entanglement
metadata:
name: ssh-entanglement
namespace: kube-system
spec:
serviceUUID: "ssh"
secretRef: "ssh-entanglement"
host: "127.0.0.1"
port: "22"
hostNetwork: true
```
In this file, you can specify various settings for your debugging station. For example, the `hostname` field sets the name of the machine, and the `users` field creates a new user with the name "kairos" and a pre-defined password and SSH key. The `k3s` field enables the installation of the k3s Kubernetes distribution.
## Deploy with AuroraBoot
To automatically boot and install the debugging station, we can use [Auroraboot](/docs/reference/auroraboot). The following example shows how to use the cloud config above with it:
```bash
cat <<EOF | docker run --rm -i --net host quay.io/kairos/auroraboot \
--cloud-config - \
--set "container_image=quay.io/kairos/kairos-opensuse-leap:v1.6.1-k3sv1.26.1-k3s1"
#cloud-config
install:
device: "auto"
auto: true
reboot: true
hostname: debugging-station-{{ trunc 4 .MachineID }}
users:
- name: kairos
passwd: kairos
ssh_authorized_keys:
- github:mudler
k3s:
enabled: true
# Specify the bundle to use
bundles:
- targets:
- run://quay.io/kairos/community-bundles:system-upgrade-controller_latest
- run://quay.io/kairos/community-bundles:cert-manager_latest
- run://quay.io/kairos/community-bundles:kairos_latest
kairos:
entangle:
enable: true
write_files:
- path: /var/lib/rancher/k3s/server/manifests/expose-ssh.yaml
permissions: "0644"
owner: "root"
content: |
apiVersion: v1
kind: Secret
metadata:
name: ssh-entanglement
namespace: kube-system
type: Opaque
stringData:
network_token: ___GENERATED TOKEN HERE___
---
apiVersion: entangle.kairos.io/v1alpha1
kind: Entanglement
metadata:
name: ssh-entanglement
namespace: kube-system
spec:
serviceUUID: "ssh"
secretRef: "ssh-entanglement"
host: "127.0.0.1"
port: "22"
hostNetwork: true
EOF
```
## Connecting to the cluster
To connect to the cluster, we first need to open the tunnel in one terminal and then ssh from another one.
In one terminal, run the following command (it will run in the foreground):
```bash
# Run in a terminal (it stays in the foreground)
export EDGEVPNTOKEN="___GENERATED TOKEN HERE___"
docker run -e "EDGEVPNTOKEN=$EDGEVPNTOKEN" --net host quay.io/mudler/edgevpn service-connect ssh 127.0.0.1:2222
```
In another terminal, run the following command to ssh to the box:
```bash
# Run in another terminal
ssh kairos@127.0.0.1 -p 2222
```
Note: it might take a few attempts to establish a connection.
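If you prefer to script the wait instead of retrying manually, a small loop like the following (our own sketch) polls until the tunnel accepts connections:
```bash
# Retry the ssh connection until the tunnel is ready
until ssh -o ConnectTimeout=5 -p 2222 kairos@127.0.0.1; do
  echo "Tunnel not ready yet, retrying in 5 seconds..."
  sleep 5
done
```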

View File

@ -1,131 +0,0 @@
---
title: "Development notes"
linkTitle: "Development"
weight: 1
date: 2022-11-13
description: >
Guidelines when developing Kairos
---
Here you can find development notes intended for maintainers and guidance for new contributors.
## Repository structure
Kairos uses [earthly](https://earthly.dev/) as a build system instead of Makefiles. This ensures that, regardless of the environment, you should be able to build `Kairos` seamlessly. To track external packages (like kernels, additional binaries, and so on) which follow their own versioning, [luet](https://luet.io) is used, and there is a separate [repository](https://github.com/kairos-io/packages) with package building specifications.
- [The Kairos repository](https://github.com/kairos-io/kairos) contains the `kairos-agent` code, the OS definitions (`Dockerfile`s) and configuration. The releases generate core ISOs without any Kubernetes engine.
- [The packages repository](https://github.com/kairos-io/packages) contains package specifications used by `kairos` while building OS images.
- [The provider-kairos repository](https://github.com/kairos-io/provider-kairos) contains the kairos provider component, which uses the SDK to bring up a Kubernetes cluster with `k3s`. It uses images from `kairos` core to remaster images with `k3s` and the `provider` embedded, which allows Kubernetes clusters to be bootstrapped automatically. Note that Kairos core can also be extended at runtime to add providers, or to deploy images with the provider already embedded.
## Build Kairos
To build Kairos you only need Docker installed locally. There is a convenience script in the root of the repository (`earthly.sh`) which wraps `earthly` inside Docker, so it can be used instead of installing `earthly` (e.g. `./earthly.sh +iso ...`). However, for daily development it is strongly suggested to install `earthly` on your workstation: the `earthly.sh` script runs `earthly` in a container, and as such there are limitations on image caching between builds.
To build a Kairos ISO, you need to specify the flavor. For example, to build Kairos Alpine with `earthly` installed locally:
```bash
earthly -P +iso --FLAVOR=alpine
```
This will build a container image from scratch and create an ISO which is ready to be booted.
Note that earthly targets are prefixed with `+`, while variables are passed as flags and `ARGS` can be passed as parameters with `--`.
### Adding flavors
Every source image used as a flavor is inside the `images` folder in the top-level directory. Each Dockerfile has an extension corresponding to the flavor, which can be used as an argument for earthly builds (you will find a `Dockerfile.alpine` that is used by our `earthly -P +iso --FLAVOR=alpine` above).
To add a flavor, it is enough to create a Dockerfile corresponding to the flavor and check whether any specific setting is required for it in the `+framework` target.
Generally, to add a flavor the image needs to have the following installed:
- An init system (systemd or openRC are supported)
- Kernel
- GRUB
- rsync
If you are building a flavor without Earthly, be sure to consume the packages from our repository to convert it to a Kairos-based version.
### Bumping packages
Let's assume there is some change you introduce in a package consumed by kairos
(e.g. [kcrypt](https://github.com/kairos-io/kcrypt)). In order to build a kairos image
with the updated package, first tag the repository (`kcrypt` in our example).
Then trigger [the auto-bump pipeline](https://github.com/kairos-io/packages/actions/workflows/autobump.yaml)
on the packages repository. This should create at least one PR which bumps the desired package to the latest tag.
It may also create more PRs if other packages had new tags recently. When the PR passes CI, merge it.
Next, in order to bump the packages on kairos, manually trigger [the bump-repos pipeline](https://github.com/kairos-io/kairos/actions/workflows/bump_repos.yml).
This will automatically open a PR on the kairos repository which can be merged when it passes CI.
After this, any images produced by the kairos repository will have the latest version of the package(s).
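If you prefer the command line over the GitHub web UI, both pipelines can also be triggered with the GitHub CLI. This sketch assumes you have `gh` installed and the required permissions on both repositories:
```bash
# Trigger the auto-bump pipeline on the packages repository
gh workflow run autobump.yaml -R kairos-io/packages
# After the resulting PR is merged, trigger the repo bump on kairos
gh workflow run bump_repos.yml -R kairos-io/kairos
```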
## New controllers
Kairos-io adopts [operator-sdk](https://github.com/operator-framework/operator-sdk).
To install `operator-sdk` locally you can use the `kairos` repositories:
1. Install Luet:
`curl https://luet.io/install.sh | sudo sh`
2. Enable the Kairos repository locally:
`luet repo add kairos --url quay.io/kairos/packages --type docker`
3. Install operator-sdk:
`luet install -y utils/operator-sdk`
### Create the controller
Create a directory and initialize the new project with operator-sdk:
```bash
$ mkdir kairos-controller-foo
$ cd kairos-controller-foo
$ operator-sdk init --domain kairos.io --repo github.com/kairos-io/kairos-controller-foo
```
### Create a resource
To create a resource boilerplate:
```
$ operator-sdk create api --group <groupname> --version v1alpha1 --kind <resource> --resource --controller
```
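For example, scaffolding a hypothetical `FooNode` resource in a `cluster` group (the names are purely illustrative) would look like this:
```bash
operator-sdk create api --group cluster --version v1alpha1 --kind FooNode --resource --controller
```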
### Convert to a Helm chart
operator-sdk does not have direct support for rendering Helm charts (see [issue](https://github.com/operator-framework/operator-sdk/issues/4930)), so we use [kubesplit](https://github.com/spectrocloud/kubesplit) to render Helm templates by piping kustomize manifests to it. `kubesplit` will split every resource and add minimal `helm` templating logic, which will guide you in creating the Helm chart.
If you have already enabled the `kairos` repository locally, you can install `kubesplit` with:
```
$ luet install -y utils/kubesplit
```
### Test with Kind
Operator-sdk will generate a Makefile for the project. You can add the following and edit as needed to add kind targets:
```
CLUSTER_NAME?="kairos-controller-e2e"

kind-setup:
	kind create cluster --name ${CLUSTER_NAME} || true
	$(MAKE) kind-setup-image

kind-setup-image: docker-build
	kind load docker-image --name $(CLUSTER_NAME) ${IMG}

.PHONY: test_deps
test_deps:
	go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo
	go install github.com/onsi/gomega/...

.PHONY: unit-tests
unit-tests: test_deps
	$(GINKGO) -r -v --covermode=atomic --coverprofile=coverage.out -p -r ./pkg/...

e2e-tests:
	GINKGO=$(GINKGO) KUBE_VERSION=${KUBE_VERSION} $(ROOT_DIR)/script/test.sh

kind-e2e-tests: ginkgo kind-setup install undeploy deploy e2e-tests
```

View File

@ -1,269 +0,0 @@
---
title: "Booting Kairos on Nvidia Jetson ARM"
linkTitle: "Booting Kairos on Nvidia Jetson ARM"
weight: 5
date: 2022-11-13
description: >
This page contains a reference on how to run Kairos on Nvidia Jetson ARM
---
{{% alert title="Note" %}}
Please note that the following page contains only a development reference. At the time of writing, we have tried porting Kairos to the Jetson Nano eMMC without success. This is due to the old supported kernel (4.9) not working properly with `EFISTUB` and `U-boot` (you can see the [issue here](https://github.com/kairos-io/kairos/issues/45)). However, the steps outlined _should_ be a good reference to port Kairos to those architectures _when_ a new kernel version is available. We have successfully booted a Jetson Nano with the 5.15 kernel; however, due to the lack of driver support, eMMC partitions are not properly recognized.
{{% /alert %}}
This page is a development reference for booting Kairos on Nvidia Jetson devices. Nvidia Jetson images by default ship `extlinux` as the bootloader, without EFI boot. This guide explains how to get U-Boot to chainload `grub2` instead, which can then be used to boot and load `Kairos`.
Note that currently there are no official Kairos core images for Jetson devices. This page refers to the Jetson Nano eMMC version as the current reference, but the steps should be similar for other boards, as they outline how to use the Nvidia SDK to flash the OS onto the eMMC of the device.
The steps involved are:
- Prepare the kernel (if you already have one compatible with `EFISTUB`, you can skip this part)
- Flash u-boot (if your U-boot version supports booting EFI shells, you might skip this part too)
- Prepare the Kairos partitions
- Flash the image to the board
## Prerequisites
You need the Nvidia SDK and a few other dependencies on the system. Note that for the Jetson Nano you can't use the latest SDK version, as it no longer supports the board. The latest version available with support for the Jetson Nano is [r32.7.3](https://developer.nvidia.com/embedded/linux-tegra-r3273):
```bash
# Build dependencies
apt update && apt install -y git-core build-essential bc wget xxd kmod flex libelf-dev bison libssl-dev
mkdir build
build_dir=$PWD/build
cd build
# Get Jetson SDK compatible with Jetson NANO
wget https://developer.nvidia.com/downloads/remetpack-463r32releasev73t210jetson-210linur3273aarch64tbz2 -O Jetson-210_Linux_R32.7.3_aarch64.tbz2
tar xvf Jetson-210_Linux_R32.7.3_aarch64.tbz2
```
## Prepare the Kernel
The only requirement for the kernel, in order for this to work, is that it has `CONFIG_EFI_STUB` and `CONFIG_EFI` enabled.
The default kernel on the Nvidia Jetson Nano is `4.9`, which turns out not to have those enabled.
### Build from official Nvidia sources
If your kernel is not compiled to boot as _EFI stub_ you can refer to the steps below to compile the official Nvidia kernel with `EFISTUB`:
```bash
cd build
wget https://developer.nvidia.com/downloads/remack-sdksjetpack-463r32releasev73sourcest210publicsourcestbz2 -O public_sources.tbz2
wget https://developer.nvidia.com/embedded/dlc/l4t-gcc-7-3-1-toolchain-64-bit
tar xvf l4t-gcc-7-3-1-toolchain-64-bit
# gcc-linaro-7.3.1-2018.05-x86_64_aarch64-linux-gnu/....
export CROSS_COMPILE_AARCH64_PATH=$PWD/gcc-linaro-7.3.1-2018.05-x86_64_aarch64-linux-gnu/
cd Linux_for_Tegra/source/public
tar xvf kernel_src.tbz2
mkdir kernel_out
echo "CONFIG_EFI_STUB=y" >> ./kernel/kernel-4.9/arch/arm64/configs/tegra_defconfig
echo "CONFIG_EFI=y" >> ./kernel/kernel-4.9/arch/arm64/configs/tegra_defconfig
# https://forums.developer.nvidia.com/t/kernel-build-script-nvbuild-sh-with-output-dir-option-not-working/173087
sed -i '86s/.*/ O_OPT=(O="${KERNEL_OUT_DIR}")/' nvbuild.sh
## See workaround for DTB errors in Troubleshooting (edit Kconfig.include..)
./nvbuild.sh -o $PWD/kernel_out
```
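Before moving on, it is worth double-checking that the resulting kernel configuration actually has the EFI options enabled. Assuming the output layout used above, something like:
```bash
# Both options must be =y for the kernel to boot as an EFI stub
grep -E "^CONFIG_EFI(_STUB)?=y" Linux_for_Tegra/source/public/kernel_out/.config
```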
Note that, with the Jetson NANO, the kernel will fail to boot while allocating memory during the EFI stub boot phase.
### Build from official linux kernel
The `5.15` kernel seems to boot fine on the Jetson Nano; however, it fails to load the eMMC drivers needed to detect the eMMC partitions. A configuration reference can be found [here](https://github.com/kairos-io/packages/blob/main/packages/kernels/linux-tegra/config).
```bash
build_dir=$PWD/build
cd build
# Clone the kernel
git clone --branch v5.15 --depth 1 https://github.com/torvalds/linux.git kernel-4.9
wget https://developer.nvidia.com/downloads/remack-sdksjetpack-463r32releasev73sourcest210publicsourcestbz2 -O public_sources.tbz2
tar xvf public_sources.tbz2
wget https://developer.nvidia.com/embedded/dlc/l4t-gcc-7-3-1-toolchain-64-bit
tar xvf l4t-gcc-7-3-1-toolchain-64-bit
# Replace the kernel in the SDK
pushd Linux_for_Tegra/source/public && tar xvf kernel_src.tbz2 && rm -rf kernel/kernel-4.9 && mv $build_dir/kernel-4.9 ./kernel/ && popd
# Use the tegra config, patch nvbuild.sh
mkdir kernel_out && \
wget https://raw.githubusercontent.com/kairos-io/packages/main/packages/kernels/linux-tegra/config -O ./kernel/kernel-4.9/arch/arm64/configs/defconfig && \
wget https://raw.githubusercontent.com/kairos-io/packages/main/packages/kernels/linux-tegra/nvbuild.sh -O nvbuild.sh && chmod +x nvbuild.sh
# gcc 12 patches
pushd Linux_for_Tegra/source/public/kernel/kernel-4.9 && curl -L https://raw.githubusercontent.com/kairos-io/packages/main/packages/kernels/linux-tegra/patch.patch | patch -p1 && popd
# Build the kernel
pushd Linux_for_Tegra/source/public && \
CROSS_COMPILE_AARCH64_PATH=$build_dir/gcc-linaro-7.3.1-2018.05-x86_64_aarch64-linux-gnu/ ./nvbuild.sh -o $PWD/kernel_out
```
## Prepare container image (Kairos)
Now we need a container image with the OS image. The image needs to contain the kernel and the initramfs generated with `dracut`.
For instance, given that the kernel is available at `/boot/Image`, and the modules at `/lib/modules`:
```Dockerfile
FROM ....
RUN ln -sf Image /boot/vmlinuz
RUN kernel=$(ls /lib/modules | head -n1) && \
dracut -f "/boot/initrd-${kernel}" "${kernel}" && \
ln -sf "initrd-${kernel}" /boot/initrd && \
depmod -a "${kernel}"
```
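Once the base image is filled in, building and publishing the image is the usual container workflow. The image name below is a placeholder of ours; the resulting reference is what the flashing steps later consume as `$IMAGE`:
```bash
# Build and publish the image containing the kernel and initramfs
docker build -t ttl.sh/my-jetson-kairos:24h .
docker push ttl.sh/my-jetson-kairos:24h
# Used later by the osbuilder step
export IMAGE=ttl.sh/my-jetson-kairos:24h
```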
## Flashing
In order to flash to the `eMMC` we need the Nvidia SDK.
```bash
mkdir work
cd work
wget https://developer.nvidia.com/downloads/remetpack-463r32releasev73t210jetson-210linur3273aarch64tbz2 -O Jetson-210_Linux_R32.7.3_aarch64.tbz2
tar xvf Jetson-210_Linux_R32.7.3_aarch64.tbz2
```
### Replace U-boot (optional)
If the version of `u-boot` is old and doesn't support EFI booting, you can replace the `u-boot` binary like so:
```bash
wget http://download.opensuse.org/ports/aarch64/tumbleweed/repo/oss/aarch64/u-boot-p3450-0000-2023.01-2.1.aarch64.rpm
mkdir u-boot
cd u-boot
rpm2cpio ../u-boot-p3450-0000-2023.01-2.1.aarch64.rpm | cpio -idmv
cd ..
cd Linux_for_Tegra
# "p3450-0000" Depends on your board
cp -rfv ../u-boot/boot/u-boot.bin bootloader/t210ref/p3450-0000/u-boot.bin
```
### Disable Extlinux
We need to disable extlinux in order for u-boot to scan for EFI shells:
```bash
# Drop extlinux
echo "" > ./bootloader/extlinux.conf
```
### Prepare Partitions
We need to prepare the partitions from the container image we want to boot. To achieve this, we can use `osbuilder`, which will prepare the `img` files ready to be flashed with the SDK:
```bash
cd Linux_for_Tegra
docker run --privileged -e container_image=$IMAGE -v $PWD/bootloader:/bootloader --entrypoint /prepare_arm_images.sh -ti --rm quay.io/kairos/osbuilder-tools
```
This command should create `efi.img`, `oem.img`, `persistent.img`, `recovery_partition.img`, and `state_partition.img` in the `bootloader` directory.
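A quick sanity check that all five images were generated (their sizes are also needed when configuring the SDK below):
```bash
ls -l bootloader/*.img
```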
### Configure the SDK
In order to flash the partitions to the eMMC of the board, we need to configure the SDK to write the partitions to the board via its configuration files.
For the Jetson Nano, the configuration file for the partitions is located at `bootloader/t210ref/cfg/flash_l4t_t210_emmc_p3448.xml`; we replace the `partition name=APP` entry with:
```xml
<partition name="esp" type="data">
<allocation_policy> sequential </allocation_policy>
<filesystem_type> basic </filesystem_type>
<size> 20971520 </size>
<file_system_attribute> 0 </file_system_attribute>
<partition_type_guid> C12A7328-F81F-11D2-BA4B-00A0C93EC93B </partition_type_guid>
<allocation_attribute> 0x8 </allocation_attribute>
<percent_reserved> 0 </percent_reserved>
<filename> efi.img </filename>
<description> **Required.** Contains a redundant copy of CBoot. </description>
</partition>
<partition name="COS_RECOVERY" type="data">
<allocation_policy> sequential </allocation_policy>
<filesystem_type> basic </filesystem_type>
<size> 2298478592 </size>
<allocation_attribute> 0x8 </allocation_attribute>
<filename> recovery_partition.img </filename>
<description> </description>
</partition>
<partition name="COS_STATE" type="data">
<allocation_policy> sequential </allocation_policy>
<filesystem_type> basic </filesystem_type>
<size> 5234491392 </size>
<allocation_attribute> 0x8 </allocation_attribute>
<filename> state_partition.img </filename>
<description> </description>
</partition>
<partition name="COS_OEM" type="data">
<allocation_policy> sequential </allocation_policy>
<filesystem_type> basic </filesystem_type>
<size> 67108864 </size>
<allocation_attribute> 0x8 </allocation_attribute>
<filename> oem.img </filename>
<description> </description>
</partition>
<partition name="COS_PERSISTENT" type="data">
<allocation_policy> sequential </allocation_policy>
<filesystem_type> basic </filesystem_type>
<size> 2147483648 </size>
<allocation_attribute> 0x8 </allocation_attribute>
<filename> persistent.img </filename>
<description> </description>
</partition>
```
Note: The order matters here. We want to replace the default "APP" partition with our set of partitions.
If you didn't change the default size of the images you should be fine; however, you should check that the `<size></size>` of each block corresponds to the size of the files generated from your container image:
```bash
stat -c %s bootloader/efi.img
stat -c %s bootloader/recovery_partition.img
stat -c %s bootloader/state_partition.img
stat -c %s bootloader/oem.img
stat -c %s bootloader/persistent.img
```
### Flash
Put the board in recovery mode. Depending on the model, this process might differ:
- Turn off the board
- Jump the FC REC pin to ground
- Plug the USB cable
- Power on the board
If the board is ready to be flashed, you should see the following:
```bash
$ lsusb
Bus 003 Device 092: ID 0955:7f21 NVIDIA Corp. APX
```
To flash the configuration to the board, run:
```bash
./flash.sh -r jetson-nano-devkit-emmc mmcblk0p1
```
## Troubleshooting notes
You can use `picocom` to access the serial console:
```bash
picocom -b 115200 /dev/ttyUSB0
```
## References
- https://docs.nvidia.com/jetson/archives/r35.1/DeveloperGuide/text/SD/SoftwarePackagesAndTheUpdateMechanism.html#update-with-partition-layout-changes
- https://docs.nvidia.com/jetson/archives/r34.1/DeveloperGuide/text/SD/Kernel/KernelCustomization.html?highlight=kernel
- https://en.opensuse.org/HCL:Jetson_Nano#Update_Firmware
- https://nullr0ute.com/2020/11/installing-fedora-on-the-nvidia-jetson-nano/
- https://forums.developer.nvidia.com/t/support-nano-on-openwrt/219168/7

View File

@ -1,17 +0,0 @@
---
title: "Examples"
linkTitle: "Examples"
weight: 5
description: >
  This section contains various examples, how-tos, and tutorials for using Kairos
---
Welcome to the examples section of the Kairos documentation! Here, you will find a variety of examples that demonstrate how to use Kairos to create and manage Kubernetes clusters on bare metal.
## Getting Started
- [Quick Start Guide](/docs/getting-started): This guide will walk you through the process of installing Kairos and creating your first Kubernetes cluster on bare metal.
## Troubleshooting
- [Troubleshooting common issues](/docs/reference/troubleshooting): This page provides solutions to some common issues that you may encounter while using Kairos.

View File

@ -1,122 +0,0 @@
---
title: "How to Create an Airgap K3s Installation with Kairos"
linkTitle: "Airgapped ISO with AuroraBoot"
weight: 4
description: >
  This section describes examples of how to use AuroraBoot and Kairos bundles to create ISOs for airgapped installs
---
If you want to create an [airgap K3s installation](https://docs.k3s.io/installation/airgap), Kairos provides a convenient way to do so using AuroraBoot. In this guide, we will go through the process of creating a custom ISO of Kairos that contains a configuration file and a [bundle](/docs/advanced/bundles/) that executes preparatory steps after installation. The bundle will overlay new files in the system and prepare the node for having an airgapped K3s installation.
{{% alert title="Note" %}}
If you already have a Kubernetes cluster, you can use the osbuilder controller to generate container images with your additional files already inside.
{{% /alert %}}
## Prerequisites
Docker running on the host
## Creating the Bundle
First, we need to create a bundle that contains the K3s images used for the airgap installation. The bundle will place the images in the `/var/lib/rancher/k3s/agent/images` directory. `/var/lib/rancher` is already configured as persistent by Kairos defaults, so every change to that directory persists across reboots. You can add additional persistent paths to the system with [the cloud config](/docs/advanced/customizing/#bind-mounts)
1. Create a new directory named `images-bundle`, and create a new file inside it called `Dockerfile`.
2. Paste the following code into the `Dockerfile`:
```Dockerfile
FROM alpine AS build
WORKDIR /build
RUN wget https://github.com/k3s-io/k3s/releases/download/v1.23.16%2Bk3s1/k3s-airgap-images-amd64.tar.gz
FROM scratch
COPY ./run.sh /
COPY --from=build /build/k3s-airgap-images-amd64.tar.gz /assets
```
3. Create a new file called `run.sh` inside the `images-bundle` directory, and paste the following code:
```bash
#!/bin/bash
mkdir -p /usr/local/.state/var-lib-rancher.bind/k3s/agent/images/
cp -rfv ./k3s-airgap-images-amd64.tar.gz /usr/local/.state/var-lib-rancher.bind/k3s/agent/images/
```
4. Make the `run.sh` file executable by running the following command:
```bash
chmod +x run.sh
```
5. Build the container image by running the following command inside the `images-bundle` directory:
```bash
docker build -t images-bundle .
```
6. Save the bundle:
```
$ ls
images-bundle
# create a directory
$ mkdir data
$ docker save images-bundle -o data/bundle.tar
```
## Building the Offline ISO for Airgap
Now that we have created the bundle, we can use it to build an offline ISO for the airgap installation.
1. Create a cloud config for the ISO and save it as `config.yaml`. This file contains your Kairos cloud configuration and is used to set up the system when it is installed. An example can be:
```yaml
#cloud-config
install:
auto: true
device: "auto"
reboot: true
bundles:
# This bundle needs to run after-install as it consumes assets from the LiveCD
# which is not accessible otherwise at the first boot (there is no live-cd with any bundle.tar)
- targets:
- run:///run/initramfs/live/bundle.tar
local_file: true
# Define the user accounts on the node.
users:
- name: "kairos" # The username for the user.
passwd: "kairos" # The password for the user.
ssh_authorized_keys: # A list of SSH keys to add to the user's authorized keys.
- github:mudler # A key from the user's GitHub account.
k3s:
enabled: true
```
2. Build the ISO with [AuroraBoot](/docs/reference/auroraboot) by running the following command:
```bash
IMAGE=quay.io/kairos/kairos-opensuse-leap:v1.6.1-k3sv1.26.1-k3s1
docker pull $IMAGE
docker run -v $PWD/config.yaml:/config.yaml \
-v $PWD/build:/tmp/auroraboot \
-v /var/run/docker.sock:/var/run/docker.sock \
-v $PWD/data:/tmp/data \
--rm -ti quay.io/kairos/auroraboot:v0.2.0 \
--set "disable_http_server=true" \
--set "disable_netboot=true" \
--set "container_image=docker://$IMAGE" \
--set "iso.data=/tmp/data" \
--cloud-config /config.yaml \
--set "state_dir=/tmp/auroraboot"
```
The resulting ISO should be available at: `build/iso/kairos.iso`
This example is also available in the [AuroraBoot repository](https://github.com/kairos-io/AuroraBoot/tree/master/examples/airgap) in the `examples/airgap` directory, where you can run `build_docker.sh` to reproduce the example.
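Before shipping the ISO into the airgapped environment, you may want to verify that the bundle actually ended up on it. One way to check, assuming loop-mount privileges on your host, is:
```bash
# Mount the ISO and confirm bundle.tar is present at its root
mkdir -p /tmp/kairos-iso
sudo mount -o loop build/iso/kairos.iso /tmp/kairos-iso
ls -lh /tmp/kairos-iso/bundle.tar
sudo umount /tmp/kairos-iso
```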
## See also
- [Customize the OS image](/docs/advanced/customizing/)
- [Live layer bundles](/docs/advanced/livelayering/)
- [Create ISOs with Kubernetes](/docs/installation/automated/#kubernetes)
- [Bundles reference](https://kairos.io/docs/advanced/bundles/)

View File

@ -1,59 +0,0 @@
---
title: "Bundles"
linkTitle: "Bundles"
weight: 4
description: >
  This section describes examples of how to use a Kairos bundle to deploy MetalLB on top of K3s
---
Welcome to the guide on setting up MetalLB on a Kairos cluster with K3s! This tutorial will walk you through the steps of using a Kairos [bundle](/docs/advanced/bundles) to automatically configure MetalLB on your local network with an IP range of `192.168.1.10-192.168.1.20`. Check out the [MetalLB](/docs/examples/metallb) example to configure it without a [bundle](/docs/advanced/bundles).
For those unfamiliar with [MetalLB](https://metallb.universe.tf/), it is an open-source load balancer implementation for bare metal Kubernetes clusters that utilizes standard routing protocols. When used with K3s on Kairos, it provides load balancing capabilities and helps manage IP addresses within a cluster.
## Prerequisites
Before we begin, you will need to have the following:
1. Kairos [provider-kairos](https://github.com/kairos-io/provider-kairos) artifacts, which include K3s
1. A baremetal node to run the installation
## Installation
1. Follow the [Installation](/docs/installation) documentation for Kairos.
1. Use the following cloud configuration file when setting up Kairos:
```yaml
#cloud-config
hostname: metal-{{ trunc 4 .MachineID }}
users:
- name: kairos
# Change to your pass here
passwd: kairos
ssh_authorized_keys:
# Replace with your github user and un-comment the line below:
# - github:mudler
k3s:
enabled: true
args:
- --disable=traefik,servicelb
# Specify the bundle to use
bundles:
- targets:
- run://quay.io/kairos/community-bundles:metallb_latest
# Specify metallb settings, available only with the bundle.
metallb:
version: 0.13.7
address_pool: 192.168.1.10-192.168.1.20
```
There are a few key points to note in the configuration file:
- The `metallb` block is provided by the MetalLB bundle and allows us to specify the version of MetalLB that we want to deploy, as well as the `address_pool` available for our services.
- The `bundles` block enables the `run` [bundle](/docs/advanced/bundles) type. The bundle we are using is part of the [community-bundles](https://github.com/kairos-io/community-bundles) repository.
And that's it! With these steps, you should now have MetalLB configured and ready to use on your Kairos cluster. If you have any questions or run into any issues, don't hesitate to check out the [bundle documentation](/docs/advanced/bundles) or reach out to the community for support.
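After the node boots and the bundle has run, you can verify the deployment from the node itself. A quick check could look like the following (the `metallb-system` namespace is an assumption based on MetalLB's defaults):
```bash
# On the node, check that MetalLB pods and the address pool are present
sudo k3s kubectl get pods -n metallb-system
sudo k3s kubectl get ipaddresspools -n metallb-system
```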

View File

@ -1,60 +0,0 @@
---
title: "Using Kairos Core Images as an Installer"
linkTitle: "Using Kairos Core Images as an Installer"
weight: 4
description: >
Core images serve as the foundation for creating downstream images or as an installer for deploying other images during the installation process. In this guide, we'll take a closer look at using Kairos core images as an installer to deploy other container images.
---
Kairos is a powerful, open-source meta-distribution that allows you to easily deploy and manage nodes on your Immutable infrastructure.
One key feature of Kairos is the use of its core images, which are released as part of the [kairos-io/kairos](https://github.com/kairos-io/kairos) repository and can be found in the releases section. These core images serve as the foundation for creating [downstream images](/docs/advanced/customizing) or as an installer for deploying other images during the installation process. In this guide, we'll take a closer look at using Kairos core images as an installer to deploy other container images.
## Getting started
To begin using Kairos core images as an installer, you'll need to start by using the artifacts from the [Kairos core](https://github.com/kairos-io/kairos/releases) repository. These images do not include the Kubernetes engine, so you'll need to configure the container image you want to deploy in the `install.image` field of your cloud config file. A list of available images can be found in [our support matrix](/docs/reference/image_matrix).
For example, let's say you want to use an image from the provider-kairos repository. Your cloud config file might look something like this:
```yaml
#cloud-config
install:
# Here we specify the image that we want to deploy
image: "docker:quay.io/kairos/kairos-opensuse-leap:v1.4.0-k3sv1.26.0-k3s1"
```
Once you've chosen your image, you can move on to the installation process by following the steps outlined in our [Installation](/docs/installation) documentation.
For example, a full cloud-config might look like this:
```yaml
#cloud-config
install:
device: "auto"
auto: true
reboot: true
# Here we specify the image that we want to deploy
image: "docker:quay.io/kairos/kairos-opensuse-leap:v1.4.0-k3sv1.26.0-k3s1"
hostname: "test"
users:
- name: "kairos"
passwd: "kairos"
ssh_authorized_keys:
- github:mudler
k3s:
enable: true
```
## Configuring the installation
As you move through the installation process, there are a few key points to keep in mind when configuring your cloud config file:
- We set `install.image` to the container image that we want to deploy. This can be an image from [our support matrix](/docs/reference/image_matrix), a [custom image](/docs/advanced/customizing) or an [image from scratch](/docs/reference/build-from-scratch).
- After the installation is complete, the configuration in the `k3s` block takes effect: the system boots into the image specified in the `install.image` field, which in the example above ships the Kairos K3s provider, so the `k3s` block becomes active.
With these steps, you should now be able to use Kairos core images as an installer to deploy other container images. The process is straightforward and gives you the flexibility to customize your deployments and build custom images as needed.
You can also refer to our [troubleshooting](/docs/reference/troubleshooting) document if you face any issues while following the installation process.

View File

@ -1,118 +0,0 @@
---
title: "High Availability K3s deployments"
linkTitle: "HA with K3s"
weight: 3
description: >
  This section contains instructions on how to deploy Kairos with a highly available control plane for K3s
---
Please refer to the [k3s HA](https://docs.k3s.io/installation/ha-embedded) documentation.
This document describes how to configure Kairos with `k3s` by following the same documentation outline, to show how to apply `k3s` configuration to `Kairos`. It is implied that you are using a Kairos version with `k3s` included.
## New cluster
To run Kairos and k3s in this mode, you must have an odd number of server nodes. K3s documentation recommends starting with three nodes.
To get started, first launch a server node with the `cluster-init` flag added in `k3s.args` to enable clustering. A token can be specified here and will be used as a shared secret to join additional servers to the cluster. Note: if you don't provide one, a token will be generated automatically on your behalf and will be available at `/var/lib/rancher/k3s/server/node-token`.
```yaml
#cloud-config
hostname: metal-{{ trunc 4 .MachineID }}
users:
- name: kairos
# Change to your pass here
passwd: kairos
#ssh_authorized_keys:
## Add your github user here!
#- github:mudler
k3s:
enabled: true
args:
- --cluster-init
# Token will be generated if not specified at /var/lib/rancher/k3s/server/node-token
env:
K3S_TOKEN: "TOKEN_GOES_HERE"
```
After launching the first server, join the other servers to the cluster using the shared secret (`K3S_TOKEN`):
```yaml
#cloud-config
hostname: metal-{{ trunc 4 .MachineID }}
users:
- name: kairos
# Change to your pass here
passwd: kairos
ssh_authorized_keys:
# Add your github user here!
- github:mudler
k3s:
enabled: true
args:
- --server https://<ip or hostname of server1>:6443
env:
K3S_TOKEN: "TOKEN_GOES_HERE"
```
Now you have a highly available control plane. Any successfully clustered server can be used in the `--server` argument to join additional server and worker nodes.
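At this point you can verify from any server node that all control-plane members have joined. A quick check, run as root on one of the servers:
```bash
# All servers should be listed with control-plane roles
k3s kubectl get nodes -o wide
```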
### Joining a worker
Joining additional worker nodes to the cluster follows the same procedure as a single server cluster.
To join a worker when deploying a Kairos node, use the `k3s-agent` block:
```yaml
#cloud-config
hostname: metal-{{ trunc 4 .MachineID }}
users:
- name: kairos
# Change to your pass here
passwd: kairos
#ssh_authorized_keys:
## Add your github user here!
#- github:mudler
k3s-agent:
enabled: true
env:
K3S_TOKEN: "TOKEN_GOES_HERE"
K3S_URL: "https://<ip or hostname of server1>:6443"
```
## External DB
K3s requires two or more server nodes for this HA configuration. See the [K3s requirements guide](https://docs.k3s.io/installation/requirements) for minimum machine requirements.
When running k3s as a server, you must set the `datastore-endpoint` parameter so that K3s knows how to connect to the external datastore.
```yaml
#cloud-config
hostname: metal-{{ trunc 4 .MachineID }}
users:
- name: kairos
# Change to your pass here
passwd: kairos
#ssh_authorized_keys:
## Add your github user here!
#- github:mudler
k3s:
enabled: true
args:
- --datastore-endpoint mysql://username:password@tcp(hostname:3306)/database-name
# Token will be generated if not specified at /var/lib/rancher/k3s/server/node-token
env:
K3S_TOKEN: "TOKEN_GOES_HERE"
```
## Resources
- [High Availability with Embedded DB](https://docs.k3s.io/installation/ha-embedded)
- [High Availability with External DB](https://docs.k3s.io/installation/ha)

View File

@ -1,88 +0,0 @@
---
title: "MetalLB"
linkTitle: "MetalLB"
weight: 4
description: >
  This section describes examples of how to deploy Kairos with k3s and MetalLB
---
Welcome to the guide on using MetalLB with Kairos and K3s on a bare metal host!
In this tutorial, we'll walk through the steps of setting up a Kairos node on your local network using the `192.168.1.10-192.168.1.20` IP range, with MetalLB and K3s.
But first, let's talk a little bit about what [MetalLB](https://metallb.universe.tf/) and [K3s](https://k3s.io/) are. MetalLB is a load balancer implementation for bare metal Kubernetes clusters that uses standard routing protocols. It's particularly useful when used with K3s in Kairos, as it provides load balancing for bare metal clusters and helps manage IP addresses within the cluster. K3s is a lightweight Kubernetes distribution that is easy to install and maintain.
Now that you have an understanding of what we'll be working with, let's dive into the installation process.
Check out the [bundle](/docs/examples/bundles) example to configure `MetalLB` with bundles. Bundles provide a streamlined way to publish and re-use configuration between nodes.
To get started, you'll need to use the [provider-kairos](https://github.com/kairos-io/provider-kairos) artifacts, which include k3s. We'll be using the [k3s manifest method](/docs/reference/configuration#kubernetes-manifests) to deploy MetalLB.
Follow the [Installation](/docs/installation) documentation, and use the following cloud config file with Kairos:
```yaml
#cloud-config
hostname: metal-{{ trunc 4 .MachineID }}
users:
- name: kairos
# Change to your pass here
passwd: kairos
ssh_authorized_keys:
# Add your github user here!
- github:mudler
k3s:
enabled: true
args:
- --disable=traefik,servicelb
# Additional manifests that are applied by k3s on boot
write_files:
- path: /var/lib/rancher/k3s/server/manifests/metallb.yaml
permissions: "0644"
content: |
apiVersion: v1
kind: Namespace
metadata:
name: metallb-system
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: metallb
namespace: metallb-system
spec:
chart: https://github.com/metallb/metallb/releases/download/metallb-chart-0.13.7/metallb-0.13.7.tgz
- path: /var/lib/rancher/k3s/server/manifests/addresspool.yaml
permissions: "0644"
content: |
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: default
namespace: metallb-system
spec:
addresses:
- 192.168.1.10-192.168.1.20
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: default
namespace: metallb-system
spec:
ipAddressPools:
- default
```
There are a few things to note in this configuration file:
- In the `k3s` block, we use the `--disable` flag to disable `traefik` and `servicelb`, which are the default load balancers for k3s.
- In the `write_files` block, we write manifests (in `/var/lib/rancher/k3s/server/manifests/` see [docs](/docs/reference/configuration#kubernetes-manifests)) to deploy MetalLB and configure it to use the `192.168.1.10-192.168.1.20` IP range. Make sure to choose an IP range that doesn't interfere with your local DHCP network.
And that's it! You should now have MetalLB and K3s set up on your Kairos node.
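As a quick smoke test of the load balancer, you can expose a throwaway deployment and watch MetalLB hand out an address from the configured pool. The names below are ours, purely illustrative:
```bash
# Create a test deployment and expose it as a LoadBalancer service
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=LoadBalancer
# EXTERNAL-IP should be assigned from 192.168.1.10-192.168.1.20
kubectl get svc nginx -w
```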
## Resources
- [TNS blog post](https://thenewstack.io/livin-kubernetes-on-the-immutable-edge-with-kairos-project/)

View File

@ -1,75 +0,0 @@
---
title: "Deploying a High-Availability K3s Cluster with KubeVIP"
linkTitle: "Deploying a High-Availability K3s Cluster with KubeVIP"
weight: 6
date: 2022-11-13
description: >
  This guide walks through the process of deploying a highly-available, P2P self-coordinated k3s cluster with KubeVIP, which provides a highly available Elastic IP for the control plane.
---
{{% alert title="Note" %}}
This feature is crazy and experimental! Do not run it on production servers.
Feedback and bug reports are welcome, as we are improving the p2p aspects of Kairos.
{{% /alert %}}
K3s is a lightweight Kubernetes distribution that is easy to install and operate. It's a great choice for small and edge deployments, but it can also be used to create a high-availability (HA) cluster with the help of [KubeVIP](https://kube-vip.io/). In this guide, we'll walk through the process of deploying a highly-available k3s cluster with KubeVIP, which provides a highly available IP for the control plane.
The first step is to set up the cluster. Kairos automatically deploys an HA k3s cluster with KubeVIP, providing a highly available IP for the control plane. KubeVIP allows setting up an Elastic IP that is advertised on the node's network; since it is managed as a DaemonSet in Kubernetes, it already runs in HA.
The difference in this setup is that we use the p2p network only to automatically coordinate nodes, while cluster traffic is not routed through a VPN. The p2p network is used for coordination and self-management, and to add nodes on day 2.
In order to deploy this setup you need to configure the cloud-config file; see the example YAML below. You need to configure the hostname, user, and ssh_authorized_keys. You also need to configure KubeVIP with the Elastic IP, and the p2p network with the options you prefer.
```yaml
#cloud-config
hostname: kairoslab-{{ trunc 4 .MachineID }}
users:
- name: kairos
ssh_authorized_keys:
# Replace with your github user and un-comment the line below:
# - github:mudler
kubevip:
eip: "192.168.1.110"
p2p:
# Disabling DHT makes co-ordination to discover nodes only in the local network
disable_dht: true #Enabled by default
vpn:
create: false # defaults to true
use: false # defaults to true
# network_token is the shared secret used by the nodes to co-ordinate with p2p.
# Setting a network token implies auto.enable = true.
# To disable, just set auto.enable = false
network_token: ""
# Automatic cluster deployment configuration
auto:
# Enables Automatic node configuration (self-coordination)
# for role assignment
enable: true
# HA enables automatic HA roles assignment.
# A master cluster init is always required,
# Any additional master_node is configured as part of the
# HA control plane.
# If auto is disabled, HA has no effect.
ha:
# Enables HA control-plane
enable: true
# Number of HA additional master nodes.
# A master node is always required for creating the cluster and is implied.
# The setting below adds 2 additional master nodes, for a total of 3.
master_nodes: 2
```
When configuring the `p2p` section, start by adding your desired `network_token` under the p2p configuration in the cloud-config file. To generate a network token, see [documentation](/docs/installation/p2p/#network_token).
Next, set up an Elastic IP (`kubevip.eip`) with a free IP in your network. KubeVIP will advertise this IP, so make sure to select an IP that is available for use on your network.
In the VPN configuration, the `create` and `use` options are disabled, so the VPN setup is skipped and the VPN is not used to route any traffic.
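Once the nodes have converged, the Elastic IP should answer for the Kubernetes API. As a rough check from a machine on the same network (using the example address above), the API server should respond rather than refuse the connection:
```bash
# Expect a TLS-authenticated (401) JSON response, not a connection error
curl -k https://192.168.1.110:6443/version
```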

View File

@ -1,61 +0,0 @@
---
title: "Configuring Automatic High Availability in Kairos"
linkTitle: "Configuring Automatic High Availability in Kairos"
weight: 6
date: 2022-11-13
description: >
Kairos makes it easy to configure automatic High Availability (HA) in your cluster by using cloud-config. With just a few simple steps, you can have a fully-functioning HA setup in your cluster.
---
{{% alert title="Note" %}}
This feature is crazy and experimental! Do not run it on production servers.
Feedback and bug reports are welcome, as we are improving the p2p aspects of Kairos.
{{% /alert %}}
To enable automatic HA rollout, enable the `p2p.auto.ha.enable` option in your cloud-config and set a number of `master_nodes`. This is the number of masters in addition to the initial one: a minimum of one master is always present and already taken into account. For example, setting `master_nodes` to 2 results in a total of 3 master nodes in your cluster.
To make this process even easier, Kairos automatically configures each node in the cluster from a single cloud-config. This way, you don't have to configure each node manually; instead, you provide one config file for all of the machines during [Installation](/docs/installation).
Here is an example of what your cloud-config might look like:
```yaml
#cloud-config
hostname: kairoslab-{{ trunc 4 .MachineID }}
users:
- name: kairos
ssh_authorized_keys:
# Replace with your github user and un-comment the line below:
# - github:mudler
p2p:
# Disabling DHT makes co-ordination to discover nodes only in the local network
disable_dht: true #Enabled by default
# network_token is the shared secret used by the nodes to co-ordinate with p2p.
# Setting a network token implies auto.enable = true.
# To disable, just set auto.enable = false
network_token: ""
# Automatic cluster deployment configuration
auto:
# Enables Automatic node configuration (self-coordination)
# for role assignment
enable: true
# HA enables automatic HA roles assignment.
# A master cluster init is always required,
# Any additional master_node is configured as part of the
# HA control plane.
# If auto is disabled, HA has no effect.
ha:
# Enables HA control-plane
enable: true
# Number of HA additional master nodes.
# A master node is always required for creating the cluster and is implied.
# The setting below adds 2 additional master nodes, for a total of 3.
master_nodes: 2
```
Note: In order for the automatic HA rollout to work, you need to generate a network token. You can find more information on how to do this in the [dedicated section](/docs/installation/p2p/#network_token).

View File

@ -1,47 +0,0 @@
---
title: "P2P multi-node cluster"
linkTitle: "P2P multi-node cluster"
weight: 6
date: 2022-11-13
description: >
  Install Kairos with p2p support on a multi-node cluster
---
{{% alert title="Note" %}}
This feature is crazy and experimental! Do not run it on production servers.
Feedback and bug reports are welcome, as we are improving the p2p aspects of Kairos.
{{% /alert %}}
A multi-node scenario with non-HA is the default peer-to-peer (P2P) configuration in Kairos. To set this up, you will need to configure the `network_token` under the `p2p` configuration in your cloud-config file. Once you have set this, Kairos will handle the configuration of each node.
Consider the following example, which uses cloud-config to automatically configure the cluster:
```yaml
#cloud-config
hostname: kairoslab-{{ trunc 4 .MachineID }}
users:
- name: kairos
ssh_authorized_keys:
# Replace with your github user and un-comment the line below:
# - github:mudler
p2p:
# Disabling DHT makes co-ordination to discover nodes only in the local network
disable_dht: true #Enabled by default
# network_token is the shared secret used by the nodes to co-ordinate with p2p.
# Setting a network token implies auto.enable = true.
# To disable, just set auto.enable = false
network_token: ""
```
To set up a multi-node P2P scenario with non-HA in Kairos, start by adding your desired `network_token` under the p2p configuration in the cloud-config file. To generate a network token, see [documentation](/docs/installation/p2p/#network_token).
Be sure to set `disable_dht` to true. This will ensure that coordination to discover nodes only happens on the local network.
Once you are done with the above steps, you can also customize the hostname to your liking by modifying the `hostname` field, add your GitHub user to the `ssh_authorized_keys` field, and add any other necessary configuration.

View File

@ -1,64 +0,0 @@
---
title: "Multi Node k3s cluster"
linkTitle: "Multi node k3s cluster"
weight: 1
description: >
  This section describes examples of how to deploy Kairos with k3s as a multi-node cluster
---
In the example below we will use a bare metal host to provision a Kairos cluster in the local network with K3s and one master node.
## Installation
Use the [provider-kairos](https://github.com/kairos-io/provider-kairos) artifacts, which contain `k3s`.
Follow the [Installation](/docs/installation) documentation, and use the following cloud config file with Kairos for the master and worker:
{{< tabpane text=true right=true >}}
{{% tab header="server" %}}
```yaml
#cloud-config
hostname: metal-{{ trunc 4 .MachineID }}
users:
- name: kairos
# Change to your pass here
passwd: kairos
ssh_authorized_keys:
# Replace with your github user and un-comment the line below:
# - github:mudler
k3s:
enabled: true
args:
- --disable=traefik,servicelb
```
{{% /tab %}}
{{% tab header="worker" %}}
```yaml
#cloud-config
hostname: metal-{{ trunc 4 .MachineID }}
users:
- name: kairos
# Change to your pass here
passwd: kairos
ssh_authorized_keys:
# Add your github user here!
- github:mudler
k3s-agent:
enabled: true
env:
K3S_TOKEN: ...
K3S_URL: ...
```
{{% /tab %}}
{{< /tabpane >}}
Deploy the server first; the value to use for `K3S_TOKEN` in the worker is stored at `/var/lib/rancher/k3s/server/node-token` on your server node.
Notably:
- we use the `k3s` block to disable `traefik` and `servicelb` (the default `k3s` load balancer)
- You can add additional configuration as args to k3s here, see [k3s](https://docs.k3s.io/reference/server-config#listeners) documentation

View File

@ -1,165 +0,0 @@
---
title: "P2P multi-node cluster with AuroraBoot"
linkTitle: "P2P multi-node cluster with AuroraBoot"
weight: 6
date: 2023-02-15
description: >
Full end to end example to bootstrap a self-coordinated cluster with Kairos and AuroraBoot
---
{{% alert title="Note" %}}
The p2p feature of Kairos is crazy and experimental! Do not run it on production servers.
Feedback and bug reports are welcome, as we are improving the p2p aspects of Kairos.
{{% /alert %}}
Deploying Kubernetes at the Edge can be a complex and time-consuming process, especially when it comes to setting up and managing multiple clusters. To make this process easier, Kairos leverages peer-to-peer technology to automatically coordinate and create Kubernetes clusters without the need for a control management interface.
To leverage p2p self-coordination capabilities of Kairos, you will need to configure the `network_token` under the `p2p` configuration block in your cloud-config file. Once you have set this, Kairos will handle the configuration of each node.
{{% alert title="Note" %}}
You can see this example live in the [Kairos and libp2p video]({{< ref "docs/media/#how-kairos-uses-libp2p" >}} "Media") in the [Media Section]({{< ref "docs/media" >}} "Media")
{{% /alert %}}
## Description
In the following example we are going to bootstrap a new multi-node, single cluster with Kairos. We will use at least 2 nodes: one as a master and one as a worker. Note how we neither specify any role nor pin any IP in the following configurations.
We will first create a cloud config file for our deployment, and then run [AuroraBoot](/docs/reference/auroraboot) locally. We then start 2 VMs configured for netbooting.
## Prepare your `cloud-config` file
We start by creating a cloud config file locally, which uses cloud-config to automatically configure the cluster. It could look similar to this:
``` yaml
#cloud-config
hostname: kairoslab-{{ trunc 4 .MachineID }}
users:
- name: kairos
passwd: kairos
ssh_authorized_keys:
# Replace with your github user and un-comment the line below:
- github:mudler
- github:mauromorales
p2p:
disable_dht: true # Enable for LAN-only clusters
network_token: ""
```
As we want the installation to be triggered automatically, we also add the `install` block:
``` yaml
install:
auto: true
device: "auto"
reboot: true
```
In order to leverage p2p and automatic node co-ordination, we need to generate a unique pre-shared token that will be used by all the nodes that we want to be part of our cluster.
We can generate a network token by using the `edgevpn` image, running it locally:
```
$ docker run -ti --rm quay.io/mudler/edgevpn -b -g
b3RwOgogIGRodDoKICAgIGludGVydmFsOiA5MDAwCiAgICBrZXk6IGtkdGtoY21sMHVJM2hzVUFUMXpUY1B2aDhBblkzNDZUbHJ3NklVRmUxYUoKICAgIGxlbmd0aDogNDMKICBjcnlwdG86CiAgICBpbnRlcnZhbDogOTAwMAogICAga2V5OiBIcEJGaGxxdlFrcTZVd3BPSTBPVkJWQ1daRjNRYlE3WGdDa1R1bnI0cGV3CiAgICBsZW5ndGg6IDQzCnJvb206IGFBUE5oRTdlODgyZUZhM2NMTW56VkM0ZDZjWFdpTU5EYlhXMDE4Skl2Q3oKcmVuZGV6dm91czogOHVzaGhzNnFrTU92U2ZvQmZXMHZPaEY1ZFlodVZlN1Flc00zRWlMM2pNMwptZG5zOiBJZ0ljaGlvRlVYOFN6V1VKQjNXQ0NyT2UzZXZ3YzE4MWVIWm42SmlYZjloCm1heF9tZXNzYWdlX3NpemU6IDIwOTcxNTIwCg==
```
This command will generate a network token that we can use in the configuration, which now looks like the following:
``` yaml
#cloud-config
# https://github.com/kairos-io/kairos/issues/885
config_url: ""
install:
auto: true
device: "auto"
reboot: true
hostname: kairoslab-{{ trunc 4 .MachineID }}
users:
- name: kairos
passwd: kairos
ssh_authorized_keys:
- github:mudler
- github:mauromorales
p2p:
disable_dht: true #Enabled by default
network_token: "b3RwOgogIGRodDoKICAgIGludGVydmFsOiA5MDAwCiAgICBrZXk6IGtkdGtoY21sMHVJM2hzVUFUMXpUY1B2aDhBblkzNDZUbHJ3NklVRmUxYUoKICAgIGxlbmd0aDogNDMKICBjcnlwdG86CiAgICBpbnRlcnZhbDogOTAwMAogICAga2V5OiBIcEJGaGxxdlFrcTZVd3BPSTBPVkJWQ1daRjNRYlE3WGdDa1R1bnI0cGV3CiAgICBsZW5ndGg6IDQzCnJvb206IGFBUE5oRTdlODgyZUZhM2NMTW56VkM0ZDZjWFdpTU5EYlhXMDE4Skl2Q3oKcmVuZGV6dm91czogOHVzaGhzNnFrTU92U2ZvQmZXMHZPaEY1ZFlodVZlN1Flc00zRWlMM2pNMwptZG5zOiBJZ0ljaGlvRlVYOFN6V1VKQjNXQ0NyT2UzZXZ3YzE4MWVIWm42SmlYZjloCm1heF9tZXNzYWdlX3NpemU6IDIwOTcxNTIwCg=="
```
Also change the users that can access the machine accordingly:
``` yaml
ssh_authorized_keys:
- github:mudler <--- put your GitHub handle here
```
## Provisioning with AuroraBoot
We can now run [AuroraBoot](/docs/reference/auroraboot) with `quay.io/kairos/kairos-opensuse-leap:v1.5.1-k3sv1.21.14-k3s1` to provision `openSUSE Leap` machines with `k3s 1.21.14` and Kairos `1.5.1`.
AuroraBoot also takes `cloud-config` files from _STDIN_, so we will pipe the configuration file to it and specify the container image that we want to use for our nodes:
``` bash
cat <<EOF | docker run --rm -i --net host quay.io/kairos/auroraboot \
--cloud-config - \
--set "container_image=quay.io/kairos/kairos-opensuse-leap:v1.5.1-k3sv1.21.14-k3s1"
#cloud-config
# https://github.com/kairos-io/kairos/issues/885
config_url: ""
install:
auto: true
device: "auto"
reboot: true
hostname: kairoslab-{{ trunc 4 .MachineID }}
users:
- name: kairos
passwd: kairos
ssh_authorized_keys:
- github:mudler
- github:mauromorales
p2p:
disable_dht: true #Enabled by default
network_token: "b3RwOgogIGRodDoKICAgIGludGVydmFsOiA5MDAwCiAgICBrZXk6IGtkdGtoY21sMHVJM2hzVUFUMXpUY1B2aDhBblkzNDZUbHJ3NklVRmUxYUoKICAgIGxlbmd0aDogNDMKICBjcnlwdG86CiAgICBpbnRlcnZhbDogOTAwMAogICAga2V5OiBIcEJGaGxxdlFrcTZVd3BPSTBPVkJWQ1daRjNRYlE3WGdDa1R1bnI0cGV3CiAgICBsZW5ndGg6IDQzCnJvb206IGFBUE5oRTdlODgyZUZhM2NMTW56VkM0ZDZjWFdpTU5EYlhXMDE4Skl2Q3oKcmVuZGV6dm91czogOHVzaGhzNnFrTU92U2ZvQmZXMHZPaEY1ZFlodVZlN1Flc00zRWlMM2pNMwptZG5zOiBJZ0ljaGlvRlVYOFN6V1VKQjNXQ0NyT2UzZXZ3YzE4MWVIWm42SmlYZjloCm1heF9tZXNzYWdlX3NpemU6IDIwOTcxNTIwCg=="
EOF
```
## Booting and access the cluster
Start the machines (VMs or bare metal) with netboot (see also [here](/docs/reference/auroraboot/#3-start-nodes)) and wait for the installation to finish.
Afterward, you should be able to SSH into one of the machines and use your Kubernetes cluster:
``` bash
$ ssh kairos@IP
$ sudo su -
$ kairos get-kubeconfig > kubeconfig
$ KUBECONFIG=kubeconfig k9s
```
## Notes
By default, the Kubernetes API endpoint is not exposed outside the VPN. This is an opinionated configuration from Kairos. To check out configurations without VPN, see also [the KubeVIP example](/docs/examples/multi-node-p2p-ha-kubevip).
## Troubleshooting
During the first-boot, you can check the provisioning status by looking at the `kairos-agent` logs:
``` bash
$ systemctl status kairos-agent
$ journalctl -fu kairos-agent
```
## See also
- [Installation with p2p](/docs/installation/p2p)
- [P2P Architecture](/docs/architecture/network)

View File

@ -1,54 +0,0 @@
---
title: "P2P single-node cluster"
linkTitle: "P2P single-node cluster"
weight: 6
date: 2022-11-13
description: >
This documentation page provides instructions on how to install Kairos with P2P support on a single-node cluster
---
{{% alert title="Note" color="warning" %}}
This feature is crazy and experimental! Do not run in production servers.
Feedback and bug reports are welcome, as we are improving the p2p aspects of Kairos.
{{% /alert %}}
Installing Kairos with P2P support on a single-node cluster requires a few specific steps. To begin, it's important to note that in a single-node scenario, the node must be enforced to a specific role. In a non-HA (high availability) setup, that role can be either `master` or `worker`. In a single-node cluster, there will be only one master node that needs to be configured explicitly.
To set up a single-node cluster over P2P, consider the following example, which uses cloud-config to automatically configure the cluster:
```yaml
#cloud-config
hostname: kairoslab-{{ trunc 4 .MachineID }}
users:
- name: kairos
ssh_authorized_keys:
# Add your github user here!
- github:mudler
p2p:
role: "master"
# Disabling DHT makes co-ordination to discover nodes only in the local network
disable_dht: true #Enabled by default
# network_token is the shared secret used by the nodes to co-ordinate with p2p.
# Setting a network token implies auto.enable = true.
# To disable, just set auto.enable = false
network_token: ""
```
{{% alert title="Note" %}}
Note that this example requires the YAML format when editing the configuration file, and that the indentation needs to be accurate; otherwise the configuration will fail.
{{% /alert %}}
The above cloud-config configures the hostname, creates a new user `kairos`, and sets the `role` to `master`. Additionally, it disables DHT (distributed hash table) to make the VPN functional only within the local network and use *mDNS* for discovery. If you wish to make the VPN work across different networks, you can set `disable_dht` to `false` or unset it.
The `network_token` field is a shared secret used by the nodes to coordinate with P2P. Setting a network token implies `auto.enable = true`. If you wish to disable it, simply set `auto.enable` to false. To generate a network token, see [documentation](/docs/installation/p2p/#network_token).
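For instance, using the Kairos CLI (one of the methods described in the linked documentation):
```bash
kairos generate-token
```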
Keep in mind that this example is a minimal configuration, and you can add more options depending on your needs. The above configuration can be used as a starting point and can be customized further.

View File

@ -1,49 +0,0 @@
---
title: "Single Node k3s cluster"
linkTitle: "Single node k3s cluster"
weight: 1
description: >
This section describe examples on how to deploy Kairos with k3s as a single-node cluster
---
In the example below we will use a bare metal host to provision a Kairos node in the local network with K3s.
## Installation
Use the [provider-kairos](https://github.com/kairos-io/provider-kairos) artifacts, which contain `k3s`.
Follow the [Installation](/docs/installation) documentation, and use the following cloud config file with Kairos:
```yaml
#cloud-config
hostname: metal-{{ trunc 4 .MachineID }}
users:
- name: kairos
# Change to your pass here
passwd: kairos
ssh_authorized_keys:
# Replace with your github user and un-comment the line below:
# - github:mudler
k3s:
enabled: true
args:
- --disable=traefik,servicelb
```
Notably:
- We use the `k3s` block to disable `traefik` and `servicelb` (the default `k3s` load balancer).
- In a single-node setup, you may wish to use a non-generated node name. This can be achieved with these options:
```
k3s:
enabled: true
replace_args: true
args:
- --node-name=my-node
```
{{% alert title="Note" %}}
`replace_args` replaces all arguments otherwise passed to k3s by Kairos with those supplied here. Make sure you pass all the arguments you need.
{{% /alert %}}
- We use `write_files` to write manifests to the default `k3s` manifest directory (`/var/lib/rancher/k3s/server/manifests/`, see [docs](/docs/reference/configuration#kubernetes-manifests)) to deploy `MetalLB` and configure it with the `192.168.1.10-192.168.1.20` IP range, as sketched below. Make sure to pick a range which doesn't interfere with your local DHCP network.
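The `write_files` block itself is not shown in the minimal example above; a sketch of what it could look like follows. The `HelmChart` resource is handled by the `helm.cattle.io/v1` controller bundled with k3s, while the `IPAddressPool` and `L2Advertisement` resources assume a recent MetalLB version (older releases used a ConfigMap instead); adjust to the version you deploy:
```yaml
write_files:
- path: /var/lib/rancher/k3s/server/manifests/metallb.yaml
  permissions: "0644"
  content: |
    # k3s applies any manifest placed in this directory at startup
    apiVersion: helm.cattle.io/v1
    kind: HelmChart
    metadata:
      name: metallb
      namespace: kube-system
    spec:
      repo: https://metallb.github.io/metallb
      chart: metallb
      targetNamespace: kube-system
    ---
    # The resources below can only be applied once MetalLB's CRDs are
    # registered; if apply ordering is an issue, split them into a
    # separate manifest file.
    apiVersion: metallb.io/v1beta1
    kind: IPAddressPool
    metadata:
      name: default-pool
      namespace: kube-system
    spec:
      addresses:
      - 192.168.1.10-192.168.1.20
    ---
    apiVersion: metallb.io/v1beta1
    kind: L2Advertisement
    metadata:
      name: default
      namespace: kube-system
```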

View File

@ -1,320 +0,0 @@
---
title: "Getting Started"
linkTitle: "Getting Started"
weight: 2
description: >
Getting started with Kairos
---
{{% alert title="Note" %}}
If you prefer video format, you can also watch our [Introduction to Kairos video]({{< ref "docs/media/#introduction-to-kairos" >}} "Media") on the [Media Section]({{< ref "docs/media" >}} "Media")
{{% /alert %}}
Ready to launch your Kubernetes cluster with ease? With Kairos, deployment is a breeze! Simply download the pre-packaged artifacts, boot up on a VM or bare metal, and let Kairos handle the rest. Whether you're a Linux or Windows user, our quickstart guide will have you up and running in no time. Kairos can build a Kubernetes cluster for you with just a few simple steps!
The goal of this quickstart is to help you quickly and easily deploy a Kubernetes cluster using Kairos releases. With Kairos, you can easily build a k3s cluster in a VM or on bare metal using our pre-packaged artifacts, even if you don't already have a cluster. This process can also be used on bare metal hosts with some configuration adjustments. Check out the rest of the documentation for more detailed instructions and [examples](/docs/examples).
To create a Kubernetes cluster with Kairos, the only thing needed is one or more machines that will become the Kubernetes nodes. No previously existing cluster is needed.
Once the installation is complete, you can begin using your Kubernetes cluster.
## Prerequisites
- A VM (hypervisor) or a physical server (bare-metal) that boots ISOs
- A Linux or Windows machine on which to run the Kairos CLI (optional, as we will see)
- A `cloud-init` configuration file (example below)
- At least 30 GB of available disk space.
## Download
1. Visit the Kairos [release page on GitHub](https://github.com/kairos-io/provider-kairos/releases)
1. Select the latest release and download the assets of your flavor. For example,
pick the [kairos-opensuse-{{<kairosVersion>}}-{{<k3sVersion>}}.iso](https://github.com/kairos-io/provider-kairos/releases/download/v/kairos-opensuse-{{<kairosVersion>}}-{{<k3sVersion>}}.iso)
ISO file for the openSUSE-based version, where `{{< k3sVersion >}}` in the name is the `k3s` version and `{{< kairosVersion >}}` is the Kairos one to deploy on a VM.
1. You can also use [netboot](/docs/installation/netboot) to boot Kairos over the network
{{% alert title="Note" %}}
The releases in the [kairos-io/kairos](https://github.com/kairos-io/kairos/releases) repository are the Kairos
core images that ship **without** K3s and P2P full-mesh functionalities; Core images can be used as a
generic installer to [deploy container images](/docs/examples/core).
The releases in [kairos-io/provider-kairos](https://github.com/kairos-io/provider-kairos/releases)
already **contain** k3s and P2P full-mesh functionality; these options need to be explicitly enabled.
In follow-up releases, _k3s-only_ artifacts will also be available.
See [Image Matrix Support](/docs/reference/image_matrix) for additional supported images and kernels.
{{% /alert %}}
## Checking artifacts signatures
{{% alert title="Note" color="warning" %}}
This feature will be available in Kairos version `1.5.0` and in all future releases.
{{% /alert %}}
Our ISO releases ship sha256 files to verify the integrity of the artifacts. In addition, the sha256 files are signed automatically in the CI during the
release workflow, to verify that they haven't been tampered with, adding an extra step to the supply chain.
It is recommended that, before starting any installation, the whole security chain is validated by verifying our sha256 signature and checking that the checksum matches the downloaded artifacts.
To validate the whole chain you would need:
1. `sha256sum`, which is usually installed by default on most Linux distributions.
2. `cosign` to verify the signatures of the sha256 file. You can install cosign via their [installation docs](https://docs.sigstore.dev/cosign/installation/)
3. ISO, sha256, certificate and signature files for the release/flavor that you want to verify. All the artifacts are available on the [kairos release page](https://github.com/kairos-io/kairos/releases)
In this example, we will use the `v1.5.1` version and the `opensuse-leap` flavor.
First we check that we have all needed files:
```bash
$ ls
kairos-opensuse-leap-v1.5.1.iso kairos-opensuse-leap-v1.5.1.iso.sha256.pem
kairos-opensuse-leap-v1.5.1.iso.sha256 kairos-opensuse-leap-v1.5.1.iso.sha256.sig
```
We first verify that the sha256 checksums haven't been tampered with:
```bash
$ COSIGN_EXPERIMENTAL=1 cosign verify-blob --cert kairos-opensuse-leap-v1.5.1.iso.sha256.pem --signature kairos-opensuse-leap-v1.5.1.iso.sha256.sig kairos-opensuse-leap-v1.5.1.iso.sha256
tlog entry verified with uuid: 51ef927a43557386ad7912802607aa421566772524319703a99f8331f0bb778f index: 11977200
Verified OK
```
Once we see `Verified OK`, we can be sure that the file hasn't been tampered with, and we can continue verifying the ISO checksum.
For an example of a failed validation, see below:
```bash
$ COSIGN_EXPERIMENTAL=1 cosign verify-blob --enforce-sct --cert kairos-opensuse-leap-v1.5.1.iso.sha256.pem --signature kairos-opensuse-leap-v1.5.1.iso.sha256.sig kairos-opensuse-leap-v1.5.1.iso.sha256.modified
Error: verifying blob [kairos-opensuse-leap-v1.5.1.iso.sha256.modified]: invalid signature when validating ASN.1 encoded signature
main.go:62: error during command execution: verifying blob [kairos-opensuse-leap-v1.5.1.iso.sha256.modified]: invalid signature when validating ASN.1 encoded signature
```
{{% alert title="Info" %}}
We use `COSIGN_EXPERIMENTAL=1` to verify the blob using the keyless method. That means that only ephemeral keys are created to sign, and it relies on
OIDC Identity Tokens to authenticate, so not even Kairos developers have access to the private keys or can modify an existing signature. All signatures are done
via the CI with no external access to the signing process. For more information about keyless signing please check the [cosign docs](https://github.com/sigstore/cosign/blob/main/KEYLESS.md)
{{% /alert %}}
Now we can verify that the integrity of the ISO hasn't been compromised:
```bash
$ sha256sum -c kairos-opensuse-leap-v1.5.1.iso.sha256
kairos-opensuse-leap-v1.5.1.iso: OK
```
Once we reach this point, we can be sure that the ISO hasn't been tampered with since it was created by our release workflow.
## Booting
Now that you have the ISO at hand, it's time to boot!
Here are some additional helpful tips depending on the physical/virtual machine you're using.
{{< tabpane text=true right=true >}}
{{% tab header="**Machine**:" disabled=true /%}}
{{% tab header="Bare-Metal" %}}
When deploying on a bare metal server, flash the image directly onto a USB stick. There are multiple ways to do this:
**From the command line using the `dd` command**
```bash
dd if=/path/to/iso of=/path/to/dev bs=4MB
```
<br/>
**From the GUI**
For example, using an application like [balenaEtcher](https://www.balena.io/etcher/), or any other application which allows you to write bootable USB sticks.
{{% /tab %}}
{{< tab header="QEMU" >}}
{{% alert title="Warning" %}}
Make sure you have KVM enabled; this will improve the performance of your VM significantly!
{{% /alert %}}
This is how to start it via the command line, but you can also use the GUI:
{{< highlight bash >}}
virt-install --name my-first-kairos-vm \
--vcpus 1 \
--memory 1024 \
--cdrom /path/to/kairos-opensuse-{{< kairosVersion >}}-{{< k3sVersion >}}.iso \
--disk size=30 \
--os-variant opensuse-factory \
--virt-type kvm
{{< / highlight >}}
Immediately after, open a viewer so you can interact with the boot menu:
{{< highlight bash >}}
virt-viewer my-first-kairos-vm
{{< / highlight >}}
{{% /tab %}}
{{< /tabpane >}}
After booting you'll be greeted with a GRUB boot menu with multiple options.
The option you choose will depend on how you plan to install Kairos:
- The first entry will boot into installation with a QR code or [WebUI](/docs/installation/webui),
which we'll cover in the next step.
- The second entry will boot into [Manual installation mode](/docs/installation/manual),
where you can install Kairos manually using the console.
- The third boot option boots into [Interactive installation mode](/docs/installation/interactive),
where you can use the terminal host to drive the installation and skip the Configuration and Provisioning step.
To begin the installation process, select the first entry and let the machine boot. Eventually, a QR code will be printed on the screen. Follow the next step in the documentation to complete the installation.
![livecd](https://user-images.githubusercontent.com/2420543/189219806-29b4deed-b4a1-4704-b558-7a60ae31caf2.gif)
## Configuration
After booting up the ISO, the machine will wait for you to provide configuration details before continuing with the installation process. There are different ways to provide these details:
- Use the [WebUI](/docs/installation/webui) to continue the installation.
- Serve the configuration via QR code.
- Connect to the machine via [SSH](/docs/installation/manual) and start the installation process with a configuration file (with `kairos-agent manual-install <config>`).
- [Use a datasource ISO, or generate a custom one](/docs/installation/automated)
The configuration file is a YAML file with `cloud-init` syntax and additional Kairos configuration details. In this example, we'll configure the node as a single-node Kubernetes cluster using K3s. We'll also set a default password for the Kairos user and define SSH keys.
Here's an example configuration file that you can use as a starting point:
{{% alert title="Warning" %}}
The `#cloud-config` at the top is not a comment. Make sure to start your configuration file with it.
{{% /alert %}}
```yaml
#cloud-config
# Define the user accounts on the node.
users:
- name: "kairos" # The username for the user.
passwd: "kairos" # The password for the user.
ssh_authorized_keys: # A list of SSH keys to add to the user's authorized keys.
- github:mudler # A key from the user's GitHub account.
- "ssh-rsa AAA..." # A raw SSH key.
# Enable K3s on the node.
k3s:
enabled: true # Set to true to enable K3s.
```
Save this file as `config.yaml` and use it to start the installation process with `kairos-agent manual-install config.yaml`. This will configure the node as a single-node Kubernetes cluster and set the default password and SSH keys as specified in the configuration file.
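From the node's console, that looks like this (prefix with `sudo`, as the installation requires privileges):
```bash
sudo kairos-agent manual-install config.yaml
```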
[Check out the full configuration reference](/docs/reference/configuration).
**Note**:
- `users`: This block defines the user accounts on the node. In this example, it creates a user named `kairos` with the password `kairos` and adds two SSH keys to the user's authorized keys.
- `k3s`: This block enables K3s on the node.
- If you want to enable experimental P2P support, check out [P2P installation](/docs/installation/p2p)
{{% alert title="Note" %}}
Several configurations can be added at this stage. [See the configuration reference](/docs/reference/configuration) for further reading.
{{% /alert %}}
## Provisioning
{{% alert title="Note" %}}
You can find instructions showing how to use the Kairos CLI below. In case you prefer to install via SSH and log in to the box, see the [Manual installation](/docs/installation/manual) section or the [Interactive installation](/docs/installation/interactive) section to perform the installation manually from the console.
{{% /alert %}}
To trigger the installation process via QR code, you need to use the Kairos CLI. The CLI is currently available only for Linux and Windows. It can be downloaded from the release artifact:
```bash
curl -L https://github.com/kairos-io/provider-kairos/releases/download/v1.0.0/kairos-cli-v1.0.0-Linux-x86_64.tar.gz -o - | tar -xvzf - -C .
```
```bash
# optionally, install the CLI locally
mv kairos-cli /usr/local/bin/kairos
chmod +x /usr/local/bin/kairos
```
The CLI allows you to register a node with a screenshot, an image, or a token. During pairing, the configuration is sent over, and the node will continue the installation process.
In a terminal window from your desktop/workstation, run:
```
kairos register --reboot --device /dev/sda --config config.yaml
```
**Note**:
- By default, the CLI will automatically take a screenshot to get the QR code. Make sure it fits into the screen. Alternatively, an image path or a token can be supplied via arguments (e.g. `kairos register /img/path` or `kairos register <token>`).
- The `--reboot` flag will make the node reboot automatically after the installation is completed.
- The `--device` flag determines the specific drive where Kairos will be installed. Replace `/dev/sda` with your drive. Any existing data will be overwritten, so please be cautious.
- The `--config` flag is used to specify the config file used by the installation process.
After a few minutes, the configuration is distributed to the node and the installation starts. At the end of the installation, the system is automatically rebooted.
## Accessing the Node
After the boot process, the node starts and boots into the installed system. You should already have SSH connectivity when the console is available.
To access the host, log in as `kairos`:
```bash
ssh kairos@IP
```
**Note**:
- `sudo` permissions are configured for the Kairos user.
You will be greeted with a welcome message:
```
Welcome to Kairos!
Refer to https://kairos.io for documentation.
kairos@kairos:~>
```
It can take a few moments to get the K3s server running. However, you should be able to inspect the service and see K3s running. For example, with systemd-based flavors:
```
$ sudo systemctl status k3s
● k3s.service - Lightweight Kubernetes
Loaded: loaded (/etc/systemd/system/k3s.service; enabled; vendor preset: disabled)
Drop-In: /etc/systemd/system/k3s.service.d
└─override.conf
Active: active (running) since Thu 2022-09-01 12:02:39 CEST; 4 days ago
Docs: https://k3s.io
Main PID: 1834 (k3s-server)
Tasks: 220
```
The K3s `kubeconfig` file is available at `/etc/rancher/k3s/k3s.yaml`. Please refer to the [K3s](https://rancher.com/docs/k3s/latest/en/) documentation.
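For example, to read the cluster state from the node itself (a sketch; k3s bundles `kubectl`, so it should be available on the node):
```bash
# point kubectl at the kubeconfig generated by k3s
sudo KUBECONFIG=/etc/rancher/k3s/k3s.yaml kubectl get nodes
```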
## See Also
There are other ways to install Kairos:
- [Automated installation](/docs/installation/automated)
- [Manual login and installation](/docs/installation/manual)
- [Create decentralized clusters](/docs/installation/p2p)
- [Take over installation](/docs/installation/takeover)
- [Installation via network](/docs/installation/netboot)
- [Raspberry Pi](/docs/installation/raspberry)
- [CAPI Lifecycle Management (TODO)]()
## What's Next?
- [Upgrade nodes with Kubernetes](/docs/upgrade/kubernetes)
- [Upgrade nodes manually](/docs/upgrade/manual)
- [Encrypt partitions](/docs/advanced/partition_encryption)
- [Immutable architecture](/docs/architecture/immutable)

View File

@ -1,8 +0,0 @@
---
title: "Installation"
linkTitle: "Installation"
weight: 2
description: >
Kairos Installation reference
---

View File

@ -1,224 +0,0 @@
---
title: "Automated"
linkTitle: "Automated"
weight: 3
date: 2022-11-13
description: >
Install Kairos automatically, with zero touch provisioning
---
To automate Kairos installation, you can configure a specific portion of the installation configuration file. The configuration file can then be supplied in a few different ways, such as creating an additional ISO to mount, specifying a URL, or even creating an ISO from a container image with an embedded configuration file.
Here's an example of how you might customize the install block:
```yaml
install:
# Device for automated installs
device: "/dev/sda"
# Reboot after installation
reboot: true
# Power off after installation
poweroff: true
# Set to true to enable automated installations
auto: true
# A list of bundles
bundles:
- quay.io/kairos/packages:k9s-utils-0.26.7
```
This block allows you to specify the device on which to install Kairos, whether to reboot or power off after installation, and which bundles to include.
## Data source
To supply your Kairos configuration file, you can create an ISO that contains both a user-data file (which contains your configuration) and a meta-data file.
Here's an example `user-data` configuration that is set up to automatically install Kairos onto /dev/sda and reboot after installation:
```yaml
#cloud-config
install:
device: "/dev/sda"
reboot: true
poweroff: false
auto: true # Required, for automated installations
kairos:
network_token: ....
# extra configuration
```
Save this file as `cloud_init.yaml`, then create an ISO with the following steps:
1. Create a new directory and navigate to it:
```bash
$ mkdir -p build
$ cd build
```
2. Create empty `meta-data` and copy your config as `user-data`:
```bash
$ touch meta-data
$ cp -rfv cloud_init.yaml user-data
```
3. Use `mkisofs` to create the ISO file:
```bash
$ mkisofs -output ci.iso -volid cidata -joliet -rock user-data meta-data
```
Once the ISO is created, you can attach it to your machine and boot up as usual, along with the Kairos ISO.
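For example, with QEMU you can attach the datasource ISO alongside the Kairos ISO; a sketch, mirroring the QEMU flags used in the network booting page:
```bash
qemu-img create -f qcow2 disk.img 40g
qemu-system-x86_64 \
  -m 4096 \
  -drive if=virtio,media=disk,file=disk.img \
  -drive if=ide,media=cdrom,file=kairos.iso \
  -drive if=ide,media=cdrom,file=ci.iso
```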
## Via config URL
Another way to supply your Kairos configuration file is to specify a URL as a boot argument during startup. To do this, add `config_url=<URL>` to the kernel command line. This will allow the machine to download your configuration from the specified URL and perform the installation using the provided settings.
After installation, the configuration will be available on the system at `/oem/90_custom.yaml`.
If you're not sure where to host your configuration file, a common option is to upload it as a GitHub gist.
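For instance, from the GRUB menu you can press `e` on the boot entry and append the parameter to the kernel command line; the gist URL below is a placeholder:
```
linux ... config_url=https://gist.githubusercontent.com/<user>/<gist-id>/raw/config.yaml
```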
## ISO remastering
It is possible to create custom ISOs with an embedded cloud configuration. This allows the machine to automatically boot with a pre-specified configuration file, which will be installed on the system after provisioning is complete. See also [AuroraBoot](/docs/reference/auroraboot) for documentation.
### Locally
To create a custom ISO, you will need Docker installed on your machine.
Here's an example of how you might do this:
{{% alert title="Warning" %}}
The image passed to the osbuilder-tools needs to have one of the accepted schemes: `docker`, `oci`, `file`, `dir` or `channel`.
If you don't pass one, we will attempt to read it as a web URL, but depending on your URL this might throw an error.
{{% /alert %}}
{{< tabpane text=true >}}
{{% tab header="AuroraBoot" %}}
We can use [AuroraBoot](/docs/reference/auroraboot) to handle the ISO build process, for example:
```bash
$ IMAGE=<scheme://host[:port]/path[:tag]>
$ docker pull $IMAGE
# Build the ISO
$ docker run -v $PWD/cloud_init.yaml:/cloud_init.yaml \
-v $PWD/build:/tmp/auroraboot \
-v /var/run/docker.sock:/var/run/docker.sock \
--rm -ti quay.io/kairos/auroraboot \
--set container_image=docker://$IMAGE \
--set "disable_http_server=true" \
--set "disable_netboot=true" \
--cloud-config /cloud_init.yaml \
--set "state_dir=/tmp/auroraboot"
# Artifacts are under build/
$ sudo ls -liah build/iso
total 778M
34648528 drwx------ 2 root root 4.0K Feb 8 16:39 .
34648526 drwxr-xr-x 5 root root 4.0K Feb 8 16:38 ..
34648529 -rw-r--r-- 1 root root 253 Feb 8 16:38 config.yaml
34649370 -rw-r--r-- 1 root root 389M Feb 8 16:38 kairos.iso
34649371 -rw-r--r-- 1 root root 76 Feb 8 16:39 kairos.iso.sha256
```
{{% /tab %}}
{{% tab header="Manually" %}}
```bash
$ IMAGE=<scheme://host[:port]/path[:tag]>
$ mkdir -p files-iso/boot/grub2
# You can replace this step with your own grub config. This GRUB configuration is the boot menu of the ISO
$ wget https://raw.githubusercontent.com/kairos-io/kairos/master/overlay/files-iso/boot/grub2/grub.cfg -O files-iso/boot/grub2/grub.cfg
# Copy the config file
$ cp -rfv cloud_init.yaml files-iso/cloud_config.yaml
# Pull the image locally
$ docker pull $IMAGE
# Optionally, modify the image here!
# docker run --entrypoint /bin/bash --name changes -ti $IMAGE
# docker commit changes $IMAGE
# Build an ISO with $IMAGE
$ docker run -v $PWD:/cOS -v /var/run/docker.sock:/var/run/docker.sock -i --rm quay.io/kairos/osbuilder-tools:latest --name "custom-iso" --debug build-iso --date=false --local --overlay-iso /cOS/files-iso $IMAGE --output /cOS/
```
{{% /tab %}}
{{< /tabpane >}}
This will create a new ISO with your specified cloud configuration embedded in it. You can then use this ISO to boot your machine and automatically install Kairos with your desired settings.
You can also modify the image in this step and add additional packages before deployment. See [customizing the system image](/docs/advanced/customizing).
Check out the [AuroraBoot documentation](/docs/reference/auroraboot) and the [examples](/docs/examples) to learn more about how to generate customized images for installation.
### Kubernetes
It is possible to create custom ISOs and derivatives using extended Kubernetes API resources with an embedded configuration file. This allows you to drive automated installations and customize the container image without breaking the concept of immutability.
To do this, you will need a Kubernetes cluster. Here's an example of how you might use Kubernetes to create a custom ISO with Kairos:
1. Add the Kairos Helm repository:
```bash
$ helm repo add kairos https://Kairos-io.github.io/helm-charts
"kairos" has been added to your repositories
```
2. Update your Helm repositories:
```bash
$ helm repo update
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "kairos" chart repository
Update Complete. ⎈Happy Helming!⎈
```
3. Install the Kairos CRD chart:
```bash
$ helm install kairos-crd kairos/kairos-crds
NAME: kairos-crd
LAST DEPLOYED: Tue Sep 6 20:35:34 2022
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None
```
4. Install the Kairos `osbuilder` chart:
```bash
$ helm install kairos-osbuilder kairos/osbuilder
NAME: kairos-osbuilder
LAST DEPLOYED: Tue Sep 6 20:35:53 2022
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None
```
5. Use `kubectl` to apply an `OSArtifact` spec:
```bash
cat <<'EOF' | kubectl apply -f -
apiVersion: build.kairos.io/v1alpha1
kind: OSArtifact
metadata:
name: hello-kairos
spec:
imageName: "quay.io/kairos/core-opensuse-leap:latest"
iso: true
bundles:
# Bundles available at: https://packages.kairos.io/Kairos/
- quay.io/kairos/packages:helm-utils-3.10.1
cloudConfig: |
#cloud-config
users:
- name: "kairos"
passwd: "kairos"
install:
device: "auto"
reboot: true
poweroff: false
auto: true # Required, for automated installations
EOF
```
This will create a new ISO with Kairos and the specified bundles included. You can then use this ISO to boot your machine and automatically install Kairos with the specified configuration.
Note: If you're using kind, you'll need to use the IP address and port of the nginx service to access the ISO. You can get this with:
```bash
# Note on running with kind:
$ IP=$(docker inspect kind-control-plane | jq -r '.[0].NetworkSettings.Networks.kind.IPAddress')
$ PORT=$(kubectl get svc osartifactbuilder-operator-osbuilder-nginx -o json | jq '.spec.ports[0].nodePort')
$ curl http://$IP:$PORT/hello-kairos.iso -o test.iso
```
Check out the [dedicated section in the documentation](/docs/advanced/build) for further examples.

View File

@ -1,22 +0,0 @@
---
title: "Interactive"
linkTitle: "Interactive"
weight: 2
date: 2022-11-13
description: >
Install Kairos interactively
---
The interactive installation can be accessed from the LiveCD ISO and guides the user through the installation process.
It generates a configuration file, which is later accessible after installation in the `/oem/99_custom.yaml` file.
## From the boot menu
When loading any Kairos ISO, a GRUB menu like the following will be displayed. To access the interactive installation, select the third entry (`kairos (interactive install)`).
![interactive](https://user-images.githubusercontent.com/2420543/189219819-6b16d13d-c409-4b9b-889b-12792f800a08.gif)
## Manually
The interactive installer can also be started manually with `kairos-agent interactive-install` from the LiveCD.

View File

@ -1,39 +0,0 @@
---
title: "Manual installation"
linkTitle: "Manual installation"
weight: 1
date: 2022-11-13
description: >
Install Kairos manually
---
To install manually, follow the [quickstart](/docs/getting-started). When the QR code is prompted at the screen, you will be able to log in via SSH to the box with the password `kairos` as the `kairos` user.
{{% alert title="Note" %}}
**Note**: After the installation, password login is disabled; users and SSH keys to log in must be configured via cloud-init.
{{% /alert %}}
## Installation
To start the installation, run the following command from the console:
```bash
sudo kairos-agent manual-install --device "auto" $CONFIG
```
Where the configuration can be a `cloud-init` file or a URL to it:
```yaml
#cloud-config
p2p:
network_token: ....
# extra configuration
```
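Both forms are accepted; for example (the URL is a placeholder):
```bash
# install using a local cloud-init file
sudo kairos-agent manual-install --device "auto" config.yaml
# or fetch the configuration from a URL
sudo kairos-agent manual-install --device "auto" https://example.com/config.yaml
```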
**Note**:
- The command is disruptive and will erase any content on the drive.
- The parameter **"auto"** selects the biggest drive available in the machine.

View File

@ -1,128 +0,0 @@
---
title: "Network booting"
linkTitle: "Network booting"
weight: 5
date: 2022-12-1
description: >
Install Kairos from network
---
Most hardware these days supports booting an operating system from the network.
The technology behind this is called [Preboot Execution Environment](https://en.wikipedia.org/wiki/Preboot_Execution_Environment).
Kairos releases include artifacts to allow booting from the network. In general, the following files are needed:
- The initrd image: It's the system that loads first. It's responsible for loading the kernel.
- The kernel: This is the kernel of the operating system that will boot.
- The squashfs: The filesystem of the operating system that will boot.
Booting using these files can happen in multiple ways:
- Either with direct support from the machine BIOS plus network configuration (DHCP server etc).
- Software-based network booting. This works with a special ISO, built with the
  [ipxe](https://ipxe.org/) project. Kairos releases include pre-built ISOs for
netbooting (named like `*.ipxe.iso.ipxe`).
- Use [AuroraBoot](/docs/reference/auroraboot)
Generic hardware-based netbooting is out of scope for this document.
Below we give instructions on how to use the Kairos release artifacts to netboot and how to use [AuroraBoot](/docs/reference/auroraboot) to boot from network.
## Boot with pre-built ISOs
The ipxe ISOs from the Kairos release artifacts were built with an ipxe script that points directly to the
`kernel`, `initrd` and `squashfs` artifacts of the same release on GitHub.
E.g.:
<!-- TODO: change this to include leap in the name once we release 1.5.0-->
```bash
#!ipxe
set url https://github.com/kairos-io/kairos/releases/download/v1.3.0
set kernel kairos-alpine-opensuse-leap-v1.3.0-kernel
set initrd kairos-alpine-opensuse-leap-v1.3.0-initrd
set rootfs kairos-alpine-opensuse-leap-v1.3.0.squashfs
# Configure interface
ifconf
# set config https://example.com/machine-config
# set cmdline extra.values=1
kernel ${url}/${kernel} initrd=${initrd} rd.neednet=1 ip=dhcp rd.cos.disable root=live:${url}/${rootfs} netboot nodepair.enable config_url=${config} console=tty1 console=ttyS0 ${cmdline}
initrd ${url}/${initrd}
boot
```
Booting the ISO will automatically download and boot those artifacts. E.g. using qemu:
```bash
#!/bin/bash
qemu-img create -f qcow2 disk.img 40g
qemu-system-x86_64 \
-m 4096 \
-smp cores=2 \
-nographic \
-drive if=virtio,media=disk,file=disk.img \
-drive if=ide,media=cdrom,file=${1:-kairos.iso}
```
## Use AuroraBoot
[AuroraBoot](/docs/reference/auroraboot) is a Kairos convenience tool that can be used to quickly deploy Kairos from the network with zero-touch configuration, for instance:
```bash
docker run --rm -ti --net host quay.io/kairos/auroraboot \
--set "container_image=quay.io/kairos/kairos-opensuse-leap:v1.5.1-k3sv1.21.14-k3s1"
# Optionally:
# --cloud-config ....
```
This will netboot the `quay.io/kairos/kairos-opensuse-leap:v1.5.1-k3sv1.21.14-k3s1` image. You can find more details in the [AuroraBoot documentation section](/docs/reference/auroraboot).
## Notes on booting from network
Another way to boot with the release artifacts is using [pixiecore](https://github.com/danderson/netboot/tree/master/pixiecore).
`pixiecore` acts as a server which offers netboot files over the network; it's automatically discovered on a network where a DHCP server is running and is compatible with [the pixiecore architecture](https://github.com/danderson/netboot/blob/master/pixiecore/README.booting.md).
Assuming the current directory has the `kernel`, `initrd` and `squashfs` artifacts,
the `pixiecore` server can be started with `docker` like this:
<!-- TODO: change this to include leap in the name once we release 1.5.0-->
```bash
#!/bin/bash
VERSION="v1.3.0"
wget "https://github.com/kairos-io/kairos/releases/download/${VERSION}/kairos-opensuse-${VERSION}-kernel"
wget "https://github.com/kairos-io/kairos/releases/download/${VERSION}/kairos-opensuse-${VERSION}-initrd"
wget "https://github.com/kairos-io/kairos/releases/download/${VERSION}/kairos-opensuse-${VERSION}.squashfs"
cat << EOF > config.yaml
#cloud-config
hostname: "hostname.domain.tld"
users:
- name: "kairos"
passwd: "kairos"
EOF
# This will start the pixiecore server.
# Any machine that depends on DHCP to netboot will be sent the specified files and the boot cmdline.
docker run \
-d --name pixiecore --net=host -v $PWD:/files quay.io/pixiecore/pixiecore \
boot /files/kairos-opensuse-${VERSION}-kernel /files/kairos-opensuse-${VERSION}-initrd --cmdline="rd.neednet=1 ip=dhcp rd.cos.disable root=live:{{ ID \"/files/kairos-opensuse-${VERSION}.squashfs\" }} netboot nodepair.enable config_url={{ ID \"/files/config.yaml\" }} console=tty1 console=ttyS0 console=tty0"
```
If your machine doesn't support netbooting, you can use our [generic image](https://github.com/kairos-io/ipxe-dhcp/releases), which is built using an ipxe script [from the pixiecore project](https://github.com/danderson/netboot/blob/master/pixiecore/boot.ipxe). The ISO will wait for a DHCP proxy response from pixiecore.
If pixiecore is successfully reached, you should see an output similar to this in the `pixiecore` docker container:
```
$ docker logs pixiecore
[DHCP] Offering to boot 08:00:27:e5:22:8c
[DHCP] Offering to boot 08:00:27:e5:22:8c
[HTTP] Sending ipxe boot script to 192.168.1.49:4371
[HTTP] Sent file "kernel" to 192.168.1.49:4371
[HTTP] Sent file "initrd-0" to 192.168.1.49:4371
```

View File

@ -1,281 +0,0 @@
---
title: "P2P support"
linkTitle: "P2P support"
weight: 6
date: 2022-11-13
description: >
Install Kairos with p2p support
---
{{% alert title="Note" %}}
This feature is crazy and experimental! Do not run in production servers.
Feedback and bug reports are welcome, as we are improving the p2p aspects of Kairos.
{{% /alert %}}
Deploying Kubernetes at the Edge can be a complex and time-consuming process, especially when it comes to setting up and managing multiple clusters. To make this process easier, Kairos leverages peer-to-peer technology to automatically coordinate and create Kubernetes clusters without the need for a control management interface.
With this feature, users don't need to specify any network settings. They can just set the desired number of master nodes (in the case of an HA cluster) and the necessary configuration details, and Kairos will take care of the rest. The peer-to-peer technology allows the nodes in the cluster to communicate and coordinate with each other, ensuring that the clusters are set up correctly and efficiently with K3s.
This makes it easier to deploy and manage Kubernetes clusters at the Edge, saving users time and effort and allowing them to focus on running and scaling their applications. For more information about how it works behind the scenes, [check out the architecture section](/docs/architecture/network).
You can find full examples in our [examples section](/docs/examples):
- [Full end to end example to bootstrap a self-coordinated cluster with Kairos and AuroraBoot](/docs/examples/p2p_e2e/)
- [Self-coordinated K3s HA cluster with KubeVIP](/docs/examples/multi-node-p2p-ha-kubevip/)
- [Multi-node, single master setup](/docs/examples/multi-node-p2p/)
- [Multi-node, HA setup](/docs/examples/multi-node-p2p-ha/)
- [Single-node setup](/docs/examples/single-node-p2p/)
This feature is currently experimental and can be optionally enabled by adding the following configuration to the node deployment file. If you are not familiar with the installation process, it is suggested to follow the [quickstart](/docs/getting-started):
```yaml
p2p:
# Disabling DHT makes co-ordination to discover nodes only in the local network
disable_dht: true #Enabled by default
# Automatic cluster deployment configuration
auto:
ha:
# Enables HA control-plane
enable: true
# number of HA master node (beside the one used for init) for the control-plane
master_nodes: 2
# network_token is the shared secret used by the nodes to co-ordinate with p2p.
# Setting a network token implies auto.enable = true.
# To disable, just set auto.enable = false
network_token: "YOUR_TOKEN_GOES_HERE"
```
To enable the automatic cluster deployment with peer-to-peer technology, specify a `p2p.network_token`. To enable HA, set `p2p.auto.ha.master_nodes` to the desired number of HA master nodes. Additionally, the p2p block can be used to configure the VPN and other settings as needed.
With these settings applied to all the nodes, they will automatically communicate and coordinate with each other to deploy and manage the Kubernetes cluster, without the need for a control management interface or user intervention.
## Configuration
A minimum configuration file, that bootstraps a cluster with a simple single-master topology, can look like the following:
```yaml
#cloud-config
hostname: "kubevip-{{ trunc 4 .MachineID }}"
users:
- name: "kairos"
passwd: "kairos"
ssh_authorized_keys:
- github:mudler
p2p:
network_token: "YOUR_TOKEN_GOES_HERE"
```
The `p2p` block is used to configure settings related to the mesh functionalities. The minimum required argument is the `network_token` and there is no need to configure `k3s` manually with the `k3s` block as it is already implied.
{{% alert title="Note" %}}
The `k3s` block can still be used to override other `k3s` settings, e.g. `args`.
{{% /alert %}}
The network token is a shared secret available to all the nodes of the cluster. It allows the nodes to co-ordinate and automatically assign roles. To generate a network token, see [documentation](/docs/installation/p2p/#network_token).
Simply applying the same configuration file to all the nodes should eventually bring up one master and all the other nodes as workers. Nodes can also be added at a later step; they will automatically be set up without any further configuration.
Full example:
```yaml
#cloud-config
install:
auto: true
device: "auto"
reboot: true
hostname: "kubevip-{{ trunc 4 .MachineID }}"
users:
- name: "kairos"
passwd: "kairos"
ssh_authorized_keys:
- github:mudler
## Sets the Elastic IP used in KubeVIP
kubevip:
eip: "192.168.1.110"
# Specify a manifest URL for KubeVIP. Empty uses default
manifest_url: ""
# Enables KubeVIP
enable: true
# Specifies a KubeVIP Interface
interface: "ens18"
p2p:
role: "" # Set an hardcoded role, optional
# Disabling DHT makes co-ordination to discover nodes only in the local network
disable_dht: true #Enabled by default
# Configures a VPN for the cluster nodes
vpn:
create: false # defaults to true
use: false # defaults to true
env:
.....
# Automatic cluster deployment configuration
auto:
# Enables Automatic node configuration (self-coordination)
# for role assignment
enable: true
# HA enables automatic HA roles assignment.
# A master cluster init is always required,
# Any additional master_node is configured as part of the
# HA control plane.
# If auto is disabled, HA has no effect.
ha:
# Enables HA control-plane
enable: true
# Number of HA additional master nodes.
# A master node is always required for creating the cluster and is implied.
# The setting below adds 2 additional master nodes, for a total of 3.
master_nodes: 2
# Use an External database for the HA control plane
external_db: "external-db-string"
# network_token is the shared secret used by the nodes to co-ordinate with p2p
network_token: "YOUR_TOKEN_GOES_HERE"
```
In the YAML configuration example, there are several important keywords that control the behavior of the automatic cluster deployment:
| Keyword | Description |
| --- | --- |
| `p2p` | Configures the peer to peer networking of the cluster |
| `p2p.disable_dht` | Disables the distributed hash table for cluster discovery |
| `p2p.network_token` | The shared secret used by the nodes to coordinate with p2p |
| `p2p.network_id` | Optional, unique identifier for the Kubernetes cluster. It allows bootstrapping of multiple clusters using the same network token |
| `p2p.role` | Force a specific role for the node of the cluster |
| `p2p.vpn` | Configures a VPN for the cluster nodes |
| `p2p.vpn.create` | Enables the creation of the VPN |
| `p2p.vpn.use` | Enables the use of the VPN for routing Kubernetes traffic of the cluster |
| `p2p.vpn.env` | Configures the environment variables used to start for the VPN |
| `p2p.auto` | Configures the automatic deployment of the cluster |
| `p2p.auto.enable` | Enables automatic node configuration for role assignment |
| `p2p.auto.ha` | Configures the high availability settings for the cluster |
| `p2p.auto.ha.enable` | Enables the high availability settings |
| `p2p.auto.ha.master_nodes` | The number of additional HA master nodes expected in the cluster. |
| `p2p.auto.ha.external_db` | The external database used for high availability |
## Elastic IP
If deploying a cluster in a local network, it might be preferable to disable the VPN functionalities.
We use KubeVIP to provide an elastic IP for the control plane, which can be configured via a specific block:
```yaml
p2p:
network_token: ".."
vpn:
# Disable VPN, so traffic is not configured with a VPN
create: false
use: false
## Sets the Elastic IP used in KubeVIP
kubevip:
eip: "192.168.1.110"
# Specify a manifest URL for KubeVIP. Empty uses default
manifest_url: ""
# Enables KubeVIP
enable: true
# Specifies a KubeVIP Interface
interface: "ens18"
```
| Keyword | Description |
| --- | --- |
| `kubevip` | Block to configure KubeVIP for the cluster |
| `kubevip.eip` | The Elastic IP used for KubeVIP. Specifying one automatically enables KubeVIP. Choose a free IP that is not in a DHCP range of your network. |
| `kubevip.manifest_url` | The URL for the KubeVIP manifest |
| `kubevip.enable` | Enables KubeVIP for the cluster |
| `kubevip.interface` | The interface used for KubeVIP |
A full example, with KubeVIP and HA:
```yaml
#cloud-config
install:
auto: true
device: "auto"
reboot: true
hostname: "kubevip-{{ trunc 4 .MachineID }}"
users:
- name: "kairos"
passwd: "kairos"
ssh_authorized_keys:
- github:mudler
p2p:
network_token: "..."
  auto:
    ha:
      master_nodes: 2
vpn:
# Disable VPN, so traffic is not configured with a VPN
create: false
use: false
kubevip:
eip: "192.168.1.110"
```
## `network_token`
The `network_token` is a unique code that is shared among nodes and can be created with the Kairos CLI or `edgevpn`. This allows nodes to automatically connect to the same network and generates private/public key pairs for secure communication using end-to-end encryption.
To generate a new token, run:
{{< tabpane text=true right=true >}}
{{% tab header="docker" %}}
```bash
docker run -ti --rm quay.io/mudler/edgevpn -b -g
```
{{% /tab %}}
{{% tab header="CLI" %}}
```bash
kairos generate-token
```
{{% /tab %}}
{{< /tabpane >}}
## Join new nodes
To add new nodes to the network, follow the same process as before and use the same configuration file for all machines. Unless you have specified roles for each node, no further changes to the configuration are necessary. The machines will automatically connect to each other, whether they are on a local or remote network.
## Connect to the nodes
To connect to the nodes, you can use the Kairos CLI and provide the `network_token` to establish a tunnel to the nodes' network.
```bash
sudo kairos bridge --network-token <TOKEN>
```
This command creates a TUN device on your machine and allows you to communicate with each node in the cluster.
{{% alert title="Note" color="info" %}}
The command requires root permissions in order to create a TUN/TAP device on the host.
{{% /alert %}}
An API will also be available at [localhost:8080](http://localhost:8080) for inspecting the network status.
## Get kubeconfig
To get the cluster `kubeconfig`, you can log in to the master node and retrieve it from the engine (e.g., it is located at `/etc/rancher/k3s/k3s.yaml` for K3s) or use the Kairos CLI. If using the CLI, you must be connected to the bridge or logged in from one of the nodes and run the following command in the console:
```bash
kairos get-kubeconfig > kubeconfig
```
{{% alert title="Note" color="info" %}}
Note that you must run `kairos bridge` in a separate window, as it acts like `kubectl proxy` and provides access to the Kubernetes cluster VPN. Keep the `kairos bridge` command running to operate the cluster.
{{% /alert %}}

View File

@ -1,43 +0,0 @@
---
title: "QR Code"
linkTitle: "QR Code"
weight: 1
date: 2022-11-13
description: >
Use the QR code displayed at boot to drive the installation
---
{{% alert title="Warning" %}}
You will need a standard Kairos OS image in order to use the QR Code feature.
{{% /alert %}}
By default, Kairos will display a QR code after booting the ISO to install the machine:
![livecd](https://user-images.githubusercontent.com/2420543/189219806-29b4deed-b4a1-4704-b558-7a60ae31caf2.gif)
To trigger the installation process via QR code, you need to use the Kairos CLI and provide a Cloud Config, as described in the [Getting started guide](/docs/getting-started). You can also see some Cloud Config examples in our [Examples section](/docs/examples). The CLI is currently available only for Linux and Windows. It can be downloaded from the release artifact:
```bash
VERSION=$(wget -q -O- https://api.github.com/repos/kairos-io/provider-kairos/releases/latest | jq -r '.tag_name')
curl -L https://github.com/kairos-io/provider-kairos/releases/download/${VERSION}/kairos-cli-${VERSION}-Linux-x86_64.tar.gz -o - | tar -xvzf - -C .
```
The CLI allows you to register a node with a screenshot, an image, or a token. During pairing, the configuration is sent over, and the node will continue the installation process.
In a terminal window from your desktop/workstation, run:
```
kairos register --reboot --device /dev/sda --config config.yaml
```
- The `--reboot` flag will make the node reboot automatically after the installation is completed.
- The `--device` flag determines the specific drive where Kairos will be installed. Replace `/dev/sda` with your drive. Any existing data will be overwritten, so please be cautious.
- The `--config` flag is used to specify the config file used by the installation process.
{{% alert title="Note" %}}
By default, the CLI will automatically take a screenshot to get the QR code. Make sure it fits into the screen. Alternatively, an image path or a token can be supplied via arguments (e.g. `kairos register /img/path` or `kairos register <token>`).
{{% /alert %}}
After a few minutes, the configuration is distributed to the node and the installation starts. At the end of the installation, the system is automatically rebooted.

View File

@ -1,73 +0,0 @@
---
title: "RaspberryPi"
linkTitle: "RaspberryPi"
weight: 4
date: 2022-11-13
description: >
Install Kairos on RaspberryPi 3 and 4
---
Kairos supports Raspberry Pi models 3 and 4 with 64-bit architecture.
If you are not familiar with the process, it is suggested to follow the [quickstart](/docs/getting-started) first to see how Kairos works.
## Prerequisites
- An SD card of at least 16 GB
- Etcher or `dd`
- A Linux host on which to flash the device
## Download
Extract the `img` file from a container image as described [in this page](/docs/reference/image_matrix)
## Flash the image
Plug the SD card into your system. To flash the image, you can either use Etcher or `dd`. Note that the image is compressed with XZ, so we need to decompress it first:
```bash
xzcat kairos-opensuse-leap-arm-rpi-v1.0.0-rc2-k3sv1.21.14+k3s1.img.xz | sudo dd of=<device> oflag=sync status=progress bs=10MB
```
Once the image is flashed, there is no need to carry out any other installation steps. We can boot the image, or apply our config.
## Boot
Use the SD Card to boot. The default username/password is `kairos`/`kairos`.
To configure your access or disable the password, change `/usr/local/cloud-config/01_defaults.yaml` accordingly.
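As a sketch, an edited `01_defaults.yaml` could contain a `users` block like the one used throughout these docs (the stock contents of the file may differ):
```yaml
#cloud-config
users:
- name: kairos
  ssh_authorized_keys:
  # Replace with your GitHub handle
  - github:your-github-handle
```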
## Configure your node
To configure the device beforehand, be sure to have the SD card plugged into your host. We need to copy a configuration file into `cloud-config` in the `COS_PERSISTENT` partition:
```
$ PERSISTENT=$(blkid -L COS_PERSISTENT)
$ mkdir /tmp/persistent
$ sudo mount $PERSISTENT /tmp/persistent
$ sudo mkdir /tmp/persistent/cloud-config
$ sudo cp cloud-config.yaml /tmp/persistent/cloud-config
$ sudo umount /tmp/persistent
```
You can push additional `cloud config` files. For a full reference check out the [docs](/docs/reference/configuration) and also [configuration after-installation](/docs/advanced/after-install)
## Customizing the disk image
The following shell script shows how to locally rebuild and customize the image with Docker:
```
IMAGE=quay.io/kairos/kairos-alpine-arm-rpi:v1.1.6-k3sv1.25.3-k3s1
# Pull the image locally
docker pull $IMAGE
mkdir -p build
docker run -v $PWD:/HERE -v /var/run/docker.sock:/var/run/docker.sock --privileged -i --rm --entrypoint=/build-arm-image.sh quay.io/kairos/osbuilder-tools:v0.4.0 \
--model rpi64 \
--state-partition-size 6200 \
--recovery-partition-size 4200 \
--size 15200 \
--images-size 2000 \
--local \
--config /HERE/cloud-config.yaml \
--docker-image $IMAGE /HERE/build/out.img
```

View File

@ -1,29 +0,0 @@
---
title: "Takeover"
linkTitle: "Takeover"
weight: 7
date: 2022-11-13
description: >
---
Kairos supports takeover installations. Here are a few summarized steps:
- From the dedicated control panel (OVH, Hetzner, etc.), boot in *rescue* mode
- [Install Docker](https://docs.docker.com/engine/install/debian/) and run, for example:
```
export DEVICE=/dev/sda
export IMAGE=quay.io/kairos/core-opensuse-leap:v1.1.4
cat <<'EOF' > config.yaml
#cloud-config
users:
- name: "kairos"
passwd: "kairos"
ssh_authorized_keys:
- github:mudler
EOF
export CONFIG_FILE=config.yaml
docker run --privileged -v $PWD:/data -v /dev:/dev -ti $IMAGE elemental install --cloud-init /data/$CONFIG_FILE --system.uri $IMAGE $DEVICE
```
- Switch back to *booting* from HD and reboot.

View File

@ -1,20 +0,0 @@
---
title: "WebUI"
linkTitle: "WebUI"
weight: 1
date: 2022-11-13
description: >
Use the WebUI at boot to drive the installation
---
{{% alert title="Note" color="warning" %}}
This feature will be available in Kairos version `1.5.0` and in all future releases.
{{% /alert %}}
By default, when running the LiveCD or during installation, Kairos will start a WebUI in the background, listening on port `8080`:
![WebUI](https://user-images.githubusercontent.com/2420543/214573939-31f887b8-890c-4cce-a02a-0100198ea7d9.png)
The WebUI has an input form that accepts the `YAML` config file, features a syntax highlighter and a `YAML` syntax checker. You can find a [full example in our documentation](/docs/reference/configuration) or navigate to our [examples section](/docs/examples).
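A minimal configuration to paste into the form could look like the following, reusing the quickstart's `users` and `k3s` blocks:
```yaml
#cloud-config
users:
- name: kairos
  passwd: kairos
k3s:
  enabled: true
```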

View File

@ -1,33 +0,0 @@
---
title: "Media"
linkTitle: "Media"
weight: 9
description: >
Presentation Slides, Videos and other media on Kairos
---
## Articles
* [Livin Kubernetes on the (Immutable) Edge with Kairos Project](https://thenewstack.io/livin-kubernetes-on-the-immutable-edge-with-kairos-project/) on The New Stack
## Slides
* [Kairos and libp2p](https://github.com/kairos-io/kairos/files/10743709/Kairos_P2P.pdf)
## Videos
### Introduction to Kairos
{{< youtube id="WzKf6WrL3nE" title="Introduction to Kairos" >}}
### Meet Kairos, an OSS project building the immutable Kubernetes edge
{{< youtube id="kiDQujibz2k" title="Meet Kairos, an OSS project building the immutable Kubernetes edge" >}}
### How we build and maintain Kairos
{{< youtube id="XD5nfMf59v4" title="How we build and maintain Kairos" >}}
### How Kairos uses libp2p
{{< youtube id="7Vym18wz9Uw" title="Kairos and libp2p" >}}

View File

@ -1,6 +0,0 @@
---
title: "Reference"
linkTitle: "Reference"
weight: 6
description: >
---

View File

@ -1,31 +0,0 @@
---
title: "Architecture"
linkTitle: "Architecture"
weight: 1
date: 2022-11-13
description: >
Kairos internal architecture
---
This section contains references to how Kairos works internally.
## Setup process
At first boot, a `kairos` node will start the `kairos-agent` service; you can always check what's happening by running `journalctl -fu kairos-agent`.
This service sets up `k3s` and `edgevpn` dynamically on first boot. Once it configures the machine, it does not run on boot anymore, unless `/usr/local/.kairos/deployed` is removed.
Those are the steps executed in sequence by the `kairos-agent` service:
- It will create an `edgevpn@kairos` service, enabled on start. The configuration for the connection is stored in `/etc/systemd/system.conf.d/edgevpn-kairos.env` and depends on the cloud-init configuration file provided at installation time
- Automatic role negotiation starts; nodes will co-ordinate for an IP and a role
- Once roles are defined, a node will set up either the `k3s` or `k3s-agent` service. Configuration for each service is stored in `/etc/sysconfig/k3s` and `/etc/sysconfig/k3s-agent` respectively
## Paths
The following paths are relevant for Kairos:
| Path | Description |
| :-------------------------- | :--------------------------------------------------------------------------------------------- |
| /usr/local/.kairos/deployed | Sentinel file written after bootstrapping is complete. Remove to retrigger automatic bootstrap |
| /usr/local/.kairos/lease | IP Lease of the node in the network. Delete to change IP address of the node |
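For example, to force a node to re-run the automatic bootstrap on the next boot (run as root):
```bash
rm /usr/local/.kairos/deployed
reboot
```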

View File

@ -1,615 +0,0 @@
---
title: "AuroraBoot"
linkTitle: "AuroraBoot"
weight: 8
date: 2023-02-08
description: >
Automatically provision machines with Kairos and AuroraBoot.
---
**AuroraBoot** is a tool designed to make the process of bootstrapping Kairos machines quick, simple and efficient. It is specifically designed for the Kairos operating system and provides a comprehensive solution for downloading required artifacts and provisioning a machine, both from network or manually via flashing to USB stick.
With AuroraBoot, you can prepare the environment for network-based bootstrapping, download the necessary release assets, and also customize the installation media for USB-based mass-installations. Whether you're looking to install Kairos on a single machine or multiple machines, AuroraBoot makes it easy and efficient.
AuroraBoot can be useful to:
- prepare multiple nodes in a lab before shipment
- offer a simple, intuitive and streamlined way to deploy Kairos automatically and manually
- deploy Kairos nodes in a network segment where we can already send workloads to (running AuroraBoot in an already-existing downstream cluster)
![AuroraBoot](https://user-images.githubusercontent.com/2420543/217617696-f993a8e3-55ac-4d3e-98f0-c2317cb54cb9.png)
## Scope
**AuroraBoot** has the following scope:
- **Download** release assets in order to provision one or more machines
- **Prepare** automatically the environment to boot from network
- **Provision** machines from network with a version of Kairos and cloud config
- **Customize** The installation media for installations from USB
## Prerequisites
- `docker` or a container engine of your choice
- Ports `8090`, `8080` and `67` free on the host running AuroraBoot
- The machine running AuroraBoot has to be on the same network segment as the nodes to be bootstrapped
- The nodes need to be configured to boot over network, or be capable of booting via USB for offline mode
- `ProxyDHCP` supported by the `DHCP` network attempting to netboot (see also [pixiecore architecture](https://github.com/danderson/netboot/blob/master/pixiecore/README.booting.md#step-1-dhcpproxydhcp)).
There should be a `DHCP` server already running on your network. AuroraBoot doesn't take over the `DHCP` server, nor does it require any specific configuration; however, a `DHCP` server compliant with `ProxyDHCP` requests should be present on the same network as **AuroraBoot** and the machines to boot.
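Before starting AuroraBoot you can quickly verify that the required ports are free on the host. A minimal sketch (the use of `ss` is an assumption; any equivalent tool works):
```bash
# Ports 8080/8090 (HTTP) and 67/UDP (ProxyDHCP) must be free
sudo ss -tlnp | grep -E ':(8080|8090) ' || echo "HTTP ports are free"
sudo ss -ulnp | grep ':67 ' || echo "port 67/udp is free"
```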
## macOS
Unfortunately, on macOS we cannot run netboot through Docker, as the container runs inside a VM and can't see the host network.
Building ISOs still works, as long as you mount the container's `/tmp` to a local directory so the artifacts are exported there, like so:
```bash
docker run --rm -ti -v ${PWD}:/tmp quay.io/kairos/auroraboot \
--set "artifact_version=v1.5.1" \
--set "release_version=v1.5.1" \
--set "flavor=opensuse-leap" \
--set "repository=kairos-io/kairos" \
--set "disable_http_server=true" \
--set "disable_netboot=true" \
--cloud-config /config.yaml
```
This will build the ISO and put the generated artifacts under `${PWD}/iso` in the current directory.
For netboot, we recommend that you run the AuroraBoot binary directly by grabbing it from the [releases page](https://github.com/kairos-io/AuroraBoot/releases).
This requires just one dependency that you can install via [brew](https://brew.sh/) with `brew install xorriso`
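As a sketch, and assuming the standalone binary accepts the same flags as the container image, a netboot run on macOS could look like:
```bash
# Install the only dependency, then run the binary from the releases page
brew install xorriso
./auroraboot \
    --set "container_image=quay.io/kairos/kairos-opensuse-leap:v1.5.1-k3sv1.21.14-k3s1"
```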
## Windows
Netboot on Windows is not supported; only ISO creation via the Docker image is available.
## Overview
To run AuroraBoot, simply use `docker` or the container engine of your choice (such as `podman`, ...). AuroraBoot images are published in [quay](https://quay.io/repository/kairos/auroraboot) and the source code is available [in GitHub](https://github.com/kairos-io/AuroraBoot).
The basic usage of AuroraBoot involves passing it several parameters that define the installation environment, such as the version of Kairos you want to install, the cloud config you want to use, and other customizations you may need. You can pass these parameters either as command-line arguments, or as a full YAML configuration file.
AuroraBoot will download the artifacts required for bootstrapping the nodes, and prepare the environment required for a zero-touch deployment.
For example, to netboot a machine with the latest version of Kairos and Rocky Linux using a cloud config, you would run the following command:
```bash
docker run --rm -ti --net host quay.io/kairos/auroraboot \
--set "artifact_version=v1.5.0" \
--set "release_version=v1.5.0" \
--set "flavor=rockylinux" \
--set repository="kairos-io/kairos" \
--cloud-config https://...
```
This command will download the necessary artifacts and start the provisioning process. The machine will attempt to boot from network, and will be configured with the specified version of Kairos.
### Network-based bootstrapping
By default, AuroraBoot will automatically attempt to bootstrap other machines configured to boot from the network within the same network segment. No further configuration or settings are necessary.
There are only 3 steps involved in the process:
1. Select the release of Kairos that you want to deploy and optionally a cloud config (see also our [examples](/docs/examples))
1. Run AuroraBoot in your workstation with the appropriate CLI args
1. Boot up other nodes, already configured to boot from network
#### 1. Selecting a release
AuroraBoot can bootstrap container images or released assets from our GitHub release process.
To use GitHub releases, set a release version with `--set release_version`, an artifact version with `--set artifact_version`, a flavor with `--set flavor`, and a repository with `--set repository`.
Kairos has releases with and without k3s. The [Kairos release page](https://github.com/kairos-io/kairos/releases) hosts the ["core" images that can be used as installers](/docs/examples/core/), while the [provider-kairos](https://github.com/kairos-io/provider-kairos/releases) images also contain `k3s`.
To use a container image instead, specify `--set container_image` with the container image of choice, either one of [the Kairos released images](/docs/reference/image_matrix/) or [a customized one](/docs/advanced/customizing).
#### 2. Run AuroraBoot
Now we can run AuroraBoot with the version we selected, either from GitHub releases or directly from a container image.
In the example below we selected `v1.5.1-k3sv1.21.14-k3s1` and the `opensuse-leap` flavor, so we would run one of the following:
{{< tabpane text=true >}}
{{% tab header="Container image" %}}
By indicating a `container_image`, AuroraBoot will pull the image locally and start to serve it for network booting.
You can use [the Kairos released images](/docs/reference/image_matrix/) or [your own](/docs/advanced/customizing).
```bash
docker run --rm -ti --net host quay.io/kairos/auroraboot \
--set "container_image=quay.io/kairos/kairos-opensuse-leap:v1.5.1-k3sv1.21.14-k3s1"
```
{{% /tab %}}
{{% tab header="Container Image, with dockerd" %}}
By indicating a `container_image` prefixed with `docker://`, AuroraBoot will pull the image from the local daemon and start to serve it for network booting.
This implies that the host has a docker daemon, and we have to give access to its socket with `-v /var/run/docker.sock:/var/run/docker.sock`.
```bash
docker pull quay.io/kairos/kairos-opensuse-leap:v1.5.1-k3sv1.21.14-k3s1
# This will use the container image from the host's docker daemon
docker run --rm -ti -v /var/run/docker.sock:/var/run/docker.sock --net host quay.io/kairos/auroraboot \
--set "container_image=docker://quay.io/kairos/kairos-opensuse-leap:v1.5.1-k3sv1.21.14-k3s1"
```
{{% /tab %}}
{{% tab header="Github releases" %}}
By indicating an `artifact_version`, a `release_version`, a `flavor` and a `repository`, AuroraBoot will use GitHub released assets.
```bash
docker run --rm -ti --net host quay.io/kairos/auroraboot \
--set "artifact_version=v1.5.1-k3sv1.21.14+k3s1" \
--set "release_version=v1.5.1" \
--set "flavor=opensuse-leap" \
--set "repository=kairos-io/provider-kairos"
```
{{% /tab %}}
{{< /tabpane >}}
To specify a cloud config, you can set it with `--cloud-config`. See the sections below for further examples.
#### 3. Start nodes
Generic hardware based netbooting is out of scope for this document.
Nodes need to be configured to boot over the network. Once AuroraBoot is started, it should be ready to accept connections; a typical output of a successful run is:
```bash
2023/02/08 14:27:30 DHCP: Offering to boot 08:00:27:54:1a:d1
2023/02/08 14:27:30 TFTP: Sent "08:00:27:54:1a:d1/4" to 192.168.68.113:6489
2023/02/08 14:27:36 DHCP: Offering to boot 08:00:27:54:1a:d1
2023/02/08 14:27:36 HTTP: Sending ipxe boot script to 192.168.68.113:45435
2023/02/08 14:27:36 HTTP: Sent file "kernel" to 192.168.68.113:45435
2023/02/08 14:27:36 HTTP: Sent file "initrd-0" to 192.168.68.113:45435
2023/02/08 14:27:49 HTTP: Sent file "other-0" to 192.168.68.113:43044
```
If trying on a VM, for instance on VirtualBox or QEMU, a typical setup might be:
- Set Netboot as first boot in the boot process order
![Screenshot from 2023-02-08 10-37-59](https://user-images.githubusercontent.com/2420543/217587463-cd293842-575e-4484-aee5-de46c4f053fb.png)
- Use bridge networking with the host (if running AuroraBoot and the VM in the same host)
![Screenshot from 2023-02-08 10-38-05](https://user-images.githubusercontent.com/2420543/217587465-35486742-26a1-4971-bee0-3049d9ec329a.png)
### USB-based bootstrapping
By default, AuroraBoot prepares an ISO with the custom cloud-init embedded, ready to be flashed to a USB stick either with `dd` or with [BalenaEtcher](https://www.balena.io/etcher).
To disable netboot and provide only offline artifacts, run `auroraboot` with `--set disable_netboot=true`.
#### 1. Node configuration
Create a cloud config file. See [our documentation](/docs/examples) for ready-to-use examples; a minimal configuration that installs automatically and allows us to log in afterwards can be the following:
```yaml
#cloud-config
install:
auto: true
device: "auto"
reboot: true
# Define the user accounts on the node.
users:
- name: "kairos" # The username for the user.
passwd: "kairos" # The password for the user.
ssh_authorized_keys: # A list of SSH keys to add to the user's authorized keys.
# - github:mudler # A key from the user's GitHub account.
# - "ssh-rsa AAA..." # A raw SSH key.
```
Save the file locally or host it remotely; you can pass it to AuroraBoot with the `--cloud-config` argument. Note that it can also be a remote http(s) URL.
#### 2. Create an offline ISO
Run AuroraBoot with a cloud-config to create an ISO with the embedded configuration:
{{< tabpane text=true >}}
{{% tab header="Container image" %}}
Check we have the cloud config file:
```bash
ls
# config.yaml
```
Build the ISO:
```bash
docker run -v "$PWD"/config.yaml:/config.yaml \
-v "$PWD"/build:/tmp/auroraboot \
--rm -ti quay.io/kairos/auroraboot \
--set container_image=quay.io/kairos/core-rockylinux:v1.5.0 \
--set "disable_http_server=true" \
--set "disable_netboot=true" \
--cloud-config /config.yaml \
--set "state_dir=/tmp/auroraboot"
```
Results should be available under `build/` in the current directory:
```bash
sudo ls -liah build/iso
#
# total 778M
# 34648528 drwx------ 2 root root 4.0K Feb 8 16:39 .
# 34648526 drwxr-xr-x 5 root root 4.0K Feb 8 16:38 ..
# 34648529 -rw-r--r-- 1 root root 253 Feb 8 16:38 config.yaml
# 34649370 -rw-r--r-- 1 root root 389M Feb 8 16:38 kairos.iso
# 34649372 -rw-r--r-- 1 root root 389M Feb 8 16:39 kairos.iso.custom.iso
# 34649371 -rw-r--r-- 1 root root 76 Feb 8 16:39 kairos.iso.sha256
```
{{% /tab %}}
{{% tab header="Github releases" %}}
Check we have the cloud config file:
```bash
ls
# config.yaml
```
Build the ISO:
```bash
docker run -v "$PWD"/build:/tmp/auroraboot -v /var/run/docker.sock:/var/run/docker.sock --rm -ti quay.io/kairos/auroraboot \
--set "artifact_version=v1.5.1-k3sv1.21.14+k3s1" \
--set "release_version=v1.5.1" \
--set "flavor=opensuse-leap" \
--set "repository=kairos-io/provider-kairos" \
--set "disable_http_server=true" \
--set "disable_netboot=true" \
--cloud-config /config.yaml \
--set "state_dir=/tmp/auroraboot"
```
Results should be available under `build/` in the current directory:
```bash
sudo ls -liah build/iso
#
# total 778M
# 34648528 drwx------ 2 root root 4.0K Feb 8 16:39 .
# 34648526 drwxr-xr-x 5 root root 4.0K Feb 8 16:38 ..
# 34648529 -rw-r--r-- 1 root root 253 Feb 8 16:38 config.yaml
# 34649370 -rw-r--r-- 1 root root 389M Feb 8 16:38 kairos.iso
# 34649372 -rw-r--r-- 1 root root 389M Feb 8 16:39 kairos.iso.custom.iso
# 34649371 -rw-r--r-- 1 root root 76 Feb 8 16:39 kairos.iso.sha256
```
{{% /tab %}}
{{< /tabpane >}}
The process will write an ISO, `kairos.iso.custom.iso`, under `build/iso`. That is the ISO with our embedded cloud-config.
#### 3. Run the image
The ISO is now ready to be written to a USB stick with either `dd` or [BalenaEtcher](https://www.balena.io/etcher), or attached to a VM.
{{< tabpane text=true right=true >}}
{{% tab header="**Machine**:" disabled=true /%}}
{{% tab header="Bare-Metal" %}}
When deploying on a bare-metal server, flash the image directly onto a USB stick. There are multiple ways to do this:
**From the command line using the `dd` command**
```bash
dd if=build/iso/kairos.iso.custom.iso of=/path/to/dev bs=4MB
```
or with [BalenaEtcher](https://www.balena.io/etcher).
{{% /tab %}}
{{< tab header="QEMU" >}}
{{% alert title="Warning" %}}
Make sure you have KVM enabled; this will improve the performance of your VM significantly!
{{% /alert %}}
This would be the way to start it via the command line, but you can also use the GUI
{{< highlight bash >}}
virt-install --name my-first-kairos-vm \
--vcpus 1 \
--memory 1024 \
--cdrom build/iso/kairos.iso.custom.iso \
--disk size=30 \
--os-variant opensuse-factory \
--virt-type kvm
{{< / highlight >}}
Immediately after, open a viewer so you can interact with the boot menu:
{{< highlight bash >}}
virt-viewer my-first-kairos-vm
{{< / highlight >}}
{{% /tab %}}
{{< /tabpane >}}
## Configuration
The AuroraBoot configuration file reference is the following:
```yaml
# Corresponding artifact versions from the kairos release page (e.g. kubernetes version included)
artifact_version: "v..."
# Version of the release in github
release_version: "v1.5.0"
# Flavor
flavor: "rockylinux"
# Github repository
repository: "kairos-io/kairos"
# Container image (takes over)
container_image: "..."
# Disable netboot
disable_netboot: true
# Disable http server for serving offline generated ISOs
disable_http_server: true
# Specify a directory that will be used by auroraboot to download artifacts
# Reuse the same to cache artifacts.
state_dir: "/tmp/auroraboot"
# Default http binding port for offline ISO generation
listen_addr: ":8080"
# Cloud config to use when booting the machine.
cloud_config: |
```
| Option | Description |
| ------ | ----------- |
| `artifact_version` | Corresponding artifact versions from the Kairos release page (e.g. Kubernetes version included). |
| `release_version` | Version of the release in GitHub. |
| `flavor` | The Kairos flavor to use. See [the Kairos support matrix](/docs/reference/image_matrix/) for a list. |
| `repository` | Github repository to use. This can either be `kairos-io/kairos` or `kairos-io/provider-kairos` for images with `k3s`. |
| `container_image` | Container image. If prefixed with `docker://` it will try to pull from the local docker daemon. If a `container_image` is specified, `artifact_version`, `flavor` and `release_version` are ignored. |
| `disable_netboot` | Disable netboot. |
| `disable_http_server` | Disable http server for serving offline generated ISOs. |
| `netboot_http_port` | Specify a netboot HTTP port (defaults to `8090`). |
| `state_dir` | Specify a directory that will be used by auroraboot to download artifacts and reuse the same to cache artifacts. |
| `listen_addr` | Default http binding port for offline ISO generation. |
| `cloud_config` | Cloud config path to use for the machines. A URL can be specified; use `-` to pass the cloud-config via _STDIN_ |
| `iso.data` | Defines a path to be embedded into the resulting iso. When booting, the files will be accessible at `/run/initramfs/live` |
| `netboot.cmdline` | Override the automatically generated cmdline with a custom one to use during netboot. `config_url` and `rootfs` are automatically constructed. A reasonable value can be `netboot.cmdline=rd.neednet=1 ip=dhcp rd.cos.disable netboot nodepair.enable console=tty0` |
To use the configuration file with AuroraBoot, run AuroraBoot specifying the file or URL of the config as first argument:
```bash
docker run --rm -ti -v "$PWD"/config.yaml:/config.yaml --net host quay.io/kairos/auroraboot /config.yaml
```
The CLI options can be used in place of a configuration file, and to set individual fields of it. Any field of the YAML file, excluding `cloud_config`, can be configured with `--set`. For instance, to disable netboot we can run AuroraBoot with:
```bash
docker run --rm -ti --net host quay.io/kairos/auroraboot .... --set "disable_netboot=true"
```
To specify a cloud config file instead, use `--cloud-config` (which can also be a URL):
```bash
docker run --rm -ti -v "$PWD"/config.yaml:/config.yaml --net host quay.io/kairos/auroraboot .... --cloud-config /config.yaml
```
Both the config file and the cloud-config file can be a URL.
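For example, hosting the AuroraBoot configuration itself remotely is just a matter of passing its URL as the first argument (the URL below is a placeholder):
```bash
docker run --rm -ti --net host quay.io/kairos/auroraboot \
    https://example.com/aurora.yaml
```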
### Cloud config
A custom cloud configuration file can be passed either with the `--cloud-config` flag, or in the AuroraBoot configuration file under the `cloud_config` key.
It is possible to apply templating to a cloud config. Any value passed with `--set` is accessible as a template in the cloud config file using the `[[` and `]]` delimiters. For instance, consider the following cloud config file, which allows setting a password for the `kairos` user and a GitHub handle allowed to log in to the machine:
```yaml
#cloud-config
install:
auto: true
device: "auto"
reboot: true
# Define the user accounts on the node.
users:
- name: "kairos" # The username for the user.
passwd: "[[.kairos.password]]" # The password for the user.
ssh_authorized_keys: # A list of SSH keys to add to the user's authorized keys.
- github:[[.github.user]]
```
We would then set the user to `mudler` and the password to `foobar` when running AuroraBoot, as follows:
```bash
docker run --rm -ti -v "$PWD"/config.yaml:/config.yaml --net host \
quay.io/kairos/auroraboot \
--cloud-config /config.yaml \
--set "github.user=mudler" \
--set "kairos.password=foobar"
```
Config files can also be hosted remotely and given to AuroraBoot as URLs.
For instance, we can use the template from the examples folder with the command above:
```bash
docker run --rm -ti --net host \
quay.io/kairos/auroraboot \
--cloud-config https://raw.githubusercontent.com/kairos-io/kairos/master/examples/auroraboot/master-template.yaml \
--set "github.user=mudler" \
--set "kairos.password=foobar"
```
To pass a cloud-config via a pipe, set `--cloud-config -`, for example:
```bash
cat <<EOF | docker run --rm -i --net host quay.io/kairos/auroraboot \
--cloud-config - \
--set "container_image=quay.io/kairos/kairos-opensuse-leap:v1.5.1-k3sv1.21.14-k3s1"
#cloud-config
install:
device: "auto"
auto: true
reboot: true
hostname: metal-bundle-test-{{ trunc 4 .MachineID }}
users:
- name: kairos
# Change to your pass here
passwd: kairos
ssh_authorized_keys:
# Replace with your github user and un-comment the line below:
- github:mudler
k3s:
enabled: true
# Specify the bundle to use
bundles:
- targets:
- run://quay.io/kairos/community-bundles:system-upgrade-controller_latest
- run://quay.io/kairos/community-bundles:cert-manager_latest
- run://quay.io/kairos/community-bundles:kairos_latest
kairos:
entangle:
enable: true
EOF
```
## Examples
{{% alert title="Note" %}}
The examples below assume a `config.yaml` cloud config file is present in the current directory.
{{% /alert %}}
### Offline ISO build from local container image
First make sure we have the image locally with:
```bash
docker pull <IMAGE>
```
Build the custom ISO with the cloud config:
```bash
docker run -v "$PWD"/config.yaml:/config.yaml \
-v "$PWD"/build:/tmp/auroraboot \
-v /var/run/docker.sock:/var/run/docker.sock \
--rm -ti quay.io/kairos/auroraboot \
--set container_image=docker://<IMAGE> \
--set "disable_http_server=true" \
--set "disable_netboot=true" \
--cloud-config /config.yaml \
--set "state_dir=/tmp/auroraboot"
```
### Offline ISO build from container images
Build the custom ISO with the cloud config:
```bash
docker run -v "$PWD"/config.yaml:/config.yaml \
-v "$PWD"/build:/tmp/auroraboot \
--rm -ti quay.io/kairos/auroraboot \
--set container_image=quay.io/kairos/core-rockylinux:v1.5.0 \
--set "disable_http_server=true" \
--set "disable_netboot=true" \
--cloud-config /config.yaml \
--set "state_dir=/tmp/auroraboot"
```
### Override GRUB config file
It is possible to override the default GRUB config file of the ISO by creating a directory that
contains the files that we want to add or replace in it.
For example, to override the GRUB config file:
```bash
mkdir -p data/boot/grub2
# You can replace this step with your own grub config. This GRUB configuration is the boot menu of the ISO
wget https://raw.githubusercontent.com/kairos-io/kairos/master/overlay/files-iso/boot/grub2/grub.cfg -O data/boot/grub2/grub.cfg
docker run -v "$PWD"/config.yaml:/config.yaml \
-v "$PWD"/data:/tmp/data \
-v "$PWD"/build:/tmp/auroraboot \
--rm -ti quay.io/kairos/auroraboot \
--set container_image=quay.io/kairos/core-rockylinux:v1.5.0 \
--set "disable_http_server=true" \
--set "disable_netboot=true" \
--cloud-config /config.yaml \
--set "state_dir=/tmp/auroraboot" \
--set "iso.data=/tmp/data"
```
### Prepare ISO for Airgap installations
See the [Airgap example](/docs/examples/airgap) in the [examples section](/docs/examples).
### Netboot with core images from Github releases
```bash
docker run -v "$PWD"/config.yaml:/config.yaml --rm -ti --net host quay.io/kairos/auroraboot \
--set "artifact_version=v1.5.0" \
--set "release_version=v1.5.0" \
--set "flavor=rockylinux" \
--set repository="kairos-io/kairos" \
--cloud-config /config.yaml
```
### Netboot with k3s images from Github releases
```bash
docker run -v "$PWD"/config.yaml:/config.yaml --rm -ti --net host quay.io/kairos/auroraboot \
--set "artifact_version=v1.5.1-k3sv1.21.14+k3s1" \
--set "release_version=v1.5.1" \
--set "flavor=opensuse-leap" \
--set "repository=kairos-io/provider-kairos" \
--cloud-config /config.yaml
```
### Netboot from container images
```bash
docker run -v "$PWD"/config.yaml:/config.yaml --rm -ti --net host quay.io/kairos/auroraboot \
--set container_image=quay.io/kairos/core-rockylinux:v1.5.0 \
--cloud-config /config.yaml
```
### Use the config file
Write down an aurora config file as `aurora.yaml`:
```yaml
container_image: "quay.io/kairos/core-rockylinux:v1.5.0"
cloud_config: |
#cloud-config
install:
auto: true
device: "auto"
reboot: true
# Define the user accounts on the node.
users:
- name: "kairos" # The username for the user.
passwd: "kairos" # The password for the user.
ssh_authorized_keys: # A list of SSH keys to add to the user's authorized keys.
# - github:mudler # A key from the user's GitHub account.
# - "ssh-rsa AAA..." # A raw SSH key.
```
And then run:
```bash
docker run -v "$PWD"/aurora.yaml:/aurora.yaml --rm -ti --net host quay.io/kairos/auroraboot /aurora.yaml
```


@ -1,181 +0,0 @@
---
title: "Build Kairos from scratch"
linkTitle: "Build Kairos from scratch"
weight: 5
description: >
This article shows how to bring your own image with Kairos, and build a Kairos derivative from scratch using base container images from popular distributions such as Ubuntu, Fedora, openSUSE, etc.
---
{{% alert title="Note" %}}
By default, Core and Standard Kairos images are pre-configured, optimized and maintained by the Kairos team, meeting most use cases. However, if you're an advanced user interested in creating your own derivative or building new flavors for Kairos core images, this section is reserved just for you.
While the process of building these images is still a work in progress, it's already usable for general consumption. You can follow our development efforts in the [factory epic](https://github.com/kairos-io/kairos/issues/116). For instance, we are currently working on adding features like [conformance tests](https://github.com/kairos-io/kairos/issues/958) to enable users to test images built with this process, ensuring their correctness before attempting to boot the system.
{{% /alert %}}
Kairos enables the creation of a distribution based on any base OS image that satisfies the Kairos model and contract. Essentially, every OS is treated solely as a collection of packages, and upgrades and operations are managed by Kairos components, which abstract the management model.
In practical terms, upgrades are not carried out by the package manager of the OS. Instead, the `kairos-agent` handles upgrades through container images. All installation and upgrades are delivered exclusively through container images. These images are overlayed at boot time, which means there is no additional runtime overhead, as no container engine is required for booting the OS.
The Kairos framework is an abstract layer between the OS and the management interface. It follows an atomic A/B approach, which can be controlled through Kubernetes, the CLI, or a declarative model.
The Kairos contract is straightforward: the OS container image must include everything required for booting, from the kernel to the init system.
The contract has several advantages:
- Delegation of package maintenance, CVE, and security fixes to the OS layer
- Easy issuance of upgrades to container images by chaining Dockerfiles or manually committing changes to the image. See also [Customizing](/docs/advanced/customizing).
- Clear separation of concerns: the OS provides the booting bits and packages necessary for the OS to function, while Kairos provides the operational framework for handling the node's lifecycle and immutability interface.
- Support for long-term maintenance: each framework image allows conversion of any OS to the given Kairos framework version, potentially enabling maintenance for as long as the base OS support model allows.
This document outlines the steps for making any base image fully bootable with the Kairos framework. The steps include:
- Building a container image
- Selecting a base image from the supported OS family (although it should work with any distro)
- Installing the required packages from the package manager of the chosen OS
- Building the initramfs
- Building an offline bootable ISO or netbooting the container image.
## Prerequisites
To follow the steps below, you'll need Docker or another container engine installed on your local machine. Additionally, note that the steps have been tested on Linux but should also work in other environments. If you encounter any issues, please feel free to open an issue and help us improve the documentation!
## Build a container image
To build the container image, follow these steps:
1. Create a new directory for the image and write a Dockerfile inside it. The Dockerfile will contain the instructions for building the image:
```Dockerfile
FROM fedora:36
# Install any package wanted here
# Note we need to install _at least_ the minimum required packages for Kairos to work:
# - An init system (systemd)
# - Grub
# - kernel/initramfs
RUN echo "install_weak_deps=False" >> /etc/dnf/dnf.conf
RUN dnf install -y \
audit \
coreutils \
curl \
device-mapper \
dosfstools \
dracut \
dracut-live \
dracut-network \
dracut-squash \
e2fsprogs \
efibootmgr \
gawk \
gdisk \
grub2 \
grub2-efi-x64 \
grub2-efi-x64-modules \
grub2-pc \
haveged \
kernel \
kernel-modules \
kernel-modules-extra \
livecd-tools \
nano \
NetworkManager \
openssh-server \
parted \
polkit \
rsync \
shim-x64 \
squashfs-tools \
sudo \
systemd \
systemd-networkd \
systemd-resolved \
tar \
which \
&& dnf clean all
RUN mkdir -p /run/lock
RUN touch /usr/libexec/.keep
# Copy the Kairos framework files. We use master builds here for fedora. See https://quay.io/repository/kairos/framework?tab=tags for a list
COPY --from=quay.io/kairos/framework:master_fedora / /
# Activate Kairos services
RUN systemctl enable cos-setup-reconcile.timer && \
systemctl enable cos-setup-fs.service && \
systemctl enable cos-setup-boot.service && \
systemctl enable cos-setup-network.service
## Generate initrd
RUN kernel=$(ls /boot/vmlinuz-* | head -n1) && \
ln -sf "${kernel#/boot/}" /boot/vmlinuz
RUN kernel=$(ls /lib/modules | head -n1) && \
dracut -v -N -f "/boot/initrd-${kernel}" "${kernel}" && \
ln -sf "initrd-${kernel}" /boot/initrd && depmod -a "${kernel}"
RUN rm -rf /boot/initramfs-*
```
In the Dockerfile, note the following:
- The base image we're using is fedora. However, you could also base your image on other distributions. See [the Kairos official images](https://github.com/kairos-io/kairos/tree/master/images) for an example.
- We're installing a set of packages, including `rsync`, `grub`, `systemd`, `kernel`, and we're generating the initramfs inside the image.
- We're copying the Kairos framework image file to the root of the container. Choose the framework image that closely matches your setup. You can find the framework images published here: https://quay.io/repository/kairos/framework?tab=tags
2. Now build the image with:
```bash
docker build -t test-byoi .
```
## Build bootable assets
Once the container image is built, we can proceed directly to creating an ISO or netbooting the container image using [AuroraBoot](/docs/reference/auroraboot). We can use AuroraBoot to handle the ISO build process and even attach a default cloud config if desired. Here's an example for both scenarios:
{{< tabpane text=true >}}
{{% tab header="ISO" %}}
We can use [AuroraBoot](/docs/reference/auroraboot) to handle the ISO build process and optionally attach a default cloud config, for example:
```bash
docker run -v "$PWD"/build:/tmp/auroraboot \
-v /var/run/docker.sock:/var/run/docker.sock \
--rm -ti quay.io/kairos/auroraboot:v0.2.2 \
--set container_image=docker://test-byoi \
--set "disable_http_server=true" \
--set "disable_netboot=true" \
--set "state_dir=/tmp/auroraboot"
# 2:45PM INF Pulling container image 'test-byoi' to '/tmp/auroraboot/temp-rootfs' (local: true)
# 2:45PM INF Generating iso 'kairos' from '/tmp/auroraboot/temp-rootfs' to '/tmp/auroraboot/iso'
# $ sudo ls -liah build/iso
# total 449M
# 35142520 drwx------ 2 root root 4.0K Mar 7 15:46 .
# 35142517 drwxr-xr-x 5 root root 4.0K Mar 7 15:42 ..
# 35142521 -rw-r--r-- 1 root root 0 Mar 7 15:45 config.yaml
# 35138094 -rw-r--r-- 1 root root 449M Mar 7 15:46 kairos.iso
```
This will generate an ISO named `kairos.iso`, located at `build/iso/`. You can use either `BalenaEtcher` or `dd` to flash this ISO to a USB stick. Additionally, QEMU can be used to test the ISO:
```bash
qemu-system-x86_64 -m 2048 -drive if=virtio,media=disk,file=build/iso/kairos.iso
```
{{% /tab %}}
{{% tab header="Netboot" %}}
To netboot, we can also use [AuroraBoot](/docs/reference/auroraboot) to handle the process, or refer to [Netboot](/docs/installation/netboot). Here's an example:
```bash
docker run --net host \
-v /var/run/docker.sock:/var/run/docker.sock \
--rm -ti quay.io/kairos/auroraboot:v0.2.2 \
--set container_image=docker://test-byoi \
--set "disable_http_server=true" \
--set "netboot.cmdline=rd.neednet=1 ip=dhcp rd.cos.disable netboot nodepair.enable console=tty0 selinux=0"
```
{{% /tab %}}
{{< /tabpane >}}
This example is available in the `examples/byoi/fedora` directory of the [Kairos repository](https://github.com/kairos-io/kairos/tree/master/examples/byoi/fedora), where you can run `build.sh` to reproduce it.


@ -1,151 +0,0 @@
---
title: "CLI"
linkTitle: "CLI"
weight: 3
date: 2022-11-13
description: >
---
A CLI is provided as part of the releases associated with each Kairos version.
The CLI can be used from an external machine to generate network tokens and pair nodes on first-boot.
```
./kairos --help
NAME:
kairos - kairos (register|install)
USAGE:
[global options] command [command options] [arguments...]
VERSION:
0.1
DESCRIPTION:
kairos registers and installs kairos boxes
AUTHOR:
Ettore Di Giacinto
COMMANDS:
register
create-config, c
generate-token, g
setup, s
get-kubeconfig
install, i
help, h Shows a list of commands or help for one command
```
## `create-config`
Generates a new Kairos configuration file which can be used as `cloud-init`, with a new unique network token:
```
$ ./kairos create-config
kairos:
network_token: b3RwOgogIGRodDoKICAgIGludGVydmFsOiA5MjIzMzcyMDM2ODU0Nzc1ODA3CiAgICBrZXk6IEVCMzJJMlNXTjJCNFBHNEtCWTNBUVBBS0FWRTY0Q0VLVUlDTktTUFVWVU5BWTM0QklEQ0EKICAgIGxlbmd0aDogMzIKICBjcnlwdG86CiAgICBpbnRlcnZhbDogOTIyMzM3MjAzNjg1NDc3NTgwNwogICAga2V5OiBDMk1RRk5DWEFVRElPWjVHM1pZUUIzVEVHTzVXVEdQR1pZSEVQQkY3SFEyVUROUlZCTkxRCiAgICBsZW5ndGg6IDMyCnJvb206IGp6Q29kQVVOWUZSUklQU3JISmx4d1BVUnVxTGJQQnh4CnJlbmRlenZvdXM6IG5NckRCbllyVVBMdnFPV0Z2dWZvTktXek1adEJIRmpzCm1kbnM6IGpQUUhIbVZza2x6V29xbWNkeVlnbVhMSVFjTE1HUFN6Cm1heF9tZXNzYWdlX3NpemU6IDIwOTcxNTIwCg==
offline: false
reboot: false
device: ""
poweroff: false
```
Now you can use this in your configuration file to create new Kairos nodes:
```yaml
kairos:
network_token: b3RwOgogIGRodDoKICAgIGludGVydmFsOiA5MjIzMzcyMDM2ODU0Nzc1ODA3CiAgICBrZXk6IEVCMzJJMlNXTjJCNFBHNEtCWTNBUVBBS0FWRTY0Q0VLVUlDTktTUFVWVU5BWTM0QklEQ0EKICAgIGxlbmd0aDogMzIKICBjcnlwdG86CiAgICBpbnRlcnZhbDogOTIyMzM3MjAzNjg1NDc3NTgwNwogICAga2V5OiBDMk1RRk5DWEFVRElPWjVHM1pZUUIzVEVHTzVXVEdQR1pZSEVQQkY3SFEyVUROUlZCTkxRCiAgICBsZW5ndGg6IDMyCnJvb206IGp6Q29kQVVOWUZSUklQU3JISmx4d1BVUnVxTGJQQnh4CnJlbmRlenZvdXM6IG5NckRCbllyVVBMdnFPV0Z2dWZvTktXek1adEJIRmpzCm1kbnM6IGpQUUhIbVZza2x6V29xbWNkeVlnbVhMSVFjTE1HUFN6Cm1heF9tZXNzYWdlX3NpemU6IDIwOTcxNTIwCg==
offline: false
reboot: false
device: ""
poweroff: false
stages:
network:
- name: "Setup users"
authorized_keys:
kairos:
- github:yourhandle!
```
## `generate-token`
Generates a new network token which can be used in a configuration file:
```
$ ./kairos generate-token
b3RwOgogIGRodDoKICAgIGludGVydmFsOiA5MjIzMzcyMDM2ODU0Nzc1ODA3CiAgICBrZXk6IFhMMjRYUk1MTlFOQ1pJQTU0SVFLQ1laMk83SENQWEFBU1ZKN0tZSTQ3MzVaUkpKSktRSEEKICAgIGxlbmd0aDogMzIKICBjcnlwdG86CiAgICBpbnRlcnZhbDogOTIyMzM3MjAzNjg1NDc3NTgwNwogICAga2V5OiBMR1dMWFBTUllaU0ZERDdOT0pBNzdKV0ZWQjRHVkZBMjJIWlZPWU1VT0lNSFVYNFZXUURRCiAgICBsZW5ndGg6IDMyCnJvb206IFRtcUt5VnFHQ1ZZam9TRm9CTEVNRGVEdmJzelBkVEdoCnJlbmRlenZvdXM6IGttb3J4Q21sY2NjVVppWmdkSW5xTERvTGJtS3ZGdm9mCm1kbnM6IEZkWVdQc2R4aHdvWHZlb0VzSXNnVHRXbEJUbE9IVHJmCm1heF9tZXNzYWdlX3NpemU6IDIwOTcxNTIwCg==
```
And now:
```yaml
kairos:
network_token: b3RwOgogIGRodDoKICAgIGludGVydmFsOiA5MjIzMzcyMDM2ODU0Nzc1ODA3CiAgICBrZXk6IFhMMjRYUk1MTlFOQ1pJQTU0SVFLQ1laMk83SENQWEFBU1ZKN0tZSTQ3MzVaUkpKSktRSEEKICAgIGxlbmd0aDogMzIKICBjcnlwdG86CiAgICBpbnRlcnZhbDogOTIyMzM3MjAzNjg1NDc3NTgwNwogICAga2V5OiBMR1dMWFBTUllaU0ZERDdOT0pBNzdKV0ZWQjRHVkZBMjJIWlZPWU1VT0lNSFVYNFZXUURRCiAgICBsZW5ndGg6IDMyCnJvb206IFRtcUt5VnFHQ1ZZam9TRm9CTEVNRGVEdmJzelBkVEdoCnJlbmRlenZvdXM6IGttb3J4Q21sY2NjVVppWmdkSW5xTERvTGJtS3ZGdm9mCm1kbnM6IEZkWVdQc2R4aHdvWHZlb0VzSXNnVHRXbEJUbE9IVHJmCm1heF9tZXNzYWdlX3NpemU6IDIwOTcxNTIwCg==
offline: false
reboot: false
device: ""
poweroff: false
stages:
network:
- name: "Setup users"
authorized_keys:
kairos:
- github:yourhandle!
```
## `register`
The **register** command can be used to register and drive installation of nodes via QR code with a `cloud-init` config file (with `--config`).
```
NAME:
register -
USAGE:
register [command options] [arguments...]
OPTIONS:
--config value
--device value
--reboot
--poweroff
```
When booting Kairos via ISO, the boot process ends up displaying a QR code, which can be parsed by `kairos register` from another machine.
### Taking a screenshot
`register` by default takes a screenshot and tries to find a QR code in it:
```
kairos register
```
### Providing a QR code image/screenshot manually
An image can also be specified:
```
kairos register <file.png>
```
After the pairing is done, the node will start installation with the provided options.
A `--device` and a `--config` file are required in order to have a functional installation.
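Putting the options together, a complete pairing run could look like the following sketch (the device, config, and image paths are illustrative):
```bash
# Parse the QR code from a saved screenshot and drive the installation
kairos register --config cloud-config.yaml --device /dev/sda --reboot qr-screenshot.png
```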
## `bridge`
Connect to the nodes in the VPN P2P network by creating a tun device on the host.
It needs a `--network-token` (`$NETWORK_TOKEN`) argument and exposes an API endpoint at [localhost:8080](http://localhost:8080) to monitor the network status.
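A minimal sketch of a bridge session, assuming a token generated with `generate-token`:
```bash
export NETWORK_TOKEN="<paste the generated token here>"
kairos bridge --network-token "$NETWORK_TOKEN"
# While the bridge runs, the network status API is available at http://localhost:8080
```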
## `install`
It is called by Kairos nodes on boot and is not meant to be used manually. It kicks off the installation and the QR pairing process.
## `setup`
It is called by Kairos nodes on boot and is not meant to be used manually. It prepares `edgevpn` and K3s, bootstrapping the node and the VPN.

File diff suppressed because it is too large


@ -1,402 +0,0 @@
---
title: "Entangle CRDs"
linkTitle: "Entangle"
weight: 8
date: 2022-11-13
description: >
Inter-connecting Kubernetes clusters without the need of exposing any service to the public via E2E P2P encrypted networks.
---
{{% alert title="Note" %}}
This feature is crazy and experimental! Do not run it on production servers.
Feedback and bug reports are welcome, as we are improving the p2p aspects of Kairos.
{{% /alert %}}
Kairos has two Kubernetes-native extensions ([entangle](https://github.com/kairos-io/entangle) and [entangle-proxy](https://github.com/kairos-io/entangle-proxy)) that allow interconnecting services between different clusters via P2P with a shared secret.
The clusters don't need any specific settings in order to establish a connection, as [libp2p](https://github.com/libp2p/go-libp2p) is used to establish the connection between the nodes.
Entangle can be used to connect services running on different clusters or can be used with `entangle-proxy` to control another cluster remotely via P2P.
## Prerequisites
To `entangle` two or more clusters you need one or more Kubernetes clusters; `entangle` depends on `cert-manager`:
```bash
kubectl apply -f https://github.com/jetstack/cert-manager/releases/latest/download/cert-manager.yaml
kubectl wait --for=condition=Available deployment --timeout=2m -n cert-manager --all
```
- `entangle` needs to run on all the clusters that you wish to interconnect. It provides capabilities to interconnect services between clusters
- `entangle-proxy` only on the cluster that you wish to use as control cluster
### Install the CRD and `entangle`
First, add the kairos helm repository:
```bash
helm repo add kairos https://kairos-io.github.io/helm-charts
helm repo update
```
Install the CRDs with:
```bash
helm install kairos-crd kairos/kairos-crds
```
Install `entangle`:
```bash
helm install kairos-entangle kairos/entangle
## To use a different image:
## helm install kairos-entangle kairos/entangle --set image.serviceTag=v0.18.0 --set image.tag=latest
```
### Install `entangle-proxy`
Now install `entangle-proxy`, only on the control cluster, which dispatches manifests to the downstream clusters.
```bash
helm install kairos-entangle-proxy kairos/entangle-proxy
```
## Controlling a remote cluster
![control](https://user-images.githubusercontent.com/2420543/205872002-894f24aa-ac1c-4f70-bb46-aaad89392a25.png)
To control a remote cluster, you need a cluster from which to issue and apply manifests (the control cluster, where `entangle-proxy` is installed) and a cluster running `entangle`, which proxies `kubectl` with a `ServiceAccount`/`Role` associated with it.
Both clusters need to agree on a secret, the `network_token`, to be able to communicate; otherwise it won't work. There is no other configuration needed for the two clusters to talk to each other.
### Generating a network token
Generating a network token is described in [the p2p section](/docs/installation/p2p)
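In short, the shared secret can be produced with the Kairos CLI and pasted into the `network_token` field of the secrets below:
```bash
# Generates a base64-encoded network token shared by both clusters
kairos generate-token
```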
### Managed cluster
The cluster that is the target of our manifests needs to run a deployment which _entangles_ `kubectl`:
```yaml
apiVersion: v1
kind: Secret
metadata:
name: mysecret
namespace: default
type: Opaque
stringData:
network_token: YOUR_NETWORK_TOKEN_GOES_HERE
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: entangle
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: entangle
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- get
- list
- update
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
---
apiVersion: v1
kind: List
items:
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: entangle
subjects:
- kind: ServiceAccount
name: entangle
namespace: default
roleRef:
kind: ClusterRole
name: entangle
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: agent-proxy
name: agent-proxy
namespace: default
spec:
selector:
matchLabels:
app: agent-proxy
replicas: 1
template:
metadata:
labels:
app: agent-proxy
entanglement.kairos.io/name: "mysecret"
entanglement.kairos.io/service: "foo"
entanglement.kairos.io/target_port: "8001"
entanglement.kairos.io/direction: "entangle"
spec:
serviceAccountName: entangle
containers:
- name: proxy
image: "quay.io/kairos/kubectl"
imagePullPolicy: Always
command: ["/usr/bin/kubectl"]
args:
- "proxy"
```
Note: replace *YOUR_NETWORK_TOKEN_GOES_HERE* with the token generated with the `kairos-cli`.
### Control
To control the managed cluster, we can apply the following from the cluster that has `entangle-proxy` installed:
```yaml
apiVersion: v1
kind: Secret
metadata:
name: mysecret
namespace: default
type: Opaque
stringData:
network_token: YOUR_NETWORK_TOKEN_GOES_HERE
---
apiVersion: entangle-proxy.kairos.io/v1alpha1
kind: Manifests
metadata:
name: hello
namespace: default
labels:
entanglement.kairos.io/name: "mysecret"
entanglement.kairos.io/service: "foo"
entanglement.kairos.io/target_port: "9090"
spec:
serviceUUID: "foo"
secretRef: "mysecret"
manifests:
- |
apiVersion: v1
kind: Pod
metadata:
name: test
namespace: default
spec:
containers:
- name: hello
image: busybox:1.28
command: ['sh', '-c', 'echo "Hello, ssaa!" && sleep 3600']
restartPolicy: OnFailure
```
Note: replace *YOUR_NETWORK_TOKEN_GOES_HERE* with the token generated with the `kairos-cli` and used in the step above.
## Expose services
The `entangle` CRD can be used to interconnect services of clusters, or create tunnels to cluster services.
- It can inject a sidecar container to access a remotely exposed service
- It can create a deployment which exposes a remote service from another cluster
### Deployment
`entangle` can be used to tunnel a connection or a service available from one cluster to another.
![entangle-A](https://user-images.githubusercontent.com/2420543/205871973-d913680d-355f-4322-8cbb-6a94f8505ccb.png)
In the image above, we can see how entangle can create a tunnel for a service running on Cluster A and mirror it to Cluster B.
It can also expose services that are reachable from the host network:
![entangle-B](https://user-images.githubusercontent.com/2420543/205871999-17abcde8-1b78-4a71-bc3e-ed77664c5551.png)
Consider the following example, which tunnels `192.168.1.1:80` from one cluster to another using an `Entanglement`:
{{< tabpane text=true right=true >}}
{{% tab header="Cluster A (where `192.168.1.1:80` is accessible)" %}}
```yaml
apiVersion: v1
kind: Secret
metadata:
name: mysecret
namespace: default
type: Opaque
stringData:
network_token: _YOUR_SECRET_GOES_HERE_
---
apiVersion: entangle.kairos.io/v1alpha1
kind: Entanglement
metadata:
name: test2
namespace: default
spec:
serviceUUID: "foo2"
secretRef: "mysecret"
host: "192.168.1.1"
port: "80"
hostNetwork: true
```
{{% /tab %}}
{{% tab header="Cluster B (which will have a `ClusterIP` available on the Kubernetes service network)" %}}
```yaml
---
apiVersion: v1
kind: Secret
metadata:
name: mysecret
namespace: default
type: Opaque
stringData:
network_token: _YOUR_SECRET_GOES_HERE_
---
apiVersion: entangle.kairos.io/v1alpha1
kind: Entanglement
metadata:
name: test3
namespace: default
spec:
serviceUUID: "foo2"
secretRef: "mysecret"
host: "127.0.0.1"
port: "8080"
inbound: true
serviceSpec:
ports:
- port: 8080
protocol: TCP
type: ClusterIP
```
{{% /tab %}}
{{< /tabpane >}}
### Sidecar injection
The controller can inject a container which exposes a connection (in both directions):
```yaml
apiVersion: v1
kind: Secret
metadata:
name: mysecret
namespace: default
type: Opaque
stringData:
network_token: _YOUR_SECRET_GOES_HERE_
---
apiVersion: v1
kind: Pod
metadata:
name: hello
namespace: default
labels:
# Here we use the labels to refer to the service on the network, and the secret which contains our network_token
entanglement.kairos.io/name: "mysecret"
entanglement.kairos.io/service: "foo"
entanglement.kairos.io/target_port: "9090"
spec:
containers:
- name: hello
image: busybox:1.28
command: ['sh', '-c', 'echo "Hello, Kubernetes!" && sleep 3600']
restartPolicy: OnFailure
```
Or we can combine them together:
{{< tabpane text=true right=true >}}
{{% tab header="Cluster A" %}}
```yaml
apiVersion: v1
kind: Secret
metadata:
name: mysecret
namespace: default
type: Opaque
stringData:
network_token: _YOUR_SECRET_GOES_HERE_
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: entangle-proxy
name: entangle-proxy
namespace: default
spec:
selector:
matchLabels:
app: entangle-proxy
replicas: 1
template:
metadata:
labels:
app: entangle-proxy
entanglement.kairos.io/name: "mysecret"
entanglement.kairos.io/service: "foo"
entanglement.kairos.io/target_port: "8001"
entanglement.kairos.io/direction: "entangle"
name: entangle-proxy
spec:
containers:
- name: proxy
image: "quay.io/mudler/k8s-resource-scheduler:latest"
imagePullPolicy: Always
command: ["/usr/bin/kubectl"]
args:
- "proxy"
```
{{% /tab %}}
{{% tab header="Cluster B" %}}
```yaml
apiVersion: v1
kind: Secret
metadata:
name: mysecret
namespace: default
type: Opaque
stringData:
network_token: _YOUR_SECRET_GOES_HERE_
---
apiVersion: entangle.kairos.io/v1alpha1
kind: Entanglement
metadata:
name: test
namespace: default
spec:
serviceUUID: "foo"
secretRef: "mysecret"
host: "127.0.0.1"
port: "8080"
inbound: true
serviceSpec:
ports:
- port: 8080
protocol: TCP
type: ClusterIP
```
{{% /tab %}}
{{< /tabpane >}}


@ -1,35 +0,0 @@
---
title: "FAQ"
linkTitle: "Frequently asked questions"
weight: 9
date: 2022-11-13
description: >
---
## What is the difference between Kairos compared to Talos/Sidero Metal and Flatcar?
Kairos is distro-agnostic by design. Currently, you can pick from the list in the [supported matrix](/docs/reference/image_matrix/#image-flavors), but we are working on CRDs to let you assemble OSes from other bases in a Kubernetes-native way.
The key difference is that the OS is distributed as a standard container, similar to how apps are distributed with container registries. You can also use `docker run` locally and inspect the OS, and similarly push customizations by pointing nodes to a new image.
Also, Kairos is easy to set up. The P2P capabilities allow nodes to self-coordinate, simplifying the setup of a multi-node cluster.
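As a quick sketch of the container angle (the image tag is illustrative):
```bash
# Inspect a Kairos OS image locally, like any other container
docker run --rm -ti quay.io/kairos/core-rockylinux:v1.5.0 cat /etc/os-release
```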
## What would be the difference between Kairos and Fedora Coreos?
Kairos is distribution agnostic. It supports all the distributions in the [supported matrix](/docs/reference/image_matrix/#image-flavors). In addition, we plan to have K3s deploy Kubernetes automatically (even with self-coordinating nodes).
Additionally, Kairos is OCI-based, and the system is built from a container image. This makes it possible to run it locally with `docker run` to inspect it, as well as to customize and upgrade your nodes by just pointing them at a new image. Think of it like container apps, but bootable.
## If the OS is a container, what is running the container runtime beneath?
There is no real container runtime. The container is used to construct an image internally, which is then used to boot the system in an A/B fashion, so there is no overhead at all. The system being booted is actually a snapshot of the container.
## Does this let the OS "containers" install extra kernel extensions/drivers?
Every container/OS ships its own kernel and drivers within a single image, so you can customize that down the road quite easily. Since every release is a standard container, you can customize it just by writing your own Dockerfile and pointing your nodes at it. You can also use the CRDs, which allow you to do that natively inside Kubernetes to automate the process even further.
Kairos also supports live overlaying, but that doesn't apply to kernel modules. However, live overlaying is somewhat discouraged, as it introduces snowflakes in your clusters unless you have a management cluster.
## How is the P2P mesh formed? Is there an external service for discovery?
The P2P mesh is optional and internally uses libp2p. You can use your own discovery bootstrap server or the default one already baked into the library. Furthermore, you can limit and scope it to local networks only. For machines behind a NAT, nodes automatically operate as relay servers (hops) when they are detected to be capable of it. You can limit that to specific nodes, or let automatic discovery handle it.


@ -1,129 +0,0 @@
---
title: "Image support matrix"
linkTitle: "Image support matrix"
weight: 5
date: 2022-11-13
description: >
---
Kairos offers several pre-built images for user convenience based on popular Linux distributions such as openSUSE, Alpine Linux, and Ubuntu. The Kairos core team does its best to test these images, but those that are based on systemd (e.g. openSUSE, Ubuntu) are more thoroughly tested due to their homogeneous settings. Support for other non-systemd based flavors (e.g. Alpine) may be limited due to team bandwidth. However, as Kairos is an open source community-driven project, we welcome any contributions, bug reports, and bug fixes. Check out our [Contribution guidelines](https://github.com/kairos-io/kairos/contribute) for more information.
In addition, tighter integration with systemd allows for several features that are only available with it, such as live layering.
These images are pushed to quay.io and are available for installation and upgrading. The installable media included in the releases are generated using the methods described in the [automated installation reference](/docs/installation/automated/#iso-remastering), and the images can be used for upgrades as well.
## Image flavors
The Kairos release process generates images based on official container images from popular Linux distributions. If you don't see your preferred distribution, check whether [we are already planning](https://github.com/kairos-io/kairos/issues?q=is%3Aopen+is%3Aissue+label%3Aarea%2Fflavor) support for it or create a new issue.
Below is a list of the available images and their locations on the quay.io registry:
- The **Core** images do not include any Kubernetes engine and can be used as a base for customizations.
- The **Standard** images include `k3s` and the [kairos provider](https://github.com/kairos-io/provider-kairos), which enables Kubernetes deployments and optionally [p2p](/docs/installation/p2p).
Base images are tagged with specific upstream versions (e.g. Ubuntu 20 LTS is pinned to Ubuntu 20:04, openSUSE to openSUSE leap 15.4, etc.).
| **Flavor/Variant** | amd64 | arm64 |
|------------------------------------------|--------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|
| **Alpine Linux based (openSUSE kernel)** | [core][c-alpine-opensuse-leap], [standard][k-alpine-opensuse-leap] | [core][c-alpine-arm-rpi], [core-img][c-alpine-arm-rpi-img], [standard][k-alpine-arm-rpi], [standard-img][k-alpine-arm-rpi-img] |
| **Alpine Linux based (Ubuntu kernel)** | [core][c-alpine-ubuntu], [standard][k-alpine-ubuntu] | |
| **Debian based** | [core][c-debian], [standard][k-debian] | |
| **Fedora based** | [core][c-fedora], [standard][k-fedora] | |
| **openSUSE Leap based** | [core][c-opensuse-leap], [standard][k-opensuse-leap] | [core][c-opensuse-leap-arm-rpi], [core-img][c-opensuse-leap-arm-rpi-img], [standard][k-opensuse-leap-arm-rpi], [standard-img][k-opensuse-leap-arm-rpi-img] |
| **openSUSE Tumbleweed based** | [core][c-opensuse-tumbleweed], [standard][k-opensuse-tumbleweed] | [core][c-opensuse-tumbleweed-arm-rpi], [standard][k-opensuse-tumbleweed-arm-rpi] |
| **Ubuntu based (rolling)** ** | [core][c-ubuntu], [standard][k-ubuntu] | [core][c-ubuntu-rpi], [standard][k-ubuntu-rpi] |
| **Ubuntu based (22 LTS)** ** | [core][c-ubuntu-22-lts], [standard][k-ubuntu-22-lts] | |
| **Ubuntu based (20 LTS)** ** | [core][c-ubuntu-20-lts], [standard][k-ubuntu-20-lts] | |
| **Rocky Linux based** | [core][c-rockylinux], [standard][k-rockylinux] | |
[c-alpine-opensuse-leap]: https://quay.io/repository/kairos/core-alpine-opensuse-leap
[c-alpine-ubuntu]: https://quay.io/repository/kairos/core-alpine-ubuntu
[c-alpine-arm-rpi]: https://quay.io/repository/kairos/core-alpine-arm-rpi
[c-alpine-arm-rpi-img]: https://quay.io/repository/kairos/core-alpine-arm-rpi-img
[c-debian]: https://quay.io/repository/kairos/core-debian
[c-fedora]: https://quay.io/repository/kairos/core-fedora
[c-opensuse-leap]: https://quay.io/repository/kairos/core-opensuse-leap
[c-opensuse-leap-arm-rpi]: https://quay.io/repository/kairos/core-opensuse-leap-arm-rpi
[c-opensuse-leap-arm-rpi-img]: https://quay.io/repository/kairos/core-opensuse-leap-arm-rpi-img
[c-opensuse-tumbleweed]: https://quay.io/repository/kairos/core-opensuse-tumbleweed
[c-opensuse-tumbleweed-arm-rpi]: https://quay.io/repository/kairos/core-opensuse-tumbleweed-arm-rpi
[c-opensuse-tumbleweed-arm-rpi-img]: https://quay.io/repository/kairos/core-opensuse-tumbleweed-arm-rpi-img
[c-ubuntu]: https://quay.io/repository/kairos/core-ubuntu
[c-ubuntu-22-lts]: https://quay.io/repository/kairos/core-ubuntu-22-lts
[c-ubuntu-20-lts]: https://quay.io/repository/kairos/core-ubuntu-20-lts
[c-ubuntu-rpi]: https://quay.io/repository/kairos/core-ubuntu-arm-rpi
[c-rockylinux]: https://quay.io/repository/kairos/core-rockylinux
[k-alpine-opensuse-leap]: https://quay.io/repository/kairos/kairos-alpine-opensuse-leap
[k-alpine-ubuntu]: https://quay.io/repository/kairos/kairos-alpine-ubuntu
[k-alpine-arm-rpi]: https://quay.io/repository/kairos/kairos-alpine-arm-rpi
[k-alpine-arm-rpi-img]: https://quay.io/repository/kairos/kairos-alpine-arm-rpi-img
[k-debian]: https://quay.io/repository/kairos/kairos-debian
[k-fedora]: https://quay.io/repository/kairos/kairos-fedora
[k-opensuse-leap]: https://quay.io/repository/kairos/kairos-opensuse-leap
[k-opensuse-leap-arm-rpi]: https://quay.io/repository/kairos/kairos-opensuse-leap-arm-rpi
[k-opensuse-leap-arm-rpi-img]: https://quay.io/repository/kairos/kairos-opensuse-leap-arm-rpi-img
[k-opensuse-tumbleweed]: https://quay.io/repository/kairos/kairos-opensuse-tumbleweed
[k-opensuse-tumbleweed-arm-rpi]: https://quay.io/repository/kairos/kairos-opensuse-tumbleweed-arm-rpi
[k-opensuse-tumbleweed-arm-rpi-img]: https://quay.io/repository/kairos/kairos-opensuse-tumbleweed-arm-rpi-img
[k-ubuntu]: https://quay.io/repository/kairos/kairos-ubuntu
[k-ubuntu-22-lts]: https://quay.io/repository/kairos/kairos-ubuntu-22-lts
[k-ubuntu-20-lts]: https://quay.io/repository/kairos/kairos-ubuntu-20-lts
[k-ubuntu-rpi]: https://quay.io/repository/kairos/kairos-ubuntu-arm-rpi
[k-rockylinux]: https://quay.io/repository/kairos/kairos-rockylinux
{{% alert title="Note" color="info" %}}
** The `ubuntu` flavor tracks the latest available Ubuntu release (at the time of writing 22.10). The LTS flavors, on the other hand, track the latest LTS available on DockerHub. For example, ubuntu-22-lts uses 22.04 as the base image.
{{% /alert %}}
{{% alert title="Note" color="info" %}}
The pipelines do not publish `img` artifacts for the arm architecture because the files are too large for GitHub Actions (they exceed the artifact size limit). These artifacts can be extracted from the published docker images using the following command:
```bash
export IMAGE={{< registryURL >}}/core-{{< armFlavor >}}-arm-rpi-img:{{< kairosVersion >}}
docker run -ti --rm -v $PWD:/image quay.io/luet/base util unpack "$IMAGE" /image
```
(replace `$IMAGE` with the proper image)
The artifacts can be found in the `build` directory.
{{% /alert %}}
### Framework images
Kairos releases also contain the __framework__ assets that can be used to [build Kairos images from scratch](/docs/reference/build-from-scratch).
Framework images can be found on quay.io at: https://quay.io/repository/kairos/framework.
Each tag follows the convention: `<version>_<flavor>`.
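For example, the framework image used in the build-from-scratch guide follows this convention and can be pulled directly:
```bash
# <version>_<flavor>: here, the master build for the fedora flavor
docker pull quay.io/kairos/framework:master_fedora
```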
## Versioning policy
Kairos follows [Semantic Versioning](https://semver.org/) and our releases signal changes to Kairos components, rather than changes to the underlying OS and package versions. Flavors are pinned to specific upstream OS branches (e.g. `opensuse` to `leap 15.4`) and major version bumps will be reflected through new flavors in our build matrix or through specific releases to follow upstream with regard to minor version bumps (e.g. `leap 15.3` and `leap 15.4`).
Here are some key points to note:
- We only support the latest release branch with patch releases.
- Patch releases (e.g. _1.1.x_) follow a weekly release cadence, unless there are exceptions for highly impactful bugs in Kairos itself or at the OS layer (e.g. high-severity CVEs).
- Minor releases follow a monthly cadence and are expected to bring enhancements through planned releases.
- Major releases signal new advanced features or significant changes to the codebase. In-place upgrades from old to new major release branches are not always guaranteed, but we strive for compatibility across versions.
{{% alert title="Note" color="info" %}}
In order to give users more control over the chosen base image (e.g. `openSUSE`, `Ubuntu`, etc.) and reduce reliance on our CI infrastructure, we are actively working on streamlining the creation of Kairos-based distributions directly from upstream base images. You can track the development progress [here](https://github.com/kairos-io/kairos/issues/116).
If you need to further customize images, including changes to the base image, package updates, and CVE hotfixes, check out the [customization docs](/docs/advanced/customizing).
{{% /alert %}}
## Release changelog
Our changelog is published as part of the release process and contains all the changes, highlights, and release notes that are relevant to the release. We strongly recommend checking the changelog for each release before upgrading or building a customized version of Kairos.
Release changelogs are available for Kairos core and for each component. Below is a list of the components that are part of a Kairos release and their respective release pages with changelogs.
| **Project** | **Release page** |
|----------------------------------------------------- |--------------------------------------------------------- |
| **Kairos core** | https://github.com/kairos-io/kairos/releases |
| **Kairos provider (k3s support)** | https://github.com/kairos-io/provider-kairos/releases |

@ -1,54 +0,0 @@
---
title: "Recovery mode"
linkTitle: "Recovery mode"
weight: 7
date: 2022-11-13
description: >
---
The Kairos recovery mode can be used to recover a damaged system or to remotely regain access (with assistance) to a machine you have lost access to. The recovery mode is accessible only from the GRUB menu, both from the LiveCD and from an installed system.
{{% alert title="Note" %}}
On an installed system, there are two recovery modes available during boot. Only the Kairos remote recovery mode is described below. The other recovery mode can be used to reset the A/B partitions (with the user/pass set during setup) and perform any other operation without remote access.
{{% /alert %}}
## Boot into recovery mode
Kairos recovery mode can be accessed either via ISO or from an installed system.
A GRUB menu will be displayed:
![Screenshot from 2022-04-28 17-48-06](https://user-images.githubusercontent.com/2420543/165800177-3e4cccd8-f67c-43a2-bd88-329478539400.png)
Select the last entry `kairos (remote recovery mode)` and press enter.
At this point the boot process starts, and you should be welcomed by the Kairos screen:
![Screenshot from 2022-04-28 17-48-32](https://user-images.githubusercontent.com/2420543/165800182-9aa29c90-09e9-4c53-b3c7-c8ced262e3ac.png)
After a few seconds, the recovery process starts, and right after that a QR code will be printed on the screen, along with a password which can be used later to SSH into the machine:
![Screenshot from 2022-04-28 17-48-43](https://user-images.githubusercontent.com/2420543/165800187-4d2fe04e-c501-4ad8-a29f-32a0110eaa72.png)
At this stage, take a screenshot or a photo and save the image with the QR code.
## Connect to the machine
On the machine that you are using to connect to your server (your workstation, a jumpbox, or similar), use the Kairos CLI to connect to the remote machine:
```
$ ./kairos bridge --qr-code-image /path/to/image.png
INFO Connecting to service kAIsuqiwKR
INFO SSH access password is yTXlkak
INFO SSH server reachable at 127.0.0.1:2200
INFO To connect, keep this terminal open and run in another terminal 'ssh 127.0.0.1 -p 2200' the password is yTXlkak
INFO Note: the connection might not be available instantly and first attempts will likely fail.
INFO Few attempts might be required before establishing a tunnel to the host.
INFO Starting EdgeVPN network
INFO Node ID: 12D3KooWSTRBCTNGZ61wzK5tgYvFi8rQVxkXJCDUYngBWGDSyoBK
INFO Node Addresses: [/ip4/192.168.1.233/tcp/36071 /ip4/127.0.0.1/tcp/36071 /ip6/::1/tcp/37661]
INFO Bootstrapping DHT
```
At this point, the bridge should start, and you should be able to see connection messages in the terminal. You can connect to the remote machine by using `ssh`, pointing it locally at `127.0.0.1:2200`. The username is not relevant; the password is printed by the CLI.
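For example, keeping the bridge running in the current terminal, open another terminal and run (the username below is arbitrary; use the password printed by the CLI):
```bash
ssh kairos@127.0.0.1 -p 2200
```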
The bridge operates in the foreground, so you have to shut it down by using CTRL-C.

@ -1,130 +0,0 @@
---
title: "Reset a node"
linkTitle: "Reset"
weight: 4
date: 2022-11-13
description: >
---
Kairos has a built-in recovery mechanism which can be leveraged to restore the system to a known point. At installation time, the recovery partition is created from the installation medium and can be used to restore the system from scratch, leaving configuration intact and cleaning any persistent data accumulated by usage on the host (e.g. Kubernetes images, persistent volumes, etc.).
The reset action will regenerate the bootloader configuration and the images in the state partition (labeled `COS_STATE`) by using the recovery image generated at install time, cleaning up the host.
The configuration files in `/oem` are kept intact; on the next reboot after a reset, the node will perform the same boot sequence as a first-boot installation.
# How to
{{% alert title="Note" %}}
By following the steps below you will entirely _reset_ a node and the persistent data will be lost. This includes _all_ user data stored on the machine.
{{% /alert %}}
The reset action can be triggered from the boot menu, remotely via the command line, from Kubernetes, or manually. In each scenario, the machine will reboot into reset mode, perform the cleanup, and reboot automatically afterwards.
## From the boot menu
It is possible to reset the state of a node by booting into the "Reset" mode from the boot menu, which will automatically reset the node:
![reset](https://user-images.githubusercontent.com/2420543/191941281-573e2bed-f66c-48db-8c46-e8034417539e.gif?classes=border,shadow)
## Remotely, via command line
On a booted Kairos system, logged in as root:
```bash
$ grub2-editenv /oem/grubenv set next_entry=statereset
$ reboot
```
## From Kubernetes
`system-upgrade-controller` can be used to apply a plan that schedules the reset on the nodes themselves via Kubernetes, similar to how upgrades are applied.
Consider the following example, which resets a machine by changing the config file used during installation:
```yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: custom-script
  namespace: system-upgrade
type: Opaque
stringData:
  config.yaml: |
    #cloud-config
    hostname: testcluster-{{ trunc 4 .MachineID }}
    k3s:
      enabled: true
    users:
      - name: kairos
        passwd: kairos
        ssh_authorized_keys:
          - github:mudler
  add-config-file.sh: |
    #!/bin/sh
    set -e
    if diff /host/run/system-upgrade/secrets/custom-script/config.yaml /host/oem/90_custom.yaml >/dev/null; then
        echo config present
        exit 0
    fi
    # we can't cp, that's a symlink!
    cat /host/run/system-upgrade/secrets/custom-script/config.yaml > /host/oem/90_custom.yaml
    grub2-editenv /host/oem/grubenv set next_entry=statereset
    sync
    mount --rbind /host/dev /dev
    mount --rbind /host/run /run
    nsenter -i -m -t 1 -- reboot
    exit 1
---
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
  name: reset-and-reconfig
  namespace: system-upgrade
spec:
  concurrency: 2
  # This is the version (tag) of the image.
  # The version refers to the Kairos version plus the k3s version.
  version: "v1.0.0-rc2-k3sv1.23.9-k3s1"
  nodeSelector:
    matchExpressions:
      - { key: kubernetes.io/hostname, operator: Exists }
  serviceAccountName: system-upgrade
  cordon: false
  upgrade:
    # Here goes the image, which is tied to the flavor being used.
    # Currently you can pick between opensuse and alpine.
    image: quay.io/kairos/kairos-opensuse
    command:
      - "/bin/bash"
      - "-c"
    args:
      - bash /host/run/system-upgrade/secrets/custom-script/add-config-file.sh
  secrets:
    - name: custom-script
      path: /host/run/system-upgrade/secrets/custom-script
```
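Assuming the manifest above has been saved as `reset-and-reconfig.yaml` (an illustrative name), it can be applied with:
```bash
kubectl apply -f reset-and-reconfig.yaml
```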
## Manual reset
It is possible to trigger the reset manually by logging into the recovery mode from the boot menu and running `kairos reset` from the console.
To optionally change the behavior of the reset process (such as also cleaning up configurations), run `elemental reset` instead, which supports options via arguments (see the example below):
| Option | Description |
| ------------------- | --------------------------- |
| --reset-persistent | Clear persistent partitions |
| --reset-oem | Clear OEM partitions |
| --system.uri string | Reset with the given image |
- **Note**: `--reset-oem` resets the system, pruning all configurations.
- `--system.uri` allows resetting with another image or a directory.
  `string` can be one of the following: `dir:/path/to/dir`, `oci:<image>`, `docker:<image>`, `channel:<luet package>` or `file:/path/to/file`.
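For instance, a sketch that resets the node and also clears the persistent partitions:
```bash
elemental reset --reset-persistent
```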
### Cleaning up state directories
An alternative, manual way of resetting your system is to delete the state paths. You can achieve this by deleting the contents of the `/usr/local` directory. It's recommended to do this while in recovery mode with all services turned off.
Please note that within `/usr/local` there are two important folders to keep in mind. The first is `/usr/local/.kairos`, which contains sentinel files that, when deleted, will trigger a complete deployment from scratch; your data will be preserved. The second folder is `/usr/local/.state`, which contains the bind-mounted data for the system. By deleting these two folders, you can achieve a pristine environment while leaving all other contents of `/usr/local` untouched.
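A minimal sketch of this manual cleanup, to be run from recovery mode with all services turned off (as recommended above):
```bash
# Removing the sentinel files triggers a complete deployment from scratch on next boot (data preserved)
rm -rf /usr/local/.kairos
# Removing the bind-mounted system state yields a pristine environment
rm -rf /usr/local/.state
```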

@ -1,44 +0,0 @@
---
title: "Troubleshooting"
linkTitle: "Troubleshooting"
weight: 6
date: 2022-11-13
description: >
---
Things can go wrong. This section tries to give guidelines to help identify potential issues.
First, check whether your issue has already been submitted [in the issue tracker](https://github.com/kairos-io/kairos/issues).
## Gathering logs
To gather useful logs and help developers spot issues right away, it is suggested to boot with `console=tty0 rd.debug` enabled, for example:
![debug](https://user-images.githubusercontent.com/2420543/191934926-7d4ac908-9a4c-4ef4-9891-75820e6b8fe6.gif)
To edit the boot commands, type 'e' in the boot menu. To boot with the changes press 'CTRL+X'.
In case logs can't be acquired, taking screenshots or videos when opening up issues is strongly recommended!
## Initramfs breakpoints
The initramfs can be instructed to drop a shell at various phases of the boot process (see the example after this list). For instance:
- `rd.break=pre-mount rd.shell`: Drops a shell before setting up mount points.
- `rd.break=pre-pivot rd.shell`: Drops a shell before switch-root
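For example, an illustrative combination of kernel parameters (appended by typing 'e' in the boot menu, as described above) that enables debug logging and drops a shell before mount points are set up:
```
console=tty0 rd.debug rd.break=pre-mount rd.shell
```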
## Disable immutability
It is possible to disable immutability by adding `rd.cos.debugrw` to the kernel boot commands.
## Root permission
By default, there is no root user set. A default user (`kairos`) is created and can use `sudo` without password authentication during LiveCD bootup.
## Get back the kubeconfig
On all nodes deployed with the P2P full-mesh feature of the cluster, it's possible to invoke `kairos get-kubeconfig` to recover the kubeconfig file.
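For example (a sketch; the output path is arbitrary):
```bash
kairos get-kubeconfig > kubeconfig
export KUBECONFIG=$PWD/kubeconfig
kubectl get nodes
```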
## See also
- [Dracut debug docs](https://fedoraproject.org/wiki/How_to_debug_Dracut_problems)

@ -1,6 +0,0 @@
---
title: "Upgrade"
linkTitle: "Upgrade"
weight: 3
description: >
---

@ -1,248 +0,0 @@
---
title: "Upgrading from Kubernetes"
linkTitle: "Upgrading from Kubernetes"
weight: 1
date: 2022-11-13
description: >
---
Kairos upgrades can be performed either manually or via Kubernetes if the cluster is composed of Kairos nodes. To trigger an upgrade, apply a `Plan` spec for the upgrade to the target cluster.
## Prerequisites
- The [system-upgrade-controller](https://github.com/rancher/system-upgrade-controller) needs to be deployed in the target cluster.
To install it, use kubectl:
```bash
kubectl apply -f https://github.com/rancher/system-upgrade-controller/releases/download/v0.9.1/system-upgrade-controller.yaml
```
### Upgrading from version X to version Y with Kubernetes
To trigger an upgrade, create a plan for `system-upgrade-controller` which refers to the image version that we want to upgrade to.
```bash
cat <<'EOF' | kubectl apply -f -
---
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
  name: os-upgrade
  namespace: system-upgrade
  labels:
    k3s-upgrade: server
spec:
  concurrency: 1
  # This is the version (tag) of the image.
  # The version refers to the Kairos version plus the k3s version.
  version: "v1.0.0-k3sv1.24.3-k3s1"
  nodeSelector:
    matchExpressions:
      - {key: kubernetes.io/hostname, operator: Exists}
  serviceAccountName: system-upgrade
  cordon: false
  drain:
    force: false
    disableEviction: true
  upgrade:
    # Here goes the image, which is tied to the flavor being used.
    # Currently you can pick between opensuse and alpine.
    image: quay.io/kairos/kairos-opensuse-leap
    command:
      - "/usr/sbin/suc-upgrade"
EOF
```
To check all the available versions, see the [images](https://quay.io/repository/kairos/kairos-opensuse-leap?tab=tags) available on the container registry, corresponding to the flavor/version selected.
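Assuming `skopeo` is installed, the available tags can also be listed from the command line:
```bash
skopeo list-tags docker://quay.io/kairos/kairos-opensuse-leap
```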
{{% alert title="Note" %}}
Several upgrade strategies can be used with `system-upgrade-controller` which are not illustrated in this example. For instance, you can specify the number of hosts running the upgrades, filter by labels, and more. [Refer to the project documentation](https://github.com/rancher/system-upgrade-controller) on how to create efficient strategies to roll out upgrades on the nodes. In the example above, the upgrades are applied to every host of the cluster, one by one in sequence.
{{% /alert %}}
Shortly afterwards, a pod should appear which carries out the upgrade and automatically reboots the node:
```
$ kubectl get pods -A
...
system-upgrade apply-os-upgrade-on-kairos-with-1a1a24bcf897bd275730bdd8548-h7ffd 0/1 Creating 0 40s
```
Done! We should have all the basics to get our first cluster rolling, but there is much more we can do.
## Verify images attestation during upgrades
Container images can be signed during the build phase of a CI/CD pipeline using [Cosign](https://github.com/sigstore/cosign); Kairos signs every artifact as part of the release process.
To ensure that the images used during upgrades match the expected signatures, [Kyverno](https://kyverno.io/) can be used to set up policies. This is done by checking if the signature is present in the OCI registry and if the image was signed using the specified key. The policy rule check fails if either of these conditions is not met.
To learn more about this specific Kyverno feature, you can refer to the [documentation](https://kyverno.io/docs/writing-policies/verify-images/). This allows for the verification of image authenticity directly at the node level prior to upgrading.
A Kyverno policy for `provider-kairos` images might look like the following:
```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: check-image
spec:
  validationFailureAction: Enforce
  background: false
  webhookTimeoutSeconds: 30
  failurePolicy: Fail
  rules:
    - name: check-image
      match:
        any:
          - resources:
              kinds:
                - Pod
      verifyImages:
        - imageReferences:
            - "quay.io/kairos/kairos-*"
          attestors:
            - entries:
                # See: https://kyverno.io/docs/writing-policies/verify-images/#keyless-signing-and-verification
                - keyless:
                    subject: "https://github.com/kairos-io/provider-kairos/.github/workflows/release.yaml@refs/tags/*"
                    issuer: "https://token.actions.githubusercontent.com"
                    rekor:
                      url: https://rekor.sigstore.dev
```
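To enable the policy, save it (e.g. as `check-image.yaml`, an illustrative name) and apply it to the cluster:
```bash
kubectl apply -f check-image.yaml
```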
To install Kyverno in a Kairos cluster, you can simply use the community [bundles](/docs/advanced/bundles). For example, you can use the following installation cloud config file:
```yaml
#cloud-config
hostname: kyverno-{{ trunc 4 .MachineID }}

# Specify the bundles to use
bundles:
  - targets:
      - run://quay.io/kairos/community-bundles:system-upgrade-controller_latest
      - run://quay.io/kairos/community-bundles:cert-manager_latest
      - run://quay.io/kairos/community-bundles:kyverno_latest

users:
  - name: kairos
    passwd: kairos

k3s:
  enabled: true
```
This configuration file prepares the system with the `cert-manager`, `system-upgrade-controller`, and `kyverno` bundles, and enables `k3s`.
## Customize the upgrade plan
It is possible to run additional commands on the node before the upgrade takes place; consider the following example:
```yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: custom-script
  namespace: system-upgrade
type: Opaque
stringData:
  upgrade.sh: |
    #!/bin/sh
    set -e
    # custom command, for example, that injects or modifies a configuration option
    sed -i 's/something/to/g' /host/oem/99_custom.yaml
    # run the upgrade script
    /usr/sbin/suc-upgrade
---
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
  name: custom-os-upgrade
  namespace: system-upgrade
spec:
  concurrency: 1
  # This is the version (tag) of the image.
  # The version refers to the Kairos version plus the k3s version.
  version: "v1.0.0-rc2-k3sv1.23.9-k3s1"
  nodeSelector:
    matchExpressions:
      - { key: kubernetes.io/hostname, operator: Exists }
  serviceAccountName: system-upgrade
  cordon: false
  drain:
    force: false
    disableEviction: true
  upgrade:
    # Here goes the image, which is tied to the flavor being used.
    # Currently you can pick between opensuse and alpine.
    image: quay.io/kairos/kairos-opensuse-leap
    command:
      - "/bin/bash"
      - "-c"
    args:
      - bash /host/run/system-upgrade/secrets/custom-script/upgrade.sh
  secrets:
    - name: custom-script
      path: /host/run/system-upgrade/secrets/custom-script
```
## Upgrade from c3os to Kairos
If you already have a `c3os` deployment, upgrading to Kairos requires changing every instance of `c3os` to `kairos` in the configuration file. This can be done either manually or via Kubernetes before rolling out the upgrade. Consider customizing the upgrade plan, for instance:
```yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: custom-script
  namespace: system-upgrade
type: Opaque
stringData:
  upgrade.sh: |
    #!/bin/sh
    set -e
    sed -i 's/c3os/kairos/g' /host/oem/99_custom.yaml
    /usr/sbin/suc-upgrade
---
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
  name: custom-os-upgrade
  namespace: system-upgrade
spec:
  concurrency: 1
  # This is the version (tag) of the image.
  # The version refers to the Kairos version plus the k3s version.
  version: "v1.0.0-rc2-k3sv1.23.9-k3s1"
  nodeSelector:
    matchExpressions:
      - { key: kubernetes.io/hostname, operator: Exists }
  serviceAccountName: system-upgrade
  cordon: false
  drain:
    force: false
    disableEviction: true
  upgrade:
    # Here goes the image, which is tied to the flavor being used.
    # Currently you can pick between opensuse and alpine.
    image: quay.io/kairos/kairos-opensuse-leap
    command:
      - "/bin/bash"
      - "-c"
    args:
      - bash /host/run/system-upgrade/secrets/custom-script/upgrade.sh
  secrets:
    - name: custom-script
      path: /host/run/system-upgrade/secrets/custom-script
```
## What's next?
- [Upgrade nodes manually](/docs/upgrade/manual)
- [Immutable architecture](/docs/architecture/immutable)
- [Create decentralized clusters](/docs/installation/p2p)

@ -1,63 +0,0 @@
---
title: "Manual"
linkTitle: "Manual"
weight: 2
date: 2022-11-13
description: >
---
Upgrades can be run manually from the terminal.
Kairos images are released on [quay.io](https://quay.io/organization/kairos).
## List available versions
To see all the available versions:
```bash
$ sudo kairos-agent upgrade list-releases
v0.57.0
v0.57.0-rc2
v0.57.0-rc1
v0.57.0-alpha2
v0.57.0-alpha1
```
## Upgrade
To upgrade to the latest available version, run the following from a shell of a cluster node:
```bash
sudo kairos-agent upgrade
```
To specify a version, run:
```bash
sudo kairos-agent upgrade <version>
```
Use `--force` to skip the version check when upgrading.
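For example (the version below is illustrative):
```bash
sudo kairos-agent upgrade v1.5.0 --force
```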
To specify a specific image, use the `--image` flag:
```bash
sudo kairos-agent upgrade --image <image>
```
To upgrade using a container image from a registry that requires authentication, the upgrade command provides the following flags:
| Flag | Description |
|-------------------------|------------------------------------------------------------------------------------------|
| `--auth-username` | User to authenticate with |
| `--auth-password` | Password to authenticate with |
| `--auth-server-address` | Server address to authenticate to, defaults to docker |
| `--auth-registry-token` | Registry token: a bearer token to be sent to the registry |
| `--auth-identity-token` | Identity token: used to authenticate the user and get an access token for the registry |
For instance:
```bash
sudo kairos-agent upgrade --image private/myimage:latest --auth-username MYNAME --auth-password MYPASSWORD
```

@ -1,143 +0,0 @@
---
title: "Welcome"
linkTitle: "Documentation"
weight: 20
menu:
main:
weight: 20
---
Welcome to the Kairos Documentation
Kairos is the open-source project that simplifies Edge, cloud, and bare metal OS lifecycle management. With a unified Cloud Native API, Kairos is community-driven, open source, and distro agnostic.
Our key features include:
- [Immutability](/docs/architecture/immutable): ensure your infrastructure stays consistent with atomic upgrades
- Security: protect your cluster from vulnerabilities and attacks with a read-only system
- [Container-based](/docs/architecture/container): manage your nodes as apps in containers for maximum flexibility and portability
- [P2P Mesh](/docs/architecture/network): self-coordinated, automated, no interaction Kubernetes deployments with P2P
- [Meta-Distribution](/docs/architecture/meta), distro agnostic
In this documentation, you will find everything you need to know about Kairos, from installation and configuration, to examples and advanced features.
To get started with Kairos, follow the instructions in the [quickstart](/docs/getting-started) guide. Then, check out the [examples](/docs/examples) to see how Kairos can be used in real-world scenarios.
For more information, please refer to this documentation. If you have any questions or feedback, feel free to [open an issue](https://github.com/kairos-io/kairos/issues/new) or [join our community forum](https://github.com/kairos-io/kairos/discussions).
{{% alert title="Note" %}}
You can also find some good resources on the [Media Section]({{< ref "docs/media" >}} "Media")
{{% /alert %}}
## What is Kairos?
Kairos is a cloud-native meta-Linux distribution that runs on Kubernetes and brings the power of the public cloud to your on-premises environment. With Kairos, you can build your own cloud with complete control and no vendor lock-in.
Here are a few reasons why you should try Kairos:
- Build your own cloud on-premises with complete control and no vendor lock-in
- Provision nodes with your own image or use Kairos releases for added flexibility
- Use Kairos for a wide range of use cases, from Kubernetes applications to appliances and more
- Simple and streamlined day-2 operations (e.g. node upgrades)
## What can I do with it?
With Kairos, you can easily spin up a Kubernetes cluster with the Linux distribution of your choice, and manage the entire cluster lifecycle with Kubernetes. Try Kairos today and experience the benefits of a unified, cloud-native approach to OS management.
With Kairos, you can:
- Spin up a Kubernetes cluster with any Linux distribution in just a few clicks
- Create an immutable infrastructure that stays consistent and free of drift with atomic upgrades
- Manage your cluster's entire lifecycle with Kubernetes, from building to upgrading
- Automatically create multi-node, single clusters that span across regions for maximum flexibility and scalability
Try Kairos today and experience the benefits of a unified, cloud-native approach to OS management. Say goodbye to the hassle of managing multiple systems, and hello to a more streamlined and efficient way of working.
## Features
- Easily create multi-node Kubernetes clusters with [K3s](https://k3s.io), and enjoy all of [K3s](https://k3s.io)'s features
- Upgrade manually via CLI or with Kubernetes, and use container registries for distribution upgrades
- Enjoy the benefits of an immutable distribution that stays configured to your needs
- Configure nodes with a single cloud-init config file for added simplicity
- Upgrade even in airgap environments with in-cluster container registries
- Extend your image at runtime or build time with Kubernetes Native APIs
- Coming soon: CAPI support with full device lifecycle management and more
- Create private virtual network segments with a full-mesh P2P hybrid VPN network that can stretch up to 10000 km
## More than a Linux distribution
Kairos is more than just an ISO, qcow2, or Netboot artifact. It allows you to turn any Linux distribution into a uniform and compliant distro with an immutable design. This means that any distro "converted" with Kairos will share the same common feature set and can be managed in the same way using Kubernetes Native API components. Kairos treats all OSes homogeneously and upgrades are distributed via container registries. Installation media and other assets required for booting bare metal or edge devices are built dynamically by Kairos' Kubernetes Native API components.
![livecd](https://user-images.githubusercontent.com/2420543/189219806-29b4deed-b4a1-4704-b558-7a60ae31caf2.gif)
## Goals
Kairos' ultimate goal is to bridge the gap between Cloud and Edge by creating a smooth user experience. There are several areas in the ecosystem that can be improved for edge deployments to bring them on par with the cloud.
The Kairos project encompasses all the tools and architectural pieces needed to fill those gaps. This spans from providing Kubernetes Native API components to assemble OSes, to delivering upgrades and controlling nodes after deployment.
Kairos is distro-agnostic and embraces openness: the user can provide their own underlying base image, and Kairos onboards it and takes it over to make it Cloud Native and immutable, plugging into an already rich ecosystem by leveraging containers as the distribution medium.
## Contribute
Kairos is an open source project, and any contribution is more than welcome! The project is big and spans various degrees of complexity and problem spaces. Feel free to join our chat, discuss in our forums, and join us in the office hours. Check out the [contribution guidelines](https://github.com/kairos-io/kairos/contribute) to see how to get started and our [governance](https://github.com/kairos-io/kairos/blob/master/GOVERNANCE.md).
We have an open roadmap, so you can always have a look at what's going on, and actively contribute to it.
Useful links:
- [Upcoming releases](https://github.com/kairos-io/kairos/issues?q=is%3Aissue+is%3Aopen+label%3Arelease)
## Community
You can find us at:
- [#Kairos-io at matrix.org](https://matrix.to/#/#kairos-io:matrix.org)
- [IRC #kairos in libera.chat](https://web.libera.chat/#kairos)
- [GitHub Discussions](https://github.com/kairos-io/kairos/discussions)
### Project Office Hours
Project Office Hours is an opportunity for attendees to meet the maintainers of the project, learn more about the project, ask questions, learn about new features and upcoming updates.
Office hours happen weekly on Wednesday, 5:30 to 6:00pm CEST. [Meeting link](https://meet.google.com/aus-mhta-azb)
In addition, we have monthly meetups, taking place during the office hours, to actively participate in roadmap planning and presentations:
#### Roadmap planning
We discuss agenda items and groom issues, planning where they fall in the release timeline.
Occurring: monthly on the first Wednesday, 5:30 to 6:30pm CEST.
#### Roadmap presentation
We discuss the items of the roadmaps and the features expected in the next releases.
Occurring: monthly on the second Wednesday, 5:30pm CEST.
## Alternatives
There are other projects similar to Kairos which are great and worth mentioning, and from which Kairos to some degree took inspiration. However, Kairos has different goals and takes completely unique approaches to the underlying system, upgrades, and node lifecycle management.
- [k3os](https://github.com/rancher/k3os)
- [Talos](https://github.com/siderolabs/talos)
- [FlatCar](https://flatcar-linux.org/)
- [CoreOS](https://getfedora.org/it/coreos?stream=stable)
## Development
### Building Kairos
Requirements: only Docker is needed.
Running `./earthly.sh +all --FLAVOR=opensuse` should produce a Docker image along with a working ISO.
## What's next?
See the [quickstart](/docs/getting-started) to install Kairos on a VM and create a Kubernetes cluster!

@ -1,4 +0,0 @@
---
type: "search-index"
url: "index.json"
---

@ -1,6 +0,0 @@
---
title: Search Results
layout: search
---

@ -1,10 +0,0 @@
{{ define "main"}}
<main id="main">
<div>
<h1 id="title">Not found</h1>
<p>Oops! This page doesn't exist. Try going back to our <a href="{{ "/" | relURL }}">home page</a>.</p>
<p>You can learn how to make a 404 page like this in <a href="https://gohugo.io/templates/404/">Custom 404 Pages</a>.</p>
</div>
</main>
{{ end }}

File diff suppressed because one or more lines are too long
@ -1 +0,0 @@
{{$.Page.Site.Params.softwareVersions.armFlavor}}

@ -1 +0,0 @@
{{$.Page.Site.Params.softwareVersions.flavor}}

@ -1,9 +0,0 @@
{{/* https://github.com/haideralipunjabi/hugo-shortcodes/tree/master/github */}}
{{ $dataJ := getJSON "https://api.github.com/repos/" (.Get "repo") "/contents/" (.Get "file") }}
{{ $con := base64Decode $dataJ.content }}
{{ highlight $con (.Get "lang") (.Get "options") }}
<small> <i class='fab fa-github'></i> <i>Complete source code: <a target=_blank href="{{ print "https://github.com/" ( .Get "repo" ) "/blob/master/" (.Get "file" ) }}">{{ print "https://github.com/" ( .Get "repo" ) "/blob/master/" (.Get "file" ) }}</a></i> </small>
<hr>

@ -1 +0,0 @@
{{$.Page.Site.Params.softwareVersions.k3s}}

@ -1 +0,0 @@
{{$.Page.Site.Params.softwareVersions.kairos}}

@ -1 +0,0 @@
{{$.Page.Site.Params.softwareVersions.registryURL}}

@ -1,10 +0,0 @@
# Hugo build configuration for Netlify
# (https://gohugo.io/hosting-and-deployment/hosting-on-netlify/#configure-hugo-version-in-netlify)
[[redirects]]
from = "/docs/contribution-guidelines/"
to = "https://github.com/kairos-io/kairos/blob/master/CONTRIBUTING.md"
status = 200
force = true # COMMENT: ensure that we always redirect
headers = {X-From = "Netlify"}
signed = "API_SIGNATURE_TOKEN"

docs/package-lock.json (generated, 1599 lines): file diff suppressed because it is too large

@ -1,12 +0,0 @@
{
"devDependencies": {
"autoprefixer": "^10.4.13",
"postcss": "^8.4.21",
"postcss-cli": "^10.1.0"
},
"scripts": {
"get:submodule": "git submodule update --init --depth 1",
"_prepare:docsy": "cd themes/docsy && npm install",
"prepare": "npm run get:submodule && npm run _prepare:docsy"
}
}

@ -1,26 +0,0 @@
#!/bin/bash
set -e
BASE_URL="${BASE_URL:-https://kairos.io}"
binpath="${ROOT_DIR}/bin"
publicpath="${ROOT_DIR}/public"
export PATH=$PATH:$binpath
if [ -z "$(type -P hugo)" ];
then
[[ ! -d "${binpath}" ]] && mkdir -p "${binpath}"
wget https://github.com/gohugoio/hugo/releases/download/v"${HUGO_VERSION}"/hugo_extended_"${HUGO_VERSION}"_"${HUGO_PLATFORM}".tar.gz -O "$binpath"/hugo.tar.gz
tar -xvf "$binpath"/hugo.tar.gz -C "${binpath}"
rm -rf "$binpath"/hugo.tar.gz
chmod +x "$binpath"/hugo
fi
rm -rf "${publicpath}" || true
[[ ! -d "${publicpath}" ]] && mkdir -p "${publicpath}"
npm install --save-dev autoprefixer postcss-cli postcss
HUGO_ENV="production" hugo --buildFuture --gc -b "${BASE_URL}" -d "${publicpath}"
cp -rf CNAME "${publicpath}"

Some files were not shown because too many files have changed in this diff.