Compare commits

...

83 Commits
v1 ... main

Author SHA1 Message Date
cx 9e82bfc2b5 Change node-x ip
Unittests / unittests (push) Successful in 1m23s Details
Unittests / deploy-dev (push) Successful in 1m15s Details
2024-04-15 10:30:05 +00:00
Adam Štrauch e0b5832e75
Fix deps and crashing when deleting nonexistent app
Unittests / unittests (push) Successful in 9s Details
Unittests / deploy-dev (push) Successful in 59s Details
2024-03-03 02:55:05 +01:00
Adam Štrauch 863d857283
Restart container when tech changes
Unittests / unittests (push) Successful in 10s Details
Unittests / deploy-dev (push) Successful in 47s Details
2024-02-01 22:02:22 +01:00
Adam Štrauch 1c5b8d8f50
Add better debug message to SetTechnology
Unittests / unittests (push) Successful in 9s Details
Unittests / deploy-dev (push) Successful in 46s Details
2024-02-01 21:57:34 +01:00
Adam Štrauch bc4b6c7bff
Possibility to set tech during update
Unittests / unittests (push) Successful in 9s Details
Unittests / deploy-dev (push) Successful in 53s Details
2024-02-01 20:57:13 +01:00
Adam Štrauch 45899f3b0c
Set owner of metadata
Unittests / unittests (push) Successful in 9s Details
Unittests / deploy-dev (push) Successful in 45s Details
2024-01-31 00:16:40 +01:00
Adam Štrauch 5513da35b3
Fix endpoint name for metadata
Unittests / unittests (push) Successful in 9s Details
Unittests / deploy-dev (push) Successful in 44s Details
2024-01-30 23:52:33 +01:00
Adam Štrauch 31ba1ce5a3
Save metadata endpoint
Unittests / unittests (push) Successful in 9s Details
Unittests / deploy-dev (push) Successful in 46s Details
2024-01-30 23:50:03 +01:00
Adam Štrauch a3d0ee92ce
Stats errors in Sentry
Unittests / unittests (push) Successful in 9s Details
Unittests / deploy-dev (push) Successful in 52s Details
2024-01-26 21:43:45 +01:00
Adam Štrauch 7a170e56d6
Gather more errors in Sentry
Unittests / unittests (push) Successful in 9s Details
Unittests / deploy-dev (push) Successful in 54s Details
2024-01-26 21:15:14 +01:00
Adam Štrauch 4e9398512e
Removing drone pipeline 2024-01-26 21:13:58 +01:00
Adam Štrauch a4e2bac0ff
Adding Sentry
Unittests / unittests (push) Successful in 16s Details
Unittests / deploy-dev (push) Successful in 47s Details
2024-01-26 18:46:19 +01:00
Adam Štrauch 02cdf5f815
Fix archive path
Unittests / unittests (push) Successful in 14s Details
Unittests / deploy-dev (push) Successful in 1m0s Details
2023-12-13 17:45:17 +01:00
Adam Štrauch 036587a77a
Volume preparation fix
Unittests / unittests (push) Successful in 14s Details
Unittests / deploy-dev (push) Successful in 1m9s Details
2023-12-13 17:40:28 +01:00
Adam Štrauch 6d62b200a4
Change suffix to .tar.zst
Unittests / unittests (push) Successful in 17s Details
Unittests / deploy-dev (push) Successful in 1m7s Details
2023-12-13 17:32:20 +01:00
Adam Štrauch 9564118f40
Add possibility to prepare /srv from an archive
Unittests / unittests (push) Successful in 18s Details
Unittests / deploy-dev (push) Successful in 1m13s Details
2023-12-08 19:01:07 +01:00
Adam Štrauch 0ad979b240
Pipeline migrated to gitea
Unittests / unittests (push) Successful in 14s Details
Unittests / deploy-dev (push) Successful in 1m5s Details
2023-11-26 01:43:02 +01:00
Adam Štrauch 9224675139
Fix sql data type
continuous-integration/drone/push Build is passing Details
continuous-integration/drone Build is passing Details
continuous-integration/drone/promote/production Build is failing Details
2023-10-15 01:20:28 +02:00
Adam Štrauch 5fc6f39529
Our own JSON map
continuous-integration/drone/push Build is failing Details
2023-10-15 00:57:41 +02:00
Adam Štrauch fe8aa885e7
Add env
continuous-integration/drone/push Build is failing Details
2023-10-14 01:13:02 +02:00
Adam Štrauch 00beda8137
Add env
continuous-integration/drone/push Build is failing Details
2023-10-14 01:10:14 +02:00
Adam Štrauch 37ca4ece39
Disable OOM killer for apps with less than 1.5 GB of RAM
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/promote/production Build is passing Details
2023-09-29 13:33:49 +02:00
Adam Štrauch 072a643c1d
Disable oomkiller just for smaller containers
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/promote/production Build is passing Details
2023-09-25 19:46:42 +02:00
Adam Štrauch ed5061fd58
Disable OOM killer
continuous-integration/drone/push Build is passing Details
2023-09-25 18:47:11 +02:00
Adam Štrauch 7f8ed1c018
Fix error when technology couldn't be determined
continuous-integration/drone/push Build is passing Details
2023-09-23 23:41:00 +02:00
Adam Štrauch dab9b52953
Add support for Debian 12 builds
continuous-integration/drone Build is passing Details
continuous-integration/drone/promote/production Build is passing Details
2023-09-16 11:40:34 +02:00
Adam Štrauch 0b5ded2abf
Fix debian 10 deployment
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/promote/production Build is passing Details
2023-07-01 00:02:01 +02:00
Adam Štrauch 494e31c4b1
Add saturn's keys
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/promote/production Build is failing Details
2023-06-30 23:49:06 +02:00
Adam Štrauch d25e0aba36
Fix production deploy
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/promote/production Build is failing Details
2023-06-30 23:39:12 +02:00
Adam Štrauch 6390fb19bb
Fix production deployment
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/promote/production Build is failing Details
2023-06-30 23:27:11 +02:00
Adam Štrauch e9fb6a6b46
SSH keys related logging
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/promote/production Build is failing Details
2023-06-30 00:46:15 +02:00
Adam Štrauch 4a4da2c080
Fix owner issue
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/promote/production Build is passing Details
2023-04-29 00:02:33 +02:00
Adam Štrauch 617f818df4
Fix crash when new app is created
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/promote/production Build is passing Details
2023-04-25 19:11:05 +02:00
Adam Štrauch c6f9620945
Fix link
continuous-integration/drone/push Build is passing Details
2023-04-24 22:42:31 +02:00
Adam Štrauch af1e69e594
Fix symlink
continuous-integration/drone/push Build is passing Details
2023-04-24 22:35:58 +02:00
Adam Štrauch 6fd3278ee8
Debug lines
continuous-integration/drone/push Build is passing Details
2023-04-24 22:29:59 +02:00
Adam Štrauch 1ccf4b8301
Missing get_active_tech handler
continuous-integration/drone/push Build is passing Details
2023-04-24 22:24:37 +02:00
Adam Štrauch 37a5297c88
Get active tech feature
continuous-integration/drone/push Build is passing Details
2023-04-24 14:19:58 +02:00
Adam Štrauch 5b0a46951b
Skip host and deploy keys in another level
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/promote/production Build is passing Details
2023-04-21 12:01:34 +02:00
Adam Štrauch 16bb4e71d5
Return empty strings for host and ssh keys if container is down
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/promote/production Build is passing Details
2023-04-21 11:13:02 +02:00
Adam Štrauch 8c8ecc6379
Fix obtaining host key
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/promote/production Build is passing Details
2023-04-19 23:40:55 +02:00
Adam Štrauch d465421fa0
Prefix to deploy keys
continuous-integration/drone/push Build is passing Details
2023-04-14 19:22:10 +02:00
Adam Štrauch 4c2bcdd3e7
Fix SSH deploy keyType
continuous-integration/drone/push Build was killed Details
2023-04-14 19:19:45 +02:00
Adam Štrauch 45996c4d1b
Fix host keys split
continuous-integration/drone/push Build is passing Details
2023-04-14 19:12:21 +02:00
Adam Štrauch 3ae383d1cd
Error when host key is not found
continuous-integration/drone/push Build is passing Details
2023-04-14 19:05:11 +02:00
Adam Štrauch 7f07a50bfd
Host key refactoring
continuous-integration/drone/push Build is passing Details
2023-04-13 22:54:14 +02:00
Adam Štrauch 036aa4fc28
SSH keys fixes
continuous-integration/drone/push Build is passing Details
2023-04-13 22:49:06 +02:00
Adam Štrauch bcc641307a
Fix key generator
continuous-integration/drone/push Build is passing Details
2023-04-13 22:37:39 +02:00
Adam Štrauch da5ad13ae3
Fix commands
continuous-integration/drone/push Build is passing Details
2023-04-13 22:30:11 +02:00
Adam Štrauch d7b878bbbc
SSH host keyscan typo fix
continuous-integration/drone/push Build is passing Details
2023-04-13 20:17:29 +02:00
Adam Štrauch 9a37518431
Get host ssh key handler
continuous-integration/drone/push Build is passing Details
2023-04-13 20:07:31 +02:00
Adam Štrauch 8051310677
Get SSH host keys message handler
continuous-integration/drone/push Build is passing Details
continuous-integration/drone Build is passing Details
2023-04-11 22:56:15 +02:00
Adam Štrauch df5a390680
Add SSH deploy keys generator
continuous-integration/drone/push Build is failing Details
2023-04-11 22:49:50 +02:00
Adam Štrauch f3f50b0ace
Fix race condition in create endpoint
continuous-integration/drone/push Build is passing Details
continuous-integration/drone Build is passing Details
2023-02-05 00:42:49 +01:00
Adam Štrauch 779d9ba95a
Set password, ssh keys and tech in single step during create
continuous-integration/drone/push Build is passing Details
2023-02-04 10:33:47 +01:00
Adam Štrauch d469c813a1
AppPort support
continuous-integration/drone/push Build is passing Details
2023-01-28 11:46:59 +01:00
Adam Štrauch b7dfa22f94
Fix tests
continuous-integration/drone/push Build is passing Details
continuous-integration/drone Build is passing Details
2022-10-04 20:00:29 +02:00
Adam Štrauch 938f6f89b6
Add oomkilled status to the app in the database
continuous-integration/drone/push Build is failing Details
2022-10-04 19:54:26 +02:00
Adam Štrauch b15e85474e
tarBin
continuous-integration/drone/push Build is passing Details
continuous-integration/drone Build is passing Details
2022-07-23 00:36:57 +02:00
Adam Štrauch 8adbf84362
Switch to /bin/tar
continuous-integration/drone/push Build is failing Details
2022-07-23 00:33:34 +02:00
Adam Štrauch 5b63a0d9aa
CI: zstd fix
continuous-integration/drone/push Build is failing Details
2022-07-23 00:28:29 +02:00
Adam Štrauch 797bc6d654
Update unittest image
continuous-integration/drone/push Build is failing Details
2022-07-23 00:26:30 +02:00
Adam Štrauch 0725b352c6
Sleep before minio boots
continuous-integration/drone/push Build is failing Details
2022-07-23 00:22:17 +02:00
Adam Štrauch 96071cdfcb
Use tar to restore instead of archiver
continuous-integration/drone/push Build is failing Details
2022-07-22 19:49:37 +02:00
Adam Štrauch 37d884e665
Use tar instead of archive
continuous-integration/drone/push Build is failing Details
2022-07-22 19:47:59 +02:00
Adam Štrauch 0ea302d688
Update of archive lib
continuous-integration/drone/push Build is failing Details
continuous-integration/drone Build is passing Details
2022-07-22 18:44:17 +02:00
Adam Štrauch 5caa570055
Potential fix of set tech in the container + logging
continuous-integration/drone/push Build is passing Details
continuous-integration/drone Build is passing Details
2022-06-15 20:16:20 +02:00
Adam Štrauch 61adbd7a28
Stop destroys the container
continuous-integration/drone/push Build is passing Details
2022-05-27 19:12:19 +02:00
Adam Štrauch e8d952cac0
Fast get for get without status
continuous-integration/drone/push Build is passing Details
continuous-integration/drone Build is passing Details
2022-05-27 18:24:39 +02:00
Adam Štrauch 69e147ccf9
Different pipeline for Debian 11
continuous-integration/drone/push Build is passing Details
continuous-integration/drone Build is passing Details
2022-05-21 22:42:17 +02:00
Adam Štrauch 1017288a4c
New production nodes
continuous-integration/drone/push Build is passing Details
continuous-integration/drone Build is passing Details
2022-05-21 21:29:47 +02:00
Adam Štrauch 0cc986d22f
New test node
continuous-integration/drone/push Build is passing Details
2022-05-20 23:53:54 +02:00
Adam Štrauch 24fffe736e
Fix schema creation for performance index
continuous-integration/drone/push Build is passing Details
continuous-integration/drone Build is passing Details
2022-05-07 02:21:21 +02:00
Adam Štrauch 473d561b84
Make it float
continuous-integration/drone/push Build is passing Details
continuous-integration/drone Build is passing Details
2022-05-06 19:08:23 +02:00
Adam Štrauch 52d8f7b250
stats elapsed time in seconds
continuous-integration/drone/push Build is passing Details
2022-05-06 19:05:04 +02:00
Adam Štrauch 02ebfb8eea
Metric for stats elapsed time
continuous-integration/drone/push Build is passing Details
2022-05-06 18:59:58 +02:00
Adam Štrauch 68121fda15
Making metrics faster
continuous-integration/drone/push Build is passing Details
continuous-integration/drone Build is passing Details
2022-05-06 18:40:22 +02:00
Adam Štrauch 35a0c5acd1
Don't care about non existing containers when rebuilding
continuous-integration/drone/push Build is passing Details
2022-04-21 22:31:58 +02:00
Adam Štrauch 3f1a0d9a8a
Getting arch back
continuous-integration/drone/push Build is passing Details
continuous-integration/drone Build is passing Details
2022-04-21 22:21:56 +02:00
Adam Štrauch 29866d0e75
Fix cases where /opt/techs doesn't exist
continuous-integration/drone/push Build is passing Details
continuous-integration/drone Build is passing Details
2022-04-20 14:06:16 +02:00
Adam Štrauch fea1a46a11
Disable platform specification
continuous-integration/drone/push Build is passing Details
2022-02-24 15:13:06 +01:00
Adam Štrauch 7334337c93
New bot pattern shiziyama
continuous-integration/drone/push Build is passing Details
2022-02-10 12:25:25 +01:00
Adam Štrauch 7add860d83
Quick fix of update handler
continuous-integration/drone/push Build is passing Details
continuous-integration/drone Build is passing Details
2022-02-09 16:54:30 +01:00
28 changed files with 3503 additions and 935 deletions

View File

@ -1,130 +0,0 @@
kind: pipeline
type: docker
name: testing
steps:
- name: unittests
image: golang:1.17-buster
environment:
SNAPSHOTS_S3_ENDPOINT: minio:9000
TEST_S3_ENDPOINT: minio:9000
volumes:
- name: dockersock
path: /var/run
commands:
- go mod tidy
- make test
services:
- name: minio
image: minio/minio:latest
environment:
MINIO_ROOT_USER: test
MINIO_ROOT_PASSWORD: testtest
command:
- server
- /data
- --console-address
- :9001
- name: docker
image: docker:dind
privileged: true
volumes:
- name: dockersock
path: /var/run
volumes:
- name: dockersock
temp: {}
---
kind: pipeline
type: docker
name: Dev deploy
steps:
- name: build
# image: golang:1.17-buster # this one is used in production
image: golang:1.17-bullseye # this one is used in dev
commands:
- go mod tidy
- make build
- name: deploy
image: debian:buster
environment:
#NODE: node-x.rosti.cz
NODES: 192.168.1.236
SSH_KEY:
from_secret: SSH_KEY
commands:
- apt update && apt install -y ssh
- |
for NODE in $NODES; do
echo "\033[0;32mDeploying $NODE\033[0m"
mkdir -p ~/.ssh && echo "$SSH_KEY" > ~/.ssh/id_ed25519 && chmod 600 ~/.ssh/id_ed25519
echo "\033[1;33m.. scanning SSH keys\033[0m"
ssh-keyscan $NODE > ~/.ssh/known_hosts
echo "\033[1;33m.. copying the binary\033[0m"
scp node-api root@$NODE:/usr/local/bin/node-api_
echo "\033[1;33m.. replacing the binary\033[0m"
ssh root@$NODE mv /usr/local/bin/node-api_ /usr/local/bin/node-api
echo "\033[1;33m.. restarting service\033[0m"
ssh root@$NODE systemctl restart node-api
done
trigger:
branch:
- main
event:
- push
- custom
depends_on:
- testing
---
kind: pipeline
type: docker
name: Production deploy
steps:
- name: build
image: golang:1.17-buster # this one is used in production
#image: golang:1.17-bullseye # this one is used in dev
commands:
- go mod tidy
- make build
- name: deploy
image: debian:buster
environment:
NODES: node-18.rosti.cz
SSH_KEY:
from_secret: SSH_KEY
commands:
- apt update && apt install -y ssh
- |
for NODE in $NODES; do
echo "\033[0;32mDeploying $NODE\033[0m"
mkdir -p ~/.ssh && echo "$SSH_KEY" > ~/.ssh/id_ed25519 && chmod 600 ~/.ssh/id_ed25519
echo "\033[1;33m.. scanning SSH keys\033[0m"
ssh-keyscan $NODE > ~/.ssh/known_hosts
echo "\033[1;33m.. copying the binary\033[0m"
scp node-api root@$NODE:/usr/local/bin/node-api_
echo "\033[1;33m.. replacing the binary\033[0m"
ssh root@$NODE mv /usr/local/bin/node-api_ /usr/local/bin/node-api
echo "\033[1;33m.. restarting service\033[0m"
ssh root@$NODE systemctl restart node-api
done
depends_on:
- testing
trigger:
event:
- promote
target:
- production

View File

@ -0,0 +1,33 @@
name: Release
on:
release:
types: [published]
# push:
# branches: [main]
workflow_dispatch: {}
jobs:
deploy-production:
runs-on: [amd64, prod]
env:
NODES: node-22.rosti.cz node-23.rosti.cz node-24.rosti.cz node-25.rosti.cz
steps:
- uses: actions/checkout@v4
- name: deploy
run: |
echo "Building for Debian 12 .."
docker run --rm --privileged -ti -v `pwd`:/srv golang:1.21-bookworm /bin/sh -c "cd /srv && go build"
for NODE in $NODES; do
echo "\033[0;32mDeploying $NODE\033[0m"
echo "\033[1;33m.. scanning SSH keys\033[0m"
ssh -o "StrictHostKeyChecking=no" root@$NODE echo "Setting up key"
echo "\033[1;33m.. copying the binary\033[0m"
scp node-api root@$NODE:/usr/local/bin/node-api_
echo "\033[1;33m.. replacing the binary\033[0m"
ssh root@$NODE mv /usr/local/bin/node-api_ /usr/local/bin/node-api
echo "\033[1;33m.. restarting service\033[0m"
ssh root@$NODE systemctl restart node-api
done

View File

@ -0,0 +1,62 @@
name: Unittests
on:
push:
branches: [main]
workflow_dispatch: {}
jobs:
unittests:
runs-on: [amd64, moon]
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v3
with:
go-version: 1.21
- name: start minio
run: |
docker run -d --rm --name nodeapi_minio -p 9000:9000 -p 9001:9001 -e MINIO_ROOT_USER=test -e MINIO_ROOT_PASSWORD=testtest minio/minio:latest server /data --console-address :9001
- name: deps
run: apt update && apt install -y tar zstd
- name: Test
run: |
make test
- name: stop minio
if: always()
run: |
docker stop nodeapi_minio
# TODO: probably not supported by Gitea workflows yet
# services:
# minio:
# image: minio/minio:latest
# env:
# MINIO_ROOT_USER: test
# MINIO_ROOT_PASSWORD: testtest
# ports:
# - 9001:9001
# options: server /data --console-address :9001
deploy-dev:
runs-on: [amd64, moon]
env:
NODES: "192.168.1.33"
steps:
- uses: actions/checkout@v4
- name: deploy
run: |
# echo LS1
# ls
docker run --rm --privileged -ti -v `pwd`:/srv golang:1.20-buster /bin/sh -c "cd /srv && go build"
# docker run --rm --privileged -ti -v `pwd`:/srv golang:1.20-buster /bin/sh -c "cd /srv && echo LS2 && ls"
for NODE in $NODES; do
echo "\033[0;32mDeploying $NODE\033[0m"
echo "\033[1;33m.. scanning SSH keys\033[0m"
ssh -o "StrictHostKeyChecking=no" root@$NODE echo "Setting up key"
echo "\033[1;33m.. copying the binary\033[0m"
scp node-api root@$NODE:/usr/local/bin/node-api_
echo "\033[1;33m.. replacing the binary\033[0m"
ssh root@$NODE mv /usr/local/bin/node-api_ /usr/local/bin/node-api
echo "\033[1;33m.. restarting service\033[0m"
ssh root@$NODE systemctl restart node-api
done

View File

@ -1,8 +1,8 @@
.PHONY: test
test:
go test -v apps/*.go
go test -v apps/drivers/*.go
go test -v detector/*.go
go test -race -v apps/*.go
go test -race -v apps/drivers/*.go
go test -race -v detector/*.go
# env DOCKER_SOCKET="unix:///var/run/docker.sock" go test -v containers/*.go # Doesn't work in Drone right now
build:

View File

@ -67,7 +67,7 @@ func (a *AppsProcessor) New(name string, SSHPort int, HTTPPort int, image string
}
// Update changes value about app in the database
func (a *AppsProcessor) Update(name string, SSHPort int, HTTPPort int, image string, CPU int, memory int) (*App, error) {
func (a *AppsProcessor) Update(name string, SSHPort int, HTTPPort int, image string, CPU int, memory int, env map[string]string) (*App, error) {
var app App
err := a.DB.Where("name = ?", name).First(&app).Error
@ -98,6 +98,10 @@ func (a *AppsProcessor) Update(name string, SSHPort int, HTTPPort int, image str
app.HTTPPort = HTTPPort
}
if len(env) != 0 {
app.SetEnv(env)
}
validationErrors := app.Validate()
if len(validationErrors) != 0 {
return &app, ValidationError{
@ -111,9 +115,10 @@ func (a *AppsProcessor) Update(name string, SSHPort int, HTTPPort int, image str
}
// UpdateResources updates various metrics saved in the database
func (a *AppsProcessor) UpdateResources(name string, state string, CPUUsage float64, memory int, diskUsageBytes int, diskUsageInodes int, flags detector.Flags) error {
func (a *AppsProcessor) UpdateResources(name string, state string, OOMKilled bool, CPUUsage float64, memory int, diskUsageBytes int, diskUsageInodes int, flags detector.Flags) error {
err := a.DB.Model(&App{}).Where("name = ?", name).Updates(App{
State: state,
OOMKilled: OOMKilled,
CPUUsage: CPUUsage,
MemoryUsage: memory,
DiskUsageBytes: diskUsageBytes,
@ -124,9 +129,10 @@ func (a *AppsProcessor) UpdateResources(name string, state string, CPUUsage floa
}
// UpdateState sets container's state
func (a *AppsProcessor) UpdateState(name string, state string) error {
func (a *AppsProcessor) UpdateState(name string, state string, OOMKilled bool) error {
err := a.DB.Model(&App{}).Where("name = ?", name).Updates(App{
State: state,
State: state,
OOMKilled: OOMKilled,
}).Error
return err
}

View File

@ -70,7 +70,10 @@ func TestAppsProcessorUpdate(t *testing.T) {
err := processor.New("updateapp_1224", 1002, 1003, "testimage", 2, 256)
assert.Nil(t, err)
_, err = processor.Update("updateapp_1224", 1052, 1053, "testimage2", 4, 512)
env := make(map[string]string)
env["TEST"] = "test"
_, err = processor.Update("updateapp_1224", 1052, 1053, "testimage2", 4, 512, env)
assert.Nil(t, err)
app, err := processor.Get("updateapp_1224")
@ -92,13 +95,14 @@ func TestAppsProcessorUpdateResources(t *testing.T) {
err := processor.New("updateresources_1224", 1002, 1003, "testimage", 2, 256)
assert.Nil(t, err)
err = processor.UpdateResources("updateresources_1224", "running", 1000, 256, 100, 200, detector.Flags{"test"})
err = processor.UpdateResources("updateresources_1224", "running", true, 1000, 256, 100, 200, detector.Flags{"test"})
assert.Nil(t, err)
app, err := processor.Get("updateresources_1224")
assert.Nil(t, err)
assert.Equal(t, "running", app.State)
assert.Equal(t, true, app.OOMKilled)
assert.Equal(t, float64(1000), app.CPUUsage)
assert.Equal(t, 256, app.MemoryUsage)
assert.Equal(t, 100, app.DiskUsageBytes)
@ -115,7 +119,7 @@ func TestAppsProcessorUpdateState(t *testing.T) {
err := processor.New("update_1224", 1002, 1003, "testimage", 2, 256)
assert.Nil(t, err)
err = processor.UpdateState("update_1224", "no-container")
err = processor.UpdateState("update_1224", "no-container", false)
assert.Nil(t, err)
app, err := processor.Get("update_1224")

View File

@ -6,11 +6,11 @@ import (
"fmt"
"log"
"os"
"os/exec"
"path"
"strings"
"time"
"github.com/mholt/archiver/v3"
"github.com/rosti-cz/node-api/apps/drivers"
uuid "github.com/satori/go.uuid"
)
@ -20,6 +20,7 @@ const dateFormat = "20060102_150405"
const keySplitCharacter = ":"
const metadataPrefix = "_metadata"
const metadataKeyTemplate = metadataPrefix + "/%s"
const tarBin = "/bin/tar"
// Snapshot contains metadata about a single snapshot
type Snapshot struct {
@ -123,18 +124,6 @@ func (s *SnapshotProcessor) metadataForSnapshotKey(snapshotKey string) (Snapshot
// Returns key under which is the snapshot stored and/or error if there is any.
// Metadata about the snapshot are stored in extra object under metadata/ prefix.
func (s *SnapshotProcessor) CreateSnapshot(appName string, labels []string) (string, error) {
// Create an archive
archive := archiver.TarZstd{
Tar: &archiver.Tar{
MkdirAll: true,
ContinueOnError: true,
OverwriteExisting: false,
ImplicitTopLevelFolder: false,
},
// CompressionLevel: 6,
// SelectiveCompression: true,
}
snapshot := Snapshot{
UUID: uuid.NewV4().String(),
AppName: appName,
@ -149,7 +138,7 @@ func (s *SnapshotProcessor) CreateSnapshot(appName string, labels []string) (str
return snapshot.KeyName(s.IndexLabel), fmt.Errorf("change working directory error: %v", err)
}
err = archive.Archive([]string{"./"}, tmpSnapshotArchivePath)
err = exec.Command(tarBin, "-acf", tmpSnapshotArchivePath, "./").Run()
if err != nil {
return snapshot.KeyName(s.IndexLabel), fmt.Errorf("compression error: %v", err)
}
@ -203,18 +192,7 @@ func (s *SnapshotProcessor) RestoreSnapshot(key string, newAppName string) error
return fmt.Errorf("getting the archive from S3 error: %v", err)
}
archive := archiver.TarZstd{
Tar: &archiver.Tar{
MkdirAll: true,
ContinueOnError: true,
OverwriteExisting: false,
ImplicitTopLevelFolder: false,
},
// CompressionLevel: 6,
// SelectiveCompression: true,
}
err = archive.Unarchive(tmpSnapshotArchivePath, "./")
err = exec.Command(tarBin, "-axf", tmpSnapshotArchivePath).Run()
if err != nil {
return fmt.Errorf("unarchiving error: %v", err)
}

View File

@ -6,6 +6,7 @@ import (
"time"
"github.com/rosti-cz/node-api/detector"
"github.com/rosti-cz/node-api/jsonmap"
)
// ValidationError is error that holds multiple validation error messages
@ -31,6 +32,7 @@ type Label struct {
// AppState contains info about running application, it's not saved in the database
type AppState struct {
State string `json:"state"`
OOMKilled bool `json:"oom_killed"`
CPUUsage float64 `json:"cpu_usage"` // in percents
MemoryUsage int `json:"memory_usage"` // in MB
DiskUsageBytes int `json:"disk_usage_bytes"`
@ -66,6 +68,9 @@ type App struct {
// Unique: true
// Example: 10002
HTTPPort int `json:"http_port"`
// Port of the application inside the container. Default is 0 which means default by the image.
// But it has to be between 1024 and 65536 with exception of 8000.
AppPort int `json:"app_port"`
// Runtime image
Image string `json:"image"`
// Number of CPUs ticks assigned, 100 means one CPU, 200 are two
@ -78,6 +83,8 @@ type App struct {
// Current status of the application (underlying container)
State string `json:"state"`
// True if the current container has been killed by OOM killer
OOMKilled bool `json:"oom_killed"`
// CPU usage in percents
CPUUsage float64 `json:"cpu_usage"` // in percents
// Memory usage in bytes
@ -95,6 +102,34 @@ type App struct {
// This is not stored in the database but used in create request when the app is supposed to be created from an existing snapshot
Snapshot string `json:"snapshot" gorm:"-"`
EnvRaw jsonmap.JSONMap `json:"env" sql:"type:json"`
// Fields to setup during creating of the app, this is not stored in the database
Setup struct {
SSHKeys string `json:"ssh_keys"`
Tech string `json:"tech"`
TechVersion string `json:"tech_version"`
Password string `json:"password"`
ArchiveURL string `json:"archive_url"` // Archive with content of /srv
} `json:"setup,omitempty" gorm:"-"`
}
// Return env as map[string]string
func (a *App) GetEnv() map[string]string {
env := make(map[string]string)
for key, value := range a.EnvRaw {
env[key] = value.(string)
}
return env
}
// SetEnv sets env from map[string]string
func (a *App) SetEnv(env map[string]string) {
a.EnvRaw = make(jsonmap.JSONMap)
for key, value := range env {
a.EnvRaw[key] = value
}
}
// Validate do basic checks of the struct values
@ -114,6 +149,10 @@ func (a *App) Validate() []string {
errors = append(errors, "HTTP port has to be between 1 and 65536")
}
if a.AppPort != 0 && ((a.AppPort < 1024 && a.AppPort > 65536) || a.AppPort == 8000) {
errors = append(errors, "App port has to be between 1024 and 65536 with exception of 8000")
}
if a.Image == "" {
errors = append(errors, "image cannot be empty")
}

View File

@ -3,7 +3,7 @@ package main
import (
"strings"
"github.com/labstack/echo"
"github.com/labstack/echo/v4"
)
var skipPaths []string = []string{"/metrics"}

View File

@ -23,6 +23,8 @@ type Config struct {
SnapshotsS3SSL bool `envconfig:"SNAPSHOTS_S3_SSL" required:"false" default:"true"`
SnapshotsS3Bucket string `envconfig:"SNAPSHOTS_S3_BUCKET" required:"false" default:"snapshots"`
SnapshotsIndexLabel string `envconfig:"SNAPSHOTS_INDEX_LABEL" required:"false" default:"owner_id"` // Label that will be part of the object name and it will be used as index to quick listing
SentryDSN string `envconfig:"SENTRY_DSN" required:"false"`
SentryENV string `envconfig:"SENTRY_ENV" default:"development"`
}
// GetConfig return configuration created based on environment variables

View File

@ -79,8 +79,10 @@ func (d *Driver) nameToID(name string) (string, error) {
}
// Status return current status of container with given name
func (d *Driver) Status(name string) (string, error) {
status := "unknown"
func (d *Driver) Status(name string) (ContainerStatus, error) {
status := ContainerStatus{
Status: "unknown",
}
cli, err := d.getClient()
if err != nil {
@ -90,7 +92,8 @@ func (d *Driver) Status(name string) (string, error) {
containerID, err := d.nameToID(name)
if err != nil && err.Error() == "no container found" {
return "no-container", err
status.Status = "no-container"
return status, err
}
if err != nil {
return status, err
@ -101,7 +104,10 @@ func (d *Driver) Status(name string) (string, error) {
return status, err
}
return info.State.Status, nil
status.Status = info.State.Status
status.OOMKilled = info.State.OOMKilled
return status, nil
}
@ -196,13 +202,13 @@ func (d *Driver) Remove(name string) error {
return err
}
timeout := time.Duration(dockerTimeout * time.Second)
err = cli.ContainerStop(context.TODO(), containerID, &timeout)
timeout := dockerTimeout
err = cli.ContainerStop(context.TODO(), containerID, container.StopOptions{Timeout: &timeout})
if err != nil {
return err
}
err = cli.ContainerRemove(context.TODO(), containerID, types.ContainerRemoveOptions{})
err = cli.ContainerRemove(context.TODO(), containerID, container.RemoveOptions{})
return err
}
@ -240,8 +246,8 @@ func (d *Driver) Stop(name string) error {
return err
}
timeout := time.Duration(dockerTimeout * time.Second)
err = cli.ContainerStop(context.TODO(), containerID, &timeout)
timeout := dockerTimeout
err = cli.ContainerStop(context.TODO(), containerID, container.StopOptions{Timeout: &timeout})
return err
}
@ -301,7 +307,7 @@ func (d *Driver) pullImage(image string) error {
// cmd - string slice of command and its arguments
// volumePath - host's directory to mount into the container
// returns container ID
func (d *Driver) Create(name string, image string, volumePath string, HTTPPort int, SSHPort int, CPU int, memory int, cmd []string) (string, error) {
func (d *Driver) Create(name string, image string, volumePath string, HTTPPort int, SSHPort int, CPU int, memory int, cmd []string, env map[string]string) (string, error) {
log.Println("Creating container " + name)
cli, err := d.getClient()
if err != nil {
@ -332,11 +338,21 @@ func (d *Driver) Create(name string, image string, volumePath string, HTTPPort i
}
}
OOMKillDisable := false
if memory < 1500 {
OOMKillDisable = true
}
envList := []string{}
for key, value := range env {
envList = append(envList, key+"="+value)
}
createdContainer, err := cli.ContainerCreate(
context.Background(),
&container.Config{
Hostname: name,
Env: []string{},
Env: envList,
Image: image,
Cmd: cmd,
ExposedPorts: nat.PortSet{
@ -350,6 +366,7 @@ func (d *Driver) Create(name string, image string, volumePath string, HTTPPort i
CPUQuota: int64(CPU) * 1000,
Memory: int64(memory*110/100) * 1024 * 1024, // Allow 10 % more memory so we have space for MemoryReservation
MemoryReservation: int64(memory) * 1024 * 1024, // This should provide softer way how to limit the memory of our containers
OomKillDisable: &OOMKillDisable,
},
PortBindings: portBindings,
AutoRemove: false,

View File

@ -25,7 +25,10 @@ func TestGetProcesses(t *testing.T) {
driver.Remove("test")
_, err := driver.Create("test", "docker.io/library/busybox", "/tmp", 8990, 8922, 1, 128, []string{"sleep", "3600"})
env := make(map[string]string)
env["TEST"] = "test"
_, err := driver.Create("test", "docker.io/library/busybox", "/tmp", 8990, 8922, 1, 128, []string{"sleep", "3600"}, env)
assert.Nil(t, err)
err = driver.Start("test")

View File

@ -2,18 +2,18 @@ package containers
// ContainerStats contains fields returned in docker stats function stream
type ContainerStats struct {
Pids struct {
Current int `json:"current"`
} `json:"pids_stats"`
CPU struct {
Usage struct {
Total int64 `json:"total_usage"`
} `json:"cpu_usage"`
} `json:"cpu_stats"`
Memory struct {
Usage int `json:"usage"`
MaxUsage int `json:"max_usage"`
Limit int `json:"limit"`
} `json:"memory_stats"`
ID string `json:"id"`
Pids struct {
Current int `json:"current"`
} `json:"pids_stats"`
CPU struct {
Usage struct {
Total int64 `json:"total_usage"`
} `json:"cpu_usage"`
} `json:"cpu_stats"`
Memory struct {
Usage int `json:"usage"`
MaxUsage int `json:"max_usage"`
Limit int `json:"limit"`
} `json:"memory_stats"`
ID string `json:"id"`
}

View File

@ -2,10 +2,12 @@ package containers
import (
"bytes"
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
@ -135,3 +137,53 @@ func CPUMemoryStats(applist *[]apps.App, sample int) (*[]apps.App, error) {
return &updatedApps, nil
}
func getTechAndVersion(symlink string) (*TechInfo, error) {
link, err := os.Readlink(symlink)
if os.IsNotExist(err) {
return &TechInfo{
Tech: "default",
Version: "",
}, nil
}
if err != nil {
return nil, fmt.Errorf("error reading symlink: %w", err)
}
absLink, err := filepath.Abs(link)
if err != nil {
return nil, fmt.Errorf("error getting absolute path: %w", err)
}
absLink = strings.TrimSuffix(absLink, "/bin")
dirName := filepath.Base(absLink)
parts := strings.Split(dirName, "-")
fmt.Println("DEBUG", symlink)
fmt.Println("DEBUG", absLink)
fmt.Println("DEBUG", dirName)
fmt.Println("DEBUG", parts)
if len(parts) < 2 {
return &TechInfo{
Tech: "default",
Version: "",
}, nil
}
re := regexp.MustCompile(`\d+\.\d+\.\d+`)
version := re.FindString(parts[1])
if version == "" {
// In case version couldn't be determined we return "unknown", otherwise returning
// error in this case was crashing admin when user fucked up the symlink for the
// tech manually.
log.Println("failed to extract version from symlink")
return &TechInfo{
Tech: "unknown",
Version: "",
}, nil
}
return &TechInfo{
Tech: parts[0],
Version: version,
}, nil
}

View File

@ -1,9 +1,12 @@
package containers
import (
"bytes"
"errors"
"fmt"
"log"
"path"
"strconv"
"strings"
"github.com/rosti-cz/node-api/apps"
@ -12,7 +15,16 @@ import (
// username in the containers under which all containers run
const appUsername = "app"
const owner = "app:app"
const passwordFile = "/srv/.rosti"
const deployKeyType = "ed25519"
const deployKeyPrefix = "rosti_deploy"
// Structure containing info about technology and its version
type TechInfo struct {
Tech string `json:"tech"`
Version string `json:"version"`
}
// Process contains info about background application usually running in supervisor
type Process struct {
@ -20,6 +32,12 @@ type Process struct {
State string `json:"state"`
}
// Current status of the container
type ContainerStatus struct {
Status string `json:"status"`
OOMKilled bool `json:"oom_killed"`
}
// Container extends App struct from App
type Container struct {
App *apps.App `json:"app"`
@ -39,7 +57,7 @@ func (c *Container) getDriver() *Driver {
}
// volumeHostPath each container has one volume mounted into it,
func (c *Container) volumeHostPath() string {
func (c *Container) VolumeHostPath() string {
return path.Join(c.AppsPath, c.App.Name)
}
@ -79,7 +97,8 @@ func (c *Container) GetState() (*apps.AppState, error) {
}
state := apps.AppState{
State: status,
State: status.Status,
OOMKilled: status.OOMKilled,
// CPUUsage: cpu,
// MemoryUsage: memory,
CPUUsage: -1.0,
@ -94,26 +113,26 @@ func (c *Container) GetState() (*apps.AppState, error) {
// Status returns state of the container
// Possible values: running, exited (stopped), no-container, unknown
func (c *Container) Status() (string, error) {
status := "unknown"
func (c *Container) Status() (ContainerStatus, error) {
status := ContainerStatus{
Status: "unknown",
}
driver := c.getDriver()
containerStatus, err := driver.Status(c.App.Name)
if err != nil && err.Error() == "no container found" {
return "no-container", nil
return ContainerStatus{Status: "no-container"}, nil
}
if err != nil {
return status, err
}
status = containerStatus
return status, nil
return containerStatus, nil
}
// DiskUsage returns number of bytes and inodes used by the container in it's mounted volume
func (c *Container) DiskUsage() (int, int, error) {
return du(c.volumeHostPath())
return du(c.VolumeHostPath())
}
// ResourceUsage returns amount of memory in B and CPU in % that the app occupies
@ -134,12 +153,13 @@ func (c *Container) Create() error {
_, err := driver.Create(
c.App.Name,
c.App.Image,
c.volumeHostPath(),
c.VolumeHostPath(),
c.App.HTTPPort,
c.App.SSHPort,
c.App.CPU,
c.App.Memory,
[]string{},
c.App.GetEnv(),
)
return err
@ -189,7 +209,7 @@ func (c *Container) Delete() error {
// does two things, deleting the container and the data and when
// the deleted container doesn't exist we actually don't care
// and we can continue to remove the data.
if status != "no-container" {
if status.Status != "no-container" {
err = c.Destroy()
if err != nil {
return err
@ -222,6 +242,147 @@ func (c *Container) SetPassword(password string) error {
return err
}
// Generate SSH keys and copies it into authorized keys
// Returns true if the key was generated in this call and error if there is any.
// The container has to run for this to work.
func (c *Container) GenerateDeploySSHKeys() (bool, error) {
driver := c.getDriver()
privateKey, pubKey, _ := c.GetDeploySSHKeys()
if privateKey != "" || pubKey != "" {
return false, nil
}
_, err := driver.Exec(c.App.Name, []string{"mkdir", "-p", "/srv/.ssh"}, "", []string{}, true)
if err != nil {
return false, err
}
_, err = driver.Exec(c.App.Name, []string{"ssh-keygen", "-t", deployKeyType, "-f", "/srv/.ssh/" + deployKeyPrefix + "_id_" + deployKeyType, "-P", ""}, "", []string{}, true)
if err != nil {
return false, err
}
_, err = driver.Exec(c.App.Name, []string{"chown", "app:app", "-R", "/srv/.ssh"}, "", []string{}, true)
if err != nil {
return false, err
}
return true, nil
}
// Generate SSH keys and copies it into authorized keys
// Return private key, public key and error.
// The container has to run for this to work.
func (c *Container) GetDeploySSHKeys() (string, string, error) {
driver := c.getDriver()
privateKey, err := driver.Exec(c.App.Name, []string{"cat", "/srv/.ssh/" + deployKeyPrefix + "_id_" + deployKeyType}, "", []string{}, true)
if err != nil {
return "", "", err
}
pubKey, err := driver.Exec(c.App.Name, []string{"cat", "/srv/.ssh/" + deployKeyPrefix + "_id_" + deployKeyType + ".pub"}, "", []string{}, true)
if err != nil {
return "", "", err
}
if privateKey != nil && pubKey != nil && !bytes.Contains(*privateKey, []byte("No such file")) && !bytes.Contains(*pubKey, []byte("No such file")) {
return string(*privateKey), string(*pubKey), nil
}
return "", "", nil
}
// Return host key without hostname
// The container has to run for this to work.
func (c *Container) GetHostKey() (string, error) {
driver := c.getDriver()
hostKeyRaw, err := driver.Exec(c.App.Name, []string{"ssh-keyscan", "localhost"}, "", []string{}, true)
if err != nil {
return "", err
}
// Loop over lines and search for localhost ssh
line := ""
if hostKeyRaw != nil {
for _, line = range strings.Split(string(*hostKeyRaw), "\n") {
if strings.HasPrefix(line, "localhost ssh") {
line = strings.TrimSpace(line)
break
}
}
}
if line == "" {
return "", errors.New("key not found")
}
parts := strings.SplitN(line, " ", 2)
if len(parts) > 1 {
return parts[1], nil
}
return "", errors.New("key not found")
}
// Append text to a file in the container
func (c *Container) AppendFile(filename string, text string, mode string) error {
driver := c.getDriver()
directory := path.Dir(filename)
_, err := driver.Exec(c.App.Name, []string{"mkdir", "-p", directory}, "", []string{}, false)
if err != nil {
return err
}
_, err = driver.Exec(c.App.Name, []string{"tee", "-a", filename}, text, []string{}, false)
if err != nil {
return err
}
_, err = driver.Exec(c.App.Name, []string{"chmod", mode, filename}, "", []string{}, false)
if err != nil {
return err
}
_, err = driver.Exec(c.App.Name, []string{"chown", owner, directory}, "", []string{}, false)
if err != nil {
return err
}
_, err = driver.Exec(c.App.Name, []string{"chown", owner, filename}, "", []string{}, false)
if err != nil {
return err
}
return nil
}
// SetAppPort changes application port in the container
func (c *Container) SetAppPort(port int) error {
driver := c.getDriver()
_, err := driver.Exec(
c.App.Name,
[]string{
"sed",
"-i",
"s+proxy_pass[ ]*http://127.0.0.1:8080/;+proxy_pass http://127.0.0.1:" + strconv.Itoa(port) + "/;+g",
"/srv/conf/nginx.d/app.conf",
},
"",
[]string{},
false,
)
if err != nil {
return err
}
return err
}
// SetFileContent uploads text into a file inside the container. It's greate for uploading SSH keys.
// The method creates the diretory where the file is located and sets mode of the final file
func (c *Container) SetFileContent(filename string, text string, mode string) error {
@ -239,12 +400,12 @@ func (c *Container) SetFileContent(filename string, text string, mode string) er
return err
}
_, err = driver.Exec(c.App.Name, []string{"chown", "app:app", directory}, "", []string{}, false)
_, err = driver.Exec(c.App.Name, []string{"chown", owner, directory}, "", []string{}, false)
if err != nil {
return err
}
_, err = driver.Exec(c.App.Name, []string{"chown", "app:app", filename}, "", []string{}, false)
_, err = driver.Exec(c.App.Name, []string{"chown", owner, filename}, "", []string{}, false)
if err != nil {
return err
}
@ -262,11 +423,15 @@ func (c *Container) SetTechnology(tech string, version string) error {
var err error
// TODO: script injection here?
var output *[]byte
if version == "" {
_, err = driver.Exec(c.App.Name, []string{"su", "app", "-c", "rosti " + tech}, "", []string{}, false)
output, err = driver.Exec(c.App.Name, []string{"su", "app", "-c", "rosti " + tech}, "", []string{}, false)
} else {
_, err = driver.Exec(c.App.Name, []string{"su", "app", "-c", "rosti " + tech + " " + version}, "", []string{}, false)
output, err = driver.Exec(c.App.Name, []string{"su", "app", "-c", "rosti " + tech + " " + version}, "", []string{}, false)
}
log.Printf("DEBUG: enable tech %s/%s for %s output: %s", tech, version, c.App.Name, string(*output))
return err
}
@ -295,6 +460,47 @@ func (c *Container) GetProcessList() ([]Process, error) {
return processes, nil
}
// Restarts supervisord process
func (c *Container) RestartProcess(name string) error {
driver := c.getDriver()
_, err := driver.Exec(c.App.Name, []string{"supervisorctl", "restart", name}, "", []string{}, false)
return err
}
// Starts supervisord process
func (c *Container) StartProcess(name string) error {
driver := c.getDriver()
_, err := driver.Exec(c.App.Name, []string{"supervisorctl", "start", name}, "", []string{}, false)
return err
}
// Stops supervisord process
func (c *Container) StopProcess(name string) error {
driver := c.getDriver()
_, err := driver.Exec(c.App.Name, []string{"supervisorctl", "stop", name}, "", []string{}, false)
return err
}
// Reread supervisord config
func (c *Container) ReloadSupervisor() error {
driver := c.getDriver()
_, err := driver.Exec(c.App.Name, []string{"supervisorctl", "reread"}, "", []string{}, false)
if err != nil {
return err
}
_, err = driver.Exec(c.App.Name, []string{"supervisorctl", "update"}, "", []string{}, false)
if err != nil {
return err
}
return err
}
// GetSystemProcesses return list of running system processes
func (c *Container) GetSystemProcesses() ([]string, error) {
driver := c.getDriver()
@ -340,7 +546,18 @@ func (c *Container) GetTechs() (apps.AppTechs, error) {
driver := c.getDriver()
stdouterr, err := driver.Exec(c.App.Name, []string{"ls", "/opt/techs"}, "", []string{}, true)
stdouterr, err := driver.Exec(c.App.Name, []string{"ls", "/opt"}, "", []string{}, true)
if err != nil {
// in case there is an error just return empty response
return techs, nil
}
// Check if /opt/techs exists
if !strings.Contains(string(*stdouterr), "techs") {
return techs, nil
}
stdouterr, err = driver.Exec(c.App.Name, []string{"ls", "/opt/techs"}, "", []string{}, true)
if err != nil {
// in case there is an error just return empty response
return techs, nil
@ -360,9 +577,19 @@ func (c *Container) GetTechs() (apps.AppTechs, error) {
Version: techParts[1],
})
} else {
return techs, errors.New("one of the tech has wrong number of parts")
return techs, fmt.Errorf("one of the tech has wrong number of parts (%s)", techRaw)
}
}
return techs, nil
}
// Returns info about active technology
func (c *Container) GetActiveTech() (*TechInfo, error) {
info, err := getTechAndVersion(path.Join(c.VolumeHostPath(), "bin", "primary_tech"))
if err != nil {
return info, err
}
return info, nil
}

39
containers/types_test.go Normal file
View File

@ -0,0 +1,39 @@
package containers
import (
"os"
"path/filepath"
"testing"
)
func TestGetTechAndVersion(t *testing.T) {
// Create a temporary directory for testing
tempDir := t.TempDir()
// Create a fake language directory with version in the temporary directory
fakeLangDir := filepath.Join(tempDir, "techs", "python-3.10.4", "bin")
err := os.MkdirAll(fakeLangDir, 0755)
if err != nil {
t.Fatalf("Failed to create fake language directory: %v", err)
}
// Create a symlink for testing
symlink := filepath.Join(tempDir, "primary_tech")
err = os.Symlink(fakeLangDir, symlink)
if err != nil {
t.Fatalf("Failed to create symlink: %v", err)
}
// Test parseLanguageAndVersion function
info, err := getTechAndVersion(symlink)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
expectedLanguage := "python"
expectedVersion := "3.10.4"
if info.Tech != expectedLanguage || info.Version != expectedVersion {
t.Errorf("Expected language: %s, version: %s, but got language: %s, version: %s",
expectedLanguage, expectedVersion, info.Tech, info.Version)
}
}

View File

@ -7,5 +7,6 @@ var patterns map[string][]string = map[string][]string{
},
"bot": {
`youtube\-dl`,
`shiziyama`,
},
}

View File

@ -3,7 +3,13 @@ package glue
import (
"errors"
"fmt"
"io"
"log"
"net/http"
"os"
"os/exec"
"path"
"strings"
"time"
"github.com/jinzhu/gorm"
@ -14,6 +20,9 @@ import (
"github.com/rosti-cz/node-api/node"
)
// Wait for the container a little bit longer
const ENABLE_TECH_WAIT = 10
// Processor separates logic of apps, containers, detector and node from handlers.
// It defines common interface for both types of handlers, HTTP and the events.
type Processor struct {
@ -90,7 +99,7 @@ func (p *Processor) waitForApp() error {
if err != nil {
return err
}
if status == "running" {
if status.Status == "running" {
return nil
}
@ -101,24 +110,27 @@ func (p *Processor) waitForApp() error {
}
// List returns list of apps
func (p *Processor) List() (apps.Apps, error) {
// noUpdate skips stats gathering to speed things up
func (p *Processor) List(noUpdate bool) (apps.Apps, error) {
appList := apps.Apps{}
statsProcessor := StatsProcessor{
DB: p.DB,
DockerSock: p.DockerSock,
BindIPHTTP: p.BindIPHTTP,
BindIPSSH: p.BindIPSSH,
AppsPath: p.AppsPath,
}
if !noUpdate {
statsProcessor := StatsProcessor{
DB: p.DB,
DockerSock: p.DockerSock,
BindIPHTTP: p.BindIPHTTP,
BindIPSSH: p.BindIPSSH,
AppsPath: p.AppsPath,
}
err := statsProcessor.GatherStates()
if err != nil {
return appList, fmt.Errorf("backend error: %v", err)
err := statsProcessor.GatherStates()
if err != nil {
return appList, fmt.Errorf("backend error: %v", err)
}
}
processor := p.getAppProcessor()
appList, err = processor.List()
appList, err := processor.List()
if err != nil {
return appList, fmt.Errorf("backend error: %v", err)
@ -128,69 +140,137 @@ func (p *Processor) List() (apps.Apps, error) {
}
// Get returns one app
func (p *Processor) Get() (apps.App, error) {
func (p *Processor) Get(noUpdate bool) (apps.App, error) {
app := apps.App{}
statsProcessor := StatsProcessor{
DB: p.DB,
DockerSock: p.DockerSock,
BindIPHTTP: p.BindIPHTTP,
BindIPSSH: p.BindIPSSH,
AppsPath: p.AppsPath,
}
if !noUpdate {
statsProcessor := StatsProcessor{
DB: p.DB,
DockerSock: p.DockerSock,
BindIPHTTP: p.BindIPHTTP,
BindIPSSH: p.BindIPSSH,
AppsPath: p.AppsPath,
}
err := statsProcessor.UpdateState(p.AppName)
if err != nil {
return app, err
err := statsProcessor.UpdateState(p.AppName)
if err != nil {
return app, err
}
}
processor := p.getAppProcessor()
app, err = processor.Get(p.AppName)
app, err := processor.Get(p.AppName)
if err != nil {
return app, err
}
// Gather runtime info about the container
container := docker.Container{
App: &app,
DockerSock: p.DockerSock,
BindIPHTTP: p.BindIPHTTP,
BindIPSSH: p.BindIPSSH,
AppsPath: p.AppsPath,
}
status, err := container.Status()
if err != nil {
return app, err
}
if status == "running" {
var err error
app.Techs, err = container.GetTechs()
if err != nil {
return app, err
}
app.PrimaryTech, err = container.GetPrimaryTech()
if err != nil {
return app, err
if !noUpdate {
container := docker.Container{
App: &app,
DockerSock: p.DockerSock,
BindIPHTTP: p.BindIPHTTP,
BindIPSSH: p.BindIPSSH,
AppsPath: p.AppsPath,
}
processList, err := container.GetSystemProcesses()
status, err := container.Status()
if err != nil {
return app, err
}
if status.Status == "running" {
var err error
app.Techs, err = container.GetTechs()
if err != nil {
return app, err
}
app.PrimaryTech, err = container.GetPrimaryTech()
if err != nil {
return app, err
}
flags, err := detector.Check(processList)
if err != nil {
return app, err
processList, err := container.GetSystemProcesses()
if err != nil {
return app, err
}
flags, err := detector.Check(processList)
if err != nil {
return app, err
}
app.Flags = flags.String()
}
app.Flags = flags.String()
}
return app, nil
}
// Takes URL with an tar archive and prepares container's volume from it.
func (p *Processor) volumeFromURL(url string, container *docker.Container) error {
// Validation, check if url ends with tar.zst
if !strings.HasSuffix(url, ".tar.zst") {
return fmt.Errorf("archive has to end with .tar.zst")
}
volumePath := container.VolumeHostPath()
// Prepare volume path
err := os.MkdirAll(volumePath, 0755)
if err != nil {
return fmt.Errorf("failed to create volume path: %v", err)
}
// Download the archive
archivePath := path.Join(volumePath, "archive.tar.zst")
log.Printf("%s: downloading archive from %s\n", container.App.Name, url)
f, err := os.Create(archivePath)
if err != nil {
return fmt.Errorf("failed to create archive file: %v", err)
}
defer f.Close()
resp, err := http.Get(url)
if err != nil {
return fmt.Errorf("failed to download archive: %v", err)
}
defer resp.Body.Close()
n, err := io.Copy(f, resp.Body)
if err != nil {
return fmt.Errorf("failed to download archive: %v", err)
}
log.Printf("downloaded %d bytes\n", n)
// Extract the archive
log.Printf("%s: extracting archive\n", container.App.Name)
// Call tar xf archive.tar.zst -C /volume
cmd := exec.Command("tar", "-xf", archivePath, "-C", volumePath)
err = cmd.Run()
if err != nil {
log.Printf("%s: failed to extract archive: %v", container.App.Name, err)
return err
}
// Remove archive
log.Printf("%s: removing archive\n", container.App.Name)
err = os.Remove(volumePath + "/archive.tar.zst")
if err != nil {
return fmt.Errorf("failed to remove archive: %v", err)
}
log.Printf("%s: volume preparing done\n", container.App.Name)
return nil
}
// Create creates a single app in the system
func (p *Processor) Create(appTemplate apps.App) error {
if appTemplate.EnvRaw == nil {
appTemplate.EnvRaw = make(map[string]interface{})
}
err := p.Register(appTemplate)
if err != nil {
return err
@ -204,6 +284,17 @@ func (p *Processor) Create(appTemplate apps.App) error {
AppsPath: p.AppsPath,
}
if len(appTemplate.Snapshot) > 0 && len(appTemplate.Setup.ArchiveURL) > 0 {
return fmt.Errorf("snapshot and archive_url cannot be used together")
}
if len(appTemplate.Setup.ArchiveURL) > 0 {
err = p.volumeFromURL(appTemplate.Setup.ArchiveURL, &container)
if err != nil {
return fmt.Errorf("failed to prepare volume: %v", err)
}
}
err = container.Create()
if err != nil {
return err
@ -216,12 +307,62 @@ func (p *Processor) Create(appTemplate apps.App) error {
// Restore the data
err = p.SnapshotProcessor.RestoreSnapshot(appTemplate.Snapshot, appTemplate.Name)
if err != nil {
return err
return fmt.Errorf("failed to restore snapshot: %v", err)
}
}
err = container.Start()
return err
if err != nil {
return fmt.Errorf("failed to start container: %v", err)
}
// Wait for the app to be created
err = p.waitForApp()
if err != nil {
return fmt.Errorf("failed to wait for app: %v", err)
}
time.Sleep(5 * time.Second) // We wait for a little bit longer to make sure the container is fully started
// Setup SSH keys if it's noted in the request
log.Println("Checking if SSH key is required")
if len(appTemplate.Setup.SSHKeys) > 0 && len(appTemplate.Snapshot) == 0 {
log.Println("Setting up SSH keys")
err = p.UpdateKeys(appTemplate.Setup.SSHKeys)
if err != nil {
return fmt.Errorf("failed to update keys: %v", err)
}
}
// Setup technology if it's noted in the request
if len(appTemplate.Setup.Tech) > 0 && len(appTemplate.Snapshot) == 0 {
err = p.EnableTech(appTemplate.Setup.Tech, appTemplate.Setup.TechVersion)
if err != nil {
return fmt.Errorf("failed to enable tech: %v", err)
}
}
// Set password if it's noted in the request
if len(appTemplate.Setup.Password) > 0 && len(appTemplate.Snapshot) == 0 {
err = p.SetPassword(appTemplate.Setup.Password)
if err != nil {
return fmt.Errorf("failed to set password: %v", err)
}
}
// Changes port of the app hosted inside the container
if appTemplate.AppPort != 0 && len(appTemplate.Snapshot) == 0 {
err = container.SetAppPort(appTemplate.AppPort)
if err != nil {
return fmt.Errorf("failed to change app port to %d: %v", appTemplate.AppPort, err)
}
err = container.RestartProcess("nginx")
if err != nil {
return fmt.Errorf("failed to restart nginx: %v", err)
}
}
return nil
}
// Register registers app without creating a container for it
@ -241,8 +382,12 @@ func (p *Processor) Register(appTemplate apps.App) error {
// Update updates application
func (p *Processor) Update(appTemplate apps.App) error {
if appTemplate.EnvRaw == nil {
appTemplate.EnvRaw = make(map[string]interface{})
}
processor := p.getAppProcessor()
app, err := processor.Update(appTemplate.Name, appTemplate.SSHPort, appTemplate.HTTPPort, appTemplate.Image, appTemplate.CPU, appTemplate.Memory)
app, err := processor.Update(appTemplate.Name, appTemplate.SSHPort, appTemplate.HTTPPort, appTemplate.Image, appTemplate.CPU, appTemplate.Memory, appTemplate.GetEnv())
if err != nil {
if validationError, ok := err.(apps.ValidationError); ok {
return fmt.Errorf("validation error: %v", validationError.Error())
@ -277,6 +422,25 @@ func (p *Processor) Update(appTemplate apps.App) error {
return err
}
// Setup technology if it's noted in the request
if len(appTemplate.Setup.Tech) > 0 {
err := p.waitForApp()
if err != nil {
return err
}
err = p.EnableTech(appTemplate.Setup.Tech, appTemplate.Setup.TechVersion)
if err != nil {
return fmt.Errorf("failed to enable tech: %v", err)
}
// We restart the container so everything can use the new tech
err = container.Restart()
if err != nil {
return err
}
}
return nil
}
@ -286,6 +450,7 @@ func (p *Processor) Delete() error {
container, err := p.getContainer()
if err != nil {
log.Println("ERROR: delete app:", err.Error())
return err
}
status, err := container.Status()
@ -293,7 +458,7 @@ func (p *Processor) Delete() error {
return err
}
if status != "no-container" {
if status.Status != "no-container" {
// We stop the container first
err = container.Stop()
if err != nil {
@ -318,6 +483,9 @@ func (p *Processor) Delete() error {
// Stop stops app
func (p *Processor) Stop() error {
container, err := p.getContainer()
if err != nil {
return err
}
status, err := container.Status()
if err != nil {
@ -325,11 +493,16 @@ func (p *Processor) Stop() error {
}
// Stop the container only when it exists
if status != "no-container" {
if status.Status != "no-container" {
err = container.Stop()
if err != nil {
return err
}
err = container.Destroy()
if err != nil {
return err
}
}
return nil
@ -346,7 +519,7 @@ func (p *Processor) Start() error {
if err != nil {
return err
}
if status == "no-container" {
if status.Status == "no-container" {
err = container.Create()
if err != nil {
return err
@ -389,7 +562,8 @@ func (p *Processor) UpdateKeys(keys string) error {
return err
}
err = container.SetFileContent(sshPubKeysLocation, keys+"\n", "0600")
log.Println("Storing keys into " + sshPubKeysLocation)
err = container.AppendFile(sshPubKeysLocation, keys+"\n", "0600")
if err != nil {
return err
}
@ -417,6 +591,105 @@ func (p *Processor) SetPassword(password string) error {
return nil
}
// Generate SSH key and adds it into authorized_keys
// These pair of keys is used for deployment.
// Returns private key, pubkey and error.
// Keys are returned every time even if it was already generated
func (p *Processor) GenerateDeploySSHKeys() (string, string, error) {
container, err := p.getContainer()
if err != nil {
return "", "", err
}
// If the container is not running we skip this code
status, err := container.Status()
if err != nil {
return "", "", err
}
if status.Status != "running" {
return "", "", nil
}
created, err := container.GenerateDeploySSHKeys()
if err != nil {
return "", "", err
}
privateKey, pubKey, err := container.GetDeploySSHKeys()
if err != nil {
return "", "", err
}
if created {
err = container.AppendFile(sshPubKeysLocation, pubKey+"\n", "0600")
if err != nil {
return "", "", err
}
}
return privateKey, pubKey, nil
}
// Return SSH host key without hostname (first part of the line)
func (p *Processor) GetHostKey() (string, error) {
container, err := p.getContainer()
if err != nil {
return "", err
}
// If the container is not running we skip this code
status, err := container.Status()
if err != nil {
return "", err
}
if status.Status != "running" {
return "", nil
}
hostKey, err := container.GetHostKey()
if err != nil {
return "", err
}
return hostKey, nil
}
// Save meta data about app into a file
func (p *Processor) SaveMetadata(metadata string) error {
container, err := p.getContainer()
if err != nil {
return err
}
volumePath := container.VolumeHostPath()
f, err := os.Create(path.Join(volumePath, ".metadata.json"))
if err != nil {
return err
}
defer f.Close()
_, err = f.Write([]byte(metadata))
if err != nil {
return err
}
// Set permissions
err = os.Chmod(path.Join(volumePath, ".metadata.json"), 0600)
if err != nil {
return err
}
// Set owner
err = os.Chown(path.Join(volumePath, ".metadata.json"), ownerUID, ownerGID)
if err != nil {
return err
}
return nil
}
// Processes returns list of supervisord processes
func (p *Processor) Processes() ([]docker.Process, error) {
container, err := p.getContainer()
@ -439,6 +712,8 @@ func (p *Processor) EnableTech(service, version string) error {
return err
}
time.Sleep(ENABLE_TECH_WAIT * time.Second)
container, err := p.getContainer()
if err != nil {
return err
@ -460,7 +735,7 @@ func (p *Processor) Rebuild() error {
}
err = container.Destroy()
if err != nil {
if err != nil && !strings.Contains(err.Error(), "no container found") {
return err
}
@ -526,7 +801,7 @@ func (p *Processor) RestoreFromSnapshot(snapshotName string) error {
}
// Stop the container only when it exists
if status != "no-container" {
if status.Status != "no-container" {
err = container.Stop()
if err != nil {
return err
@ -544,7 +819,7 @@ func (p *Processor) RestoreFromSnapshot(snapshotName string) error {
if err != nil {
return err
}
if status == "no-container" {
if status.Status == "no-container" {
err = container.Create()
if err != nil {
return err
@ -657,3 +932,18 @@ func (p *Processor) DeleteAppSnapshots() error {
return nil
}
// Returns active technology in the app
func (p *Processor) GetActiveTech() (*containers.TechInfo, error) {
container, err := p.getContainer()
if err != nil {
return nil, err
}
tech, err := container.GetActiveTech()
if err != nil {
return tech, err
}
return tech, nil
}

View File

@ -84,7 +84,7 @@ func TestProcessorGet(t *testing.T) {
err := processor.Create(testAppTemplate)
assert.Nil(t, err)
app, err := processor.Get()
app, err := processor.Get(false)
assert.Nil(t, err)
assert.Equal(t, "running", app.State)
@ -96,7 +96,7 @@ func TestProcessorRegister(t *testing.T) {
err := processor.Register(testAppTemplate)
assert.Nil(t, err)
app, err := processor.Get()
app, err := processor.Get(false)
assert.Nil(t, err)
assert.Equal(t, "no-container", app.State)
@ -108,7 +108,7 @@ func TestProcessorUpdate(t *testing.T) {
err := processor.Create(testAppTemplate)
assert.Nil(t, err)
app, err := processor.Get()
app, err := processor.Get(false)
assert.Nil(t, err)
assert.Equal(t, "running", app.State)
@ -119,7 +119,7 @@ func TestProcessorUpdate(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, "running", app.State)
app, err = processor.Get()
app, err = processor.Get(false)
assert.Nil(t, err)
assert.Equal(t, 1024, app.Memory)
@ -141,7 +141,7 @@ func TestProcessorStop(t *testing.T) {
err = processor.Stop()
assert.Nil(t, err)
app, err := processor.Get()
app, err := processor.Get(false)
assert.Nil(t, err)
assert.Equal(t, "exited", app.State)
@ -156,14 +156,14 @@ func TestProcessorStart(t *testing.T) {
err = processor.Stop()
assert.Nil(t, err)
app, err := processor.Get()
app, err := processor.Get(false)
assert.Nil(t, err)
assert.Equal(t, "exited", app.State)
err = processor.Start()
assert.Nil(t, err)
app, err = processor.Get()
app, err = processor.Get(false)
assert.Nil(t, err)
assert.Equal(t, "running", app.State)

View File

@ -3,6 +3,7 @@ package glue
import (
"log"
"github.com/getsentry/sentry-go"
"github.com/jinzhu/gorm"
"github.com/rosti-cz/node-api/apps"
docker "github.com/rosti-cz/node-api/containers"
@ -52,6 +53,7 @@ func (s *StatsProcessor) UpdateUsage(name string) error {
err = processor.UpdateResources(
name,
state.State,
state.OOMKilled,
state.CPUUsage,
state.MemoryUsage,
state.DiskUsageBytes,
@ -84,7 +86,8 @@ func (s *StatsProcessor) UpdateState(name string) error {
err = processor.UpdateState(
app.Name,
state,
state.Status,
state.OOMKilled,
)
return err
}
@ -100,6 +103,7 @@ func (s *StatsProcessor) GatherStats() error {
for _, app := range appList {
err := s.UpdateUsage(app.Name)
if err != nil {
sentry.CaptureException(err)
log.Println("STATS ERROR:", err.Error())
}
}
@ -118,6 +122,7 @@ func (s *StatsProcessor) GatherStates() error {
for _, app := range appList {
err := s.UpdateState(app.Name)
if err != nil {
sentry.CaptureException(err)
log.Println("STATE ERROR:", err.Error())
}
}

View File

@ -2,6 +2,9 @@ package glue
import "github.com/rosti-cz/node-api/apps"
const ownerUID = 1000
const ownerGID = 1000
// Path where authorized keys are
const sshPubKeysLocation = "/srv/.ssh/authorized_keys"

34
go.mod
View File

@ -3,31 +3,31 @@ module github.com/rosti-cz/node-api
go 1.14
require (
github.com/Microsoft/go-winio v0.4.18 // indirect
github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 // indirect
github.com/containerd/containerd v1.5.9 // indirect
github.com/docker/docker v20.10.12+incompatible
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/StackExchange/wmi v1.2.1 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/distribution/reference v0.5.0 // indirect
github.com/docker/docker v25.0.3+incompatible
github.com/docker/go-connections v0.4.0
github.com/go-ole/go-ole v1.2.5 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/getsentry/sentry-go v0.26.0
github.com/gobuffalo/packr v1.30.1
github.com/golang/protobuf v1.5.2 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/jinzhu/gorm v1.9.14
github.com/jinzhu/now v1.1.4 // indirect
github.com/kelseyhightower/envconfig v1.4.0
github.com/labstack/echo v3.3.10+incompatible
github.com/labstack/gommon v0.3.0 // indirect
github.com/mholt/archiver/v3 v3.5.0
github.com/labstack/echo/v4 v4.10.0
github.com/minio/minio-go/v7 v7.0.14
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/nats-io/nats-server/v2 v2.6.1 // indirect
github.com/nats-io/nats.go v1.12.3
github.com/nats-io/nats.go v1.23.0
github.com/opencontainers/image-spec v1.0.2
github.com/pkg/errors v0.9.1
github.com/satori/go.uuid v1.2.0
github.com/shirou/gopsutil v2.20.6+incompatible
github.com/stretchr/testify v1.7.0
google.golang.org/grpc v1.44.0 // indirect
gotest.tools/v3 v3.1.0 // indirect
github.com/stretchr/testify v1.8.4
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 // indirect
gorm.io/driver/mysql v1.4.7
gorm.io/gorm v1.25.2-0.20230530020048-26663ab9bf55
gotest.tools/v3 v3.5.1 // indirect
)

2841
go.sum

File diff suppressed because it is too large Load Diff

View File

@ -2,13 +2,14 @@ package main
import (
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
"github.com/labstack/echo"
"github.com/labstack/echo/v4"
"github.com/rosti-cz/node-api/apps"
"github.com/rosti-cz/node-api/common"
"github.com/rosti-cz/node-api/glue"
@ -29,7 +30,7 @@ func listAppsHandler(c echo.Context) error {
AppsPath: config.AppsPath,
}
applications, err := processor.List()
applications, err := processor.List(false)
if err != nil {
return c.JSONPretty(http.StatusInternalServerError, Message{Message: err.Error()}, JSONIndent)
}
@ -40,6 +41,7 @@ func listAppsHandler(c echo.Context) error {
// Returns one app
func getAppHandler(c echo.Context) error {
name := c.Param("name")
fast := c.Param("fast")
processor := glue.Processor{
AppName: name,
@ -49,7 +51,7 @@ func getAppHandler(c echo.Context) error {
BindIPSSH: config.AppsBindIPSSH,
AppsPath: config.AppsPath,
}
app, err := processor.Get()
app, err := processor.Get(fast == "1")
if err != nil {
return c.JSONPretty(http.StatusInternalServerError, Message{Message: err.Error()}, JSONIndent)
}
@ -361,6 +363,33 @@ func getOrphansHander(c echo.Context) error {
return c.JSON(http.StatusOK, []string{})
}
// Save metadata for the app.
//
// The request body is stored verbatim as the app's metadata. Errors are
// reported as JSON responses, consistent with the other HTTP handlers in
// this file (see listAppsHandler/getAppHandler).
func saveMetadataHandler(c echo.Context) error {
	name := c.Param("name")

	processor := glue.Processor{
		AppName:           name,
		DB:                common.GetDBConnection(),
		SnapshotProcessor: &snapshotProcessor,
		DockerSock:        config.DockerSocket,
		BindIPHTTP:        config.AppsBindIPHTTP,
		BindIPSSH:         config.AppsBindIPSSH,
		AppsPath:          config.AppsPath,
	}

	body, err := io.ReadAll(c.Request().Body)
	if err != nil {
		return c.JSONPretty(http.StatusBadRequest, Message{Message: "error reading request body: " + err.Error()}, JSONIndent)
	}

	err = processor.SaveMetadata(string(body))
	if err != nil {
		return c.JSONPretty(http.StatusInternalServerError, Message{Message: "error while saving metadata: " + err.Error()}, JSONIndent)
	}

	return nil
}
// Return info about the node including performance index
func getNodeInfoHandler(c echo.Context) error {
processor := glue.Processor{
@ -430,7 +459,11 @@ func metricsHandler(c echo.Context) error {
metrics += fmt.Sprintf("rosti_node_memory_index{hostname=\"%s\"} %f\n", hostname, node.MemoryIndex)
metrics += fmt.Sprintf("rosti_node_sold_memory{hostname=\"%s\"} %d\n", hostname, node.SoldMemory)
apps, err := processor.List()
if elapsedMetric != -1 {
metrics += fmt.Sprintf("rosti_node_stats_time_elapsed{hostname=\"%s\"} %f\n", hostname, float64(elapsedMetric)/1000000000)
}
apps, err := processor.List(true)
if err != nil {
return c.JSONPretty(http.StatusInternalServerError, Message{Message: err.Error()}, JSONIndent)
}

View File

@ -18,6 +18,7 @@ import (
"regexp"
"strings"
"github.com/getsentry/sentry-go"
"github.com/nats-io/nats.go"
"github.com/pkg/errors"
"github.com/rosti-cz/node-api/apps"
@ -34,6 +35,7 @@ func _messageHandler(m *nats.Msg) error {
message := RequestMessage{}
err := json.Unmarshal(m.Data, &message)
if err != nil {
sentry.CaptureException(err)
log.Println(errors.Wrap(err, "invalid JSON data in the incoming message"))
return err
}
@ -42,6 +44,7 @@ func _messageHandler(m *nats.Msg) error {
eventHandlerMap := map[string](func(m *nats.Msg, message *RequestMessage) error){
"list": listEventHandler,
"get": getEventHandler,
"fast_get": fastGetEventHandler, // same as get but without status update
"create": createEventHandler,
"register": registerEventHandler,
"update": updateEventHandler,
@ -49,6 +52,9 @@ func _messageHandler(m *nats.Msg) error {
"stop": stopEventHandler,
"start": startEventHandler,
"restart": restartEventHandler,
"get_deploy_ssh_keys": getDeploySSHKeysEventHandler,
"get_ssh_host_key": getSSHHostKeyEventHandler,
"get_active_tech": getActiveTechHandler,
"update_keys": updateKeysEventHandler,
"set_password": setPasswordEventHandler,
"processes": processesEventHandler,
@ -57,6 +63,7 @@ func _messageHandler(m *nats.Msg) error {
"add_label": addLabelEventHandler,
"remove_label": removeLabelEventHandler,
"list_orphans": listOrphansEventHandler,
"save_metadata": saveMetadataEventHandler,
"node": getNodeEventHandler,
"create_snapshot": createSnapshotEventHandler,
"restore_from_snapshot": restoreFromSnapshotEventHandler,
@ -70,7 +77,12 @@ func _messageHandler(m *nats.Msg) error {
}
if eventHandler, ok := eventHandlerMap[message.Type]; ok {
return eventHandler(m, &message)
err = eventHandler(m, &message)
if err != nil {
sentry.CaptureException(err)
}
return err
} else {
log.Println("ERROR: event handler not defined for " + message.Type)
}
@ -101,7 +113,7 @@ func listEventHandler(m *nats.Msg, message *RequestMessage) error {
AppsPath: config.AppsPath,
}
applications, err := processor.List()
applications, err := processor.List(false)
if err != nil {
return errorReplyFormater(m, "backend error", err)
}
@ -134,7 +146,43 @@ func getEventHandler(m *nats.Msg, message *RequestMessage) error {
AppsPath: config.AppsPath,
}
app, err := processor.Get()
app, err := processor.Get(false)
if err != nil {
log.Printf("backend error: %v\n", err)
return errorReplyFormater(m, "backend error", err)
}
// Assembling reply message
reply := ReplyMessage{
AppName: app.Name,
Payload: app,
}
data, err := json.Marshal(reply)
if err != nil {
return errorReplyFormater(m, "reply formatter error", err)
}
err = m.Respond(data)
if err != nil {
log.Println("ERROR: get app:", err.Error())
}
return err
}
// Returns one app quickly, i.e. without triggering an immediate status update
func fastGetEventHandler(m *nats.Msg, message *RequestMessage) error {
processor := glue.Processor{
AppName: message.AppName,
DB: common.GetDBConnection(),
DockerSock: config.DockerSocket,
BindIPHTTP: config.AppsBindIPHTTP,
BindIPSSH: config.AppsBindIPSSH,
AppsPath: config.AppsPath,
}
app, err := processor.Get(true)
if err != nil {
log.Printf("backend error: %v\n", err)
return errorReplyFormater(m, "backend error", err)
@ -215,7 +263,7 @@ func registerEventHandler(m *nats.Msg, message *RequestMessage) error {
BindIPSSH: config.AppsBindIPSSH,
AppsPath: config.AppsPath,
}
err = processor.Create(appTemplate)
err = processor.Register(appTemplate)
if err != nil && strings.Contains(err.Error(), "validation error") {
publish(message.AppName, "validation error", true)
return err
@ -241,6 +289,11 @@ func updateEventHandler(m *nats.Msg, message *RequestMessage) error {
return err
}
// This is a small inconsistency because the app name is not coming from the admin, which this line fixes.
// TODO: We should probably fix this for this and all other endpoints too. The message app name and the
// TODO: payload app name always have to be the same.
appTemplate.Name = message.AppName
processor := glue.Processor{
AppName: message.AppName,
DB: common.GetDBConnection(),
@ -356,6 +409,85 @@ func restartEventHandler(m *nats.Msg, message *RequestMessage) error {
return nil
}
// getDeploySSHKeysEventHandler generates a deploy SSH key pair for the app
// and replies with both the private and the public key.
func getDeploySSHKeysEventHandler(m *nats.Msg, message *RequestMessage) error {
	processor := glue.Processor{
		AppName:           message.AppName,
		DB:                common.GetDBConnection(),
		SnapshotProcessor: &snapshotProcessor,
		DockerSock:        config.DockerSocket,
		BindIPHTTP:        config.AppsBindIPHTTP,
		BindIPSSH:         config.AppsBindIPSSH,
		AppsPath:          config.AppsPath,
	}

	privateKey, pubKey, err := processor.GenerateDeploySSHKeys()
	if err != nil {
		log.Printf("backend error: %v\n", err)
		return errorReplyFormater(m, "backend error", err)
	}

	// Assembling reply message
	reply := ReplyMessage{
		Payload: struct {
			PrivateKey string `json:"private_key"`
			PublicKey  string `json:"public_key"`
		}{
			PrivateKey: privateKey,
			PublicKey:  pubKey,
		},
	}

	data, err := json.Marshal(reply)
	if err != nil {
		return errorReplyFormater(m, "reply formatter error", err)
	}

	err = m.Respond(data)
	if err != nil {
		// Was "ERROR: get app:" — copy-paste from getEventHandler.
		log.Println("ERROR: get deploy SSH keys:", err.Error())
		return err
	}

	return nil
}
// Returns the SSH host key of the node so clients can verify its identity.
func getSSHHostKeyEventHandler(m *nats.Msg, message *RequestMessage) error {
	processor := glue.Processor{
		AppName:           message.AppName,
		DB:                common.GetDBConnection(),
		SnapshotProcessor: &snapshotProcessor,
		DockerSock:        config.DockerSocket,
		BindIPHTTP:        config.AppsBindIPHTTP,
		BindIPSSH:         config.AppsBindIPSSH,
		AppsPath:          config.AppsPath,
	}

	hostKey, err := processor.GetHostKey()
	if err != nil {
		log.Printf("backend error: %v\n", err)
		return errorReplyFormater(m, "backend error", err)
	}

	// Assembling reply message
	reply := ReplyMessage{
		Payload: hostKey,
	}

	data, err := json.Marshal(reply)
	if err != nil {
		return errorReplyFormater(m, "reply formatter error", err)
	}

	err = m.Respond(data)
	if err != nil {
		// Was "ERROR: get app:" — copy-paste from getEventHandler.
		log.Println("ERROR: get SSH host key:", err.Error())
		return err
	}

	return nil
}
// Copies body of the request into /srv/.ssh/authorized_keys
func updateKeysEventHandler(m *nats.Msg, message *RequestMessage) error {
body := message.Payload
@ -588,10 +720,28 @@ func listOrphansEventHandler(m *nats.Msg, message *RequestMessage) error {
return nil
}
// Save metadata for the app.
//
// The metadata is taken verbatim from the message payload. Unlike most
// other handlers this one sends no reply message; errors are only
// propagated back to the dispatcher (which reports them to Sentry).
func saveMetadataEventHandler(m *nats.Msg, message *RequestMessage) error {
	processor := glue.Processor{
		AppName:           message.AppName,
		DB:                common.GetDBConnection(),
		SnapshotProcessor: &snapshotProcessor,
		DockerSock:        config.DockerSocket,
		BindIPHTTP:        config.AppsBindIPHTTP,
		BindIPSSH:         config.AppsBindIPSSH,
		AppsPath:          config.AppsPath,
	}

	err := processor.SaveMetadata(message.Payload)
	if err != nil {
		// %w keeps the original error in the chain for errors.Is/As.
		return fmt.Errorf("error while saving metadata: %w", err)
	}

	return nil
}
/*
getNodeEventHandler returns info about the node including performance index
*/
func getNodeEventHandler(m *nats.Msg, message *RequestMessage) error {
processor := glue.Processor{
@ -635,7 +785,7 @@ func getNodeEventHandler(m *nats.Msg, message *RequestMessage) error {
/*
createSnapshotEventHandler create snapshot of given application
Uses appName from the message struct
# Uses appName from the message struct
Payload: no payload needed
Response: notification when it's done or error
@ -701,7 +851,7 @@ func restoreFromSnapshotEventHandler(m *nats.Msg, message *RequestMessage) error
/*
listSnapshotsEventHandler returns list of snapshots related to a single application
Uses appName from the message
# Uses appName from the message
Payload: no payload needed
Response: replies with list of snapshots or an error message
@ -913,7 +1063,7 @@ func deleteSnapshotEventHandler(m *nats.Msg, message *RequestMessage) error {
/*
deleteAppSnapshotsEventHandler deletes all snapshots related to a single application
Uses appName from the message struct
# Uses appName from the message struct
Payload: no payload needed
Response: notification when it's done or error
@ -938,3 +1088,39 @@ func deleteAppSnapshotsEventHandler(m *nats.Msg, message *RequestMessage) error
return nil
}
// getActiveTechHandler replies with the technology currently active for
// the app (as reported by the glue processor).
func getActiveTechHandler(m *nats.Msg, message *RequestMessage) error {
	processor := glue.Processor{
		AppName:           message.AppName,
		DB:                common.GetDBConnection(),
		SnapshotProcessor: &snapshotProcessor,
		DockerSock:        config.DockerSocket,
		BindIPHTTP:        config.AppsBindIPHTTP,
		BindIPSSH:         config.AppsBindIPSSH,
		AppsPath:          config.AppsPath,
	}

	tech, err := processor.GetActiveTech()
	if err != nil {
		log.Printf("backend error: %v\n", err)
		return errorReplyFormater(m, "backend error", err)
	}

	// Assembling reply message
	reply := ReplyMessage{
		Payload: tech,
	}

	data, err := json.Marshal(reply)
	if err != nil {
		return errorReplyFormater(m, "reply formatter error", err)
	}

	err = m.Respond(data)
	if err != nil {
		// Was "ERROR: get app:" — copy-paste from getEventHandler.
		log.Println("ERROR: get active tech:", err.Error())
		return err
	}

	return nil
}

100
jsonmap/jsonmap.go Normal file
View File

@ -0,0 +1,100 @@
package jsonmap
import (
"bytes"
"context"
"database/sql/driver"
"encoding/json"
"errors"
"fmt"
"strings"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
)
// JSONMap defined JSON data type, need to implements driver.Valuer, sql.Scanner interface
type JSONMap map[string]interface{}
// Value implements driver.Valuer: the map is serialized to its JSON
// string representation for storage. A nil map is stored as SQL NULL.
func (m JSONMap) Value() (driver.Value, error) {
	if m == nil {
		return nil, nil
	}
	encoded, err := m.MarshalJSON()
	return string(encoded), err
}
// Scan implements sql.Scanner: it decodes a JSON value coming from the
// database into the map. A database NULL becomes an empty (non-nil) map.
// Accepts []byte and string column values; anything else is an error.
func (m *JSONMap) Scan(val interface{}) error {
	if val == nil {
		*m = make(JSONMap)
		return nil
	}

	var ba []byte
	switch v := val.(type) {
	case []byte:
		ba = v
	case string:
		ba = []byte(v)
	default:
		// fmt.Errorf instead of errors.New(fmt.Sprint(...)); error text
		// lowercased per Go convention.
		return fmt.Errorf("failed to unmarshal JSONB value: %v", val)
	}

	t := map[string]interface{}{}
	rd := bytes.NewReader(ba)
	decoder := json.NewDecoder(rd)
	// UseNumber keeps numbers as json.Number instead of float64, avoiding
	// precision loss for large integers.
	decoder.UseNumber()
	err := decoder.Decode(&t)
	*m = t
	return err
}
// MarshalJSON implements json.Marshaler so the map serializes as a plain
// JSON object (and not as base64-encoded []byte data). A nil map encodes
// to the JSON literal null.
func (m JSONMap) MarshalJSON() ([]byte, error) {
	if m == nil {
		return []byte("null"), nil
	}
	return json.Marshal(map[string]interface{}(m))
}
// UnmarshalJSON implements json.Unmarshaler by decoding b into a plain
// map and storing the result in the receiver.
func (m *JSONMap) UnmarshalJSON(b []byte) error {
	decoded := map[string]interface{}{}
	err := json.Unmarshal(b, &decoded)
	*m = JSONMap(decoded)
	return err
}
// GormDataType gorm common data type. Returns the logical type name that
// GormDBDataType then maps to a concrete column type per dialect.
func (m JSONMap) GormDataType() string {
	return "jsonmap"
}
// GormDBDataType returns the concrete column type for the connected
// database dialect. An empty string is returned for unknown dialects.
func (JSONMap) GormDBDataType(db *gorm.DB, field *schema.Field) string {
	switch db.Dialector.Name() {
	case "sqlite", "sqlite3":
		// gorm.io/driver/sqlite reports its name as "sqlite"; "sqlite3"
		// is kept for compatibility with other drivers.
		return "JSON"
	case "mysql":
		return "JSON"
	case "postgres":
		return "JSONB"
	case "sqlserver":
		return "NVARCHAR(MAX)"
	}
	return ""
}
// GormValue renders the map as an SQL expression. On MySQL (but not
// MariaDB) the JSON string is CAST to the native JSON type; everywhere
// else it is bound as a plain string parameter.
// Receiver renamed jm -> m for consistency with the other JSONMap methods.
func (m JSONMap) GormValue(ctx context.Context, db *gorm.DB) clause.Expr {
	// NOTE(review): the marshal error is deliberately ignored; on failure
	// data is nil and an empty string gets bound — consider logging it.
	data, _ := m.MarshalJSON()
	switch db.Dialector.Name() {
	case "mysql":
		if v, ok := db.Dialector.(*mysql.Dialector); ok && !strings.Contains(v.ServerVersion, "MariaDB") {
			return gorm.Expr("CAST(? AS JSON)", string(data))
		}
	}
	return gorm.Expr("?", string(data))
}

27
main.go
View File

@ -5,7 +5,9 @@ import (
"log"
"time"
"github.com/labstack/echo"
"github.com/getsentry/sentry-go"
sentryecho "github.com/getsentry/sentry-go/echo"
"github.com/labstack/echo/v4"
"github.com/nats-io/nats.go"
"github.com/rosti-cz/node-api/apps"
"github.com/rosti-cz/node-api/apps/drivers"
@ -22,15 +24,26 @@ var nc *nats.Conn
var snapshotProcessor apps.SnapshotProcessor
var nodeProcessor node.Processor
var elapsedMetric int // time elapsed while loading stats about apps
func _init() {
var err error
// Load config from environment variables
config = *common.GetConfig()
// Sentry
sentry.Init(sentry.ClientOptions{
Dsn: config.SentryDSN,
AttachStacktrace: true,
Environment: config.SentryENV,
TracesSampleRate: 0.1,
})
// Connect to the NATS service
nc, err = nats.Connect(config.NATSURL)
if err != nil {
sentry.CaptureException(err)
log.Fatalln(err)
}
@ -58,11 +71,16 @@ func _init() {
nodeProcessor = node.Processor{
DB: common.GetDBConnection(),
}
nodeProcessor.Init()
// Reset elapsed stats
elapsedMetric = -1
}
func main() {
_init()
defer nc.Drain()
defer sentry.Flush(time.Second * 10)
// Close database at the end
db := common.GetDBConnection()
@ -86,9 +104,11 @@ func main() {
start := time.Now()
err := statsProcessor.GatherStats()
if err != nil {
sentry.CaptureException(err)
log.Println("LOOP ERROR:", err.Error())
}
elapsed := time.Since(start)
elapsedMetric = int(elapsed)
log.Printf("Stats gathering elapsed time: %.2fs\n", elapsed.Seconds())
time.Sleep(300 * time.Second)
}
@ -99,6 +119,7 @@ func main() {
for {
err := nodeProcessor.Log()
if err != nil {
sentry.CaptureException(err)
log.Println("NODE PERFORMANCE LOG ERROR:", err.Error())
}
time.Sleep(5 * time.Minute)
@ -110,6 +131,7 @@ func main() {
e.Renderer = t
e.Use(TokenMiddleware)
e.Use(sentryecho.New(sentryecho.Options{}))
// NATS handling
// admin.apps.ALIAS.events
@ -161,6 +183,9 @@ func main() {
// Rebuilds existing app, it keeps the data but creates the container again
e.PUT("/v1/apps/:name/rebuild", rebuildAppHandler)
// Save metadata about app
e.POST("/v1/apps/:name/metadata", saveMetadataHandler)
// Adds new label
e.POST("/v1/apps/:name/labels", addLabelHandler)

View File

@ -5,7 +5,7 @@ import (
"io"
"github.com/gobuffalo/packr"
"github.com/labstack/echo"
"github.com/labstack/echo/v4"
)
// Template struct