// node-api/docker/docker.go
package docker
import (
"context"
"errors"
"io"
"io/ioutil"
"log"
"os"
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
dockerClient "github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
)
// Package-level configuration for talking to the local Docker daemon.
const (
	// dockerTimeout is the grace period, in seconds, granted to a container
	// before a stop turns into a kill.
	dockerTimeout = 10

	// dockerSock tells where to connect to docker, it will be always local sock.
	dockerSock = "unix:///var/run/docker.sock"

	// dockerAPIVersion sets the API version of Docker; 1.40 belongs to Docker 19.03.11.
	dockerAPIVersion = "1.40"
)
// Driver keeps everything for connection to Docker.
// The struct is empty: each method opens a fresh client via getClient,
// so the zero value is ready to use.
type Driver struct{}
// getClient opens a client bound to the local Docker socket with the
// pinned API version. NOTE(review): a new client is created per call;
// callers never close it.
func (d *Driver) getClient() (*dockerClient.Client, error) {
	return dockerClient.NewClient(dockerSock, dockerAPIVersion, nil, nil)
}
// ConnectionStatus checks connection to the Docker daemon.
// It reports true when the daemon answers a server-version request.
func (d *Driver) ConnectionStatus() (bool, error) {
	cli, err := d.getClient()
	if err != nil {
		return false, err
	}
	if _, err := cli.ServerVersion(context.TODO()); err != nil {
		return false, err
	}
	return true, nil
}
// nameToID resolves a container name to exactly one container ID.
// It fails when the name matches no container or more than one.
func (d *Driver) nameToID(name string) (string, error) {
	ids, err := d.IsExist(name)
	if err != nil {
		return "", err
	}
	switch len(ids) {
	case 0:
		return "", errors.New("no container found")
	case 1:
		return ids[0], nil
	default:
		return "", errors.New("multiple containers with the same name")
	}
}
// Status return current status of container with given name.
// Values: "running", "stopped", "no-container" (name not resolvable)
// or "unknown" (client/inspect failure).
func (d *Driver) Status(name string) (string, error) {
	cli, err := d.getClient()
	if err != nil {
		return "unknown", err
	}
	containerID, err := d.nameToID(name)
	if err != nil {
		return "no-container", err
	}
	info, err := cli.ContainerInspect(context.TODO(), containerID)
	if err != nil {
		return "unknown", err
	}
	if info.State.Running {
		return "running", nil
	}
	return "stopped", nil
}
// Stats returns current CPU and memory usage.
//
// NOTE(review): the implementation is incomplete — it fetches a one-shot
// stats snapshot and logs the raw JSON payload, but always reports
// 0.0 CPU and 0 memory; parsing still needs to be implemented.
func (d *Driver) Stats(name string) (float64, int, error) {
	cli, err := d.getClient()
	if err != nil {
		return 0.0, 0, err
	}
	containerID, err := d.nameToID(name)
	if err != nil {
		return 0.0, 0, err
	}
	// false = single snapshot, no streaming.
	stats, err := cli.ContainerStats(context.TODO(), containerID, false)
	if err != nil {
		// BUG FIX: the error was previously swallowed (nil was returned).
		return 0.0, 0, err
	}
	// BUG FIX: the response body was never closed, leaking the connection.
	defer stats.Body.Close()
	data, err := ioutil.ReadAll(stats.Body)
	if err != nil {
		return 0.0, 0, err
	}
	// BUG FIX: log as text — printing the []byte directly dumped a slice
	// of decimal byte values.
	log.Println(string(data))
	return 0.0, 0, nil
}
// Remove removes container represented by containerID.
// The container is stopped first (with the standard grace period),
// then deleted.
func (d *Driver) Remove(name string) error {
	log.Println("Removing container " + name)
	cli, err := d.getClient()
	if err != nil {
		return err
	}
	containerID, err := d.nameToID(name)
	if err != nil {
		return err
	}
	stopTimeout := time.Duration(dockerTimeout * time.Second)
	if err := cli.ContainerStop(context.TODO(), containerID, &stopTimeout); err != nil {
		return err
	}
	return cli.ContainerRemove(context.TODO(), containerID, types.ContainerRemoveOptions{})
}
// Start starts container represented by containerID.
func (d *Driver) Start(name string) error {
	log.Println("Starting container " + name)
	cli, err := d.getClient()
	if err != nil {
		return err
	}
	containerID, err := d.nameToID(name)
	if err != nil {
		return err
	}
	return cli.ContainerStart(context.TODO(), containerID, types.ContainerStartOptions{})
}
// Stop stops container represented by containerID,
// giving it the standard grace period before the daemon kills it.
func (d *Driver) Stop(name string) error {
	log.Println("Stopping container " + name)
	cli, err := d.getClient()
	if err != nil {
		return err
	}
	containerID, err := d.nameToID(name)
	if err != nil {
		return err
	}
	stopTimeout := time.Duration(dockerTimeout * time.Second)
	return cli.ContainerStop(context.TODO(), containerID, &stopTimeout)
}
// IsExist checks existence of the container based on container name.
// Returns container IDs in case of existence, otherwise an empty slice.
func (d *Driver) IsExist(name string) ([]string, error) {
	matches := make([]string, 0)
	cli, err := d.getClient()
	if err != nil {
		return matches, err
	}
	all, err := cli.ContainerList(context.TODO(), types.ContainerListOptions{All: true})
	if err != nil {
		return matches, err
	}
	// Docker reports names with a leading slash ("/name"); strip it
	// before comparing against the requested name.
	for _, c := range all {
		for _, rawName := range c.Names {
			if strings.TrimLeft(rawName, "/") == name {
				matches = append(matches, c.ID)
			}
		}
	}
	return matches, nil
}
// pullImage pulls image into local docker instance.
// The pull progress stream is echoed to stdout.
func (d *Driver) pullImage(image string) error {
	log.Println("Pulling image " + image)
	cli, err := d.getClient()
	if err != nil {
		return err
	}
	stream, err := cli.ImagePull(context.TODO(), image, types.ImagePullOptions{})
	if err != nil {
		return err
	}
	defer stream.Close()
	// BUG FIX: draining the stream is what actually drives the pull to
	// completion; the copy error was previously ignored.
	_, err = io.Copy(os.Stdout, stream)
	return err
}
// Create creates the container.
// name - container (and host-) name
// image - docker image
// volumePath - host's directory to mount into the container at /srv
// HTTPPort - host port published to container port 8000/tcp
// SSHPort - host port published to container port 22/tcp; 0 disables SSH publishing
// CPU - CPU quota units (multiplied by 1000 over a 100000 µs period)
// memory - memory limit in MiB
// cmd - string slice of command and its arguments
// returns container ID
func (d *Driver) Create(name string, image string, volumePath string, HTTPPort int, SSHPort int, CPU int, memory int, cmd []string) (string, error) {
	log.Println("Creating container " + name)
	cli, err := d.getClient()
	if err != nil {
		return "", err
	}
	if err := d.pullImage(image); err != nil {
		return "", err
	}
	portmaps := make(nat.PortMap, 2)
	// BUG FIX: HostPort must be a bare port number; the "/tcp" protocol
	// suffix belongs only on the container-side nat.Port key, and a
	// suffixed host port fails to parse when the container starts.
	portmaps["8000/tcp"] = []nat.PortBinding{{HostPort: strconv.Itoa(HTTPPort)}}
	if SSHPort != 0 {
		portmaps["22/tcp"] = []nat.PortBinding{{HostPort: strconv.Itoa(SSHPort)}}
	}
	createdContainer, err := cli.ContainerCreate(
		context.TODO(),
		&container.Config{
			Hostname: name,
			Env:      []string{},
			Image:    image,
			Cmd:      cmd,
		},
		&container.HostConfig{
			Resources: container.Resources{
				CPUPeriod: 100000,
				CPUQuota:  int64(CPU) * 1000,
				// BUG FIX: "^" is XOR in Go, not exponentiation — the old
				// expressions computed (limit*1024) XOR 2 instead of MiB.
				// Allow 10 % more memory so we have space for MemoryReservation.
				Memory: int64(memory*110/100) * 1024 * 1024,
				// This should provide a softer way to limit the memory of our containers.
				MemoryReservation: int64(memory) * 1024 * 1024,
			},
			PortBindings: portmaps,
			AutoRemove:   false,
			RestartPolicy: container.RestartPolicy{
				Name:              "on-failure",
				MaximumRetryCount: 3,
			},
			Binds: []string{
				volumePath + ":/srv",
			},
		},
		&network.NetworkingConfig{},
		name,
	)
	if err != nil {
		return "", err
	}
	// I dunno if we want this
	// err = cli.ContainerStart(context.TODO(), createdContainer.ID, types.ContainerStartOptions{})
	return createdContainer.ID, nil
}