package docker

import (
	"context"
	"errors"
	"io"
	"io/ioutil"
	"log"
	"os"
	"strings"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/network"
	dockerClient "github.com/docker/docker/client"
	"github.com/docker/go-connections/nat"
)

// dockerTimeout is how long (in seconds) we give a container to stop gracefully.
const dockerTimeout = 10

// dockerSock is the Docker daemon endpoint; we always talk to the local UNIX socket.
const dockerSock = "unix:///var/run/docker.sock"

// dockerAPIVersion pins the Docker API version requested by the client.
// Version 1.24 keeps the driver compatible with older daemons as well.
const dockerAPIVersion = "1.24"

// Driver keeps everything needed for a connection to Docker.
type Driver struct{}

func (d *Driver) getClient() (*dockerClient.Client, error) {
	return dockerClient.NewClient(dockerSock, dockerAPIVersion, nil, nil)
}

// ConnectionStatus checks the connection to the Docker daemon.
func (d *Driver) ConnectionStatus() (bool, error) {
	cli, err := d.getClient()
	if err != nil {
		return false, err
	}

	_, err = cli.ServerVersion(context.TODO())
	if err != nil {
		return false, err
	}

	return true, nil
}

func (d *Driver) nameToID(name string) (string, error) {
	containerIDs, err := d.IsExist(name)
	if err != nil {
		return "", err
	}

	if len(containerIDs) == 0 {
		return "", errors.New("no container found")
	}
	if len(containerIDs) > 1 {
		return "", errors.New("multiple containers with the same name")
	}

	return containerIDs[0], nil
}

// Status returns the current status of the container with the given name.
func (d *Driver) Status(name string) (string, error) {
	status := "unknown"

	cli, err := d.getClient()
	if err != nil {
		return status, err
	}

	containerID, err := d.nameToID(name)
	if err != nil && err.Error() == "no container found" {
		return "no-container", err
	}
	if err != nil {
		return status, err
	}

	info, err := cli.ContainerInspect(context.TODO(), containerID)
	if err != nil {
		return status, err
	}

	if info.State.Running {
		status = "running"
	} else {
		status = "stopped"
	}

	return status, nil
}

// Stats returns the current CPU and memory usage of the container with the
// given name. Parsing of the stats payload is not implemented yet; for now the
// raw JSON is only logged and zero values are returned.
func (d *Driver) Stats(name string) (float64, int, error) {
	cli, err := d.getClient()
	if err != nil {
		return 0.0, 0, err
	}

	containerID, err := d.nameToID(name)
	if err != nil {
		return 0.0, 0, err
	}

	stats, err := cli.ContainerStats(context.TODO(), containerID, false)
	if err != nil {
		return 0.0, 0, err
	}
	defer stats.Body.Close()

	data, err := ioutil.ReadAll(stats.Body)
	if err != nil {
		return 0.0, 0, err
	}
	// The daemon returns one JSON document per request, for example:
// {"read":"2020-07-11T20:42:31.486726241Z","preread":"2020-07-11T20:42:30.484048602Z","pids_stats":{"current":7},"blkio_stats":{"io_service_bytes_recursive":[{"major":253,"minor":0,"op":"Read","value":0},{"major":253,"minor":0,"op":"Write","value":20480},{"major":253,"minor":0,"op":"Sync","value":12288},{"major":253,"minor":0,"op":"Async","value":8192},{"major":253,"minor":0,"op":"Discard","value":0},{"major":253,"minor":0,"op":"Total","value":20480}],"io_serviced_recursive":[{"major":253,"minor":0,"op":"Read","value":0},{"major":253,"minor":0,"op":"Write","value":5},{"major":253,"minor":0,"op":"Sync","value":3},{"major":253,"minor":0,"op":"Async","value":2},{"major":253,"minor":0,"op":"Discard","value":0},{"major":253,"minor":0,"op":"Total","value":5}],"io_queue_recursive":[],"io_service_time_recursive":[],"io_wait_time_recursive":[],"io_merged_recursive":[],"io_time_recursive":[],"sectors_recursive":[]},"num_procs":0,"storage_stats":{},"cpu_stats":{"cpu_usage":{"total_usage":758392753,"percpu_usage":[302688474,0,11507116,124238500,222136766,5656446,3009320,0,19406386,1397028,6201423,62151294,0,0,0,0],"usage_in_kernelmode":100000000,"usage_in_usermode":640000000},"system_cpu_usage":119385810000000,"online_cpus":12,"throttling_data":{"periods":21,"throttled_periods":1,"throttled_time":2995938}},"precpu_stats":{"cpu_usage":{"total_usage":758282347,"percpu_usage":[302688474,0,11507116,124238500,222026360,5656446,3009320,0,19406386,1397028,6201423,62151294,0,0,0,0],"usage_in_kernelmode":100000000,"usage_in_usermode":640000000},"system_cpu_usage":119373720000000,"online_cpus":12,"throttling_data":{"periods":21,"throttled_periods":1,"throttled_time":2995938}},"memory_stats":{"usage":21626880,"max_usage":22630400,"stats":{"active_anon":15949824,"active_file":0,"cache":0,"dirty":0,"hierarchical_memory_limit":144179200,"hierarchical_memsw_limit":288358400,"inactive_anon":0,"inactive_file":0,"mapped_file":0,"pgfault":13167,"pgmajfault":0,"pgpgin":7293,"pgpgout":3406,"rss":15900672,"rss_huge":0,"total_active_anon":15949824,"total_active_file":0,"total_cache":0,"total_dirty":0,"total_inactive_anon":0,"total_inactive_file":0,"total_mapped_file":0,"total_pgfault":13167,"total_pgmajfault":0,"total_pgpgin":7293,"total_pgpgout":3406,"total_rss":15900672,"total_rss_huge":0,"total_unevictable":0,"total_writeback":0,"unevictable":0,"writeback":0},"limit":144179200},"name":"/test_1234","id":"576878d645efecc8e5e2a57b88351f7b5c551e3fc72dc8473fd965d10dfddbec","networks":{"eth0":{"rx_bytes":6150,"rx_packets":37,"rx_errors":0,"rx_dropped":0,"tx_bytes":0,"tx_packets":0,"tx_errors":0,"tx_dropped":0}}}
	log.Println(string(data))

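	// A possible way to turn the payload above into the (CPU percent, memory
	// bytes) pair this method is supposed to return -- only a sketch, assuming
	// "encoding/json" were added to the imports; the struct and field names
	// come from types.StatsJSON in the Docker API package:
	//
	// var parsed types.StatsJSON
	// if err := json.Unmarshal(data, &parsed); err != nil {
	// 	return 0.0, 0, err
	// }
	// cpuDelta := float64(parsed.CPUStats.CPUUsage.TotalUsage - parsed.PreCPUStats.CPUUsage.TotalUsage)
	// sysDelta := float64(parsed.CPUStats.SystemUsage - parsed.PreCPUStats.SystemUsage)
	// cpuPercent := 0.0
	// if cpuDelta > 0 && sysDelta > 0 {
	// 	cpuPercent = cpuDelta / sysDelta * float64(parsed.CPUStats.OnlineCPUs) * 100.0
	// }
	// return cpuPercent, int(parsed.MemoryStats.Usage), nil
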
	return 0.0, 0, nil
}

// Remove stops and removes the container with the given name.
func (d *Driver) Remove(name string) error {
	log.Println("Removing container " + name)
	cli, err := d.getClient()
	if err != nil {
		return err
	}

	containerID, err := d.nameToID(name)
	if err != nil {
		return err
	}

	timeout := dockerTimeout * time.Second
	err = cli.ContainerStop(context.TODO(), containerID, &timeout)
	if err != nil {
		return err
	}

	return cli.ContainerRemove(context.TODO(), containerID, types.ContainerRemoveOptions{})
}

// Start starts the container with the given name.
func (d *Driver) Start(name string) error {
	log.Println("Starting container " + name)
	cli, err := d.getClient()
	if err != nil {
		return err
	}

	containerID, err := d.nameToID(name)
	if err != nil {
		return err
	}

	return cli.ContainerStart(context.TODO(), containerID, types.ContainerStartOptions{})
}

// Stop stops the container with the given name.
func (d *Driver) Stop(name string) error {
	log.Println("Stopping container " + name)
	cli, err := d.getClient()
	if err != nil {
		return err
	}

	containerID, err := d.nameToID(name)
	if err != nil {
		return err
	}

	timeout := dockerTimeout * time.Second
	return cli.ContainerStop(context.TODO(), containerID, &timeout)
}

// IsExist checks for existence of a container by its name.
// It returns the IDs of all matching containers, or an empty slice when
// nothing matches.
func (d *Driver) IsExist(name string) ([]string, error) {
	var containerIDs = make([]string, 0)

	cli, err := d.getClient()
	if err != nil {
		return containerIDs, err
	}

	containers, err := cli.ContainerList(context.TODO(), types.ContainerListOptions{All: true})
	if err != nil {
		return containerIDs, err
	}

	// Go through the containers and pick the ones whose name matches the requested one.
	for _, containerObject := range containers {
		for _, containerName := range containerObject.Names {
			containerName = strings.TrimPrefix(containerName, "/")
			if containerName == name {
				containerIDs = append(containerIDs, containerObject.ID)
			}
		}
	}

	return containerIDs, nil
}

// pullImage pulls the image into the local Docker instance.
func (d *Driver) pullImage(image string) error {
	log.Println("Pulling image " + image)
	cli, err := d.getClient()
	if err != nil {
		return err
	}

	stream, err := cli.ImagePull(context.TODO(), image, types.ImagePullOptions{})
	if err != nil {
		return err
	}
	defer stream.Close()

	// The pull does not finish until the stream is consumed, so copy the
	// progress output to stdout.
	_, err = io.Copy(os.Stdout, stream)

	return err
}

// Create creates the container.
// image - Docker image
// cmd - string slice with the command and its arguments
// volumePath - host directory to mount into the container
// CPU - CPU limit as a percentage of a single core
// memory - memory limit in megabytes
// Returns the ID of the created container.
func (d *Driver) Create(name string, image string, volumePath string, HTTPPort int, SSHPort int, CPU int, memory int, cmd []string) (string, error) {
	log.Println("Creating container " + name)
	cli, err := d.getClient()
	if err != nil {
		return "", err
	}

	err = d.pullImage(image)
	if err != nil {
		return "", err
	}

	portmaps := nat.PortMap{}

	// Port bindings are currently disabled; HTTPPort and SSHPort are accepted
	// but not used. The sketch below shows how they could be wired up
	// (it would also need "strconv" in the imports):
	//
	// portbindingsHTTP := make([]nat.PortBinding, 1)
	// portbindingsHTTP[0] = nat.PortBinding{
	// 	HostIP:   "0.0.0.0",
	// 	HostPort: strconv.Itoa(HTTPPort),
	// }
	// portmaps["8000/tcp"] = portbindingsHTTP
	//
	// if SSHPort != 0 {
	// 	portbindingsSSH := make([]nat.PortBinding, 1)
	// 	portbindingsSSH[0] = nat.PortBinding{
	// 		HostIP:   "0.0.0.0",
	// 		HostPort: strconv.Itoa(SSHPort),
	// 	}
	// 	portmaps["22/tcp"] = portbindingsSSH
	// }

	createdContainer, err := cli.ContainerCreate(
		context.Background(),
		&container.Config{
			Hostname: name,
			Env:      []string{},
			Image:    image,
			Cmd:      cmd,
		},
		&container.HostConfig{
			Resources: container.Resources{
				// CPUQuota/CPUPeriod = CPU/100, i.e. CPU is a percentage of one core.
				CPUPeriod: 100000,
				CPUQuota:  int64(CPU) * 1000,
				// Allow 10% more memory than requested so there is headroom above MemoryReservation.
				Memory: int64(memory*110/100) * 1024 * 1024,
				// MemoryReservation is a soft limit, a gentler way to limit the memory of our containers.
				MemoryReservation: int64(memory) * 1024 * 1024,
			},
			PortBindings: portmaps,
			AutoRemove:   false,
			RestartPolicy: container.RestartPolicy{
				Name:              "on-failure",
				MaximumRetryCount: 3,
			},
			Binds: []string{
				volumePath + ":/srv",
			},
		},
		&network.NetworkingConfig{},
		name,
	)
	if err != nil {
		return "", err
	}

	containerID := createdContainer.ID

	// Starting the container right away might not be desirable, so it is left to the caller:
	// err = cli.ContainerStart(context.TODO(), createdContainer.ID, types.ContainerStartOptions{})

	return containerID, nil
}
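
// A minimal sketch (not used anywhere in this package) of how the driver is
// expected to be driven. The image name, ports and limits below are
// placeholder values, and HTTPPort/SSHPort are currently ignored by Create:
//
// func exampleUsage() {
// 	d := &Driver{}
//
// 	// 100 = 100 % of one core, 128 = memory limit in MB (see Create above).
// 	id, err := d.Create("test_1234", "docker.io/library/alpine:latest", "/srv/test_1234", 8080, 2222, 100, 128, []string{"sleep", "3600"})
// 	if err != nil {
// 		log.Fatal(err)
// 	}
// 	log.Println("created container", id)
//
// 	if err := d.Start("test_1234"); err != nil {
// 		log.Fatal(err)
// 	}
//
// 	status, _ := d.Status("test_1234")
// 	log.Println("status:", status)
// }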