Crack open a refreshing beverage and freshen up with various Docker and Docker compose features.

Docker CLI

# list running containers (only running ones — not all)
$ docker ps
# list all containers, including stopped ones
$ docker container ls --all

# stop a running container
$ docker stop example
# start a stopped container
$ docker start example

# restart a running container
$ docker restart <container-id>

# remove a stopped container
$ docker rm example
# only stopped containers can be removed
$ docker rm <stopped-container-id>

Dockerfile

Below is a pseudo example of a multi-stage Dockerfile.

FROM alpine:latest AS builder
# set the working directory for subsequent instructions
WORKDIR /build
# run any shell command at image-build time
RUN apk add --no-cache npm
# copy files and folders from the build context
COPY ./source /build
# build at image-build time — a CMD here would never run, because this
# stage only exists to produce artifacts for the next stage
RUN npm run build

FROM alpine:latest
WORKDIR /serv
RUN apk add --no-cache npm
# copy files from an earlier named stage
COPY --from=builder /build /serv
# document which port the container listens on at runtime
EXPOSE 420
# this is the container's main executable
ENTRYPOINT ["npm", "start"]

To build and run this Dockerfile:

$ docker build . --tag example:1.0

# this publishes container port 420 on host port 69 on ALL interfaces,
# regardless of your firewall config (Docker manages its own iptables rules)
# use --publish 127.0.0.1:69:420 to bind only to localhost
$ docker run --publish 69:420 example:1.0

# open an interactive shell (like connecting to a server)
# or replace sh with any command
$ docker exec --interactive --tty <container-id> sh
# exit the container
$ exit

docker-compose

As you collect Dockerfiles like the gremlin you are, it will eventually get easier to orchestrate your Docker deployments.

version: "3.9"

services:
  notebook:
    image: jupyter/minimal-notebook
    container_name: jupyter
    hostname: jupyter
    restart: always
    networks:
      - tunnel
    # quote port mappings so YAML never misreads them as sexagesimal ints
    ports:
      - "90:8888"
    volumes:
      - jupyter_data:/home/jupyter
    runtime: nvidia
    user: root
    command: "start-notebook.sh"
    # quote all env values: Compose expects strings, and unquoted
    # 1000 / yes / -R would otherwise be typed as int / bool / special
    environment:
      NB_USER: "jupyter"
      NB_UID: "1000"
      NB_GID: "1000"
      CHOWN_HOME: "yes"
      CHOWN_HOME_OPTS: "-R"
      NVIDIA_VISIBLE_DEVICES: "all"

volumes:
  jupyter_data:

networks:
  tunnel:
    driver: bridge