이 포스트는 Korea Azure User Group에서 진행하는 Docker/Container 스터디 그룹에 참여하며 작성했습니다.
스터디를 진행한 Katacoda 강좌: Docker & Containers(https://katacoda.com/courses/docker)
진행한 강의 목록
- See Container Metrics With Docker Stats
- Creating Optimised Docker Images using Multi-Stage Builds
- Formatting PS Output
- Run Docker From Rootless Users
스터디는 Katacoda의 화면을 참조하되, 실습은 제 개인 PC에서 VM을 구성해서 진행했습니다. 따라해볼 수 있는 것만 별도로 정리했습니다.
구성환경
- Windows 10 Pro 1809
- Hyper-V
- CentOS 7.6.1810 (Kernel 4.20.2-1.el7)
내용 정리
# 컨테이너의 자원 사용량을 확인해보자
[root@docker-master home]# docker run -d -p 80:80 -e DEFAULT_HOST=proxy.example -v /var/run/docker.sock:/tmp/docker.sock:ro --name nginx jwilder/nginx-proxy:alpine # 컨테이너 실행
97036b5129b740599d6149ef8ca8ca0640d670be5f08d491d5059b7a57ebd4b4
[root@docker-master home]# docker stats nginx # 자원 사용 확인
CONTAINER ID NAME CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O PIDS
97036b5129b7 nginx 100.78% 12.73MiB / 3.829GiB 0.32% 766B / 0B 0B / 0B 29
[root@docker-master home]# docker ps -q | xargs docker stats # 여러 컨테이너의 사용량 확인
# 이미지 빌드시 여러 이미지를 이용해서 하나로 묶을 수 있다.
[root@docker-master golang-http-server]# git clone https://github.com/katacoda/golang-http-server.git # 실험대상 소스를 받아온다.
Cloning into 'golang-http-server'...
remote: Enumerating objects: 78, done.
remote: Total 78 (delta 0), reused 0 (delta 0), pack-reused 78
Unpacking objects: 100% (78/78), done.
[root@docker-master golang-http-server]# vi Dockerfile.multi # 파일을 작성한다.
# First Stage
FROM golang:1.6-alpine
RUN mkdir /app
ADD . /app/
WORKDIR /app
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o main .
# Second Stage
FROM alpine
EXPOSE 80
CMD ["/app"]
# Copy from first stage
COPY --from=0 /app/main /app
[root@docker-master golang-http-server]# docker build -f Dockerfile.multi -t golang-app . # 빌드를 한다.
Sending build context to Docker daemon 2.09MB
Step 1/9 : FROM golang:1.6-alpine
---> 1ea38172de32
Step 2/9 : RUN mkdir /app
---> Using cache
---> 5840c4f7763c
Step 3/9 : ADD . /app/
---> f2961eb6daee
Step 4/9 : WORKDIR /app
---> Running in b52a9230da9b
Removing intermediate container b52a9230da9b
---> 58949d05ae91
Step 5/9 : RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o main .
---> Running in 3c0119fb6ecc
Removing intermediate container 3c0119fb6ecc
---> 95bf13c7b188
Step 6/9 : FROM alpine
latest: Pulling from library/alpine
8e402f1a9c57: Already exists
Digest: sha256:644fcb1a676b5165371437feaa922943aaf7afcfa8bfee4472f6860aad1ef2a0
Status: Downloaded newer image for alpine:latest
---> 5cb3aa00f899
Step 7/9 : EXPOSE 80
---> Running in 70291a3d813e
Removing intermediate container 70291a3d813e
---> 2b81383e9b9e
Step 8/9 : CMD ["/app"]
---> Running in b896170c0b4c
Removing intermediate container b896170c0b4c
---> 3b8d7430007f
Step 9/9 : COPY --from=0 /app/main /app
---> 05cb1c5217cd
Successfully built 05cb1c5217cd
Successfully tagged golang-app:latest
[root@docker-master golang-http-server]# docker images # 이미지 확인을 한다.
REPOSITORY TAG IMAGE ID CREATED SIZE
golang-app latest 05cb1c5217cd 19 seconds ago 13.1MB
[root@docker-master golang-http-server]# docker run -d -p 80:80 golang-app # 실행해본다.
8edf6c0a04011378f8945c743ec1108b9d0e51d1779dd44e9beae09cd073d1d7
[root@docker-master golang-http-server]# curl localhost # 테스트한다.
<h1>This request was processed by host: 8edf6c0a0401</h1>
[root@docker-master golang-http-server]# docker ps # 컨테이너 재확인
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
8edf6c0a0401 golang-app "/app" 13 seconds ago Up 13 seconds 0.0.0.0:80->80/tcp eager_lalande
# docker ps 입력시 나오는 정보는 방대하다고 느낄 수 있다.
# --format 옵션을 통해 원하는 정보만 표기되도록 할 수 있다.
[root@docker-master ~]# docker run -d redis # 만만한 redis cache 실행
b8ce96117bff7918d8565d399fa24d94e69d900e3e190cb6cf9beaa802142844
[root@docker-master ~]# docker ps # 실행중인 컨테이너 확인
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
b8ce96117bff redis "docker-entrypoint.s…" 3 seconds ago Up 2 seconds 6379/tcp goofy_brown
[root@docker-master ~]# docker ps --format '{{.Names}} container is using {{.Image}} image' # 컨테이너 이름과 이미지 이름으로 문장처럼 출력
goofy_brown container is using redis image
[root@docker-master ~]# docker ps --format 'table {{.Names}}\t{{.Image}}' # 테이블 형식으로 출력
NAMES IMAGE
goofy_brown redis
[root@docker-master ~]# docker ps -q | xargs docker inspect --format '{{ .Id }} - {{ .Name }} - {{ .NetworkSettings.IPAddress }}' # 파이프라인으로 컨테이너의 ID, 이름, IP 출력
b8ce96117bff7918d8565d399fa24d94e69d900e3e190cb6cf9beaa802142844 - /goofy_brown - 172.17.0.2
# 컨테이너는 Docker daemon으로 구동된다. Docker daemon은 기본적으로 root(관리자) 권한으로 실행된다.
# 이로 인해 발생할 수 있는 보안 문제를 줄이기 위해 root 계정이 아닌 사용자 계정으로 컨테이너를 배포하고 실행하도록 할 수 있다.
[root@docker-master ~]# useradd -m -d /home/lowprivuser -p $(openssl passwd -1 password) lowprivuser # lowprivuser 계정 생성
[root@docker-master ~]# su lowprivuser # 계정전환
[lowprivuser@docker-master root]$ touch /root/blocked # 권한 부여 확인
touch: cannot touch `/root/blocked': 허가 거부
[lowprivuser@docker-master root]$ docker ps # docker 명령어 사용여부 확인
Got permission denied while trying to connect to the Docker daemon socket at unix:///var/run/docker.sock: Get http://%2Fvar%2Frun%2Fdocker.sock/v1.39/containers/json: dial unix /var/run/docker.sock: connect: permission denied
[lowprivuser@docker-master ~]$ curl -sSL https://get.docker.com/rootless | sh # 스크립트 다운 및 실행
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 59.1M 100 59.1M 0 0 8994k 0 0:00:06 0:00:06 --:--:-- 12.0M
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 13.9M 100 13.9M 0 0 4584k 0 0:00:03 0:00:03 --:--:-- 4583k
# systemd not detected, dockerd daemon needs to be started manually
/home/lowprivuser/bin/dockerd-rootless.sh --experimental --storage-driver vfs
# Docker binaries are installed in /home/lowprivuser/bin
# WARN: dockerd is not in your current PATH or pointing to /home/lowprivuser/bin/dockerd
# Make sure the following environment variables are set (or add them to ~/.bashrc):\n
export XDG_RUNTIME_DIR=/tmp/docker-1000
export PATH=/home/lowprivuser/bin:$PATH
export DOCKER_HOST=unix:///tmp/docker-1000/docker.sock
# export로 PATH 및 연결 경로를 잡아준다.
[lowprivuser@docker-master ~]$ export XDG_RUNTIME_DIR=/tmp/docker-1000
[lowprivuser@docker-master ~]$ export PATH=/home/lowprivuser/bin:$PATH
[lowprivuser@docker-master ~]$ export DOCKER_HOST=unix:///tmp/docker-1000/docker.sock
[lowprivuser@docker-master ~]$ /home/lowprivuser/bin/dockerd-rootless.sh --experimental --storage-driver vfs
+ '[' -w /tmp/docker-1000 ']'
+ '[' -w /home/lowprivuser ']'
+ rootlesskit=
+ for f in docker-rootlesskit rootlesskit
+ which docker-rootlesskit
+ for f in docker-rootlesskit rootlesskit
+ which rootlesskit
+ rootlesskit=rootlesskit
+ break
+ '[' -z rootlesskit ']'
+ net=
+ mtu=
+ which slirp4netns
+ '[' -z ']'
+ which vpnkit
+ net=vpnkit
+ mtu=1500
+ '[' -z ']'
+ _DOCKERD_ROOTLESS_CHILD=1
+ export _DOCKERD_ROOTLESS_CHILD
+ rootlesskit --net=vpnkit --mtu=1500 --disable-host-loopback --copy-up=/etc --copy-up=/run /home/lowprivuser/bin/dockerd-rootless.sh --experimental --storage-driver vfs
+ '[' -w /tmp/docker-1000 ']'
+ '[' -w /home/lowprivuser ']'
+ rootlesskit=
+ for f in docker-rootlesskit rootlesskit
+ which docker-rootlesskit
+ for f in docker-rootlesskit rootlesskit
+ which rootlesskit
+ rootlesskit=rootlesskit
+ break
+ '[' -z rootlesskit ']'
+ net=
+ mtu=
+ which slirp4netns
+ '[' -z ']'
+ which vpnkit
+ net=vpnkit
+ mtu=1500
+ '[' -z 1 ']'
+ '[' 1 = 1 ']'
+ rm -f /run/docker /run/xtables.lock
+ dockerd --experimental --storage-driver vfs
WARN[2019-03-22T17:10:08.450399702+09:00] Running experimental build
WARN[2019-03-22T17:10:08.450451403+09:00] Running in rootless mode. Cgroups, AppArmor, and CRIU are disabled.
WARN[2019-03-22T17:10:08.450918111+09:00] Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior dir=/home/lowprivuser/.local/share/docker error="error writing file to signal mount cleanup on shutdown: open /tmp/docker-1000/docker/unmount-on-shutdown: no such file or directory"
INFO[2019-03-22T17:10:08.451724226+09:00] libcontainerd: started new containerd process pid=8644
INFO[2019-03-22T17:10:08.451763926+09:00] parsed scheme: "unix" module=grpc
INFO[2019-03-22T17:10:08.451777927+09:00] scheme "unix" not registered, fallback to default scheme module=grpc
INFO[2019-03-22T17:10:08.451814627+09:00] ccResolverWrapper: sending new addresses to cc: [{unix:///tmp/docker-1000/docker/containerd/containerd.sock 0 <nil>}] module=grpc
INFO[2019-03-22T17:10:08.451860228+09:00] ClientConn switching balancer to "pick_first" module=grpc
INFO[2019-03-22T17:10:08.451894129+09:00] pickfirstBalancer: HandleSubConnStateChange: 0xc00013f990, CONNECTING module=grpc
INFO[2019-03-22T17:10:08.463370238+09:00] starting containerd revision=bb71b10fd8f58240ca47fbb579b9d1028eea7c84 version=v1.2.5
INFO[2019-03-22T17:10:08.463734145+09:00] loading plugin "io.containerd.content.v1.content"... type=io.containerd.content.v1
INFO[2019-03-22T17:10:08.463798646+09:00] loading plugin "io.containerd.snapshotter.v1.btrfs"... type=io.containerd.snapshotter.v1
WARN[2019-03-22T17:10:08.463973349+09:00] failed to load plugin io.containerd.snapshotter.v1.btrfs error="path /home/lowprivuser/.local/share/docker/containerd/daemon/io.containerd.snapshotter.v1.btrfs must be a btrfs filesystem to be used with the btrfs snapshotter"
INFO[2019-03-22T17:10:08.464008050+09:00] loading plugin "io.containerd.snapshotter.v1.aufs"... type=io.containerd.snapshotter.v1
WARN[2019-03-22T17:10:08.464985067+09:00] failed to load plugin io.containerd.snapshotter.v1.aufs error="modprobe aufs failed: "modprobe: FATAL: Module aufs not found.\n": exit status 1"
INFO[2019-03-22T17:10:08.465026268+09:00] loading plugin "io.containerd.snapshotter.v1.native"... type=io.containerd.snapshotter.v1
INFO[2019-03-22T17:10:08.465100170+09:00] loading plugin "io.containerd.snapshotter.v1.overlayfs"... type=io.containerd.snapshotter.v1
INFO[2019-03-22T17:10:08.465243472+09:00] loading plugin "io.containerd.snapshotter.v1.zfs"... type=io.containerd.snapshotter.v1
WARN[2019-03-22T17:10:08.465491977+09:00] failed to load plugin io.containerd.snapshotter.v1.zfs error="path /home/lowprivuser/.local/share/docker/containerd/daemon/io.containerd.snapshotter.v1.zfs must be a zfs filesystem to be used with the zfs snapshotter"
INFO[2019-03-22T17:10:08.465524777+09:00] loading plugin "io.containerd.metadata.v1.bolt"... type=io.containerd.metadata.v1
WARN[2019-03-22T17:10:08.465561778+09:00] could not use snapshotter zfs in metadata plugin error="path /home/lowprivuser/.local/share/docker/containerd/daemon/io.containerd.snapshotter.v1.zfs must be a zfs filesystem to be used with the zfs snapshotter"
WARN[2019-03-22T17:10:08.465589478+09:00] could not use snapshotter btrfs in metadata plugin error="path /home/lowprivuser/.local/share/docker/containerd/daemon/io.containerd.snapshotter.v1.btrfs must be a btrfs filesystem to be used with the btrfs snapshotter"
WARN[2019-03-22T17:10:08.465603679+09:00] could not use snapshotter aufs in metadata plugin error="modprobe aufs failed: "modprobe: FATAL: Module aufs not found.\n": exit status 1"
INFO[2019-03-22T17:10:08.470945876+09:00] loading plugin "io.containerd.differ.v1.walking"... type=io.containerd.differ.v1
INFO[2019-03-22T17:10:08.470990977+09:00] loading plugin "io.containerd.gc.v1.scheduler"... type=io.containerd.gc.v1
INFO[2019-03-22T17:10:08.471042178+09:00] loading plugin "io.containerd.service.v1.containers-service"... type=io.containerd.service.v1
INFO[2019-03-22T17:10:08.471054178+09:00] loading plugin "io.containerd.service.v1.content-service"... type=io.containerd.service.v1
INFO[2019-03-22T17:10:08.471062678+09:00] loading plugin "io.containerd.service.v1.diff-service"... type=io.containerd.service.v1
INFO[2019-03-22T17:10:08.471075078+09:00] loading plugin "io.containerd.service.v1.images-service"... type=io.containerd.service.v1
INFO[2019-03-22T17:10:08.471084879+09:00] loading plugin "io.containerd.service.v1.leases-service"... type=io.containerd.service.v1
INFO[2019-03-22T17:10:08.471093279+09:00] loading plugin "io.containerd.service.v1.namespaces-service"... type=io.containerd.service.v1
INFO[2019-03-22T17:10:08.471112779+09:00] loading plugin "io.containerd.service.v1.snapshots-service"... type=io.containerd.service.v1
INFO[2019-03-22T17:10:08.471137880+09:00] loading plugin "io.containerd.runtime.v1.linux"... type=io.containerd.runtime.v1
INFO[2019-03-22T17:10:08.471274182+09:00] loading plugin "io.containerd.runtime.v2.task"... type=io.containerd.runtime.v2
INFO[2019-03-22T17:10:08.471350683+09:00] loading plugin "io.containerd.monitor.v1.cgroups"... type=io.containerd.monitor.v1
INFO[2019-03-22T17:10:08.471599788+09:00] loading plugin "io.containerd.service.v1.tasks-service"... type=io.containerd.service.v1
INFO[2019-03-22T17:10:08.471633689+09:00] loading plugin "io.containerd.internal.v1.restart"... type=io.containerd.internal.v1
INFO[2019-03-22T17:10:08.471665089+09:00] loading plugin "io.containerd.grpc.v1.containers"... type=io.containerd.grpc.v1
INFO[2019-03-22T17:10:08.471707590+09:00] loading plugin "io.containerd.grpc.v1.content"... type=io.containerd.grpc.v1
INFO[2019-03-22T17:10:08.471743691+09:00] loading plugin "io.containerd.grpc.v1.diff"... type=io.containerd.grpc.v1
INFO[2019-03-22T17:10:08.471766391+09:00] loading plugin "io.containerd.grpc.v1.events"... type=io.containerd.grpc.v1
INFO[2019-03-22T17:10:08.471795692+09:00] loading plugin "io.containerd.grpc.v1.healthcheck"... type=io.containerd.grpc.v1
INFO[2019-03-22T17:10:08.471807692+09:00] loading plugin "io.containerd.grpc.v1.images"... type=io.containerd.grpc.v1
INFO[2019-03-22T17:10:08.471815492+09:00] loading plugin "io.containerd.grpc.v1.leases"... type=io.containerd.grpc.v1
INFO[2019-03-22T17:10:08.471839892+09:00] loading plugin "io.containerd.grpc.v1.namespaces"... type=io.containerd.grpc.v1
INFO[2019-03-22T17:10:08.471849693+09:00] loading plugin "io.containerd.internal.v1.opt"... type=io.containerd.internal.v1
INFO[2019-03-22T17:10:08.471909094+09:00] loading plugin "io.containerd.grpc.v1.snapshots"... type=io.containerd.grpc.v1
INFO[2019-03-22T17:10:08.471938494+09:00] loading plugin "io.containerd.grpc.v1.tasks"... type=io.containerd.grpc.v1
INFO[2019-03-22T17:10:08.471968995+09:00] loading plugin "io.containerd.grpc.v1.version"... type=io.containerd.grpc.v1
INFO[2019-03-22T17:10:08.471980595+09:00] loading plugin "io.containerd.grpc.v1.introspection"... type=io.containerd.grpc.v1
INFO[2019-03-22T17:10:08.472093397+09:00] serving... address="/tmp/docker-1000/docker/containerd/containerd-debug.sock"
INFO[2019-03-22T17:10:08.472145198+09:00] serving... address="/tmp/docker-1000/docker/containerd/containerd.sock"
INFO[2019-03-22T17:10:08.472171698+09:00] containerd successfully booted in 0.009503s
INFO[2019-03-22T17:10:08.472385202+09:00] pickfirstBalancer: HandleSubConnStateChange: 0xc00013f990, READY module=grpc
INFO[2019-03-22T17:10:18.477988481+09:00] parsed scheme: "unix" module=grpc
INFO[2019-03-22T17:10:18.478028082+09:00] scheme "unix" not registered, fallback to default scheme module=grpc
INFO[2019-03-22T17:10:18.478085283+09:00] parsed scheme: "unix" module=grpc
INFO[2019-03-22T17:10:18.478093883+09:00] scheme "unix" not registered, fallback to default scheme module=grpc
INFO[2019-03-22T17:10:18.478112184+09:00] ccResolverWrapper: sending new addresses to cc: [{unix:///tmp/docker-1000/docker/containerd/containerd.sock 0 <nil>}] module=grpc
INFO[2019-03-22T17:10:18.478153084+09:00] ClientConn switching balancer to "pick_first" module=grpc
INFO[2019-03-22T17:10:18.478182285+09:00] pickfirstBalancer: HandleSubConnStateChange: 0xc000935120, CONNECTING module=grpc
INFO[2019-03-22T17:10:18.478197385+09:00] ccResolverWrapper: sending new addresses to cc: [{unix:///tmp/docker-1000/docker/containerd/containerd.sock 0 <nil>}] module=grpc
INFO[2019-03-22T17:10:18.478243986+09:00] ClientConn switching balancer to "pick_first" module=grpc
INFO[2019-03-22T17:10:18.478278187+09:00] pickfirstBalancer: HandleSubConnStateChange: 0xc000134cd0, CONNECTING module=grpc
INFO[2019-03-22T17:10:18.478283687+09:00] pickfirstBalancer: HandleSubConnStateChange: 0xc000935120, READY module=grpc
INFO[2019-03-22T17:10:18.478289787+09:00] blockingPicker: the picked transport is not ready, loop back to repick module=grpc
INFO[2019-03-22T17:10:18.478416189+09:00] pickfirstBalancer: HandleSubConnStateChange: 0xc000134cd0, READY module=grpc
INFO[2019-03-22T17:10:18.488729977+09:00] Loading containers: start.
INFO[2019-03-22T17:10:18.543024567+09:00] Default bridge (docker0) is assigned with an IP address 172.17.0.0/16. Daemon option --bip can be used to set a preferred IP address
INFO[2019-03-22T17:10:18.572219499+09:00] Loading containers: done.
INFO[2019-03-22T17:10:18.578842120+09:00] Docker daemon commit=56bb8fb graphdriver(s)=vfs version=master-dockerproject-2019-03-21
INFO[2019-03-22T17:10:18.578943622+09:00] Daemon has completed initialization
INFO[2019-03-22T17:10:18.594522006+09:00] API listen on /tmp/docker-1000/docker.sock
# 두번째 세션창을 연다.
[root@docker-master ~]# su lowprivuser # 사용자 계정으로 접근한다.
[lowprivuser@docker-master root]$ id # 계정의 ID 확인
uid=1000(lowprivuser) gid=1000(lowprivuser) groups=1000(lowprivuser)
# 동일하게 해당 세션에서 export로 경로 잡아준다.
[lowprivuser@docker-master root]$ export XDG_RUNTIME_DIR=/tmp/docker-1000
[lowprivuser@docker-master root]$ export PATH=/home/lowprivuser/bin:$PATH
[lowprivuser@docker-master root]$ export DOCKER_HOST=unix:///tmp/docker-1000/docker.sock
[lowprivuser@docker-master root]$ docker ps # docker 명령어가 실행된다.
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
[lowprivuser@docker-master root]$ docker info
Client:
Debug Mode: false
Server:
Containers: 0
Running: 0
Paused: 0
Stopped: 0
Images: 0
Server Version: master-dockerproject-2019-03-21
Storage Driver: vfs
Logging Driver: json-file
Cgroup Driver: cgroupfs
Plugins:
Volume: local
Network: bridge host ipvlan macvlan null overlay
Log: awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog
Swarm: inactive
Runtimes: runc
Default Runtime: runc
Init Binary: docker-init
containerd version: bb71b10fd8f58240ca47fbb579b9d1028eea7c84
runc version: 2b18fe1d885ee5083ef9f0838fee39b62d653e30
init version: fec3683
Security Options:
seccomp
Profile: default
rootless
Kernel Version: 4.20.2-1.el7.elrepo.x86_64
Operating System: CentOS Linux 7 (Core)
OSType: linux
Architecture: x86_64
CPUs: 4
Total Memory: 3.829GiB
Name: docker-master
ID: aa606e97-1381-49d8-a562-6cd396bf8fef
Docker Root Dir: /home/lowprivuser/.local/share/docker
Debug Mode: false
Registry: https://index.docker.io/v1/
Labels:
Experimental: true
Insecure Registries:
127.0.0.0/8
Live Restore Enabled: false
WARNING: bridge-nf-call-iptables is disabled
WARNING: bridge-nf-call-ip6tables is disabled
[lowprivuser@docker-master root]$ docker run -it ubuntu bash
Unable to find image 'ubuntu:latest' locally
latest: Pulling from library/ubuntu
898c46f3b1a1: Pull complete
63366dfa0a50: Pull complete
041d4cd74a92: Pull complete
6e1bee0f8701: Pull complete
Digest: sha256:017eef0b616011647b269b5c65826e2e2ebddbe5d1f8c1e56b3599fb14fabec8
Status: Downloaded newer image for ubuntu:latest
root@cdec9d8c0272:/# id
uid=0(root) gid=0(root) groups=0(root)
root@cdec9d8c0272:/# id; ps aux | grep lowprivuser
uid=0(root) gid=0(root) groups=0(root)
root 13 0.0 0.0 11464 1156 pts/0 S+ 08:13 0:00 grep --color=auto lowprivuser
root@cdec9d8c0272:/# exit
exit
[lowprivuser@docker-master root]$ id;
uid=1000(lowprivuser) gid=1000(lowprivuser) groups=1000(lowprivuser)
[lowprivuser@docker-master root]$ ps aux | grep lowprivuser
root 8510 0.0 0.1 191792 4632 pts/0 S 17:08 0:00 su lowprivuser
lowpriv+ 8582 0.0 0.0 113192 3032 pts/0 S+ 17:10 0:00 /bin/sh /home/lowprivuser/bin/dockerd-rootless.sh --experimental --storage-driver vfs
lowpriv+ 8587 0.0 0.2 109844 9920 pts/0 Sl+ 17:10 0:00 rootlesskit --net=vpnkit --mtu=1500 --disable-host-loopback --copy-up=/etc --copy-up=/run /home/lowprivuser/bin/dockerd-rootless.sh --experimental --storage-driver vfs
lowpriv+ 8592 1.1 0.3 111636 12208 pts/0 Sl+ 17:10 0:02 /proc/self/exe --net=vpnkit --mtu=1500 --disable-host-loopback --copy-up=/etc --copy-up=/run /home/lowprivuser/bin/dockerd-rootless.sh --experimental --storage-driver vfs
lowpriv+ 8629 0.0 0.0 113192 3076 pts/0 S+ 17:10 0:00 /bin/sh /home/lowprivuser/bin/dockerd-rootless.sh --experimental --storage-driver vfs
root 8766 0.0 0.1 191792 4600 pts/1 S 17:11 0:00 su lowprivuser
lowpriv+ 8942 0.0 0.0 116884 2392 pts/1 S+ 17:13 0:00 grep --color=auto lowprivuser
[lowprivuser@docker-master root]$