When attempting to perform a Build within OpenShift, it immediately fails with the error "cannot be started due to lack of disk space" showing up in the Events viewer.
I can't figure out why OpenShift thinks I'm low on disk space when everything appears to be fine. Here are some details on the server.
# docker -v
Docker version 1.8.2-el7, build a01dc02/1.8.2
# docker info
Containers: 2 Images: 36 Storage Driver: devicemapper Pool Name: docker--vg-docker--pool Pool Blocksize: 524.3 kB Backing Filesystem: xfs Data file: Metadata file: Data Space Used: 1.923 GB Data Space Total: 13.72 GB Data Space Available: 11.8 GB Metadata Space Used: 688.1 kB Metadata Space Total: 37.75 MB Metadata Space Available: 37.06 MB Udev Sync Supported: true Deferred Removal Enabled: true Library Version: 1.02.107-RHEL7 (2015-12-01) Execution Driver: native-0.2 Logging Driver: json-file Kernel Version: 3.10.0-229.14.1.el7.x86_64 Operating System: Red Hat Enterprise Linux Server 7.1 (Maipo) CPUs: 4 Total Memory: 7.64 GiB Name: oshift101.dev.omitted.ca ID: BGBJ:475D:NUO6:FORT:ZQQF:TZ4Z:QAX4:7AFK:VCCQ:7WYU:HNI2:5EAC WARNING: bridge-nf-call-iptables is disabled
# df -h
Filesystem Size Used Avail Use% Mounted on /dev/mapper/vg00-root 1014M 770M 245M 76% / devtmpfs 3.9G 0 3.9G 0% /dev tmpfs 3.9G 0 3.9G 0% /dev/shm tmpfs 3.9G 121M 3.8G 4% /run tmpfs 3.9G 0 3.9G 0% /sys/fs/cgroup /dev/mapper/vg00-usr 4.0G 1.4G 2.7G 33% /usr /dev/mapper/vg00-opt 509M 26M 483M 6% /opt /dev/mapper/vg00-tmp 4.0G 33M 4.0G 1% /tmp /dev/sda1 509M 109M 400M 22% /boot /dev/mapper/vg00-var 12G 3.0G 9.1G 25% /var hnas01:/home 250G 83G 168G 33% /home hnas01:/docker-registry 512G 6.1G 506G 2% /dockerregistry hnas01:/opt-omitted/linux 20G 69M 20G 1% /opt/omitted/nfs tmpfs 3.9G 4.0K 3.9G 1% /var/lib/origin/openshift.local.volumes/pods/19fb86f1-d9b1-11e5-bb64-005056969894/volumes/kubernetes.io~secret/builder-dockercfg-budmf-push tmpfs 3.9G 8.0K 3.9G 1% /var/lib/origin/openshift.local.volumes/pods/19fb86f1-d9b1-11e5-bb64-005056969894/volumes/kubernetes.io~secret/name-ommitted-source tmpfs 3.9G 8.0K 3.9G 1% /var/lib/origin/openshift.local.volumes/pods/19fb86f1-d9b1-11e5-bb64-005056969894/volumes/kubernetes.io~secret/builder-token-b79p6
# dmsetup status
vg00-tmp: 0 8388608 linear vg00-swap0: 0 8388608 linear vg00-swap0: 8388608 8388608 linear docker--vg-docker--pool: 0 26804224 thin-pool 117 168/9216 3668/26176 - rw no_discard_passdown queue_if_no_space vg00-usr: 0 8388608 linear vg00-var: 0 8388608 linear vg00-var: 8388608 16777216 linear vg00-root: 0 2097152 linear docker--vg-docker--pool_tdata: 0 26804224 linear docker--vg-docker--pool_tmeta: 0 73728 linear vg00-opt: 0 1048576 linear
# dmsetup table
vg00-tmp: 0 8388608 linear 8:2 16779264 vg00-swap0: 0 8388608 linear 8:2 25167872 vg00-swap0: 8388608 8388608 linear 8:2 36702208 docker--vg-docker--pool: 0 26804224 thin-pool 253:6 253:7 1024 0 1 skip_block_zeroing vg00-usr: 0 8388608 linear 8:2 8390656 vg00-var: 0 8388608 linear 8:2 2048 vg00-var: 8388608 16777216 linear 8:2 45090816 vg00-root: 0 2097152 linear 8:2 33556480 docker--vg-docker--pool_tdata: 0 26804224 linear 8:3 75776 docker--vg-docker--pool_tmeta: 0 73728 linear 8:3 2048 vg00-opt: 0 1048576 linear 8:2 35653632
# pvscan
PV /dev/sda2 VG vg00 lvm2 [31.50 GiB / 2.00 GiB free] PV /dev/sda3 VG docker-vg lvm2 [32.00 GiB / 19.14 GiB free] Total: 2 [63.49 GiB] / in use: 2 [63.49 GiB] / in no VG: 0 [0 ]
# lvscan
ACTIVE '/dev/docker-vg/docker-pool' [12.78 GiB] inherit ACTIVE '/dev/vg00/var' [12.00 GiB] inherit ACTIVE '/dev/vg00/usr' [4.00 GiB] inherit ACTIVE '/dev/vg00/tmp' [4.00 GiB] inherit ACTIVE '/dev/vg00/swap0' [8.00 GiB] inherit ACTIVE '/dev/vg00/root' [1.00 GiB] inherit ACTIVE '/dev/vg00/opt' [512.00 MiB] inherit
# cat /etc/sysconfig/docker-storage
DOCKER_STORAGE_OPTIONS=--storage-driver devicemapper --storage-opt dm.fs=xfs --storage-opt dm.thinpooldev=/dev/mapper/docker--vg-docker--pool --storage-opt dm.use_deferred_removal=true
# mount
proc on /proc type proc (rw,nosuid,nodev,noexec,relatime) sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime) devtmpfs on /dev type devtmpfs (rw,nosuid,size=3998872k,nr_inodes=999718,mode=755) securityfs on /sys/kernel/security type securityfs (rw,nosuid,nodev,noexec,relatime) tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev) devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000) tmpfs on /run type tmpfs (rw,nosuid,nodev,mode=755) tmpfs on /sys/fs/cgroup type tmpfs (rw,nosuid,nodev,noexec,mode=755) cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd) pstore on /sys/fs/pstore type pstore (rw,nosuid,nodev,noexec,relatime) cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset) cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpuacct,cpu) cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory) cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices) cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer) cgroup on /sys/fs/cgroup/net_cls type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls) cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio) cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event) cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb) configfs on /sys/kernel/config type configfs (rw,relatime) /dev/mapper/vg00-root on / type xfs (rw,relatime,attr2,inode64,noquota) /dev/mapper/vg00-usr on /usr type xfs (rw,relatime,attr2,inode64,noquota) systemd-1 on /proc/sys/fs/binfmt_misc type autofs (rw,relatime,fd=36,pgrp=1,timeout=300,minproto=5,maxproto=5,direct) debugfs on /sys/kernel/debug type debugfs (rw,relatime) mqueue on /dev/mqueue type mqueue (rw,relatime) hugetlbfs on 
/dev/hugepages type hugetlbfs (rw,relatime) /dev/mapper/vg00-opt on /opt type xfs (rw,relatime,attr2,inode64,noquota) /dev/mapper/vg00-tmp on /tmp type xfs (rw,relatime,attr2,inode64,noquota) /dev/sda1 on /boot type xfs (rw,relatime,attr2,inode64,noquota) /dev/mapper/vg00-var on /var type xfs (rw,relatime,attr2,inode64,noquota) hnas01:/home on /home type nfs (rw,relatime,vers=3,rsize=32768,wsize=32768,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=10.139.0.11,mountvers=3,mountport=4048,mountproto=tcp,local_lock=none,addr=10.139.0.11) hnas01:/docker-registry on /dockerregistry type nfs (ro,relatime,vers=3,rsize=32768,wsize=32768,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=10.139.0.11,mountvers=3,mountport=4048,mountproto=tcp,local_lock=none,addr=10.139.0.11) hnas01:/opt-omitted/linux on /opt/omitted/nfs type nfs (ro,relatime,vers=3,rsize=32768,wsize=32768,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=10.139.0.11,mountvers=3,mountport=4048,mountproto=tcp,local_lock=none,addr=10.139.0.11) binfmt_misc on /proc/sys/fs/binfmt_misc type binfmt_misc (rw,relatime) tmpfs on /var/lib/origin/openshift.local.volumes/pods/19fb86f1-d9b1-11e5-bb64-005056969894/volumes/kubernetes.io~secret/builder-dockercfg-budmf-push type tmpfs (rw,relatime) tmpfs on /var/lib/origin/openshift.local.volumes/pods/19fb86f1-d9b1-11e5-bb64-005056969894/volumes/kubernetes.io~secret/johnadacssh6-source type tmpfs (rw,relatime) tmpfs on /var/lib/origin/openshift.local.volumes/pods/19fb86f1-d9b1-11e5-bb64-005056969894/volumes/kubernetes.io~secret/builder-token-b79p6 type tmpfs (rw,relatime)
Edited to add the following additional details.
Still having a problem with this. It looks like the problem may actually be in Kubernetes, but I'm not sure.
I ran the following Go program, based on the code Kubernetes uses to determine whether there is enough disk space.
package main
import (
"fmt"
"os"
"syscall"
)
func main() {
pathArg := os.Args[1]
stat := syscall.Statfs_t{}
err := syscall.Statfs(pathArg, &stat)
if err != nil {
fmt.Println(err.Error())
return
}
//bsize := stat.Bsize
//fmt.Println(stat)
s := fmt.Sprintf(`
Statfs_t {
Type %d
Bsize %d
Blocks %d
Bfree %d
Bavail %d
Files %d
Ffree %d
Frsize %d
Flags %d
}
`, stat.Type,
stat.Bsize,
stat.Blocks,
stat.Bfree,
stat.Bavail,
stat.Files,
stat.Ffree,
stat.Frsize,
stat.Flags)
fmt.Println(s)
}
I then ran it via the following
for x in $(sudo df -h | grep -v Filesys| awk '{print $6}'); do echo "Running on $x"; sudo ./fsinfo $x; done;
And received the following results, which don't seem to show any disk space issues either.
I'm wondering if I should submit this as a bug report to Kubernetes?
Running on /
Statfs_t {
Type 1481003842
Bsize 4096
Blocks 259584
Bfree 62600
Bavail 62600
Files 1048576
Ffree 878047
Frsize 4096
Flags 4128
}
Running on /dev
Statfs_t {
Type 16914836
Bsize 4096
Blocks 999718
Bfree 999718
Bavail 999718
Files 999718
Ffree 999333
Frsize 4096
Flags 34
}
Running on /dev/shm
Statfs_t {
Type 16914836
Bsize 4096
Blocks 1001444
Bfree 1001444
Bavail 1001444
Files 1001444
Ffree 1001443
Frsize 4096
Flags 38
}
Running on /run
Statfs_t {
Type 16914836
Bsize 4096
Blocks 1001444
Bfree 923475
Bavail 923475
Files 1001444
Ffree 1000914
Frsize 4096
Flags 38
}
Running on /sys/fs/cgroup
Statfs_t {
Type 16914836
Bsize 4096
Blocks 1001444
Bfree 1001444
Bavail 1001444
Files 1001444
Ffree 1001431
Frsize 4096
Flags 46
}
Running on /usr
Statfs_t {
Type 1481003842
Bsize 4096
Blocks 1046016
Bfree 703392
Bavail 703392
Files 4194304
Ffree 4155732
Frsize 4096
Flags 4128
}
Running on /opt
Statfs_t {
Type 1481003842
Bsize 4096
Blocks 130219
Bfree 123593
Bavail 123593
Files 524288
Ffree 524263
Frsize 4096
Flags 4128
}
Running on /tmp
Statfs_t {
Type 1481003842
Bsize 4096
Blocks 1046016
Bfree 1037760
Bavail 1037760
Files 4194304
Ffree 4194273
Frsize 4096
Flags 4128
}
Running on /boot
Statfs_t {
Type 1481003842
Bsize 4096
Blocks 130219
Bfree 102353
Bavail 102353
Files 524288
Ffree 523955
Frsize 4096
Flags 4128
}
Running on /var
Statfs_t {
Type 1481003842
Bsize 4096
Blocks 3143168
Bfree 2416721
Bavail 2416721
Files 12582912
Ffree 12579830
Frsize 4096
Flags 4128
}
Running on /home
Statfs_t {
Type 26985
Bsize 32768
Blocks 8192000
Bfree 5523429
Bavail 5523429
Files 2682388480
Ffree 1534765937
Frsize 32768
Flags 4128
}
Running on /dockerregistry
Statfs_t {
Type 26985
Bsize 32768
Blocks 16777216
Bfree 16578608
Bavail 16578608
Files 1072955392
Ffree 659992996
Frsize 32768
Flags 4129
}
Running on /opt/cbc/nfs
Statfs_t {
Type 26985
Bsize 32768
Blocks 655360
Bfree 653153
Bavail 653153
Files 1072955392
Ffree 659992996
Frsize 32768
Flags 4129
}
Running on /var/lib/origin/openshift.local.volumes/pods/19fb86f1-d9b1-11e5-bb64-005056969894/volumes/kubernetes.io~secret/builder-dockercfg-bsamf-push
Statfs_t {
Type 16914836
Bsize 4096
Blocks 1001444
Bfree 1001443
Bavail 1001443
Files 1001444
Ffree 1001442
Frsize 4096
Flags 4128
}
Running on /var/lib/origin/openshift.local.volumes/pods/19fb86f1-d9b1-11e5-bb64-005056969894/volumes/kubernetes.io~secret/omitted6-source
Statfs_t {
Type 16914836
Bsize 4096
Blocks 1001444
Bfree 1001442
Bavail 1001442
Files 1001444
Ffree 1001441
Frsize 4096
Flags 4128
}
Running on /var/lib/origin/openshift.local.volumes/pods/19fb86f1-d9b1-11e5-bb64-005056969894/volumes/kubernetes.io~secret/builder-token-b7asp6
Statfs_t {
Type 16914836
Bsize 4096
Blocks 1001444
Bfree 1001442
Bavail 1001442
Files 1001444
Ffree 1001441
Frsize 4096
Flags 4128
}
# lvs
# lvs -o +lv_major,lv_minor,lv_kernel_major,lv_kernel_minor,lv_size,seg_count,snap_percent,segtype,stripes,stripesize,chunksize,seg_start,seg_size
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert Maj Min KMaj KMin LSize #Seg Snap% Type #Str Stripe Chunk Start SSize
docker-pool docker-vg twi-a-t--- 12.78g 13.62 1.71 -1 -1 253 8 12.78g 1 thin-pool 1 0 512.00k 0 12.78g
opt vg00 -wi-ao---- 512.00m -1 -1 253 5 512.00m 1 linear 1 0 0 0 512.00m
root vg00 -wi-ao---- 1.00g -1 -1 253 1 1.00g 1 linear 1 0 0 0 1.00g
swap0 vg00 -wi-ao---- 8.00g -1 -1 253 2 8.00g 2 linear 1 0 0 0 4.00g
swap0 vg00 -wi-ao---- 8.00g -1 -1 253 2 8.00g 2 linear 1 0 0 4.00g 4.00g
tmp vg00 -wi-ao---- 4.00g -1 -1 253 4 4.00g 1 linear 1 0 0 0 4.00g
usr vg00 -wi-ao---- 4.00g -1 -1 253 0 4.00g 1 linear 1 0 0 0 4.00g
var vg00 -wi-ao---- 12.00g -1 -1 253 3 12.00g 2 linear 1 0 0 0 4.00g
var vg00 -wi-ao---- 12.00g -1 -1 253 3 12.00g 2 linear 1 0 0 4.00g 8.00g