Input buildinfo: https://buildinfos.debian.net/buildinfo-pool/f/fenicsx-performance-tests/fenicsx-performance-tests_0.0~git20210119.80e82ac-1_amd64.buildinfo Use metasnap for getting required timestamps New buildinfo file: /tmp/fenicsx-performance-tests-0.0~git20210119.80e82ac-1szskzl93/fenicsx-performance-tests_0.0~git20210119.80e82ac-1_amd64.buildinfo Get source package info: fenicsx-performance-tests=0.0~git20210119.80e82ac-1 Source URL: http://snapshot.notset.fr/mr/package/fenicsx-performance-tests/0.0~git20210119.80e82ac-1/srcfiles?fileinfo=1 env -i PATH=/usr/sbin:/usr/bin:/sbin:/bin TMPDIR=/tmp mmdebstrap --arch=amd64 --include=adduser=3.118 autoconf=2.69-14 automake=1:1.16.3-2 autopoint=0.21-4 autotools-dev=20180224.1+nmu1 base-files=11 base-passwd=3.5.49 bash=5.1-2 binutils=2.35.1-7 binutils-common=2.35.1-7 binutils-x86-64-linux-gnu=2.35.1-7 bsdextrautils=2.36.1-7 bsdutils=1:2.36.1-7 build-essential=12.9 bzip2=1.0.8-4 cmake=3.18.4-2 cmake-data=3.18.4-2 coreutils=8.32-4+b1 cpp=4:10.2.1-1 cpp-10=10.2.1-6 dash=0.5.11+git20200708+dd9ef66-5 debconf=1.5.74 debhelper=13.3.3 debianutils=4.11.2 dh-autoreconf=20 dh-strip-nondeterminism=1.11.0-1 diffutils=1:3.7-5 dpkg=1.20.7.1 dpkg-dev=1.20.7.1 dwz=0.13+20210201-1 file=1:5.39-3 findutils=4.8.0-1 fonts-mathjax=2.7.9+dfsg-1 g++=4:10.2.1-1 g++-10=10.2.1-6 gcc=4:10.2.1-1 gcc-10=10.2.1-6 gcc-10-base=10.2.1-6 gettext=0.21-4 gettext-base=0.21-4 gfortran=4:10.2.1-1 gfortran-10=10.2.1-6 grep=3.6-1 groff-base=1.22.4-6 gzip=1.10-2 hostname=3.23 ibverbs-providers=33.1-1 icu-devtools=67.1-6 init-system-helpers=1.60 intltool-debian=0.35.0+20060710.5 libacl1=2.2.53-10 libaec-dev=1.0.4-1 libaec0=1.0.4-1 libamd2=1:5.8.1+dfsg-2 libarchive-zip-perl=1.68-1 libarchive13=3.4.3-2 libarpack2=3.8.0-1 libarpack2-dev=3.8.0-1 libasan6=10.2.1-6 libatomic1=10.2.1-6 libattr1=1:2.4.48-6 libaudit-common=1:3.0-2 libaudit1=1:3.0-2 libbasix-dev=0.0.1~git20210122.4f10ef2-2 libbasix0=0.0.1~git20210122.4f10ef2-2 libbinutils=2.35.1-7 libblas-dev=3.9.0-3 libblas3=3.9.0-3 libblkid1=2.36.1-7 libboost-atomic1.74-dev=1.74.0-8 libboost-atomic1.74.0=1.74.0-8 libboost-chrono-dev=1.74.0.3 libboost-chrono1.74-dev=1.74.0-8 libboost-chrono1.74.0=1.74.0-8 libboost-date-time1.74-dev=1.74.0-8 libboost-date-time1.74.0=1.74.0-8 libboost-dev=1.74.0.3 libboost-filesystem-dev=1.74.0.3 libboost-filesystem1.74-dev=1.74.0-8 libboost-filesystem1.74.0=1.74.0-8 libboost-iostreams-dev=1.74.0.3 libboost-iostreams1.74-dev=1.74.0-8 libboost-iostreams1.74.0=1.74.0-8 libboost-math-dev=1.74.0.3 libboost-math1.74-dev=1.74.0-8 libboost-math1.74.0=1.74.0-8 libboost-mpi-dev=1.74.0.3 libboost-mpi1.74-dev=1.74.0-8 libboost-mpi1.74.0=1.74.0-8 libboost-program-options-dev=1.74.0.3 libboost-program-options1.74-dev=1.74.0-8 libboost-program-options1.74.0=1.74.0-8 libboost-regex1.74-dev=1.74.0-8 libboost-regex1.74.0=1.74.0-8 libboost-serialization-dev=1.74.0.3 libboost-serialization1.74-dev=1.74.0-8 libboost-serialization1.74.0=1.74.0-8 libboost-system1.74-dev=1.74.0-8 libboost-system1.74.0=1.74.0-8 libboost-thread-dev=1.74.0.3 libboost-thread1.74-dev=1.74.0-8 libboost-thread1.74.0=1.74.0-8 libboost-timer-dev=1.74.0.3 libboost-timer1.74-dev=1.74.0-8 libboost-timer1.74.0=1.74.0-8 libboost1.74-dev=1.74.0-8 libbrotli1=1.0.9-2+b2 libbsd0=0.11.3-1 libbtf1=1:5.8.1+dfsg-2 libbz2-1.0=1.0.8-4 libc-bin=2.31-9 libc-dev-bin=2.31-9 libc6=2.31-9 libc6-dev=2.31-9 libcamd2=1:5.8.1+dfsg-2 libcap-ng0=0.7.9-2.2+b1 libcbor0=0.5.0+dfsg-2 libcc1-0=10.2.1-6 libccolamd2=1:5.8.1+dfsg-2 libcholmod3=1:5.8.1+dfsg-2 libcolamd2=1:5.8.1+dfsg-2 
libcom-err2=1.46.1-1 libcombblas1.16.0=1.6.2-6 libcrypt-dev=1:4.4.17-1 libcrypt1=1:4.4.17-1 libctf-nobfd0=2.35.1-7 libctf0=2.35.1-7 libcurl4=7.74.0-1 libcxsparse3=1:5.8.1+dfsg-2 libdb5.3=5.3.28+dfsg1-0.8 libdebconfclient0=0.256 libdebhelper-perl=13.3.3 libdolfinx-dev=2019.2.0~git20210130.c14cb0a-3 libdolfinx-real-dev=2019.2.0~git20210130.c14cb0a-3 libdolfinx-real2019.2=2019.2.0~git20210130.c14cb0a-3 libdpkg-perl=1.20.7.1 libedit2=3.1-20191231-2+b1 libeigen3-dev=3.3.9-2 libelf1=0.183-1 libevent-2.1-7=2.1.12-stable-1 libevent-core-2.1-7=2.1.12-stable-1 libevent-dev=2.1.12-stable-1 libevent-extra-2.1-7=2.1.12-stable-1 libevent-openssl-2.1-7=2.1.12-stable-1 libevent-pthreads-2.1-7=2.1.12-stable-1 libexpat1=2.2.10-1 libfabric1=1.11.0-2 libffi7=3.3-5 libfftw3-bin=3.3.8-2 libfftw3-dev=3.3.8-2 libfftw3-double3=3.3.8-2 libfftw3-long3=3.3.8-2 libfftw3-mpi-dev=3.3.8-2 libfftw3-mpi3=3.3.8-2 libfftw3-quad3=3.3.8-2 libfftw3-single3=3.3.8-2 libfido2-1=1.6.0-2 libfile-stripnondeterminism-perl=1.11.0-1 libgcc-10-dev=10.2.1-6 libgcc-s1=10.2.1-6 libgcrypt20=1.8.7-2 libgdbm-compat4=1.19-2 libgdbm6=1.19-2 libgfortran-10-dev=10.2.1-6 libgfortran5=10.2.1-6 libglib2.0-0=2.66.7-1 libgmp10=2:6.2.1+dfsg-1 libgnutls30=3.7.0-6 libgomp1=10.2.1-6 libgpg-error0=1.38-2 libgraphblas3=1:5.8.1+dfsg-2 libgssapi-krb5-2=1.18.3-4 libhdf5-mpi-dev=1.10.6+repack-2 libhdf5-openmpi-103-1=1.10.6+repack-2 libhdf5-openmpi-cpp-103-1=1.10.6+repack-2 libhdf5-openmpi-dev=1.10.6+repack-2 libhdf5-openmpi-fortran-102=1.10.6+repack-2 libhdf5-openmpi-hl-100=1.10.6+repack-2 libhdf5-openmpi-hl-cpp-100=1.10.6+repack-2 libhdf5-openmpi-hl-fortran-100=1.10.6+repack-2 libhogweed6=3.7-2 libhwloc-dev=2.4.1~rc3+dfsg-1 libhwloc-plugins=2.4.1~rc3+dfsg-1 libhwloc15=2.4.1~rc3+dfsg-1 libhypre=2.18.2-1 libhypre-dev=2.18.2-1 libibverbs-dev=33.1-1 libibverbs1=33.1-1 libicu-dev=67.1-6 libicu67=67.1-6 libidn2-0=2.3.0-5 libisl23=0.23-1 libitm1=10.2.1-6 libjpeg-dev=1:2.0.5-2 libjpeg62-turbo=1:2.0.5-2 libjpeg62-turbo-dev=1:2.0.5-2 libjs-jquery=3.5.1+dfsg+~3.5.5-7 libjs-jquery-ui=1.12.1+dfsg-8 libjs-mathjax=2.7.9+dfsg-1 libjsoncpp24=1.9.4-4 libk5crypto3=1.18.3-4 libkeyutils1=1.6.1-2 libklu1=1:5.8.1+dfsg-2 libkrb5-3=1.18.3-4 libkrb5support0=1.18.3-4 liblapack-dev=3.9.0-3 liblapack3=3.9.0-3 libldap-2.4-2=2.4.57+dfsg-1 libldl2=1:5.8.1+dfsg-2 liblsan0=10.2.1-6 libltdl-dev=2.4.6-15 libltdl7=2.4.6-15 liblz4-1=1.9.3-1 liblzma5=5.2.5-1.0 libmagic-mgc=1:5.39-3 libmagic1=1:5.39-3 libmd0=1.0.3-3 libmetis5=5.1.0.dfsg-7 libmongoose2=1:5.8.1+dfsg-2 libmount1=2.36.1-7 libmpc3=1.2.0-1 libmpdec3=2.5.1-1 libmpfr6=4.1.0-3 libmumps-5.3=5.3.5-1 libmumps-dev=5.3.5-1 libmumps-headers-dev=5.3.5-1 libncurses6=6.2+20201114-2 libncursesw6=6.2+20201114-2 libnettle8=3.7-2 libnghttp2-14=1.42.0-1 libnl-3-200=3.4.0-1+b1 libnl-3-dev=3.4.0-1+b1 libnl-route-3-200=3.4.0-1+b1 libnl-route-3-dev=3.4.0-1+b1 libnsl-dev=1.3.0-2 libnsl2=1.3.0-2 libnuma-dev=2.0.12-1+b1 libnuma1=2.0.12-1+b1 libopenmpi-dev=4.1.0-7 libopenmpi3=4.1.0-7 libp11-kit0=0.23.22-1 libpam-modules=1.4.0-4 libpam-modules-bin=1.4.0-4 libpam-runtime=1.4.0-4 libpam0g=1.4.0-4 libparpack2=3.8.0-1 libparpack2-dev=3.8.0-1 libpciaccess0=0.16-1 libpcre2-8-0=10.36-2 libpcre3=2:8.39-13 libperl5.32=5.32.1-2 libpetsc-real3.14=3.14.4+dfsg1-1 libpetsc-real3.14-dev=3.14.4+dfsg1-1 libpetsc3.14-dev-common=3.14.4+dfsg1-1 libpipeline1=1.5.3-1 libpmix-dev=4.0.0-4 libpmix2=4.0.0-4 libprocps8=2:3.3.17-2 libpsl5=0.21.0-1.2 libpsm-infinipath1=3.3+20.604758e7-6.1 libpsm2-2=11.2.185-1 libptscotch-6.1=6.1.0-2 libptscotch-dev=6.1.0-2 libpython3-stdlib=3.9.1-1 
libpython3.9-minimal=3.9.1-4 libpython3.9-stdlib=3.9.1-4 libquadmath0=10.2.1-6 librbio2=1:5.8.1+dfsg-2 librdmacm1=33.1-1 libreadline8=8.1-1 librhash0=1.4.1-1 librtmp1=2.4+20151223.gitfa8646d.1-2+b2 libsasl2-2=2.1.27+dfsg-2.1 libsasl2-modules-db=2.1.27+dfsg-2.1 libscalapack-mpi-dev=2.1.0-4 libscalapack-openmpi-dev=2.1.0-4 libscalapack-openmpi2.1=2.1.0-4 libscotch-6.1=6.1.0-2 libscotch-dev=6.1.0-2 libseccomp2=2.5.1-1 libselinux1=3.1-3 libsemanage-common=3.1-1 libsemanage1=3.1-1+b2 libsepol1=3.1-1 libsigsegv2=2.13-1 libslepc-real3.14=3.14.1+dfsg1-1+b1 libslepc-real3.14-dev=3.14.1+dfsg1-1+b1 libsliplu1=1:5.8.1+dfsg-2 libsmartcols1=2.36.1-7 libspqr2=1:5.8.1+dfsg-2 libsqlite3-0=3.34.1-1 libssh2-1=1.9.0-2 libssl-dev=1.1.1i-3 libssl1.1=1.1.1i-3 libstdc++-10-dev=10.2.1-6 libstdc++6=10.2.1-6 libsub-override-perl=0.09-2 libsuitesparse-dev=1:5.8.1+dfsg-2 libsuitesparseconfig5=1:5.8.1+dfsg-2 libsuperlu-dev=5.2.2+dfsg1-2 libsuperlu-dist-dev=6.2.0+dfsg1-3 libsuperlu-dist6=6.2.0+dfsg1-3 libsuperlu5=5.2.2+dfsg1-2 libsystemd0=247.3-1 libsz2=1.0.4-1 libtasn1-6=4.16.0-2 libtinfo6=6.2+20201114-2 libtirpc-common=1.3.1-1 libtirpc-dev=1.3.1-1 libtirpc3=1.3.1-1 libtool=2.4.6-15 libtsan0=10.2.1-6 libubsan1=10.2.1-6 libuchardet0=0.0.7-1 libucx0=1.10.0~rc1-7 libudev1=247.3-1 libumfpack5=1:5.8.1+dfsg-2 libunistring2=0.9.10-4 libuuid1=2.36.1-7 libuv1=1.40.0-1 libx11-6=2:1.7.0-2 libx11-data=2:1.7.0-2 libxau6=1:1.0.9-1 libxcb1=1.14-3 libxdmcp6=1:1.1.2-3 libxext6=2:1.3.3-1.1 libxml2=2.9.10+dfsg-6.3+b1 libxml2-dev=2.9.10+dfsg-6.3+b1 libxnvctrl0=460.32.03-1 libzstd1=1.4.8+dfsg-1 linux-libc-dev=5.10.13-1 login=1:4.8.1-1 lsb-base=11.1.0 m4=1.4.18-5 make=4.3-4 man-db=2.9.4-1 mawk=1.3.4.20200120-2 media-types=4.0.0 mpi-default-bin=1.13 mpi-default-dev=1.13 ncurses-base=6.2+20201114-2 ncurses-bin=6.2+20201114-2 ocl-icd-libopencl1=2.2.14-2 ocl-icd-opencl-dev=2.2.14-2 opencl-c-headers=3.0~2020.12.18-1 opencl-clhpp-headers=3.0~2.0.13-1 openmpi-bin=4.1.0-7 openmpi-common=4.1.0-7 openssh-client=1:8.4p1-3 passwd=1:4.8.1-1 patch=2.7.6-7 perl=5.32.1-2 perl-base=5.32.1-2 perl-modules-5.32=5.32.1-2 pkg-config=0.29.2-1 po-debconf=1.0.21+nmu1 procps=2:3.3.17-2 python3=3.9.1-1 python3-basix=0.0.1~git20210122.4f10ef2-2 python3-cffi=1.14.4-1 python3-cffi-backend=1.14.4-1+b1 python3-ffcx=2019.2.0~git20210122.22ca4c0-2 python3-fiat=2019.2.0~git20210116.0439689-2 python3-minimal=3.9.1-1 python3-mpmath=1.2.0-1 python3-numpy=1:1.19.5-1 python3-pkg-resources=52.0.0-1 python3-ply=3.11-4 python3-pycparser=2.20-3 python3-sympy=1.7.1-2 python3-ufl=2019.2.0~git20200928.057c5f6-1 python3.9=3.9.1-4 python3.9-minimal=3.9.1-4 readline-common=8.1-1 sed=4.7-1 sensible-utils=0.0.14 sysvinit-utils=2.96-5 tar=1.32+dfsg-1 tzdata=2021a-1 util-linux=2.36.1-7 xz-utils=5.2.5-1.0 zlib1g=1:1.2.11.dfsg-2 zlib1g-dev=1:1.2.11.dfsg-2 --variant=apt --aptopt=Acquire::Check-Valid-Until "false" --aptopt=Acquire::http::Dl-Limit "1000"; --aptopt=Acquire::https::Dl-Limit "1000"; --aptopt=Acquire::Retries "5"; --aptopt=APT::Get::allow-downgrades "true"; --keyring=/usr/share/keyrings/ --essential-hook=chroot "$1" sh -c "apt-get --yes install fakeroot util-linux" --essential-hook=copy-in /usr/share/keyrings/debian-archive-bullseye-automatic.gpg /usr/share/keyrings/debian-archive-bullseye-security-automatic.gpg /usr/share/keyrings/debian-archive-bullseye-stable.gpg /usr/share/keyrings/debian-archive-buster-automatic.gpg /usr/share/keyrings/debian-archive-buster-security-automatic.gpg /usr/share/keyrings/debian-archive-buster-stable.gpg /usr/share/keyrings/debian-archive-keyring.gpg 
/usr/share/keyrings/debian-archive-removed-keys.gpg /usr/share/keyrings/debian-archive-stretch-automatic.gpg /usr/share/keyrings/debian-archive-stretch-security-automatic.gpg /usr/share/keyrings/debian-archive-stretch-stable.gpg /usr/share/keyrings/debian-ports-archive-keyring-removed.gpg /usr/share/keyrings/debian-ports-archive-keyring.gpg /usr/share/keyrings/debian-keyring.gpg /etc/apt/trusted.gpg.d/ --essential-hook=chroot "$1" sh -c "rm /etc/apt/sources.list && echo 'deb http://snapshot.notset.fr/archive/debian/20210814T212851Z/ bookworm main deb-src http://snapshot.notset.fr/archive/debian/20210814T212851Z/ bookworm main deb http://snapshot.notset.fr/archive/debian/20210212T144947Z/ unstable main' >> /etc/apt/sources.list && apt-get update" --customize-hook=chroot "$1" useradd --no-create-home -d /nonexistent -p "" builduser -s /bin/bash --customize-hook=chroot "$1" env sh -c "apt-get source --only-source -d fenicsx-performance-tests=0.0~git20210119.80e82ac-1 && mkdir -p /build/fenicsx-performance-tests-494eAj && dpkg-source --no-check -x /*.dsc /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac && chown -R builduser:builduser /build/fenicsx-performance-tests-494eAj" --customize-hook=chroot "$1" env --unset=TMPDIR runuser builduser -c "cd /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac && env DEB_BUILD_OPTIONS="parallel=4" LC_ALL="C.UTF-8" SOURCE_DATE_EPOCH="1613144121" dpkg-buildpackage -uc -a amd64 --build=any" --customize-hook=sync-out /build/fenicsx-performance-tests-494eAj /tmp/fenicsx-performance-tests-0.0~git20210119.80e82ac-1szskzl93 bullseye /dev/null deb http://snapshot.notset.fr/archive/debian/20210212T144947Z unstable main I: automatically chosen mode: root I: chroot architecture amd64 is equal to the host's architecture I: automatically chosen format: tar I: using /tmp/mmdebstrap.mPaZJNMT34 as tempdir I: running apt-get update... I: downloading packages with apt... I: extracting archives... I: installing essential packages... I: running --essential-hook in shell: sh -c 'chroot "$1" sh -c "apt-get --yes install fakeroot util-linux"' exec /tmp/mmdebstrap.mPaZJNMT34 Reading package lists... Building dependency tree... util-linux is already the newest version (2.36.1-7). The following NEW packages will be installed: fakeroot libfakeroot 0 upgraded, 2 newly installed, 0 to remove and 0 not upgraded. Need to get 134 kB of archives. After this operation, 397 kB of additional disk space will be used. Get:1 http://snapshot.notset.fr/archive/debian/20210212T144947Z unstable/main amd64 libfakeroot amd64 1.25.3-1.1 [47.0 kB] Get:2 http://snapshot.notset.fr/archive/debian/20210212T144947Z unstable/main amd64 fakeroot amd64 1.25.3-1.1 [87.0 kB] debconf: delaying package configuration, since apt-utils is not installed Fetched 134 kB in 0s (505 kB/s) Selecting previously unselected package libfakeroot:amd64. (Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 
4662 files and directories currently installed.) Preparing to unpack .../libfakeroot_1.25.3-1.1_amd64.deb ... Unpacking libfakeroot:amd64 (1.25.3-1.1) ... Selecting previously unselected package fakeroot. Preparing to unpack .../fakeroot_1.25.3-1.1_amd64.deb ... Unpacking fakeroot (1.25.3-1.1) ... Setting up libfakeroot:amd64 (1.25.3-1.1) ... Setting up fakeroot (1.25.3-1.1) ... update-alternatives: using /usr/bin/fakeroot-sysv to provide /usr/bin/fakeroot (fakeroot) in auto mode Processing triggers for libc-bin (2.31-9) ... I: running special hook: copy-in /usr/share/keyrings/debian-archive-bullseye-automatic.gpg /usr/share/keyrings/debian-archive-bullseye-security-automatic.gpg /usr/share/keyrings/debian-archive-bullseye-stable.gpg /usr/share/keyrings/debian-archive-buster-automatic.gpg /usr/share/keyrings/debian-archive-buster-security-automatic.gpg /usr/share/keyrings/debian-archive-buster-stable.gpg /usr/share/keyrings/debian-archive-keyring.gpg /usr/share/keyrings/debian-archive-removed-keys.gpg /usr/share/keyrings/debian-archive-stretch-automatic.gpg /usr/share/keyrings/debian-archive-stretch-security-automatic.gpg /usr/share/keyrings/debian-archive-stretch-stable.gpg /usr/share/keyrings/debian-ports-archive-keyring-removed.gpg /usr/share/keyrings/debian-ports-archive-keyring.gpg /usr/share/keyrings/debian-keyring.gpg /etc/apt/trusted.gpg.d/ I: running --essential-hook in shell: sh -c 'chroot "$1" sh -c "rm /etc/apt/sources.list && echo 'deb http://snapshot.notset.fr/archive/debian/20210814T212851Z/ bookworm main deb-src http://snapshot.notset.fr/archive/debian/20210814T212851Z/ bookworm main deb http://snapshot.notset.fr/archive/debian/20210212T144947Z/ unstable main' >> /etc/apt/sources.list && apt-get update"' exec /tmp/mmdebstrap.mPaZJNMT34 Get:1 http://snapshot.notset.fr/archive/debian/20210814T212851Z bookworm InRelease [81.6 kB] Hit:2 http://snapshot.notset.fr/archive/debian/20210212T144947Z unstable InRelease Ign:3 http://snapshot.notset.fr/archive/debian/20210814T212851Z bookworm/main Sources Ign:4 http://snapshot.notset.fr/archive/debian/20210814T212851Z bookworm/main amd64 Packages Ign:3 http://snapshot.notset.fr/archive/debian/20210814T212851Z bookworm/main Sources Ign:4 http://snapshot.notset.fr/archive/debian/20210814T212851Z bookworm/main amd64 Packages Ign:3 http://snapshot.notset.fr/archive/debian/20210814T212851Z bookworm/main Sources Ign:4 http://snapshot.notset.fr/archive/debian/20210814T212851Z bookworm/main amd64 Packages Get:3 http://snapshot.notset.fr/archive/debian/20210814T212851Z bookworm/main Sources [11.4 MB] Get:4 http://snapshot.notset.fr/archive/debian/20210814T212851Z bookworm/main amd64 Packages [11.1 MB] Fetched 22.6 MB in 21s (1075 kB/s) Reading package lists... I: installing remaining packages inside the chroot... I: running --customize-hook in shell: sh -c 'chroot "$1" useradd --no-create-home -d /nonexistent -p "" builduser -s /bin/bash' exec /tmp/mmdebstrap.mPaZJNMT34 I: running --customize-hook in shell: sh -c 'chroot "$1" env sh -c "apt-get source --only-source -d fenicsx-performance-tests=0.0~git20210119.80e82ac-1 && mkdir -p /build/fenicsx-performance-tests-494eAj && dpkg-source --no-check -x /*.dsc /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac && chown -R builduser:builduser /build/fenicsx-performance-tests-494eAj"' exec /tmp/mmdebstrap.mPaZJNMT34 Reading package lists... 
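The essential hook above replaces the chroot's sources.list so that apt resolves every dependency against two fixed snapshot timestamps instead of a live mirror, which is what allows the exact package versions recorded in the .buildinfo to be reinstalled. A minimal sketch of the equivalent manual setup, using the same snapshot.notset.fr timestamps as the hook:

# Pin apt to the snapshot timestamps used by the rebuild (values taken from the hook above).
cat > /etc/apt/sources.list <<'EOF'
deb http://snapshot.notset.fr/archive/debian/20210814T212851Z/ bookworm main
deb-src http://snapshot.notset.fr/archive/debian/20210814T212851Z/ bookworm main
deb http://snapshot.notset.fr/archive/debian/20210212T144947Z/ unstable main
EOF
# Snapshot Release files are long past their Valid-Until date, so validity checking is
# relaxed, mirroring the --aptopt=Acquire::Check-Valid-Until "false" passed to mmdebstrap.
apt-get -o Acquire::Check-Valid-Until=false update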
NOTICE: 'fenicsx-performance-tests' packaging is maintained in the 'Git' version control system at: https://salsa.debian.org/science-team/fenics/fenicsx-performance-tests.git Please use: git clone https://salsa.debian.org/science-team/fenics/fenicsx-performance-tests.git to retrieve the latest (possibly unreleased) updates to the package. Need to get 21.0 kB of source archives. Get:1 http://snapshot.notset.fr/archive/debian/20210814T212851Z bookworm/main fenicsx-performance-tests 0.0~git20210119.80e82ac-1 (dsc) [2515 B] Get:2 http://snapshot.notset.fr/archive/debian/20210814T212851Z bookworm/main fenicsx-performance-tests 0.0~git20210119.80e82ac-1 (tar) [13.8 kB] Get:3 http://snapshot.notset.fr/archive/debian/20210814T212851Z bookworm/main fenicsx-performance-tests 0.0~git20210119.80e82ac-1 (diff) [4704 B] Fetched 21.0 kB in 0s (113 kB/s) Download complete and in download only mode W: Download is performed unsandboxed as root as file 'fenicsx-performance-tests_0.0~git20210119.80e82ac-1.dsc' couldn't be accessed by user '_apt'. - pkgAcquire::Run (13: Permission denied) dpkg-source: info: extracting fenicsx-performance-tests in /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac dpkg-source: info: unpacking fenicsx-performance-tests_0.0~git20210119.80e82ac.orig.tar.gz dpkg-source: info: unpacking fenicsx-performance-tests_0.0~git20210119.80e82ac-1.debian.tar.xz I: running --customize-hook in shell: sh -c 'chroot "$1" env --unset=TMPDIR runuser builduser -c "cd /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac && env DEB_BUILD_OPTIONS="parallel=4" LC_ALL="C.UTF-8" SOURCE_DATE_EPOCH="1613144121" dpkg-buildpackage -uc -a amd64 --build=any"' exec /tmp/mmdebstrap.mPaZJNMT34 dpkg-buildpackage: info: source package fenicsx-performance-tests dpkg-buildpackage: info: source version 0.0~git20210119.80e82ac-1 dpkg-buildpackage: info: source distribution unstable dpkg-buildpackage: info: source changed by Drew Parsons dpkg-source --before-build . 
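The customize hook then reruns the build as the unprivileged builduser with the environment pinned to what the original build recorded: C.UTF-8 locale, parallel=4 and SOURCE_DATE_EPOCH=1613144121, which lets dpkg-buildpackage and the toolchain clamp embedded timestamps. A rough sketch of the same invocation run by hand from the unpacked source tree (dpkg-parsechangelog is shown only to illustrate where the epoch normally comes from; the rebuild reuses the recorded value instead):

# Run from the unpacked tree, e.g. .../fenicsx-performance-tests-0.0-git20210119.80e82ac
# For a fresh build the epoch is taken from the newest debian/changelog entry;
# the rebuild overrides it with the recorded value 1613144121.
SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(dpkg-parsechangelog -STimestamp)}"
export SOURCE_DATE_EPOCH
env DEB_BUILD_OPTIONS="parallel=4" LC_ALL="C.UTF-8" \
    dpkg-buildpackage -uc -a amd64 --build=any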
dpkg-buildpackage: info: host architecture amd64 debian/rules clean dh clean --buildsystem=cmake --sourcedirectory=src dh_auto_clean -O--buildsystem=cmake -O--sourcedirectory=src dh_autoreconf_clean -O--buildsystem=cmake -O--sourcedirectory=src dh_clean -O--buildsystem=cmake -O--sourcedirectory=src debian/rules binary-arch dh binary-arch --buildsystem=cmake --sourcedirectory=src dh_update_autotools_config -a -O--buildsystem=cmake -O--sourcedirectory=src dh_autoreconf -a -O--buildsystem=cmake -O--sourcedirectory=src dh_auto_configure -a -O--buildsystem=cmake -O--sourcedirectory=src cd obj-x86_64-linux-gnu && cmake -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=None -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_RUNSTATEDIR=/run -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON "-GUnix Makefiles" -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_INSTALL_LIBDIR=lib/x86_64-linux-gnu ../src -- The C compiler identification is GNU 10.2.1 -- The CXX compiler identification is GNU 10.2.1 -- Detecting C compiler ABI info -- Detecting C compiler ABI info - done -- Check for working C compiler: /usr/bin/cc - skipped -- Detecting C compile features -- Detecting C compile features - done -- Detecting CXX compiler ABI info -- Detecting CXX compiler ABI info - done -- Check for working CXX compiler: /usr/bin/c++ - skipped -- Detecting CXX compile features -- Detecting CXX compile features - done -- Found MPI_C: /usr/lib/x86_64-linux-gnu/openmpi/lib/libmpi.so (found version "3.1") -- Found MPI_CXX: /usr/lib/x86_64-linux-gnu/openmpi/lib/libmpi_cxx.so (found version "3.1") -- Found MPI: TRUE (found version "3.1") -- Found Boost 1.74.0 at /usr/lib/x86_64-linux-gnu/cmake/Boost-1.74.0 -- Requested configuration: QUIET REQUIRED COMPONENTS timer;filesystem -- Found boost_headers 1.74.0 at /usr/lib/x86_64-linux-gnu/cmake/boost_headers-1.74.0 -- Found boost_timer 1.74.0 at /usr/lib/x86_64-linux-gnu/cmake/boost_timer-1.74.0 -- [x] libboost_timer.so.1.74.0 -- [ ] libboost_timer.a -- Adding boost_timer dependencies: chrono;headers -- Found boost_chrono 1.74.0 at /usr/lib/x86_64-linux-gnu/cmake/boost_chrono-1.74.0 -- [x] libboost_chrono.so.1.74.0 -- [ ] libboost_chrono.a -- Adding boost_chrono dependencies: headers -- Found boost_filesystem 1.74.0 at /usr/lib/x86_64-linux-gnu/cmake/boost_filesystem-1.74.0 -- [x] libboost_filesystem.so.1.74.0 -- [ ] libboost_filesystem.a -- Adding boost_filesystem dependencies: headers -- Found Boost: /usr/lib/x86_64-linux-gnu/cmake/Boost-1.74.0/BoostConfig.cmake (found suitable version "1.74.0", minimum required is "1.70") found components: timer filesystem -- Found PkgConfig: /usr/bin/pkg-config (found version "0.29.2") -- Checking for one of the modules 'craypetsc_real;petsc;PETSc' -- Looking for sys/types.h CMake Warning (dev) at /usr/share/cmake-3.18/Modules/CheckIncludeFile.cmake:80 (message): Policy CMP0075 is not set: Include file check macros honor CMAKE_REQUIRED_LIBRARIES. Run "cmake --help-policy CMP0075" for policy details. Use the cmake_policy command to set the policy and suppress this warning. CMAKE_REQUIRED_LIBRARIES is set to: /usr/lib/x86_64-linux-gnu/openmpi/lib/libmpi.so For compatibility with CMake 3.11 and below this check is ignoring it. 
Call Stack (most recent call first): /usr/share/cmake-3.18/Modules/CheckTypeSize.cmake:230 (check_include_file) /usr/share/dolfinx/cmake/FindPETSc.cmake:222 (check_type_size) /usr/share/cmake-3.18/Modules/CMakeFindDependencyMacro.cmake:47 (find_package) /usr/share/dolfinx/cmake/DOLFINXConfig.cmake:61 (find_dependency) CMakeLists.txt:13 (find_package) This warning is for project developers. Use -Wno-dev to suppress it. -- Looking for sys/types.h - found -- Looking for stdint.h -- Looking for stdint.h - found -- Looking for stddef.h -- Looking for stddef.h - found -- Check size of PetscInt -- Check size of PetscInt - done -- Looking for PETSC_USE_COMPLEX -- Looking for PETSC_USE_COMPLEX - not found -- Checking for one of the modules 'crayslepc_real;slepc;SLEPc' -- Found Boost 1.74.0 at /usr/lib/x86_64-linux-gnu/cmake/Boost-1.74.0 -- Requested configuration: QUIET REQUIRED COMPONENTS program_options -- Found boost_program_options 1.74.0 at /usr/lib/x86_64-linux-gnu/cmake/boost_program_options-1.74.0 -- [x] libboost_program_options.so.1.74.0 -- [ ] libboost_program_options.a -- Adding boost_program_options dependencies: headers -- Found Boost: /usr/lib/x86_64-linux-gnu/cmake/Boost-1.74.0/BoostConfig.cmake (found suitable version "1.74.0", minimum required is "1.70") found components: program_options -- Configuring done -- Generating done CMake Warning: Manually-specified variables were not used by the project: CMAKE_EXPORT_NO_PACKAGE_REGISTRY CMAKE_INSTALL_LIBDIR CMAKE_INSTALL_LOCALSTATEDIR CMAKE_INSTALL_RUNSTATEDIR CMAKE_INSTALL_SYSCONFDIR -- Build files have been written to: /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu dh_auto_build -a -O--buildsystem=cmake -O--sourcedirectory=src cd obj-x86_64-linux-gnu && make -j4 "INSTALL=install --strip-program=true" VERBOSE=1 make[1]: Entering directory '/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu' /usr/bin/cmake -S/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/src -B/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu --check-build-system CMakeFiles/Makefile.cmake 0 /usr/bin/cmake -E cmake_progress_start /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu/CMakeFiles /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu//CMakeFiles/progress.marks make -f CMakeFiles/Makefile2 all make[2]: Entering directory '/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu' make -f CMakeFiles/dolfinx-scaling-test.dir/build.make CMakeFiles/dolfinx-scaling-test.dir/depend make[3]: Entering directory '/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu' [ 11%] Generating Elasticity.c ffcx /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/src/Elasticity.ufl [ 22%] Generating Poisson.c ffcx /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/src/Poisson.ufl cd /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu && /usr/bin/cmake -E cmake_depends "Unix Makefiles" /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/src 
/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/src /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu/CMakeFiles/dolfinx-scaling-test.dir/DependInfo.cmake --color= Dependee "/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu/CMakeFiles/dolfinx-scaling-test.dir/DependInfo.cmake" is newer than depender "/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu/CMakeFiles/dolfinx-scaling-test.dir/depend.internal". Dependee "/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu/CMakeFiles/CMakeDirectoryInformation.cmake" is newer than depender "/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu/CMakeFiles/dolfinx-scaling-test.dir/depend.internal". Scanning dependencies of target dolfinx-scaling-test make[3]: Leaving directory '/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu' make -f CMakeFiles/dolfinx-scaling-test.dir/build.make CMakeFiles/dolfinx-scaling-test.dir/build make[3]: Entering directory '/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu' [ 33%] Building CXX object CMakeFiles/dolfinx-scaling-test.dir/main.cpp.o /usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_CHRONO_DYN_LINK -DBOOST_PROGRAM_OPTIONS_DYN_LINK -DBOOST_TIMER_DYN_LINK -DDOLFINX_VERSION=\"2019.2.9.99\" -DEIGEN_MAX_ALIGN_BYTES=32 -DHAS_SLEPC -I/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu -I/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/src -isystem /usr/lib/python3/dist-packages/ffcx/codegeneration -isystem /usr/include/eigen3 -isystem /usr/include/hdf5/openmpi -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/include -isystem /usr/lib/slepcdir/slepc3.14/x86_64-linux-gnu-real/include -g -O2 -ffile-prefix-map=/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac=. 
-fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -O3 -DNDEBUG -pthread -std=c++17 -o CMakeFiles/dolfinx-scaling-test.dir/main.cpp.o -c /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/src/main.cpp [ 55%] Building CXX object CMakeFiles/dolfinx-scaling-test.dir/elasticity_problem.cpp.o [ 55%] Building CXX object CMakeFiles/dolfinx-scaling-test.dir/poisson_problem.cpp.o /usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_CHRONO_DYN_LINK -DBOOST_PROGRAM_OPTIONS_DYN_LINK -DBOOST_TIMER_DYN_LINK -DDOLFINX_VERSION=\"2019.2.9.99\" -DEIGEN_MAX_ALIGN_BYTES=32 -DHAS_SLEPC -I/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu -I/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/src -isystem /usr/lib/python3/dist-packages/ffcx/codegeneration -isystem /usr/include/eigen3 -isystem /usr/include/hdf5/openmpi -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/include -isystem /usr/lib/slepcdir/slepc3.14/x86_64-linux-gnu-real/include -g -O2 -ffile-prefix-map=/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac=. -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -O3 -DNDEBUG -pthread -std=c++17 -o CMakeFiles/dolfinx-scaling-test.dir/elasticity_problem.cpp.o -c /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/src/elasticity_problem.cpp /usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_CHRONO_DYN_LINK -DBOOST_PROGRAM_OPTIONS_DYN_LINK -DBOOST_TIMER_DYN_LINK -DDOLFINX_VERSION=\"2019.2.9.99\" -DEIGEN_MAX_ALIGN_BYTES=32 -DHAS_SLEPC -I/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu -I/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/src -isystem /usr/lib/python3/dist-packages/ffcx/codegeneration -isystem /usr/include/eigen3 -isystem /usr/include/hdf5/openmpi -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/include -isystem /usr/lib/slepcdir/slepc3.14/x86_64-linux-gnu-real/include -g -O2 -ffile-prefix-map=/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac=. 
-fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -O3 -DNDEBUG -pthread -std=c++17 -o CMakeFiles/dolfinx-scaling-test.dir/poisson_problem.cpp.o -c /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/src/poisson_problem.cpp [ 66%] Building CXX object CMakeFiles/dolfinx-scaling-test.dir/mesh.cpp.o /usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_CHRONO_DYN_LINK -DBOOST_PROGRAM_OPTIONS_DYN_LINK -DBOOST_TIMER_DYN_LINK -DDOLFINX_VERSION=\"2019.2.9.99\" -DEIGEN_MAX_ALIGN_BYTES=32 -DHAS_SLEPC -I/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu -I/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/src -isystem /usr/lib/python3/dist-packages/ffcx/codegeneration -isystem /usr/include/eigen3 -isystem /usr/include/hdf5/openmpi -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/include -isystem /usr/lib/slepcdir/slepc3.14/x86_64-linux-gnu-real/include -g -O2 -ffile-prefix-map=/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac=. -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -O3 -DNDEBUG -pthread -std=c++17 -o CMakeFiles/dolfinx-scaling-test.dir/mesh.cpp.o -c /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/src/mesh.cpp [ 77%] Building C object CMakeFiles/dolfinx-scaling-test.dir/Elasticity.c.o /usr/bin/cc -DBOOST_ALL_NO_LIB -DBOOST_CHRONO_DYN_LINK -DBOOST_PROGRAM_OPTIONS_DYN_LINK -DBOOST_TIMER_DYN_LINK -DDOLFINX_VERSION=\"2019.2.9.99\" -DEIGEN_MAX_ALIGN_BYTES=32 -DHAS_SLEPC -I/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu -I/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/src -isystem /usr/lib/python3/dist-packages/ffcx/codegeneration -isystem /usr/include/eigen3 -isystem /usr/include/hdf5/openmpi -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/include -isystem /usr/lib/slepcdir/slepc3.14/x86_64-linux-gnu-real/include -g -O2 -ffile-prefix-map=/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac=. 
-fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -O3 -DNDEBUG -pthread -o CMakeFiles/dolfinx-scaling-test.dir/Elasticity.c.o -c /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu/Elasticity.c [ 88%] Building C object CMakeFiles/dolfinx-scaling-test.dir/Poisson.c.o /usr/bin/cc -DBOOST_ALL_NO_LIB -DBOOST_CHRONO_DYN_LINK -DBOOST_PROGRAM_OPTIONS_DYN_LINK -DBOOST_TIMER_DYN_LINK -DDOLFINX_VERSION=\"2019.2.9.99\" -DEIGEN_MAX_ALIGN_BYTES=32 -DHAS_SLEPC -I/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu -I/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/src -isystem /usr/lib/python3/dist-packages/ffcx/codegeneration -isystem /usr/include/eigen3 -isystem /usr/include/hdf5/openmpi -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/include -isystem /usr/lib/slepcdir/slepc3.14/x86_64-linux-gnu-real/include -g -O2 -ffile-prefix-map=/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac=. -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -O3 -DNDEBUG -pthread -o CMakeFiles/dolfinx-scaling-test.dir/Poisson.c.o -c /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu/Poisson.c [100%] Linking CXX executable dolfinx-scaling-test /usr/bin/cmake -E cmake_link_script CMakeFiles/dolfinx-scaling-test.dir/link.txt --verbose=1 /usr/bin/c++ -g -O2 -ffile-prefix-map=/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac=. 
-fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -O3 -DNDEBUG -Wl,-z,relro -pthread CMakeFiles/dolfinx-scaling-test.dir/main.cpp.o CMakeFiles/dolfinx-scaling-test.dir/mesh.cpp.o CMakeFiles/dolfinx-scaling-test.dir/elasticity_problem.cpp.o CMakeFiles/dolfinx-scaling-test.dir/poisson_problem.cpp.o CMakeFiles/dolfinx-scaling-test.dir/Elasticity.c.o CMakeFiles/dolfinx-scaling-test.dir/Poisson.c.o -o dolfinx-scaling-test -Wl,-rpath,/usr/lib/x86_64-linux-gnu/openmpi/lib:/usr/lib/x86_64-linux-gnu/hdf5/openmpi:/usr/lib/slepcdir/slepc3.14/x86_64-linux-gnu-real/lib:/usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/lib: /usr/lib/x86_64-linux-gnu/libdolfinx_real.so.2019.2.9.99 /usr/lib/x86_64-linux-gnu/libboost_program_options.so.1.74.0 /usr/lib/x86_64-linux-gnu/libbasix.so /usr/lib/x86_64-linux-gnu/libboost_timer.so.1.74.0 /usr/lib/x86_64-linux-gnu/libboost_chrono.so.1.74.0 /usr/lib/x86_64-linux-gnu/openmpi/lib/libmpi_cxx.so /usr/lib/x86_64-linux-gnu/openmpi/lib/libmpi.so /usr/lib/x86_64-linux-gnu/hdf5/openmpi/libhdf5.so /usr/lib/x86_64-linux-gnu/libsz.so /usr/lib/x86_64-linux-gnu/libz.so /usr/lib/x86_64-linux-gnu/libdl.so -lm /usr/lib/slepcdir/slepc3.14/x86_64-linux-gnu-real/lib/libslepc_real.so /usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/lib/libpetsc_real.so make[3]: Leaving directory '/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu' [100%] Built target dolfinx-scaling-test make[2]: Leaving directory '/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu' /usr/bin/cmake -E cmake_progress_start /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu/CMakeFiles 0 make[1]: Leaving directory '/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu' debian/rules override_dh_auto_test make[1]: Entering directory '/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac' debian/tests/run-dolfinx-scaling-test -p /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu ==== elasticity weak scaling (8 processes with ndofs=10000) ==== UnitCube (29x26x32) to be refined 0 times ---------------------------------------------------------------- Test problem summary dolfinx version: 2019.2.9.99 dolfinx hash: unknown ufl hash: ac78d56bf5c7fcc5585cc7f0654188e75f1c73b1 petsc version: Petsc Release Version 3.14.4, Feb 03, 2021 Problem type: elasticity Scaling type: weak Num processes: 8 Num cells 144768 Total degrees of freedom: 80190 Average degrees of freedom per process: 10023 ---------------------------------------------------------------- KSP Object: 8 MPI processes type: cg maximum iterations=10000, initial guess is zero tolerances: relative=1e-08, absolute=1e-50, divergence=10000. left preconditioning using PRECONDITIONED norm type for convergence test PC Object: 8 MPI processes type: gamg type is MULTIPLICATIVE, levels=3 cycles=v Cycles per PCApply=1 Using externally compute Galerkin coarse grid matrices GAMG specific options Threshold for dropping small values in graph on each level = 0. Threshold scaling factor for each level not specified = 1. 
AGG specific options Symmetric graph false Number of levels to square graph 1 Number smoothing steps 1 Complexity: grid = 1.40065 Coarse grid solver -- level ------------------------------- KSP Object: (mg_coarse_) 8 MPI processes type: preonly maximum iterations=10000, initial guess is zero tolerances: relative=1e-05, absolute=1e-50, divergence=10000. left preconditioning using NONE norm type for convergence test PC Object: (mg_coarse_) 8 MPI processes type: bjacobi number of blocks = 8 Local solver is the same for all blocks, as in the following KSP and PC objects on rank 0: KSP Object: (mg_coarse_sub_) 1 MPI processes type: preonly maximum iterations=1, initial guess is zero tolerances: relative=1e-05, absolute=1e-50, divergence=10000. left preconditioning using NONE norm type for convergence test PC Object: (mg_coarse_sub_) 1 MPI processes type: lu out-of-place factorization tolerance for zero pivot 2.22045e-14 using diagonal shift on blocks to prevent zero pivot [INBLOCKS] matrix ordering: nd factor fill ratio given 5., needed 1.45817 Factored matrix follows: Mat Object: 1 MPI processes type: seqaij rows=522, cols=522, bs=6 package used to perform factorization: petsc total: nonzeros=222732, allocated nonzeros=222732 using I-node routines: found 157 nodes, limit used is 5 linear system matrix = precond matrix: Mat Object: 1 MPI processes type: seqaij rows=522, cols=522, bs=6 total: nonzeros=152748, allocated nonzeros=152748 total number of mallocs used during MatSetValues calls=0 using I-node routines: found 174 nodes, limit used is 5 linear system matrix = precond matrix: Mat Object: 8 MPI processes type: mpiaij rows=522, cols=522, bs=6 total: nonzeros=152748, allocated nonzeros=152748 total number of mallocs used during MatSetValues calls=0 using I-node (on process 0) routines: found 174 nodes, limit used is 5 Down solver (pre-smoother) on level 1 ------------------------------- KSP Object: (mg_levels_1_) 8 MPI processes type: chebyshev eigenvalue estimates used: min = 0.174437, max = 1.91881 eigenvalues estimate via cg min 0.019383, max 1.74437 eigenvalues estimated using cg with translations [0. 0.1; 0. 1.1] KSP Object: (mg_levels_1_esteig_) 8 MPI processes type: cg maximum iterations=10, initial guess is zero tolerances: relative=1e-12, absolute=1e-50, divergence=10000. left preconditioning using PRECONDITIONED norm type for convergence test estimating eigenvalues using noisy right hand side maximum iterations=2, nonzero initial guess tolerances: relative=1e-05, absolute=1e-50, divergence=10000. left preconditioning using NONE norm type for convergence test PC Object: (mg_levels_1_) 8 MPI processes type: jacobi linear system matrix = precond matrix: Mat Object: 8 MPI processes type: mpiaij rows=6450, cols=6450, bs=6 total: nonzeros=1216692, allocated nonzeros=1216692 total number of mallocs used during MatSetValues calls=0 using scalable MatPtAP() implementation using I-node (on process 0) routines: found 224 nodes, limit used is 5 Up solver (post-smoother) same as down solver (pre-smoother) Down solver (pre-smoother) on level 2 ------------------------------- KSP Object: (mg_levels_2_) 8 MPI processes type: chebyshev eigenvalue estimates used: min = 0.260719, max = 2.86791 eigenvalues estimate via cg min 0.0485577, max 2.60719 eigenvalues estimated using cg with translations [0. 0.1; 0. 1.1] KSP Object: (mg_levels_2_esteig_) 8 MPI processes type: cg maximum iterations=10, initial guess is zero tolerances: relative=1e-12, absolute=1e-50, divergence=10000. 
left preconditioning using PRECONDITIONED norm type for convergence test estimating eigenvalues using noisy right hand side maximum iterations=2, nonzero initial guess tolerances: relative=1e-05, absolute=1e-50, divergence=10000. left preconditioning using NONE norm type for convergence test PC Object: (mg_levels_2_) 8 MPI processes type: jacobi linear system matrix = precond matrix: Mat Object: 8 MPI processes type: mpiaij rows=80190, cols=80190, bs=3 total: nonzeros=3418020, allocated nonzeros=3418020 total number of mallocs used during MatSetValues calls=0 has attached near null space using I-node (on process 0) routines: found 3272 nodes, limit used is 5 Up solver (post-smoother) same as down solver (pre-smoother) linear system matrix = precond matrix: Mat Object: 8 MPI processes type: mpiaij rows=80190, cols=80190, bs=3 total: nonzeros=3418020, allocated nonzeros=3418020 total number of mallocs used during MatSetValues calls=0 has attached near null space using I-node (on process 0) routines: found 3272 nodes, limit used is 5 [MPI_AVG] Summary of timings | reps wall avg wall tot -------------------------------------------------------------------------------------------- Build BoxMesh | 1 41.401339 41.401339 Build dofmap data | 2 1.055780 2.111559 Build sparsity | 1 0.492545 0.492545 Compute SCOTCH graph re-ordering | 2 0.022255 0.044509 Compute dof reordering map | 2 0.072939 0.145878 Compute entities of dim = 1 | 1 3.483299 3.483299 Compute entities of dim = 2 | 1 3.052537 3.052537 Compute graph partition (SCOTCH) | 1 35.559317 35.559317 Compute local part of mesh dual graph | 1 0.229084 0.229084 Compute local-to-local map | 1 0.000777 0.000777 Compute non-local part of mesh dual graph | 1 0.392812 0.392812 Compute-local-to-global links for global/local adjacency list | 1 0.000384 0.000384 Distribute in graph creation AdjacencyList | 1 0.334308 0.334308 Extract partition boundaries from SCOTCH graph | 1 0.001176 0.001176 Fetch float data from remote processes | 1 0.250840 0.250840 Get SCOTCH graph data | 1 0.000014 0.000014 Init dofmap from element dofmap | 2 0.017185 0.034370 PETSc Krylov solver | 1 59.993399 59.993399 SCOTCH: call SCOTCH_dgraphBuild | 1 0.079479 0.079479 SCOTCH: call SCOTCH_dgraphHalo | 1 0.118476 0.118476 SCOTCH: call SCOTCH_dgraphPart | 1 35.336246 35.336246 SCOTCH: call SCOTCH_graphBuild | 2 0.000011 0.000022 SCOTCH: call SCOTCH_graphOrder | 2 0.003704 0.007408 SparsityPattern::assemble | 1 1.568948 1.568948 TOPOLOGY: Create sets | 1 0.267519 0.267519 ZZZ Assemble matrix | 1 2.536932 2.536932 ZZZ Assemble vector | 1 0.174956 0.174956 ZZZ Create Mesh | 1 41.417549 41.417549 ZZZ Create RHS function | 1 0.580997 0.580997 ZZZ Create boundary conditions | 1 0.321985 0.321985 ZZZ Create forms | 1 0.000146 0.000146 ZZZ Create mesh entity permutations | 1 6.650523 6.650523 ZZZ Create near-nullspace | 1 3.309590 3.309590 ZZZ FunctionSpace | 1 1.518685 1.518685 ZZZ Solve | 1 60.767408 60.767408 *** Number of Krylov iterations: 16 *** Solution norm: 6.02584e-05 ************************************************************************************************************************ *** WIDEN YOUR WINDOW TO 120 CHARACTERS. 
Use 'enscript -r -fCourier9' to print this document *** ************************************************************************************************************************ ---------------------------------------------- PETSc Performance Summary: ---------------------------------------------- /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu/dolfinx-scaling-test on a named d16a17b025ea with 8 processors, by builduser Tue Oct 12 19:17:22 2021 Using Petsc Release Version 3.14.4, Feb 03, 2021 Max Max/Min Avg Total Time (sec): 1.220e+02 1.000 1.220e+02 Objects: 3.780e+02 1.005 3.762e+02 Flop: 2.694e+08 1.334 2.251e+08 1.800e+09 Flop/sec: 2.207e+06 1.334 1.844e+06 1.475e+07 MPI Messages: 2.228e+03 1.697 1.752e+03 1.401e+04 MPI Message Lengths: 1.407e+07 1.970 5.712e+03 8.005e+07 MPI Reductions: 6.490e+02 1.000 Flop counting convention: 1 flop = 1 real number operation of type (multiply/divide/add/subtract) e.g., VecAXPY() for real vectors of length N --> 2N flop and VecAXPY() for complex vectors of length N --> 8N flop Summary of Stages: ----- Time ------ ----- Flop ------ --- Messages --- -- Message Lengths -- -- Reductions -- Avg %Total Avg %Total Count %Total Avg %Total Count %Total 0: Main Stage: 1.2203e+02 100.0% 1.8004e+09 100.0% 1.401e+04 100.0% 5.712e+03 100.0% 6.420e+02 98.9% ------------------------------------------------------------------------------------------------------------------------ See the 'Profiling' chapter of the users' manual for details on interpreting output. Phase summary info: Count: number of times phase was executed Time and Flop: Max - maximum over all processors Ratio - ratio of maximum to minimum over all processors Mess: number of messages sent AvgLen: average message length (bytes) Reduct: number of global reductions Global: entire computation Stage: stages of a computation. Set stages with PetscLogStagePush() and PetscLogStagePop(). 
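The profiling report that follows is produced by PETSc's -log_view option, which the test script passes together with the solver options listed later in the '#PETSc Option Table entries' section. A sketch of the corresponding standalone invocation, assuming the built dolfinx-scaling-test binary and an MPI launcher are available (option values copied from this log):

# Weak-scaling elasticity run with PETSc profiling enabled; 8 ranks as in this log.
mpirun -n 8 ./dolfinx-scaling-test \
    --ndofs 10000 --problem_type elasticity --scaling_type weak \
    -ksp_type cg -ksp_rtol 1.0e-8 -ksp_view -log_view -options_left \
    -pc_type gamg -pc_gamg_coarse_eq_limit 1000 \
    -mg_levels_ksp_type chebyshev -mg_levels_esteig_ksp_type cg -mg_levels_pc_type jacobi \
    -matptap_via scalable

The three double-dash options are consumed by the test program itself rather than by PETSc, which is why the -options_left check at the end of the report flags them as unused.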
%T - percent time in this phase %F - percent flop in this phase %M - percent messages in this phase %L - percent message lengths in this phase %R - percent reductions in this phase Total Mflop/s: 10e-6 * (sum of flop over all processors)/(max time over all processors) ------------------------------------------------------------------------------------------------------------------------ Event Count Time (sec) Flop --- Global --- --- Stage ---- Total Max Ratio Max Ratio Max Ratio Mess AvgLen Reduct %T %F %M %L %R %T %F %M %L %R Mflop/s ------------------------------------------------------------------------------------------------------------------------ --- Event Stage 0: Main Stage BuildTwoSided 95 1.0 6.1225e+00 1.2 0.00e+00 0.0 7.8e+02 4.0e+00 9.5e+01 5 0 6 0 15 5 0 6 0 15 0 BuildTwoSidedF 76 1.0 4.0279e+00 1.3 0.00e+00 0.0 3.7e+02 6.3e+04 7.6e+01 3 0 3 29 12 3 0 3 29 12 0 MatMult 192 1.0 1.2706e+01 1.2 1.20e+08 1.1 7.1e+03 3.9e+03 0.0e+00 10 51 50 34 0 10 51 50 34 0 72 MatMultAdd 34 1.0 2.7611e+00 1.2 8.79e+06 1.1 7.1e+02 1.2e+03 0.0e+00 2 4 5 1 0 2 4 5 1 0 24 MatMultTranspose 34 1.0 2.2680e+00 2.0 8.81e+06 1.1 7.1e+02 1.2e+03 0.0e+00 1 4 5 1 0 1 4 5 1 0 30 MatSolve 17 0.0 7.4111e-03 0.0 7.56e+06 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 1021 MatLUFactorSym 1 1.0 1.9888e-0112692.0 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 MatLUFactorNum 1 1.0 4.0695e-01224339.2 6.04e+07 0.0 0.0e+00 0.0e+00 0.0e+00 0 3 0 0 0 0 3 0 0 0 148 MatScale 6 1.0 1.9376e-01 2.0 6.23e+05 1.1 7.4e+01 1.1e+03 0.0e+00 0 0 1 0 0 0 0 1 0 0 25 MatResidual 34 1.0 2.7750e+00 1.4 2.07e+07 1.1 1.3e+03 3.7e+03 0.0e+00 2 9 9 6 0 2 9 9 6 0 57 MatAssemblyBegin 41 1.0 3.1757e+00 1.3 0.00e+00 0.0 3.7e+02 6.3e+04 1.6e+01 2 0 3 29 2 2 0 3 29 2 0 MatAssemblyEnd 41 1.0 7.1713e+00 1.1 2.13e+05 3.3 8.8e+02 3.8e+02 7.0e+01 6 0 6 0 11 6 0 6 0 11 0 MatGetRowIJ 1 0.0 2.3177e-04 0.0 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 MatCreateSubMat 2 1.0 1.9851e+00 1.1 0.00e+00 0.0 4.9e+01 3.5e+04 3.0e+01 2 0 0 2 5 2 0 0 2 5 0 MatGetOrdering 1 0.0 5.0684e-04 0.0 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 MatCoarsen 2 1.0 1.4855e+00 1.1 0.00e+00 0.0 6.0e+02 7.8e+02 9.0e+00 1 0 4 1 1 1 0 4 1 1 0 MatZeroEntries 2 1.0 3.1232e-04 2.3 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 MatView 6 1.5 4.8392e-01 2.2 0.00e+00 0.0 0.0e+00 0.0e+00 4.0e+00 0 0 0 0 1 0 0 0 0 1 0 MatAXPY 2 1.0 2.5827e-01 1.5 6.65e+04 1.1 0.0e+00 0.0e+00 2.0e+00 0 0 0 0 0 0 0 0 0 0 2 MatTranspose 4 1.0 9.7416e-02 5.4 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 MatMatMultSym 6 1.0 2.9044e+00 1.1 0.00e+00 0.0 3.7e+02 7.0e+03 2.0e+01 2 0 3 3 3 2 0 3 3 3 0 MatMatMultNum 2 1.0 4.9502e-01 1.2 7.30e+06 1.2 7.4e+01 2.2e+04 2.0e+00 0 3 1 2 0 0 3 1 2 0 110 MatPtAPSymbolic 2 1.0 5.0346e+00 1.0 0.00e+00 0.0 4.4e+02 3.6e+04 1.4e+01 4 0 3 20 2 4 0 3 20 2 0 MatPtAPNumeric 2 1.0 5.0018e+00 1.0 7.49e+07 1.4 3.3e+02 4.2e+04 1.2e+01 4 28 2 17 2 4 28 2 17 2 102 MatTrnMatMultSym 1 1.0 1.9998e+00 1.1 0.00e+00 0.0 2.5e+02 2.0e+04 1.3e+01 2 0 2 6 2 2 0 2 6 2 0 MatGetLocalMat 7 1.0 7.9578e-0223.4 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 MatGetBrAoCol 6 1.0 8.3004e-01 1.4 0.00e+00 0.0 5.2e+02 3.2e+04 0.0e+00 1 0 4 20 0 1 0 4 20 0 0 VecDot 36 1.0 2.2299e+00 1.2 7.45e+05 1.1 0.0e+00 0.0e+00 3.6e+01 2 0 0 0 6 2 0 0 0 6 3 VecMDot 20 1.0 1.4793e+00 1.5 1.23e+06 1.1 0.0e+00 0.0e+00 2.0e+01 1 1 0 0 3 1 1 0 0 3 6 VecTDot 74 1.0 5.6795e+00 1.3 1.13e+06 1.1 0.0e+00 0.0e+00 7.4e+01 4 0 0 0 11 4 0 0 0 12 2 VecNorm 68 1.0 
5.3019e+00 1.3 9.86e+05 1.1 0.0e+00 0.0e+00 6.8e+01 4 0 0 0 10 4 0 0 0 11 1 VecScale 28 1.0 1.6718e-0247.1 1.85e+05 1.1 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 86 VecCopy 116 1.0 1.1194e-03 1.4 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 VecSet 179 1.0 3.9115e-03 7.8 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 VecAXPY 83 1.0 5.2464e-03 3.2 1.31e+06 1.1 0.0e+00 0.0e+00 0.0e+00 0 1 0 0 0 0 1 0 0 0 1947 VecAYPX 237 1.0 9.4916e-0232.9 2.03e+06 1.1 0.0e+00 0.0e+00 0.0e+00 0 1 0 0 0 0 1 0 0 0 166 VecAXPBYCZ 68 1.0 9.7933e-03 7.8 1.90e+06 1.1 0.0e+00 0.0e+00 0.0e+00 0 1 0 0 0 0 1 0 0 0 1504 VecMAXPY 22 1.0 4.6518e-03 4.7 1.45e+06 1.1 0.0e+00 0.0e+00 0.0e+00 0 1 0 0 0 0 1 0 0 0 2421 VecAssemblyBegin 61 1.0 1.6717e+00 1.9 0.00e+00 0.0 0.0e+00 0.0e+00 6.0e+01 1 0 0 0 9 1 0 0 0 9 0 VecAssemblyEnd 61 1.0 2.6713e-04 1.4 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 VecPointwiseMult 180 1.0 6.3421e-03 2.6 1.01e+06 1.1 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 1230 VecScatterBegin 327 1.0 1.0703e-0110.7 0.00e+00 0.0 1.1e+04 2.9e+03 0.0e+00 0 0 79 41 0 0 0 79 41 0 0 VecScatterEnd 327 1.0 1.7946e+01 1.1 3.42e+04 4.9 0.0e+00 0.0e+00 0.0e+00 14 0 0 0 0 14 0 0 0 0 0 VecSetRandom 2 1.0 1.0229e-03 1.5 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 VecNormalize 28 1.0 2.0243e+00 1.3 5.54e+05 1.1 0.0e+00 0.0e+00 2.8e+01 2 0 0 0 4 2 0 0 0 4 2 SFSetGraph 19 1.0 7.2686e-05 1.5 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 SFSetUp 19 1.0 3.3041e+00 1.4 0.00e+00 0.0 1.2e+03 3.7e+02 1.9e+01 2 0 9 1 3 2 0 9 1 3 0 SFBcastOpBegin 303 1.0 1.1378e-0113.8 0.00e+00 0.0 1.1e+04 3.0e+03 0.0e+00 0 0 77 40 0 0 0 77 40 0 0 SFBcastOpEnd 303 1.0 1.6665e+01 1.1 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 13 0 0 0 0 13 0 0 0 0 0 SFReduceBegin 35 1.0 1.4284e-03 2.8 0.00e+00 0.0 7.5e+02 1.3e+03 0.0e+00 0 0 5 1 0 0 0 5 1 0 0 SFReduceEnd 35 1.0 2.3431e+00 1.9 3.42e+04 4.9 0.0e+00 0.0e+00 0.0e+00 1 0 0 0 0 1 0 0 0 0 0 SFPack 338 1.0 1.0952e-02 8.5 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 SFUnpack 338 1.0 4.7439e-04 1.6 3.42e+04 4.9 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 254 KSPSetUp 7 1.0 5.2160e+00 1.0 1.35e+07 1.1 7.4e+02 3.7e+03 7.2e+01 4 6 5 3 11 4 6 5 3 11 20 KSPSolve 1 1.0 5.9308e+01 1.0 2.66e+08 1.3 1.4e+04 5.5e+03 5.5e+02 49 99 97 93 85 49 99 97 93 86 30 KSPGMRESOrthog 20 1.0 1.4804e+00 1.5 2.46e+06 1.1 0.0e+00 0.0e+00 2.0e+01 1 1 0 0 3 1 1 0 0 3 13 PCGAMGGraph_AGG 2 1.0 4.7044e+00 1.0 6.61e+05 1.1 3.7e+02 4.3e+02 3.8e+01 4 0 3 0 6 4 0 3 0 6 1 PCGAMGCoarse_AGG 2 1.0 3.9679e+00 1.0 0.00e+00 0.0 1.0e+03 5.9e+03 3.0e+01 3 0 7 8 5 3 0 7 8 5 0 PCGAMGProl_AGG 2 1.0 7.1647e+00 1.0 0.00e+00 0.0 2.4e+03 1.5e+03 1.9e+02 6 0 17 5 29 6 0 17 5 29 0 PCGAMGPOpt_AGG 2 1.0 8.1167e+00 1.0 2.31e+07 1.1 1.2e+03 5.8e+03 8.2e+01 7 10 8 9 13 7 10 8 9 13 22 GAMG: createProl 2 1.0 2.3779e+01 1.0 2.38e+07 1.1 5.0e+03 3.4e+03 3.4e+02 19 10 36 21 52 19 10 36 21 53 8 Graph 4 1.0 4.7042e+00 1.0 6.61e+05 1.1 3.7e+02 4.3e+02 3.8e+01 4 0 3 0 6 4 0 3 0 6 1 MIS/Agg 2 1.0 1.4855e+00 1.1 0.00e+00 0.0 6.0e+02 7.8e+02 9.0e+00 1 0 4 1 1 1 0 4 1 1 0 SA: col data 2 1.0 5.4820e+00 1.0 0.00e+00 0.0 2.2e+03 1.2e+03 1.7e+02 4 0 16 3 26 4 0 16 3 26 0 SA: frmProl0 2 1.0 1.1875e+00 1.1 0.00e+00 0.0 1.8e+02 5.0e+03 1.2e+01 1 0 1 1 2 1 0 1 1 2 0 SA: smooth 2 1.0 3.3077e+00 1.0 7.88e+06 1.2 4.4e+02 9.5e+03 2.8e+01 3 3 3 5 4 3 3 3 5 4 18 GAMG: partLevel 2 1.0 1.2037e+01 1.0 7.49e+07 1.4 8.6e+02 3.6e+04 8.1e+01 10 28 6 39 12 10 28 6 39 13 42 repartition 1 1.0 2.1762e+00 1.1 0.00e+00 0.0 9.4e+01 
1.9e+04 5.5e+01 2 0 1 2 8 2 0 1 2 9 0 Invert-Sort 1 1.0 1.9083e-04 1.3 0.00e+00 0.0 0.0e+00 0.0e+00 6.0e+00 0 0 0 0 1 0 0 0 0 1 0 Move A 1 1.0 8.1264e-01 1.0 0.00e+00 0.0 3.5e+01 4.9e+04 1.6e+01 1 0 0 2 2 1 0 0 2 2 0 Move P 1 1.0 1.2806e+00 1.1 0.00e+00 0.0 1.4e+01 4.3e+02 1.7e+01 1 0 0 0 3 1 0 0 0 3 0 PCSetUp 2 1.0 4.2313e+01 1.0 1.51e+08 1.7 6.6e+03 7.7e+03 5.0e+02 34 47 47 64 77 34 47 47 64 78 20 PCSetUpOnBlocks 17 1.0 6.0650e-013780.2 6.04e+07 0.0 0.0e+00 0.0e+00 0.0e+00 0 3 0 0 0 0 3 0 0 0 100 PCApply 39 1.0 1.3452e+01 1.1 1.61e+08 1.7 6.5e+03 3.1e+03 4.0e+00 11 48 46 25 1 11 48 46 25 1 64 ------------------------------------------------------------------------------------------------------------------------ Memory usage is given in bytes: Object Type Creations Destructions Memory Descendants' Mem. Reports information only for process 0. --- Event Stage 0: Main Stage Matrix 68 68 24959460 0. Matrix Coarsen 2 2 1264 0. Matrix Null Space 1 1 704 0. Index Set 55 55 114764 0. IS L to G Mapping 6 6 86804 0. Vec Scatter 17 17 13872 0. Vector 185 185 4346832 0. Star Forest Graph 19 19 21888 0. Krylov Solver 9 9 72064 0. Preconditioner 9 9 9148 0. Viewer 3 2 1696 0. PetscRandom 4 4 2616 0. ======================================================================================================================== Average time to get PetscTime(): 4.86e-08 Average time for MPI_Barrier(): 0.0200122 Average time for zero size MPI_Send(): 0.0125035 #PETSc Option Table entries: --ndofs 10000 --problem_type elasticity --scaling_type weak -ksp_rtol 1.0e-8 -ksp_type cg -ksp_view -log_view -matptap_via scalable -mg_levels_esteig_ksp_type cg -mg_levels_ksp_type chebyshev -mg_levels_pc_type jacobi -options_left -pc_gamg_coarse_eq_limit 1000 -pc_type gamg #End of PETSc Option Table entries Compiled without FORTRAN kernels Compiled with full precision matrices (default) sizeof(short) 2 sizeof(int) 4 sizeof(long) 8 sizeof(void*) 8 sizeof(PetscScalar) 8 sizeof(PetscInt) 4 Configure options: --build=x86_64-linux-gnu --prefix=/usr --includedir=${prefix}/include --mandir=${prefix}/share/man --infodir=${prefix}/share/info --sysconfdir=/etc --localstatedir=/var --with-option-checking=0 --with-silent-rules=0 --libdir=${prefix}/lib/x86_64-linux-gnu --runstatedir=/run --with-maintainer-mode=0 --with-dependency-tracking=0 --with-debugging=0 --shared-library-extension=_real --with-shared-libraries --with-pic=1 --with-cc=mpicc --with-cxx=mpicxx --with-fc=mpif90 --with-cxx-dialect=C++11 --with-opencl=1 --with-blas-lib=-lblas --with-lapack-lib=-llapack --with-scalapack=1 --with-scalapack-lib=-lscalapack-openmpi --with-ptscotch=1 --with-ptscotch-include=/usr/include/scotch --with-ptscotch-lib="-lptesmumps -lptscotch -lptscotcherr" --with-fftw=1 --with-fftw-include="[]" --with-fftw-lib="-lfftw3 -lfftw3_mpi" --with-superlu_dist=1 --with-superlu_dist-include=/usr/include/superlu-dist --with-superlu_dist-lib=-lsuperlu_dist --with-hdf5-include=/usr/include/hdf5/openmpi --with-hdf5-lib="-L/usr/lib/x86_64-linux-gnu/hdf5/openmpi -L/usr/lib/x86_64-linux-gnu/openmpi/lib -lhdf5 -lmpi" --CXX_LINKER_FLAGS=-Wl,--no-as-needed --with-hypre=1 --with-hypre-include=/usr/include/hypre --with-hypre-lib=-lHYPRE_core --with-mumps=1 --with-mumps-include="[]" --with-mumps-lib="-ldmumps -lzmumps -lsmumps -lcmumps -lmumps_common -lpord" --with-suitesparse=1 --with-suitesparse-include=/usr/include/suitesparse --with-suitesparse-lib="-lumfpack -lamd -lcholmod -lklu" --with-superlu=1 --with-superlu-include=/usr/include/superlu --with-superlu-lib=-lsuperlu 
--prefix=/usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real --PETSC_ARCH=x86_64-linux-gnu-real CFLAGS="-g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -Wformat -Werror=format-security -fPIC" CXXFLAGS="-g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -Wformat -Werror=format-security -fPIC" FCFLAGS="-g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -fPIC -ffree-line-length-0" FFLAGS="-g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -fPIC -ffree-line-length-0" CPPFLAGS="-Wdate-time -D_FORTIFY_SOURCE=2" LDFLAGS="-Wl,-z,relro -fPIC" MAKEFLAGS=w ----------------------------------------- Libraries compiled on 2021-02-11 15:51:02 on reproducible Machine characteristics: Linux-4.19.0-14-amd64-x86_64-with-glibc2.31 Using PETSc directory: /usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real Using PETSc arch: ----------------------------------------- Using C compiler: mpicc -g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -Wformat -Werror=format-security -fPIC -Wdate-time -D_FORTIFY_SOURCE=2 Using Fortran compiler: mpif90 -g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -fPIC -ffree-line-length-0 -Wdate-time -D_FORTIFY_SOURCE=2 ----------------------------------------- Using include paths: -I/usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/include -I/usr/include/hypre -I/usr/include/suitesparse -I/usr/include/superlu -I/usr/include/superlu-dist -I/usr/include/scotch -I/usr/include/hdf5/openmpi ----------------------------------------- Using C linker: mpicc Using Fortran linker: mpif90 Using libraries: -L/usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/lib -L/usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/lib -lpetsc_real -L/usr/lib/x86_64-linux-gnu/hdf5/openmpi -L/usr/lib/x86_64-linux-gnu/openmpi/lib -L/usr/lib/x86_64-linux-gnu/openmpi/lib/fortran/gfortran -L/usr/lib/gcc/x86_64-linux-gnu/10 -L/usr/lib/x86_64-linux-gnu -L/lib/x86_64-linux-gnu -lHYPRE_core -ldmumps -lzmumps -lsmumps -lcmumps -lmumps_common -lpord -lscalapack-openmpi -lumfpack -lamd -lcholmod -lklu -lsuperlu -lsuperlu_dist -lfftw3 -lfftw3_mpi -llapack -lblas -lptesmumps -lptscotch -lptscotcherr -lhdf5 -lmpi -lm -lOpenCL -lstdc++ -ldl -lmpi_usempif08 -lmpi_usempi_ignore_tkr -lmpi_mpifh -lmpi -lopen-rte -lopen-pal -lhwloc -levent_core -levent_pthreads -lutil -lgfortran -lm -lrt -lz -lgfortran -lm -lgfortran -lgcc_s -lquadmath -lpthread -lquadmath -lstdc++ -ldl ----------------------------------------- #PETSc Option Table entries: --ndofs 10000 --problem_type elasticity --scaling_type weak -ksp_rtol 1.0e-8 -ksp_type cg -ksp_view -log_view -matptap_via scalable -mg_levels_esteig_ksp_type cg -mg_levels_ksp_type chebyshev -mg_levels_pc_type jacobi -options_left -pc_gamg_coarse_eq_limit 1000 -pc_type gamg #End of PETSc Option Table entries WARNING! There are options you set that were not used! WARNING! could be spelling mistake, etc! There are 3 unused database options. 
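Note on the option table above: the three flags reported as unused (--ndofs, --problem_type, --scaling_type) are consumed by the dolfinx-scaling-test driver itself rather than by PETSc's options database, which is why -options_left flags them; the -ksp_*/-pc_*/-mg_* entries are what actually configure the CG + smoothed-aggregation GAMG solver shown in the KSP/PC dump. A minimal sketch of an equivalent programmatic setup (not the test's actual source; it assumes an already assembled parallel Mat A and Vecs b, x, and omits error checking):

    // Sketch only: CG + GAMG configured roughly as the option table above.
    #include <petscksp.h>

    PetscErrorCode solve_elasticity_like(Mat A, Vec b, Vec x)
    {
      KSP ksp;
      PC  pc;

      // The -mg_levels_* / -pc_gamg_* / -matptap_via entries act on the
      // prefixed inner solvers, so the natural place for them is the
      // options database rather than direct API calls.
      PetscOptionsSetValue(NULL, "-mg_levels_ksp_type", "chebyshev");
      PetscOptionsSetValue(NULL, "-mg_levels_esteig_ksp_type", "cg");
      PetscOptionsSetValue(NULL, "-mg_levels_pc_type", "jacobi");
      PetscOptionsSetValue(NULL, "-pc_gamg_coarse_eq_limit", "1000");
      PetscOptionsSetValue(NULL, "-matptap_via", "scalable");

      KSPCreate(PETSC_COMM_WORLD, &ksp);
      KSPSetOperators(ksp, A, A);
      KSPSetType(ksp, KSPCG);                        // -ksp_type cg
      KSPSetTolerances(ksp, 1.0e-8, PETSC_DEFAULT,   // -ksp_rtol 1.0e-8
                       PETSC_DEFAULT, PETSC_DEFAULT);
      KSPGetPC(ksp, &pc);
      PCSetType(pc, PCGAMG);                         // -pc_type gamg
      KSPSetFromOptions(ksp);  // also honours -ksp_view and -log_view
      // The log above shows the test additionally attaches a near-nullspace
      // to the matrix (reported as "has attached near null space"), which
      // GAMG uses to build its coarse spaces for elasticity.
      return KSPSolve(ksp, b, x);
    }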
They are: Option left: name:--ndofs value: 10000 Option left: name:--problem_type value: elasticity Option left: name:--scaling_type value: weak ==== elasticity strong scaling (8 processes with ndofs=80000) ==== UnitCube (29x26x32) to be refined 0 times ---------------------------------------------------------------- Test problem summary dolfinx version: 2019.2.9.99 dolfinx hash: unknown ufl hash: ac78d56bf5c7fcc5585cc7f0654188e75f1c73b1 petsc version: Petsc Release Version 3.14.4, Feb 03, 2021 Problem type: elasticity Scaling type: strong Num processes: 8 Num cells 144768 Total degrees of freedom: 80190 Average degrees of freedom per process: 10023 ---------------------------------------------------------------- KSP Object: 8 MPI processes type: cg maximum iterations=10000, initial guess is zero tolerances: relative=1e-08, absolute=1e-50, divergence=10000. left preconditioning using PRECONDITIONED norm type for convergence test PC Object: 8 MPI processes type: gamg type is MULTIPLICATIVE, levels=3 cycles=v Cycles per PCApply=1 Using externally compute Galerkin coarse grid matrices GAMG specific options Threshold for dropping small values in graph on each level = 0. Threshold scaling factor for each level not specified = 1. AGG specific options Symmetric graph false Number of levels to square graph 1 Number smoothing steps 1 Complexity: grid = 1.39561 Coarse grid solver -- level ------------------------------- KSP Object: (mg_coarse_) 8 MPI processes type: preonly maximum iterations=10000, initial guess is zero tolerances: relative=1e-05, absolute=1e-50, divergence=10000. left preconditioning using NONE norm type for convergence test PC Object: (mg_coarse_) 8 MPI processes type: bjacobi number of blocks = 8 Local solver is the same for all blocks, as in the following KSP and PC objects on rank 0: KSP Object: (mg_coarse_sub_) 1 MPI processes type: preonly maximum iterations=1, initial guess is zero tolerances: relative=1e-05, absolute=1e-50, divergence=10000. left preconditioning using NONE norm type for convergence test PC Object: (mg_coarse_sub_) 1 MPI processes type: lu out-of-place factorization tolerance for zero pivot 2.22045e-14 using diagonal shift on blocks to prevent zero pivot [INBLOCKS] matrix ordering: nd factor fill ratio given 5., needed 1.43952 Factored matrix follows: Mat Object: 1 MPI processes type: seqaij rows=504, cols=504, bs=6 package used to perform factorization: petsc total: nonzeros=205632, allocated nonzeros=205632 using I-node routines: found 149 nodes, limit used is 5 linear system matrix = precond matrix: Mat Object: 1 MPI processes type: seqaij rows=504, cols=504, bs=6 total: nonzeros=142848, allocated nonzeros=142848 total number of mallocs used during MatSetValues calls=0 using I-node routines: found 168 nodes, limit used is 5 linear system matrix = precond matrix: Mat Object: 8 MPI processes type: mpiaij rows=504, cols=504, bs=6 total: nonzeros=142848, allocated nonzeros=142848 total number of mallocs used during MatSetValues calls=0 using I-node (on process 0) routines: found 168 nodes, limit used is 5 Down solver (pre-smoother) on level 1 ------------------------------- KSP Object: (mg_levels_1_) 8 MPI processes type: chebyshev eigenvalue estimates used: min = 0.180193, max = 1.98212 eigenvalues estimate via cg min 0.0239018, max 1.80193 eigenvalues estimated using cg with translations [0. 0.1; 0. 
1.1] KSP Object: (mg_levels_1_esteig_) 8 MPI processes type: cg maximum iterations=10, initial guess is zero tolerances: relative=1e-12, absolute=1e-50, divergence=10000. left preconditioning using PRECONDITIONED norm type for convergence test estimating eigenvalues using noisy right hand side maximum iterations=2, nonzero initial guess tolerances: relative=1e-05, absolute=1e-50, divergence=10000. left preconditioning using NONE norm type for convergence test PC Object: (mg_levels_1_) 8 MPI processes type: jacobi linear system matrix = precond matrix: Mat Object: 8 MPI processes type: mpiaij rows=6402, cols=6402, bs=6 total: nonzeros=1209348, allocated nonzeros=1209348 total number of mallocs used during MatSetValues calls=0 using scalable MatPtAP() implementation using I-node (on process 0) routines: found 250 nodes, limit used is 5 Up solver (post-smoother) same as down solver (pre-smoother) Down solver (pre-smoother) on level 2 ------------------------------- KSP Object: (mg_levels_2_) 8 MPI processes type: chebyshev eigenvalue estimates used: min = 0.261303, max = 2.87434 eigenvalues estimate via cg min 0.0508667, max 2.61303 eigenvalues estimated using cg with translations [0. 0.1; 0. 1.1] KSP Object: (mg_levels_2_esteig_) 8 MPI processes type: cg maximum iterations=10, initial guess is zero tolerances: relative=1e-12, absolute=1e-50, divergence=10000. left preconditioning using PRECONDITIONED norm type for convergence test estimating eigenvalues using noisy right hand side maximum iterations=2, nonzero initial guess tolerances: relative=1e-05, absolute=1e-50, divergence=10000. left preconditioning using NONE norm type for convergence test PC Object: (mg_levels_2_) 8 MPI processes type: jacobi linear system matrix = precond matrix: Mat Object: 8 MPI processes type: mpiaij rows=80190, cols=80190, bs=3 total: nonzeros=3418020, allocated nonzeros=3418020 total number of mallocs used during MatSetValues calls=0 has attached near null space using I-node (on process 0) routines: found 3347 nodes, limit used is 5 Up solver (post-smoother) same as down solver (pre-smoother) linear system matrix = precond matrix: Mat Object: 8 MPI processes type: mpiaij rows=80190, cols=80190, bs=3 total: nonzeros=3418020, allocated nonzeros=3418020 total number of mallocs used during MatSetValues calls=0 has attached near null space using I-node (on process 0) routines: found 3347 nodes, limit used is 5 [MPI_AVG] Summary of timings | reps wall avg wall tot -------------------------------------------------------------------------------------------- Build BoxMesh | 1 34.334412 34.334412 Build dofmap data | 2 1.207781 2.415563 Build sparsity | 1 0.499985 0.499985 Compute SCOTCH graph re-ordering | 2 0.008090 0.016181 Compute dof reordering map | 2 0.075480 0.150960 Compute entities of dim = 1 | 1 3.203835 3.203835 Compute entities of dim = 2 | 1 3.151443 3.151443 Compute graph partition (SCOTCH) | 1 28.396776 28.396776 Compute local part of mesh dual graph | 1 0.216723 0.216723 Compute local-to-local map | 1 0.000775 0.000775 Compute non-local part of mesh dual graph | 1 0.529389 0.529389 Compute-local-to-global links for global/local adjacency list | 1 0.009824 0.009824 Distribute in graph creation AdjacencyList | 1 0.105037 0.105037 Extract partition boundaries from SCOTCH graph | 1 0.000944 0.000944 Fetch float data from remote processes | 1 0.292404 0.292404 Get SCOTCH graph data | 1 0.000010 0.000010 Init dofmap from element dofmap | 2 0.022540 0.045080 PETSc Krylov solver | 1 62.847257 62.847257 SCOTCH: 
call SCOTCH_dgraphBuild | 1 0.156271 0.156271 SCOTCH: call SCOTCH_dgraphHalo | 1 0.187872 0.187872 SCOTCH: call SCOTCH_dgraphPart | 1 28.018535 28.018535 SCOTCH: call SCOTCH_graphBuild | 2 0.000010 0.000020 SCOTCH: call SCOTCH_graphOrder | 2 0.001987 0.003975 SparsityPattern::assemble | 1 1.412812 1.412812 TOPOLOGY: Create sets | 1 0.369555 0.369555 ZZZ Assemble matrix | 1 2.626369 2.626369 ZZZ Assemble vector | 1 0.151414 0.151414 ZZZ Create Mesh | 1 34.345059 34.345059 ZZZ Create RHS function | 1 0.530136 0.530136 ZZZ Create boundary conditions | 1 0.349056 0.349056 ZZZ Create forms | 1 0.000080 0.000080 ZZZ Create mesh entity permutations | 1 6.456692 6.456692 ZZZ Create near-nullspace | 1 3.847992 3.847992 ZZZ FunctionSpace | 1 1.176510 1.176510 ZZZ Solve | 1 63.708493 63.708493 *** Number of Krylov iterations: 16 *** Solution norm: 6.02584e-05 ************************************************************************************************************************ *** WIDEN YOUR WINDOW TO 120 CHARACTERS. Use 'enscript -r -fCourier9' to print this document *** ************************************************************************************************************************ ---------------------------------------------- PETSc Performance Summary: ---------------------------------------------- /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu/dolfinx-scaling-test on a named d16a17b025ea with 8 processors, by builduser Tue Oct 12 19:20:23 2021 Using Petsc Release Version 3.14.4, Feb 03, 2021 Max Max/Min Avg Total Time (sec): 1.175e+02 1.002 1.174e+02 Objects: 3.780e+02 1.005 3.762e+02 Flop: 2.712e+08 1.363 2.230e+08 1.784e+09 Flop/sec: 2.310e+06 1.363 1.900e+06 1.520e+07 MPI Messages: 2.234e+03 1.968 1.614e+03 1.291e+04 MPI Message Lengths: 1.376e+07 1.956 6.050e+03 7.810e+07 MPI Reductions: 6.480e+02 1.000 Flop counting convention: 1 flop = 1 real number operation of type (multiply/divide/add/subtract) e.g., VecAXPY() for real vectors of length N --> 2N flop and VecAXPY() for complex vectors of length N --> 8N flop Summary of Stages: ----- Time ------ ----- Flop ------ --- Messages --- -- Message Lengths -- -- Reductions -- Avg %Total Avg %Total Count %Total Avg %Total Count %Total 0: Main Stage: 1.1739e+02 100.0% 1.7841e+09 100.0% 1.291e+04 100.0% 6.050e+03 100.0% 6.410e+02 98.9% ------------------------------------------------------------------------------------------------------------------------ See the 'Profiling' chapter of the users' manual for details on interpreting output. Phase summary info: Count: number of times phase was executed Time and Flop: Max - maximum over all processors Ratio - ratio of maximum to minimum over all processors Mess: number of messages sent AvgLen: average message length (bytes) Reduct: number of global reductions Global: entire computation Stage: stages of a computation. Set stages with PetscLogStagePush() and PetscLogStagePop(). 
%T - percent time in this phase %F - percent flop in this phase %M - percent messages in this phase %L - percent message lengths in this phase %R - percent reductions in this phase Total Mflop/s: 10e-6 * (sum of flop over all processors)/(max time over all processors) ------------------------------------------------------------------------------------------------------------------------ Event Count Time (sec) Flop --- Global --- --- Stage ---- Total Max Ratio Max Ratio Max Ratio Mess AvgLen Reduct %T %F %M %L %R %T %F %M %L %R Mflop/s ------------------------------------------------------------------------------------------------------------------------ --- Event Stage 0: Main Stage BuildTwoSided 95 1.0 6.1955e+00 1.3 0.00e+00 0.0 7.6e+02 4.0e+00 9.5e+01 5 0 6 0 15 5 0 6 0 15 0 BuildTwoSidedF 76 1.0 4.3731e+00 1.4 0.00e+00 0.0 3.9e+02 5.9e+04 7.6e+01 3 0 3 29 12 3 0 3 29 12 0 MatMult 192 1.0 1.2800e+01 1.1 1.21e+08 1.1 6.5e+03 4.1e+03 0.0e+00 10 51 51 34 0 10 51 51 34 0 71 MatMultAdd 34 1.0 3.3399e+00 1.3 8.63e+06 1.1 7.0e+02 1.2e+03 0.0e+00 3 4 5 1 0 3 4 5 1 0 20 MatMultTranspose 34 1.0 2.3626e+00 1.8 8.63e+06 1.1 7.0e+02 1.2e+03 0.0e+00 2 4 5 1 0 2 4 5 1 0 28 MatSolve 17 0.0 7.7037e-03 0.0 6.98e+06 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 906 MatLUFactorSym 1 1.0 2.0083e-0110288.2 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 MatLUFactorNum 1 1.0 9.0523e-01409236.4 5.30e+07 0.0 0.0e+00 0.0e+00 0.0e+00 0 3 0 0 0 0 3 0 0 0 59 MatScale 6 1.0 1.7847e-0111.2 6.12e+05 1.1 6.8e+01 1.2e+03 0.0e+00 0 0 1 0 0 0 0 1 0 0 27 MatResidual 34 1.0 2.6247e+00 1.5 2.08e+07 1.1 1.2e+03 3.9e+03 0.0e+00 2 9 9 6 0 2 9 9 6 0 60 MatAssemblyBegin 41 1.0 3.2061e+00 1.5 0.00e+00 0.0 3.9e+02 5.9e+04 1.6e+01 2 0 3 29 2 2 0 3 29 2 0 MatAssemblyEnd 41 1.0 7.1222e+00 1.1 1.92e+05 2.9 8.5e+02 3.8e+02 7.0e+01 6 0 7 0 11 6 0 7 0 11 0 MatGetRowIJ 1 0.0 1.5267e-04 0.0 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 MatCreateSubMat 2 1.0 2.7739e+00 1.1 0.00e+00 0.0 4.9e+01 3.3e+04 3.0e+01 2 0 0 2 5 2 0 0 2 5 0 MatGetOrdering 1 0.0 3.8208e-04 0.0 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 MatCoarsen 2 1.0 1.4686e+00 1.1 0.00e+00 0.0 4.8e+02 8.4e+02 8.0e+00 1 0 4 1 1 1 0 4 1 1 0 MatZeroEntries 2 1.0 2.4548e-02159.7 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 MatView 6 1.5 3.8413e-01 2.0 0.00e+00 0.0 0.0e+00 0.0e+00 4.0e+00 0 0 0 0 1 0 0 0 0 1 0 MatAXPY 2 1.0 2.9523e-01 1.6 6.71e+04 1.1 0.0e+00 0.0e+00 2.0e+00 0 0 0 0 0 0 0 0 0 0 2 MatTranspose 4 1.0 1.0089e-0117.2 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 MatMatMultSym 6 1.0 3.3135e+00 1.1 0.00e+00 0.0 3.5e+02 7.1e+03 2.0e+01 3 0 3 3 3 3 0 3 3 3 0 MatMatMultNum 2 1.0 4.9713e-01 1.6 7.36e+06 1.2 6.8e+01 2.3e+04 2.0e+00 0 3 1 2 0 0 3 1 2 0 109 MatPtAPSymbolic 2 1.0 5.1121e+00 1.0 0.00e+00 0.0 4.3e+02 3.5e+04 1.4e+01 4 0 3 19 2 4 0 3 19 2 0 MatPtAPNumeric 2 1.0 4.8810e+00 1.0 6.97e+07 1.3 3.3e+02 4.1e+04 1.2e+01 4 28 3 17 2 4 28 3 17 2 103 MatTrnMatMultSym 1 1.0 1.7084e+00 1.0 0.00e+00 0.0 2.4e+02 2.1e+04 1.3e+01 1 0 2 7 2 1 0 2 7 2 0 MatGetLocalMat 7 1.0 9.4288e-0227.4 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 MatGetBrAoCol 6 1.0 9.8763e-01 1.5 0.00e+00 0.0 4.8e+02 3.3e+04 0.0e+00 1 0 4 20 0 1 0 4 20 0 0 VecDot 36 1.0 2.8594e+00 1.2 7.34e+05 1.1 0.0e+00 0.0e+00 3.6e+01 2 0 0 0 6 2 0 0 0 6 2 VecMDot 20 1.0 1.8408e+00 1.9 1.23e+06 1.1 0.0e+00 0.0e+00 2.0e+01 1 1 0 0 3 1 1 0 0 3 5 VecTDot 74 1.0 5.0600e+00 1.1 1.12e+06 1.1 0.0e+00 0.0e+00 7.4e+01 4 0 0 0 11 4 0 0 0 12 2 VecNorm 68 1.0 
5.7801e+00 1.4 9.82e+05 1.1 0.0e+00 0.0e+00 6.8e+01 4 0 0 0 10 4 0 0 0 11 1 VecScale 28 1.0 6.9022e-04 1.8 1.84e+05 1.1 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 2077 VecCopy 116 1.0 1.5973e-03 1.6 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 VecSet 179 1.0 1.6708e-0231.2 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 VecAXPY 82 1.0 8.0396e-0244.1 1.29e+06 1.1 0.0e+00 0.0e+00 0.0e+00 0 1 0 0 0 0 1 0 0 0 125 VecAYPX 237 1.0 9.8671e-0229.2 2.03e+06 1.1 0.0e+00 0.0e+00 0.0e+00 0 1 0 0 0 0 1 0 0 0 160 VecAXPBYCZ 68 1.0 9.5128e-0269.0 1.90e+06 1.1 0.0e+00 0.0e+00 0.0e+00 0 1 0 0 0 0 1 0 0 0 155 VecMAXPY 22 1.0 1.1862e-03 1.1 1.45e+06 1.1 0.0e+00 0.0e+00 0.0e+00 0 1 0 0 0 0 1 0 0 0 9490 VecAssemblyBegin 61 1.0 1.6692e+00 1.4 0.00e+00 0.0 0.0e+00 0.0e+00 6.0e+01 1 0 0 0 9 1 0 0 0 9 0 VecAssemblyEnd 61 1.0 6.0537e-04 3.0 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 VecPointwiseMult 180 1.0 6.9982e-03 2.6 1.01e+06 1.1 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 1114 VecScatterBegin 327 1.0 1.0311e-0110.2 0.00e+00 0.0 1.0e+04 3.2e+03 0.0e+00 0 0 79 41 0 0 0 79 41 0 0 VecScatterEnd 327 1.0 1.9421e+01 1.1 3.25e+04 4.6 0.0e+00 0.0e+00 0.0e+00 15 0 0 0 0 15 0 0 0 0 0 VecSetRandom 2 1.0 8.5632e-04 1.4 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 VecNormalize 28 1.0 2.3681e+00 2.3 5.53e+05 1.1 0.0e+00 0.0e+00 2.8e+01 2 0 0 0 4 2 0 0 0 4 2 SFSetGraph 19 1.0 2.4592e-04 4.9 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 SFSetUp 19 1.0 2.6969e+00 1.2 0.00e+00 0.0 1.1e+03 3.8e+02 1.9e+01 2 0 9 1 3 2 0 9 1 3 0 SFBcastOpBegin 302 1.0 1.0148e-0111.9 0.00e+00 0.0 9.8e+03 3.2e+03 0.0e+00 0 0 76 40 0 0 0 76 40 0 0 SFBcastOpEnd 302 1.0 1.8149e+01 1.2 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 14 0 0 0 0 14 0 0 0 0 0 SFReduceBegin 35 1.0 7.3068e-04 1.4 0.00e+00 0.0 7.3e+02 1.3e+03 0.0e+00 0 0 6 1 0 0 0 6 1 0 0 SFReduceEnd 35 1.0 2.3540e+00 1.8 3.25e+04 4.6 0.0e+00 0.0e+00 0.0e+00 2 0 0 0 0 2 0 0 0 0 0 SFPack 337 1.0 1.3037e-02 8.5 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 SFUnpack 337 1.0 6.4963e-04 2.1 3.25e+04 4.6 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 179 KSPSetUp 7 1.0 5.9751e+00 1.0 1.36e+07 1.1 6.8e+02 3.9e+03 7.2e+01 5 6 5 3 11 5 6 5 3 11 17 KSPSolve 1 1.0 6.2203e+01 1.0 2.68e+08 1.4 1.3e+04 5.8e+03 5.5e+02 53 99 97 93 85 53 99 97 93 85 28 KSPGMRESOrthog 20 1.0 1.8424e+00 1.9 2.46e+06 1.1 0.0e+00 0.0e+00 2.0e+01 1 1 0 0 3 1 1 0 0 3 10 PCGAMGGraph_AGG 2 1.0 3.9797e+00 1.1 6.66e+05 1.1 3.4e+02 4.6e+02 3.8e+01 3 0 3 0 6 3 0 3 0 6 1 PCGAMGCoarse_AGG 2 1.0 4.0081e+00 1.0 0.00e+00 0.0 8.8e+02 6.8e+03 2.9e+01 3 0 7 8 4 3 0 7 8 5 0 PCGAMGProl_AGG 2 1.0 6.1707e+00 1.0 0.00e+00 0.0 2.1e+03 1.7e+03 1.9e+02 5 0 16 5 29 5 0 16 5 29 0 PCGAMGPOpt_AGG 2 1.0 8.9119e+00 1.0 2.33e+07 1.1 1.1e+03 6.2e+03 8.2e+01 8 10 9 9 13 8 10 9 9 13 20 GAMG: createProl 2 1.0 2.2805e+01 1.0 2.39e+07 1.1 4.4e+03 3.7e+03 3.4e+02 19 10 34 21 52 19 10 34 21 53 8 Graph 4 1.0 3.9795e+00 1.1 6.66e+05 1.1 3.4e+02 4.6e+02 3.8e+01 3 0 3 0 6 3 0 3 0 6 1 MIS/Agg 2 1.0 1.4686e+00 1.1 0.00e+00 0.0 4.8e+02 8.4e+02 8.0e+00 1 0 4 1 1 1 0 4 1 1 0 SA: col data 2 1.0 4.2719e+00 1.0 0.00e+00 0.0 1.9e+03 1.4e+03 1.7e+02 4 0 15 3 26 4 0 15 3 26 0 SA: frmProl0 2 1.0 1.3993e+00 1.1 0.00e+00 0.0 2.0e+02 4.5e+03 1.2e+01 1 0 2 1 2 1 0 2 1 2 0 SA: smooth 2 1.0 3.8019e+00 1.0 7.93e+06 1.2 4.2e+02 9.7e+03 2.8e+01 3 3 3 5 4 3 3 3 5 4 15 GAMG: partLevel 2 1.0 1.4614e+01 1.0 6.97e+07 1.3 8.5e+02 3.6e+04 8.1e+01 12 28 7 39 12 12 28 7 39 13 34 repartition 1 1.0 4.7980e+00 1.0 0.00e+00 0.0 9.4e+01 
1.8e+04 5.5e+01 4 0 1 2 8 4 0 1 2 9 0 Invert-Sort 1 1.0 6.9093e-01 1.6 0.00e+00 0.0 0.0e+00 0.0e+00 6.0e+00 0 0 0 0 1 0 0 0 0 1 0 Move A 1 1.0 1.4946e+00 1.2 0.00e+00 0.0 3.5e+01 4.6e+04 1.6e+01 1 0 0 2 2 1 0 0 2 2 0 Move P 1 1.0 1.5081e+00 1.1 0.00e+00 0.0 1.4e+01 4.3e+02 1.7e+01 1 0 0 0 3 1 0 0 0 3 0 PCSetUp 2 1.0 4.5006e+01 1.0 1.49e+08 1.7 6.0e+03 8.3e+03 5.0e+02 38 47 46 63 77 38 47 46 63 78 19 PCSetUpOnBlocks 17 1.0 1.1066e+005515.6 5.30e+07 0.0 0.0e+00 0.0e+00 0.0e+00 0 3 0 0 0 0 3 0 0 0 48 PCApply 39 1.0 1.4248e+01 1.1 1.57e+08 1.7 6.0e+03 3.3e+03 4.0e+00 12 48 47 26 1 12 48 47 26 1 60 ------------------------------------------------------------------------------------------------------------------------ Memory usage is given in bytes: Object Type Creations Destructions Memory Descendants' Mem. Reports information only for process 0. --- Event Stage 0: Main Stage Matrix 68 68 25190292 0. Matrix Coarsen 2 2 1264 0. Matrix Null Space 1 1 704 0. Index Set 55 55 111672 0. IS L to G Mapping 6 6 87380 0. Vec Scatter 17 17 13872 0. Vector 185 185 4443944 0. Star Forest Graph 19 19 21888 0. Krylov Solver 9 9 72064 0. Preconditioner 9 9 9148 0. Viewer 3 2 1696 0. PetscRandom 4 4 2616 0. ======================================================================================================================== Average time to get PetscTime(): 5.78e-08 Average time for MPI_Barrier(): 0.0647081 Average time for zero size MPI_Send(): 0.0215454 #PETSc Option Table entries: --ndofs 80000 --problem_type elasticity --scaling_type strong -ksp_rtol 1.0e-8 -ksp_type cg -ksp_view -log_view -matptap_via scalable -mg_levels_esteig_ksp_type cg -mg_levels_ksp_type chebyshev -mg_levels_pc_type jacobi -options_left -pc_gamg_coarse_eq_limit 1000 -pc_type gamg #End of PETSc Option Table entries Compiled without FORTRAN kernels Compiled with full precision matrices (default) sizeof(short) 2 sizeof(int) 4 sizeof(long) 8 sizeof(void*) 8 sizeof(PetscScalar) 8 sizeof(PetscInt) 4 Configure options: --build=x86_64-linux-gnu --prefix=/usr --includedir=${prefix}/include --mandir=${prefix}/share/man --infodir=${prefix}/share/info --sysconfdir=/etc --localstatedir=/var --with-option-checking=0 --with-silent-rules=0 --libdir=${prefix}/lib/x86_64-linux-gnu --runstatedir=/run --with-maintainer-mode=0 --with-dependency-tracking=0 --with-debugging=0 --shared-library-extension=_real --with-shared-libraries --with-pic=1 --with-cc=mpicc --with-cxx=mpicxx --with-fc=mpif90 --with-cxx-dialect=C++11 --with-opencl=1 --with-blas-lib=-lblas --with-lapack-lib=-llapack --with-scalapack=1 --with-scalapack-lib=-lscalapack-openmpi --with-ptscotch=1 --with-ptscotch-include=/usr/include/scotch --with-ptscotch-lib="-lptesmumps -lptscotch -lptscotcherr" --with-fftw=1 --with-fftw-include="[]" --with-fftw-lib="-lfftw3 -lfftw3_mpi" --with-superlu_dist=1 --with-superlu_dist-include=/usr/include/superlu-dist --with-superlu_dist-lib=-lsuperlu_dist --with-hdf5-include=/usr/include/hdf5/openmpi --with-hdf5-lib="-L/usr/lib/x86_64-linux-gnu/hdf5/openmpi -L/usr/lib/x86_64-linux-gnu/openmpi/lib -lhdf5 -lmpi" --CXX_LINKER_FLAGS=-Wl,--no-as-needed --with-hypre=1 --with-hypre-include=/usr/include/hypre --with-hypre-lib=-lHYPRE_core --with-mumps=1 --with-mumps-include="[]" --with-mumps-lib="-ldmumps -lzmumps -lsmumps -lcmumps -lmumps_common -lpord" --with-suitesparse=1 --with-suitesparse-include=/usr/include/suitesparse --with-suitesparse-lib="-lumfpack -lamd -lcholmod -lklu" --with-superlu=1 --with-superlu-include=/usr/include/superlu --with-superlu-lib=-lsuperlu 
--prefix=/usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real --PETSC_ARCH=x86_64-linux-gnu-real CFLAGS="-g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -Wformat -Werror=format-security -fPIC" CXXFLAGS="-g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -Wformat -Werror=format-security -fPIC" FCFLAGS="-g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -fPIC -ffree-line-length-0" FFLAGS="-g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -fPIC -ffree-line-length-0" CPPFLAGS="-Wdate-time -D_FORTIFY_SOURCE=2" LDFLAGS="-Wl,-z,relro -fPIC" MAKEFLAGS=w ----------------------------------------- Libraries compiled on 2021-02-11 15:51:02 on reproducible Machine characteristics: Linux-4.19.0-14-amd64-x86_64-with-glibc2.31 Using PETSc directory: /usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real Using PETSc arch: ----------------------------------------- Using C compiler: mpicc -g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -Wformat -Werror=format-security -fPIC -Wdate-time -D_FORTIFY_SOURCE=2 Using Fortran compiler: mpif90 -g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -fPIC -ffree-line-length-0 -Wdate-time -D_FORTIFY_SOURCE=2 ----------------------------------------- Using include paths: -I/usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/include -I/usr/include/hypre -I/usr/include/suitesparse -I/usr/include/superlu -I/usr/include/superlu-dist -I/usr/include/scotch -I/usr/include/hdf5/openmpi ----------------------------------------- Using C linker: mpicc Using Fortran linker: mpif90 Using libraries: -L/usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/lib -L/usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/lib -lpetsc_real -L/usr/lib/x86_64-linux-gnu/hdf5/openmpi -L/usr/lib/x86_64-linux-gnu/openmpi/lib -L/usr/lib/x86_64-linux-gnu/openmpi/lib/fortran/gfortran -L/usr/lib/gcc/x86_64-linux-gnu/10 -L/usr/lib/x86_64-linux-gnu -L/lib/x86_64-linux-gnu -lHYPRE_core -ldmumps -lzmumps -lsmumps -lcmumps -lmumps_common -lpord -lscalapack-openmpi -lumfpack -lamd -lcholmod -lklu -lsuperlu -lsuperlu_dist -lfftw3 -lfftw3_mpi -llapack -lblas -lptesmumps -lptscotch -lptscotcherr -lhdf5 -lmpi -lm -lOpenCL -lstdc++ -ldl -lmpi_usempif08 -lmpi_usempi_ignore_tkr -lmpi_mpifh -lmpi -lopen-rte -lopen-pal -lhwloc -levent_core -levent_pthreads -lutil -lgfortran -lm -lrt -lz -lgfortran -lm -lgfortran -lgcc_s -lquadmath -lpthread -lquadmath -lstdc++ -ldl ----------------------------------------- #PETSc Option Table entries: --ndofs 80000 --problem_type elasticity --scaling_type strong -ksp_rtol 1.0e-8 -ksp_type cg -ksp_view -log_view -matptap_via scalable -mg_levels_esteig_ksp_type cg -mg_levels_ksp_type chebyshev -mg_levels_pc_type jacobi -options_left -pc_gamg_coarse_eq_limit 1000 -pc_type gamg #End of PETSc Option Table entries WARNING! There are options you set that were not used! WARNING! could be spelling mistake, etc! There are 3 unused database options. 
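All events in the summaries above are accumulated into a single "Main Stage", so the SCOTCH partitioning cost and the Krylov solve are only distinguished by dolfinx's own [MPI_AVG] timing table; the legend's mention of PetscLogStagePush()/PetscLogStagePop() is how an application would split them inside the PETSc summary itself. As a consistency check on the figures above: for the elasticity strong-scaling run, 1.784e+09 total flop over roughly 1.175e+02 s is about 1.5e+07 flop/s, in line with the reported 1.520e+07. A minimal sketch with hypothetical stage names (run_with_stages, "Mesh setup" and "Linear solve" are illustrative, not part of the test):

    // Sketch only: split mesh setup and the solve into separate -log_view stages.
    #include <petscsys.h>

    PetscErrorCode run_with_stages(void)
    {
      PetscLogStage stage_mesh, stage_solve;
      PetscLogStageRegister("Mesh setup", &stage_mesh);
      PetscLogStageRegister("Linear solve", &stage_solve);

      PetscLogStagePush(stage_mesh);
      /* ... create mesh, dofmaps, sparsity pattern ... */
      PetscLogStagePop();

      PetscLogStagePush(stage_solve);
      /* ... assemble and KSPSolve ... */
      PetscLogStagePop();
      return 0;
    }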
They are: Option left: name:--ndofs value: 80000 Option left: name:--problem_type value: elasticity Option left: name:--scaling_type value: strong ==== poisson weak scaling (8 processes with ndofs=10000) ==== UnitCube (42x42x42) to be refined 0 times ---------------------------------------------------------------- Test problem summary dolfinx version: 2019.2.9.99 dolfinx hash: unknown ufl hash: ac78d56bf5c7fcc5585cc7f0654188e75f1c73b1 petsc version: Petsc Release Version 3.14.4, Feb 03, 2021 Problem type: poisson Scaling type: weak Num processes: 8 Num cells 444528 Total degrees of freedom: 79507 Average degrees of freedom per process: 9938 ---------------------------------------------------------------- KSP Object: 8 MPI processes type: cg maximum iterations=10000, initial guess is zero tolerances: relative=1e-08, absolute=1e-50, divergence=10000. left preconditioning using PRECONDITIONED norm type for convergence test PC Object: 8 MPI processes type: hypre HYPRE BoomerAMG preconditioning Cycle type V Maximum number of levels 25 Maximum number of iterations PER hypre call 1 Convergence tolerance PER hypre call 0. Threshold for strong coupling 0.5 Interpolation truncation factor 0. Interpolation: max elements per row 0 Number of levels of aggressive coarsening 0 Number of paths for aggressive coarsening 1 Maximum row sums 0.9 Sweeps down 1 Sweeps up 1 Sweeps on coarse 1 Relax down symmetric-SOR/Jacobi Relax up symmetric-SOR/Jacobi Relax on coarse Gaussian-elimination Relax weight (all) 1. Outer relax weight (all) 1. Using CF-relaxation Not using more complex smoothers. Measure type local Coarsen type Falgout Interpolation type classical linear system matrix = precond matrix: Mat Object: 8 MPI processes type: mpiaij rows=79507, cols=79507 total: nonzeros=1148743, allocated nonzeros=1148743 total number of mallocs used during MatSetValues calls=0 not using I-node (on process 0) routines [MPI_AVG] Summary of timings | reps wall avg wall tot -------------------------------------------------------------------------------------------- Build BoxMesh | 1 68.667475 68.667475 Build dofmap data | 2 1.848135 3.696271 Build sparsity | 1 0.630156 0.630156 Compute SCOTCH graph re-ordering | 2 0.047207 0.094414 Compute connectivity 2-3 | 1 0.017895 0.017895 Compute dof reordering map | 2 0.217330 0.434661 Compute entities of dim = 1 | 1 6.402701 6.402701 Compute entities of dim = 2 | 1 4.617055 4.617055 Compute graph partition (SCOTCH) | 1 60.215214 60.215214 Compute local part of mesh dual graph | 1 0.663528 0.663528 Compute local-to-local map | 1 0.006690 0.006690 Compute non-local part of mesh dual graph | 1 0.573753 0.573753 Compute-local-to-global links for global/local adjacency list | 1 0.002152 0.002152 Distribute in graph creation AdjacencyList | 1 0.311780 0.311780 Extract partition boundaries from SCOTCH graph | 1 0.006068 0.006068 Fetch float data from remote processes | 1 0.234161 0.234161 Get SCOTCH graph data | 1 0.000015 0.000015 Init dofmap from element dofmap | 2 0.115539 0.231078 PETSc Krylov solver | 1 95.021319 95.021319 SCOTCH: call SCOTCH_dgraphBuild | 1 0.175808 0.175808 SCOTCH: call SCOTCH_dgraphHalo | 1 0.147937 0.147937 SCOTCH: call SCOTCH_dgraphPart | 1 59.845157 59.845157 SCOTCH: call SCOTCH_graphBuild | 2 0.000022 0.000043 SCOTCH: call SCOTCH_graphOrder | 2 0.011573 0.023146 SparsityPattern::assemble | 1 1.723470 1.723470 TOPOLOGY: Create sets | 1 0.611032 0.611032 ZZZ Assemble | 1 9.755633 9.755633 ZZZ Assemble matrix | 1 1.677206 1.677206 ZZZ Assemble vector | 1 0.215074 
0.215074 ZZZ Create Mesh | 1 68.736634 68.736634 ZZZ Create mesh entity permutations | 1 11.344573 11.344573 ZZZ FunctionSpace | 1 1.927721 1.927721 ZZZ Solve | 1 95.590745 95.590745 *** Number of Krylov iterations: 8 *** Solution norm: 44731.2 ************************************************************************************************************************ *** WIDEN YOUR WINDOW TO 120 CHARACTERS. Use 'enscript -r -fCourier9' to print this document *** ************************************************************************************************************************ ---------------------------------------------- PETSc Performance Summary: ---------------------------------------------- /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu/dolfinx-scaling-test on a named d16a17b025ea with 8 processors, by builduser Tue Oct 12 19:25:37 2021 Using Petsc Release Version 3.14.4, Feb 03, 2021 Max Max/Min Avg Total Time (sec): 1.876e+02 1.000 1.876e+02 Objects: 3.800e+01 1.000 3.800e+01 Flop: 4.036e+06 1.057 3.911e+06 3.129e+07 Flop/sec: 2.152e+04 1.057 2.085e+04 1.668e+05 MPI Messages: 1.140e+02 2.000 8.075e+01 6.460e+02 MPI Message Lengths: 6.037e+05 1.498 5.978e+03 3.862e+06 MPI Reductions: 6.900e+01 1.000 Flop counting convention: 1 flop = 1 real number operation of type (multiply/divide/add/subtract) e.g., VecAXPY() for real vectors of length N --> 2N flop and VecAXPY() for complex vectors of length N --> 8N flop Summary of Stages: ----- Time ------ ----- Flop ------ --- Messages --- -- Message Lengths -- -- Reductions -- Avg %Total Avg %Total Count %Total Avg %Total Count %Total 0: Main Stage: 1.8756e+02 100.0% 3.1288e+07 100.0% 6.460e+02 100.0% 5.978e+03 100.0% 6.200e+01 89.9% ------------------------------------------------------------------------------------------------------------------------ See the 'Profiling' chapter of the users' manual for details on interpreting output. Phase summary info: Count: number of times phase was executed Time and Flop: Max - maximum over all processors Ratio - ratio of maximum to minimum over all processors Mess: number of messages sent AvgLen: average message length (bytes) Reduct: number of global reductions Global: entire computation Stage: stages of a computation. Set stages with PetscLogStagePush() and PetscLogStagePop(). 
%T - percent time in this phase %F - percent flop in this phase %M - percent messages in this phase %L - percent message lengths in this phase %R - percent reductions in this phase Total Mflop/s: 10e-6 * (sum of flop over all processors)/(max time over all processors) ------------------------------------------------------------------------------------------------------------------------ Event Count Time (sec) Flop --- Global --- --- Stage ---- Total Max Ratio Max Ratio Max Ratio Mess AvgLen Reduct %T %F %M %L %R %T %F %M %L %R Mflop/s ------------------------------------------------------------------------------------------------------------------------ --- Event Stage 0: Main Stage BuildTwoSided 4 1.0 9.6311e-01 1.8 0.00e+00 0.0 1.4e+02 4.0e+00 4.0e+00 0 0 21 0 6 0 0 21 0 6 0 BuildTwoSidedF 1 1.0 5.0880e-01 5.1 0.00e+00 0.0 6.8e+01 1.6e+04 1.0e+00 0 0 11 28 1 0 0 11 28 2 0 MatMult 8 1.0 6.2048e-01 1.6 2.28e+06 1.1 2.7e+02 4.1e+03 0.0e+00 0 57 42 29 0 0 57 42 29 0 29 MatConvert 1 1.0 1.3997e+00 1.1 0.00e+00 0.0 0.0e+00 0.0e+00 4.0e+00 1 0 0 0 6 1 0 0 0 6 0 MatAssemblyBegin 2 1.0 5.2431e-01 3.2 0.00e+00 0.0 6.8e+01 1.6e+04 1.0e+00 0 0 11 28 1 0 0 11 28 2 0 MatAssemblyEnd 2 1.0 1.5486e+00 1.1 7.64e+03 1.9 6.8e+01 1.0e+03 9.0e+00 1 0 11 2 13 1 0 11 2 15 0 MatGetRowIJ 2 1.0 1.9240e-06 1.4 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 MatZeroEntries 1 1.0 1.0871e-03 8.0 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 MatView 1 1.0 9.9997e-02 4.1 0.00e+00 0.0 0.0e+00 0.0e+00 1.0e+00 0 0 0 0 1 0 0 0 0 2 0 VecTDot 16 1.0 1.4319e+00 1.3 3.31e+05 1.1 0.0e+00 0.0e+00 1.6e+01 1 8 0 0 23 1 8 0 0 26 2 VecNorm 10 1.0 1.0341e+00 2.1 2.07e+05 1.1 0.0e+00 0.0e+00 1.0e+01 0 5 0 0 14 0 5 0 0 16 2 VecCopy 2 1.0 1.5253e-04 1.6 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 VecSet 12 1.0 1.7750e-04 1.5 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 VecAXPY 16 1.0 4.6972e-04 1.2 3.31e+05 1.1 0.0e+00 0.0e+00 0.0e+00 0 8 0 0 0 0 8 0 0 0 5416 VecAYPX 7 1.0 4.2163e-04 2.2 1.45e+05 1.1 0.0e+00 0.0e+00 0.0e+00 0 4 0 0 0 0 4 0 0 0 2640 VecScatterBegin 11 1.0 1.1740e-03 2.3 0.00e+00 0.0 3.7e+02 3.4e+03 0.0e+00 0 0 58 33 0 0 0 58 33 0 0 VecScatterEnd 11 1.0 8.2173e-01 2.1 1.13e+03 1.9 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 SFSetGraph 3 1.0 2.0550e-05 2.0 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 SFSetUp 3 1.0 7.4830e-01 1.4 0.00e+00 0.0 2.0e+02 6.0e+02 3.0e+00 0 0 32 3 4 0 0 32 3 5 0 SFBcastOpBegin 10 1.0 1.0711e-03 2.7 0.00e+00 0.0 3.4e+02 3.6e+03 0.0e+00 0 0 53 32 0 0 0 53 32 0 0 SFBcastOpEnd 10 1.0 7.2182e-01 2.0 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 SFReduceBegin 1 1.0 4.4780e-05 2.3 0.00e+00 0.0 3.4e+01 1.5e+03 0.0e+00 0 0 5 1 0 0 0 5 1 0 0 SFReduceEnd 1 1.0 1.0106e-01 6.1 1.13e+03 1.9 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 SFPack 11 1.0 4.6200e-04 7.1 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 SFUnpack 11 1.0 4.9278e-05 2.0 1.13e+03 1.9 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 131 KSPSetUp 1 1.0 1.5692e-04 1.7 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 KSPSolve 1 1.0 9.4980e+01 1.0 3.28e+06 1.1 2.7e+02 4.1e+03 2.9e+01 51 81 42 29 42 51 81 42 29 47 0 PCSetUp 1 1.0 4.9871e+01 1.0 0.00e+00 0.0 0.0e+00 0.0e+00 4.0e+00 27 0 0 0 6 27 0 0 0 6 0 PCApply 9 1.0 4.2741e+01 1.0 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 23 0 0 0 0 23 0 0 0 0 0 ------------------------------------------------------------------------------------------------------------------------ Memory usage is given in bytes: Object 
Type Creations Destructions Memory Descendants' Mem. Reports information only for process 0. --- Event Stage 0: Main Stage Matrix 4 4 2112504 0. Index Set 6 6 17080 0. IS L to G Mapping 4 4 175200 0. Vec Scatter 3 3 2448 0. Vector 14 14 373072 0. Star Forest Graph 3 3 3456 0. Krylov Solver 1 1 1488 0. Preconditioner 1 1 1456 0. Viewer 2 1 848 0. ======================================================================================================================== Average time to get PetscTime(): 4.95e-08 Average time for MPI_Barrier(): 0.0984164 Average time for zero size MPI_Send(): 0.0245981 #PETSc Option Table entries: --ndofs 10000 --problem_type poisson --scaling_type weak -ksp_rtol 1.0e-8 -ksp_type cg -ksp_view -log_view -options_left -pc_hypre_boomeramg_strong_threshold 0.5 -pc_hypre_type boomeramg -pc_type hypre #End of PETSc Option Table entries Compiled without FORTRAN kernels Compiled with full precision matrices (default) sizeof(short) 2 sizeof(int) 4 sizeof(long) 8 sizeof(void*) 8 sizeof(PetscScalar) 8 sizeof(PetscInt) 4 Configure options: --build=x86_64-linux-gnu --prefix=/usr --includedir=${prefix}/include --mandir=${prefix}/share/man --infodir=${prefix}/share/info --sysconfdir=/etc --localstatedir=/var --with-option-checking=0 --with-silent-rules=0 --libdir=${prefix}/lib/x86_64-linux-gnu --runstatedir=/run --with-maintainer-mode=0 --with-dependency-tracking=0 --with-debugging=0 --shared-library-extension=_real --with-shared-libraries --with-pic=1 --with-cc=mpicc --with-cxx=mpicxx --with-fc=mpif90 --with-cxx-dialect=C++11 --with-opencl=1 --with-blas-lib=-lblas --with-lapack-lib=-llapack --with-scalapack=1 --with-scalapack-lib=-lscalapack-openmpi --with-ptscotch=1 --with-ptscotch-include=/usr/include/scotch --with-ptscotch-lib="-lptesmumps -lptscotch -lptscotcherr" --with-fftw=1 --with-fftw-include="[]" --with-fftw-lib="-lfftw3 -lfftw3_mpi" --with-superlu_dist=1 --with-superlu_dist-include=/usr/include/superlu-dist --with-superlu_dist-lib=-lsuperlu_dist --with-hdf5-include=/usr/include/hdf5/openmpi --with-hdf5-lib="-L/usr/lib/x86_64-linux-gnu/hdf5/openmpi -L/usr/lib/x86_64-linux-gnu/openmpi/lib -lhdf5 -lmpi" --CXX_LINKER_FLAGS=-Wl,--no-as-needed --with-hypre=1 --with-hypre-include=/usr/include/hypre --with-hypre-lib=-lHYPRE_core --with-mumps=1 --with-mumps-include="[]" --with-mumps-lib="-ldmumps -lzmumps -lsmumps -lcmumps -lmumps_common -lpord" --with-suitesparse=1 --with-suitesparse-include=/usr/include/suitesparse --with-suitesparse-lib="-lumfpack -lamd -lcholmod -lklu" --with-superlu=1 --with-superlu-include=/usr/include/superlu --with-superlu-lib=-lsuperlu --prefix=/usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real --PETSC_ARCH=x86_64-linux-gnu-real CFLAGS="-g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -Wformat -Werror=format-security -fPIC" CXXFLAGS="-g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -Wformat -Werror=format-security -fPIC" FCFLAGS="-g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -fPIC -ffree-line-length-0" FFLAGS="-g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. 
-fstack-protector-strong -fPIC -ffree-line-length-0" CPPFLAGS="-Wdate-time -D_FORTIFY_SOURCE=2" LDFLAGS="-Wl,-z,relro -fPIC" MAKEFLAGS=w ----------------------------------------- Libraries compiled on 2021-02-11 15:51:02 on reproducible Machine characteristics: Linux-4.19.0-14-amd64-x86_64-with-glibc2.31 Using PETSc directory: /usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real Using PETSc arch: ----------------------------------------- Using C compiler: mpicc -g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -Wformat -Werror=format-security -fPIC -Wdate-time -D_FORTIFY_SOURCE=2 Using Fortran compiler: mpif90 -g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -fPIC -ffree-line-length-0 -Wdate-time -D_FORTIFY_SOURCE=2 ----------------------------------------- Using include paths: -I/usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/include -I/usr/include/hypre -I/usr/include/suitesparse -I/usr/include/superlu -I/usr/include/superlu-dist -I/usr/include/scotch -I/usr/include/hdf5/openmpi ----------------------------------------- Using C linker: mpicc Using Fortran linker: mpif90 Using libraries: -L/usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/lib -L/usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/lib -lpetsc_real -L/usr/lib/x86_64-linux-gnu/hdf5/openmpi -L/usr/lib/x86_64-linux-gnu/openmpi/lib -L/usr/lib/x86_64-linux-gnu/openmpi/lib/fortran/gfortran -L/usr/lib/gcc/x86_64-linux-gnu/10 -L/usr/lib/x86_64-linux-gnu -L/lib/x86_64-linux-gnu -lHYPRE_core -ldmumps -lzmumps -lsmumps -lcmumps -lmumps_common -lpord -lscalapack-openmpi -lumfpack -lamd -lcholmod -lklu -lsuperlu -lsuperlu_dist -lfftw3 -lfftw3_mpi -llapack -lblas -lptesmumps -lptscotch -lptscotcherr -lhdf5 -lmpi -lm -lOpenCL -lstdc++ -ldl -lmpi_usempif08 -lmpi_usempi_ignore_tkr -lmpi_mpifh -lmpi -lopen-rte -lopen-pal -lhwloc -levent_core -levent_pthreads -lutil -lgfortran -lm -lrt -lz -lgfortran -lm -lgfortran -lgcc_s -lquadmath -lpthread -lquadmath -lstdc++ -ldl ----------------------------------------- #PETSc Option Table entries: --ndofs 10000 --problem_type poisson --scaling_type weak -ksp_rtol 1.0e-8 -ksp_type cg -ksp_view -log_view -options_left -pc_hypre_boomeramg_strong_threshold 0.5 -pc_hypre_type boomeramg -pc_type hypre #End of PETSc Option Table entries WARNING! There are options you set that were not used! WARNING! could be spelling mistake, etc! There are 3 unused database options. They are: Option left: name:--ndofs value: 10000 Option left: name:--problem_type value: poisson Option left: name:--scaling_type value: weak ==== poisson strong scaling (8 processes with ndofs=80000) ==== UnitCube (42x42x42) to be refined 0 times ---------------------------------------------------------------- Test problem summary dolfinx version: 2019.2.9.99 dolfinx hash: unknown ufl hash: ac78d56bf5c7fcc5585cc7f0654188e75f1c73b1 petsc version: Petsc Release Version 3.14.4, Feb 03, 2021 Problem type: poisson Scaling type: strong Num processes: 8 Num cells 444528 Total degrees of freedom: 79507 Average degrees of freedom per process: 9938 ---------------------------------------------------------------- KSP Object: 8 MPI processes type: cg maximum iterations=10000, initial guess is zero tolerances: relative=1e-08, absolute=1e-50, divergence=10000. 
left preconditioning using PRECONDITIONED norm type for convergence test PC Object: 8 MPI processes type: hypre HYPRE BoomerAMG preconditioning Cycle type V Maximum number of levels 25 Maximum number of iterations PER hypre call 1 Convergence tolerance PER hypre call 0. Threshold for strong coupling 0.5 Interpolation truncation factor 0. Interpolation: max elements per row 0 Number of levels of aggressive coarsening 0 Number of paths for aggressive coarsening 1 Maximum row sums 0.9 Sweeps down 1 Sweeps up 1 Sweeps on coarse 1 Relax down symmetric-SOR/Jacobi Relax up symmetric-SOR/Jacobi Relax on coarse Gaussian-elimination Relax weight (all) 1. Outer relax weight (all) 1. Using CF-relaxation Not using more complex smoothers. Measure type local Coarsen type Falgout Interpolation type classical linear system matrix = precond matrix: Mat Object: 8 MPI processes type: mpiaij rows=79507, cols=79507 total: nonzeros=1148743, allocated nonzeros=1148743 total number of mallocs used during MatSetValues calls=0 not using I-node (on process 0) routines [MPI_AVG] Summary of timings | reps wall avg wall tot ---------------------------------------------------------------------------------------------- Build BoxMesh | 1 68.711347 68.711347 Build dofmap data | 2 1.757802 3.515605 Build sparsity | 1 0.754422 0.754422 Compute SCOTCH graph re-ordering | 2 0.061292 0.122583 Compute connectivity 2-3 | 1 0.028930 0.028930 Compute dof reordering map | 2 0.245356 0.490712 Compute entities of dim = 1 | 1 6.455093 6.455093 Compute entities of dim = 2 | 1 6.216170 6.216170 Compute graph partition (SCOTCH) | 1 59.855264 59.855264 Compute local part of mesh dual graph | 1 0.747464 0.747464 Compute local-to-local map | 1 0.007737 0.007737 Compute non-local part of mesh dual graph | 1 0.571804 0.571804 Compute-local-to-global links for global/local adjacency list | 1 0.001145 0.001145 Distribute in graph creation AdjacencyList | 1 0.332034 0.332034 Extract partition boundaries from SCOTCH graph | 1 0.003170 0.003170 Fetch float data from remote processes | 1 0.342892 0.342892 Get SCOTCH graph data | 1 0.000015 0.000015 Init dofmap from element dofmap | 2 0.140414 0.280827 PETSc Krylov solver | 1 102.765000 102.765000 SCOTCH: call SCOTCH_dgraphBuild | 1 0.153417 0.153417 SCOTCH: call SCOTCH_dgraphHalo | 1 0.119891 0.119891 SCOTCH: call SCOTCH_dgraphPart | 1 59.516269 59.516269 SCOTCH: call SCOTCH_graphBuild | 2 0.009311 0.018622 SCOTCH: call SCOTCH_graphOrder | 2 0.029326 0.058653 SparsityPattern::assemble | 1 1.775319 1.775319 TOPOLOGY: Create sets | 1 0.702018 0.702018 ZZZ Assemble | 1 10.205440 10.205440 ZZZ Assemble matrix | 1 1.586537 1.586537 ZZZ Assemble vector | 1 0.235179 0.235179 ZZZ Create Mesh | 1 68.739316 68.739316 ZZZ Create mesh entity permutations | 1 13.020256 13.020256 ZZZ FunctionSpace | 1 1.911786 1.911786 ZZZ Solve | 1 103.438709 103.438709 *** Number of Krylov iterations: 8 *** Solution norm: 47944.7 ************************************************************************************************************************ *** WIDEN YOUR WINDOW TO 120 CHARACTERS. 
Use 'enscript -r -fCourier9' to print this document *** ************************************************************************************************************************ ---------------------------------------------- PETSc Performance Summary: ---------------------------------------------- /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu/dolfinx-scaling-test on a named d16a17b025ea with 8 processors, by builduser Tue Oct 12 19:31:10 2021 Using Petsc Release Version 3.14.4, Feb 03, 2021 Max Max/Min Avg Total Time (sec): 1.985e+02 1.001 1.984e+02 Objects: 3.800e+01 1.000 3.800e+01 Flop: 4.013e+06 1.056 3.911e+06 3.129e+07 Flop/sec: 2.023e+04 1.057 1.971e+04 1.577e+05 MPI Messages: 1.330e+02 2.333 9.025e+01 7.220e+02 MPI Message Lengths: 6.021e+05 1.456 5.385e+03 3.888e+06 MPI Reductions: 6.900e+01 1.000 Flop counting convention: 1 flop = 1 real number operation of type (multiply/divide/add/subtract) e.g., VecAXPY() for real vectors of length N --> 2N flop and VecAXPY() for complex vectors of length N --> 8N flop Summary of Stages: ----- Time ------ ----- Flop ------ --- Messages --- -- Message Lengths -- -- Reductions -- Avg %Total Avg %Total Count %Total Avg %Total Count %Total 0: Main Stage: 1.9839e+02 100.0% 3.1287e+07 100.0% 7.220e+02 100.0% 5.385e+03 100.0% 6.200e+01 89.9% ------------------------------------------------------------------------------------------------------------------------ See the 'Profiling' chapter of the users' manual for details on interpreting output. Phase summary info: Count: number of times phase was executed Time and Flop: Max - maximum over all processors Ratio - ratio of maximum to minimum over all processors Mess: number of messages sent AvgLen: average message length (bytes) Reduct: number of global reductions Global: entire computation Stage: stages of a computation. Set stages with PetscLogStagePush() and PetscLogStagePop(). 
%T - percent time in this phase %F - percent flop in this phase %M - percent messages in this phase %L - percent message lengths in this phase %R - percent reductions in this phase Total Mflop/s: 10e-6 * (sum of flop over all processors)/(max time over all processors) ------------------------------------------------------------------------------------------------------------------------ Event Count Time (sec) Flop --- Global --- --- Stage ---- Total Max Ratio Max Ratio Max Ratio Mess AvgLen Reduct %T %F %M %L %R %T %F %M %L %R Mflop/s ------------------------------------------------------------------------------------------------------------------------ --- Event Stage 0: Main Stage BuildTwoSided 4 1.0 7.7667e-01 1.6 0.00e+00 0.0 1.5e+02 4.0e+00 4.0e+00 0 0 21 0 6 0 0 21 0 6 0 BuildTwoSidedF 1 1.0 3.0595e-0111.7 0.00e+00 0.0 7.6e+01 1.4e+04 1.0e+00 0 0 11 28 1 0 0 11 28 2 0 MatMult 8 1.0 7.6693e-01 2.1 2.27e+06 1.1 3.0e+02 3.7e+03 0.0e+00 0 57 42 29 0 0 57 42 29 0 23 MatConvert 1 1.0 1.4038e+00 1.1 0.00e+00 0.0 0.0e+00 0.0e+00 4.0e+00 1 0 0 0 6 1 0 0 0 6 0 MatAssemblyBegin 2 1.0 3.1778e-01 2.7 0.00e+00 0.0 7.6e+01 1.4e+04 1.0e+00 0 0 11 28 1 0 0 11 28 2 0 MatAssemblyEnd 2 1.0 1.5073e+00 1.2 7.63e+03 1.9 7.6e+01 9.3e+02 9.0e+00 1 0 11 2 13 1 0 11 2 15 0 MatGetRowIJ 2 1.0 6.4780e-06 5.1 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 MatZeroEntries 1 1.0 5.1914e-04 3.7 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 MatView 1 1.0 6.4108e-02244.6 0.00e+00 0.0 0.0e+00 0.0e+00 1.0e+00 0 0 0 0 1 0 0 0 0 2 0 VecTDot 16 1.0 1.5049e+00 1.9 3.28e+05 1.1 0.0e+00 0.0e+00 1.6e+01 1 8 0 0 23 1 8 0 0 26 2 VecNorm 10 1.0 1.2791e+00 4.1 2.05e+05 1.1 0.0e+00 0.0e+00 1.0e+01 0 5 0 0 14 0 5 0 0 16 1 VecCopy 2 1.0 1.6746e-04 2.5 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 VecSet 12 1.0 1.8259e-04 1.5 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 VecAXPY 16 1.0 5.3186e-04 1.4 3.28e+05 1.1 0.0e+00 0.0e+00 0.0e+00 0 8 0 0 0 0 8 0 0 0 4784 VecAYPX 7 1.0 2.5693e-04 1.3 1.43e+05 1.1 0.0e+00 0.0e+00 0.0e+00 0 4 0 0 0 0 4 0 0 0 4332 VecScatterBegin 11 1.0 4.5773e-03 9.4 0.00e+00 0.0 4.2e+02 3.1e+03 0.0e+00 0 0 58 33 0 0 0 58 33 0 0 VecScatterEnd 11 1.0 1.0550e+00 2.1 1.13e+03 1.8 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 SFSetGraph 3 1.0 2.1198e-05 1.7 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 SFSetUp 3 1.0 6.6301e-01 1.5 0.00e+00 0.0 2.3e+02 5.4e+02 3.0e+00 0 0 32 3 4 0 0 32 3 5 0 SFBcastOpBegin 10 1.0 4.4720e-0311.4 0.00e+00 0.0 3.8e+02 3.3e+03 0.0e+00 0 0 53 32 0 0 0 53 32 0 0 SFBcastOpEnd 10 1.0 9.5907e-01 2.2 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 SFReduceBegin 1 1.0 3.9897e-05 1.8 0.00e+00 0.0 3.8e+01 1.4e+03 0.0e+00 0 0 5 1 0 0 0 5 1 0 0 SFReduceEnd 1 1.0 1.2842e-01 3.1 1.13e+03 1.8 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 SFPack 11 1.0 1.2958e-04 1.7 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 SFUnpack 11 1.0 3.0587e-03114.3 1.13e+03 1.8 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 2 KSPSetUp 1 1.0 1.7657e-04 2.4 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 0 0 0 0 0 0 0 0 0 0 0 KSPSolve 1 1.0 1.0270e+02 1.0 3.25e+06 1.1 3.0e+02 3.7e+03 2.9e+01 52 81 42 29 42 52 81 42 29 47 0 PCSetUp 1 1.0 5.5343e+01 1.0 0.00e+00 0.0 0.0e+00 0.0e+00 4.0e+00 28 0 0 0 6 28 0 0 0 6 0 PCApply 9 1.0 4.5331e+01 1.0 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00 23 0 0 0 0 23 0 0 0 0 0 ------------------------------------------------------------------------------------------------------------------------ Memory usage is given in bytes: Object 
Type Creations Destructions Memory Descendants' Mem. Reports information only for process 0. --- Event Stage 0: Main Stage Matrix 4 4 2087196 0. Index Set 6 6 26596 0. IS L to G Mapping 4 4 182028 0. Vec Scatter 3 3 2448 0. Vector 14 14 390744 0. Star Forest Graph 3 3 3456 0. Krylov Solver 1 1 1488 0. Preconditioner 1 1 1456 0. Viewer 2 1 848 0. ======================================================================================================================== Average time to get PetscTime(): 8.08e-08 Average time for MPI_Barrier(): 0.0975038 Average time for zero size MPI_Send(): 2.42211e-05 #PETSc Option Table entries: --ndofs 80000 --problem_type poisson --scaling_type strong -ksp_rtol 1.0e-8 -ksp_type cg -ksp_view -log_view -options_left -pc_hypre_boomeramg_strong_threshold 0.5 -pc_hypre_type boomeramg -pc_type hypre #End of PETSc Option Table entries Compiled without FORTRAN kernels Compiled with full precision matrices (default) sizeof(short) 2 sizeof(int) 4 sizeof(long) 8 sizeof(void*) 8 sizeof(PetscScalar) 8 sizeof(PetscInt) 4 Configure options: --build=x86_64-linux-gnu --prefix=/usr --includedir=${prefix}/include --mandir=${prefix}/share/man --infodir=${prefix}/share/info --sysconfdir=/etc --localstatedir=/var --with-option-checking=0 --with-silent-rules=0 --libdir=${prefix}/lib/x86_64-linux-gnu --runstatedir=/run --with-maintainer-mode=0 --with-dependency-tracking=0 --with-debugging=0 --shared-library-extension=_real --with-shared-libraries --with-pic=1 --with-cc=mpicc --with-cxx=mpicxx --with-fc=mpif90 --with-cxx-dialect=C++11 --with-opencl=1 --with-blas-lib=-lblas --with-lapack-lib=-llapack --with-scalapack=1 --with-scalapack-lib=-lscalapack-openmpi --with-ptscotch=1 --with-ptscotch-include=/usr/include/scotch --with-ptscotch-lib="-lptesmumps -lptscotch -lptscotcherr" --with-fftw=1 --with-fftw-include="[]" --with-fftw-lib="-lfftw3 -lfftw3_mpi" --with-superlu_dist=1 --with-superlu_dist-include=/usr/include/superlu-dist --with-superlu_dist-lib=-lsuperlu_dist --with-hdf5-include=/usr/include/hdf5/openmpi --with-hdf5-lib="-L/usr/lib/x86_64-linux-gnu/hdf5/openmpi -L/usr/lib/x86_64-linux-gnu/openmpi/lib -lhdf5 -lmpi" --CXX_LINKER_FLAGS=-Wl,--no-as-needed --with-hypre=1 --with-hypre-include=/usr/include/hypre --with-hypre-lib=-lHYPRE_core --with-mumps=1 --with-mumps-include="[]" --with-mumps-lib="-ldmumps -lzmumps -lsmumps -lcmumps -lmumps_common -lpord" --with-suitesparse=1 --with-suitesparse-include=/usr/include/suitesparse --with-suitesparse-lib="-lumfpack -lamd -lcholmod -lklu" --with-superlu=1 --with-superlu-include=/usr/include/superlu --with-superlu-lib=-lsuperlu --prefix=/usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real --PETSC_ARCH=x86_64-linux-gnu-real CFLAGS="-g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -Wformat -Werror=format-security -fPIC" CXXFLAGS="-g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -Wformat -Werror=format-security -fPIC" FCFLAGS="-g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -fPIC -ffree-line-length-0" FFLAGS="-g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. 
-fstack-protector-strong -fPIC -ffree-line-length-0" CPPFLAGS="-Wdate-time -D_FORTIFY_SOURCE=2" LDFLAGS="-Wl,-z,relro -fPIC" MAKEFLAGS=w ----------------------------------------- Libraries compiled on 2021-02-11 15:51:02 on reproducible Machine characteristics: Linux-4.19.0-14-amd64-x86_64-with-glibc2.31 Using PETSc directory: /usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real Using PETSc arch: ----------------------------------------- Using C compiler: mpicc -g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -Wformat -Werror=format-security -fPIC -Wdate-time -D_FORTIFY_SOURCE=2 Using Fortran compiler: mpif90 -g -O2 -ffile-prefix-map=/build/petsc-vu0cnm/petsc-3.14.4+dfsg1=. -fstack-protector-strong -fPIC -ffree-line-length-0 -Wdate-time -D_FORTIFY_SOURCE=2 ----------------------------------------- Using include paths: -I/usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/include -I/usr/include/hypre -I/usr/include/suitesparse -I/usr/include/superlu -I/usr/include/superlu-dist -I/usr/include/scotch -I/usr/include/hdf5/openmpi ----------------------------------------- Using C linker: mpicc Using Fortran linker: mpif90 Using libraries: -L/usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/lib -L/usr/lib/petscdir/petsc3.14/x86_64-linux-gnu-real/lib -lpetsc_real -L/usr/lib/x86_64-linux-gnu/hdf5/openmpi -L/usr/lib/x86_64-linux-gnu/openmpi/lib -L/usr/lib/x86_64-linux-gnu/openmpi/lib/fortran/gfortran -L/usr/lib/gcc/x86_64-linux-gnu/10 -L/usr/lib/x86_64-linux-gnu -L/lib/x86_64-linux-gnu -lHYPRE_core -ldmumps -lzmumps -lsmumps -lcmumps -lmumps_common -lpord -lscalapack-openmpi -lumfpack -lamd -lcholmod -lklu -lsuperlu -lsuperlu_dist -lfftw3 -lfftw3_mpi -llapack -lblas -lptesmumps -lptscotch -lptscotcherr -lhdf5 -lmpi -lm -lOpenCL -lstdc++ -ldl -lmpi_usempif08 -lmpi_usempi_ignore_tkr -lmpi_mpifh -lmpi -lopen-rte -lopen-pal -lhwloc -levent_core -levent_pthreads -lutil -lgfortran -lm -lrt -lz -lgfortran -lm -lgfortran -lgcc_s -lquadmath -lpthread -lquadmath -lstdc++ -ldl ----------------------------------------- #PETSc Option Table entries: --ndofs 80000 --problem_type poisson --scaling_type strong -ksp_rtol 1.0e-8 -ksp_type cg -ksp_view -log_view -options_left -pc_hypre_boomeramg_strong_threshold 0.5 -pc_hypre_type boomeramg -pc_type hypre #End of PETSc Option Table entries WARNING! There are options you set that were not used! WARNING! could be spelling mistake, etc! There are 3 unused database options. 
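For the Poisson runs the option table replaces GAMG with hypre BoomerAMG and raises the strong-coupling threshold to 0.5 (hypre's default is 0.25; larger values are commonly recommended for 3D problems), which the PC dump above reports as "Threshold for strong coupling 0.5". A minimal sketch of the equivalent programmatic setup, again assuming an assembled Mat A and Vecs b, x (solve_poisson_like is a made-up name, not the test's code):

    // Sketch only: CG preconditioned with hypre BoomerAMG, as in the
    // Poisson option table above.
    #include <petscksp.h>

    PetscErrorCode solve_poisson_like(Mat A, Vec b, Vec x)
    {
      KSP ksp;
      PC  pc;

      KSPCreate(PETSC_COMM_WORLD, &ksp);
      KSPSetOperators(ksp, A, A);
      KSPSetType(ksp, KSPCG);                        // -ksp_type cg
      KSPSetTolerances(ksp, 1.0e-8, PETSC_DEFAULT,   // -ksp_rtol 1.0e-8
                       PETSC_DEFAULT, PETSC_DEFAULT);
      KSPGetPC(ksp, &pc);
      PCSetType(pc, PCHYPRE);                        // -pc_type hypre
      PCHYPRESetType(pc, "boomeramg");               // -pc_hypre_type boomeramg
      PetscOptionsSetValue(NULL,
          "-pc_hypre_boomeramg_strong_threshold", "0.5");
      KSPSetFromOptions(ksp);  // applies the threshold option, -ksp_view, -log_view
      return KSPSolve(ksp, b, x);
    }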
#PETSc Option Table entries:
--ndofs 80000
--problem_type poisson
--scaling_type strong
-ksp_rtol 1.0e-8
-ksp_type cg
-ksp_view
-log_view
-options_left
-pc_hypre_boomeramg_strong_threshold 0.5
-pc_hypre_type boomeramg
-pc_type hypre
#End of PETSc Option Table entries
WARNING! There are options you set that were not used!
WARNING! could be spelling mistake, etc!
There are 3 unused database options. They are:
Option left: name:--ndofs value: 80000
Option left: name:--problem_type value: poisson
Option left: name:--scaling_type value: strong
make[1]: Leaving directory '/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac'
create-stamp debian/debhelper-build-stamp
dh_testroot -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_prep -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_auto_install -a -O--buildsystem=cmake -O--sourcedirectory=src
	cd obj-x86_64-linux-gnu && make -j4 install DESTDIR=/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/debian/tmp AM_UPDATE_INFO_DIR=no "INSTALL=install --strip-program=true"
make[1]: Entering directory '/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu'
/usr/bin/cmake -S/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/src -B/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu --check-build-system CMakeFiles/Makefile.cmake 0
make -f CMakeFiles/Makefile2 preinstall
make[2]: Entering directory '/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu'
make[2]: Nothing to be done for 'preinstall'.
make[2]: Leaving directory '/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu'
Install the project...
/usr/bin/cmake -P cmake_install.cmake
-- Install configuration: "Release"
-- Installing: /build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/debian/tmp/usr/bin/dolfinx-scaling-test
-- Set runtime path of "/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/debian/tmp/usr/bin/dolfinx-scaling-test" to ""
make[1]: Leaving directory '/build/fenicsx-performance-tests-494eAj/fenicsx-performance-tests-0.0-git20210119.80e82ac/obj-x86_64-linux-gnu'
dh_install -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_installdocs -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_installchangelogs -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_installman -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_installsystemduser -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_perl -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_link -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_strip_nondeterminism -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_compress -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_fixperms -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_missing -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_dwz -a -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_strip -a -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_makeshlibs -a -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_shlibdeps -a -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_installdeb -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_gencontrol -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_md5sums -a -O--buildsystem=cmake -O--sourcedirectory=src
dh_builddeb -a -O--buildsystem=cmake -O--sourcedirectory=src
dpkg-deb: building package 'fenicsx-performance-tests' in '../fenicsx-performance-tests_0.0~git20210119.80e82ac-1_amd64.deb'.
dpkg-deb: building package 'fenicsx-performance-tests-dbgsym' in '../fenicsx-performance-tests-dbgsym_0.0~git20210119.80e82ac-1_amd64.deb'.
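At this point the two binary packages have been assembled. As a quick, illustrative sanity check (not part of the original log), the rebuilt package could be inspected with dpkg-deb to confirm that the dolfinx-scaling-test binary was installed where the log above says it was:

# list control metadata and contents of the rebuilt package (illustrative)
dpkg-deb --info ../fenicsx-performance-tests_0.0~git20210119.80e82ac-1_amd64.deb
dpkg-deb --contents ../fenicsx-performance-tests_0.0~git20210119.80e82ac-1_amd64.deb | grep dolfinx-scaling-test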
dpkg-genbuildinfo --build=any
dpkg-genchanges --build=any >../fenicsx-performance-tests_0.0~git20210119.80e82ac-1_amd64.changes
dpkg-genchanges: info: binary-only arch-specific upload (source code and arch-indep packages not included)
dpkg-source --after-build .
dpkg-buildpackage: info: binary-only upload (no source included)
I: running special hook: sync-out /build/fenicsx-performance-tests-494eAj /tmp/fenicsx-performance-tests-0.0~git20210119.80e82ac-1szskzl93
I: cleaning package lists and apt cache...
I: creating tarball...
I: done
I: removing tempdir /tmp/mmdebstrap.mPaZJNMT34...
I: success in 1832.6895 seconds
md5: fenicsx-performance-tests-dbgsym_0.0~git20210119.80e82ac-1_amd64.deb: OK
md5: fenicsx-performance-tests_0.0~git20210119.80e82ac-1_amd64.deb: OK
sha1: fenicsx-performance-tests-dbgsym_0.0~git20210119.80e82ac-1_amd64.deb: OK
sha1: fenicsx-performance-tests_0.0~git20210119.80e82ac-1_amd64.deb: OK
sha256: fenicsx-performance-tests-dbgsym_0.0~git20210119.80e82ac-1_amd64.deb: OK
sha256: fenicsx-performance-tests_0.0~git20210119.80e82ac-1_amd64.deb: OK
Checksums: OK
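The trailing md5/sha1/sha256 "OK" lines indicate that the rebuilt artifacts match the checksums recorded for the original build. The comparison tooling itself is not shown in this log; a minimal manual spot-check with standard utilities could look like the sketch below, assuming the rebuilt .deb files and the input .buildinfo have been copied into the current directory:

# recompute digests of the rebuilt packages ... (illustrative, not part of this log)
sha256sum fenicsx-performance-tests_0.0~git20210119.80e82ac-1_amd64.deb \
          fenicsx-performance-tests-dbgsym_0.0~git20210119.80e82ac-1_amd64.deb
# ... and compare them against the Checksums-Sha256 field of the input buildinfo
grep -A6 '^Checksums-Sha256:' fenicsx-performance-tests_0.0~git20210119.80e82ac-1_amd64.buildinfo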