From dba767169462cd40ad78590952674c41e221f41e Mon Sep 17 00:00:00 2001
From: Laura Abbott
Date: Wed, 6 Nov 2019 12:47:55 -0500
Subject: Better xz compression

Borrowed from downstream; this lets us be a little bit smarter about how
we compress modules.
---
 parallel_xz.sh | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)
 mode change 100644 => 100755 parallel_xz.sh

diff --git a/parallel_xz.sh b/parallel_xz.sh
old mode 100644
new mode 100755
index e69de29bb..bc08a548d
--- a/parallel_xz.sh
+++ b/parallel_xz.sh
@@ -0,0 +1,26 @@
+#!/bin/sh
+# Reads filenames on stdin, xz-compresses each in place.
+# Not optimal for "compress relatively few, large files" scenario!
+
+# How many xz's to run in parallel:
+procgroup=""
+while test "$#" != 0; do
+	# Get it from -jNUM
+	N="${1#-j}"
+	if test "$N" = "$1"; then
+		# Not -j - warn and ignore
+		echo "parallel_xz: warning: unrecognized argument: '$1'"
+	else
+		procgroup="$N"
+	fi
+	shift
+done
+
+# If no -j was given, or -j1 was, run xz invocations serially:
+test "$procgroup" || exec xargs -r xz
+test "$procgroup" = 1 && exec xargs -r xz
+
+# xz has some startup cost. If files are really small,
+# this cost might be significant. To combat this,
+# process several files (in sequence) by each xz process via -n 16:
+exec xargs -r -n 16 -P $procgroup xz
--
cgit
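
Usage note (editorial, not part of the patch): the script reads a list of
filenames on stdin and takes an optional -jNUM argument for the degree of
parallelism. A hypothetical invocation might look like the following; the
module path and the use of nproc are illustrative assumptions, not taken
from the patch:

	# Compress all kernel modules in place, one xz per available CPU.
	# The path and the -j value are assumptions for illustration only.
	find /lib/modules/"$(uname -r)" -type f -name '*.ko' -print \
		| sh ./parallel_xz.sh -j"$(nproc)"

With -j greater than 1, xargs starts up to NUM xz processes (-P) and hands
each one 16 filenames per invocation (-n 16), amortizing xz's startup cost
across many small module files. Note that xargs splits its input on
whitespace, so this relies on module paths containing no spaces, which
holds for kernel module trees.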