diff options
author | Christopher Haster <chaster@utexas.edu> | 2021-01-05 12:12:39 +0300 |
---|---|---|
committer | Christopher Haster <chaster@utexas.edu> | 2021-01-10 22:20:11 +0300 |
commit | c9110617b3833a3020e7f13025f2055c549e1b08 (patch) | |
tree | 2190e713b4d7f148fa67c515eb86f51f02a596b8 /.github/workflows/test.yml | |
parent | 104d65113d4a73e4f38cc976e70a3afeb743d52a (diff) |
Added post-release script, cleaned up workflows
This addresses an outstanding maintainer annoyance: updating dependencies to
bring in new versions on each littlefs release.
But instead of adding a bunch of scripts to the tail end of the release
workflow, the post-release script just triggers a single
"repository_dispatch" event in the newly created littlefs.post-release
repo. From there any number of post-release workflows can be run.
This indirection should let the post-release scripts move much quicker
than littlefs itself, which helps offset how fragile these sorts of scripts
are.
---
Also finished cleaning up the workflows now that they are mostly
working.
Diffstat (limited to '.github/workflows/test.yml')
-rw-r--r-- | .github/workflows/test.yml | 176 |
1 file changed, 81 insertions, 95 deletions
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7966784..907224c 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -8,7 +8,7 @@ env: jobs: # run tests test: - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 strategy: fail-fast: false matrix: @@ -38,7 +38,7 @@ jobs: # cross-compile with ARM Thumb (32-bit, little-endian) - name: install-thumb - if: matrix.arch == 'thumb' + if: ${{matrix.arch == 'thumb'}} run: | sudo apt-get install -qq \ gcc-arm-linux-gnueabi \ @@ -50,7 +50,7 @@ jobs: qemu-arm -version # cross-compile with MIPS (32-bit, big-endian) - name: install-mips - if: matrix.arch == 'mips' + if: ${{matrix.arch == 'mips'}} run: | sudo apt-get install -qq \ gcc-mips-linux-gnu \ @@ -62,7 +62,7 @@ jobs: qemu-mips -version # cross-compile with PowerPC (32-bit, big-endian) - name: install-powerpc - if: matrix.arch == 'powerpc' + if: ${{matrix.arch == 'powerpc'}} run: | sudo apt-get install -qq \ gcc-powerpc-linux-gnu \ @@ -76,71 +76,71 @@ jobs: # make sure example can at least compile - name: test-example run: | - sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c && \ + sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c make all CFLAGS+=" \ -Duser_provided_block_device_read=NULL \ -Duser_provided_block_device_prog=NULL \ -Duser_provided_block_device_erase=NULL \ -Duser_provided_block_device_sync=NULL \ -include stdio.h" + rm test.c -# # test configurations -# # normal+reentrant tests -# - name: test-default -# run: | -# make clean -# make test TESTFLAGS+="-nrk" -# # NOR flash: read/prog = 1 block = 4KiB -# - name: test-nor -# run: | -# make clean -# make test TESTFLAGS+="-nrk \ -# -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" -# # SD/eMMC: read/prog = 512 block = 512 -# - name: test-emmc -# run: | -# make clean -# make test TESTFLAGS+="-nrk \ -# -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" -# # NAND flash: read/prog = 4KiB block = 32KiB -# - name: test-nand -# run: | -# make clean -# make test TESTFLAGS+="-nrk \ -# 
-DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" -# # other extreme geometries that are useful for various corner cases -# - name: test-no-intrinsics -# run: | -# make clean -# make test TESTFLAGS+="-nrk \ -# -DLFS_NO_INTRINSICS" -# - name: test-byte-writes -# # it just takes too long to test byte-level writes when in qemu, -# # should be plenty covered by the other configurations -# if: matrix.arch == 'x86_64' -# run: | -# make clean -# make test TESTFLAGS+="-nrk \ -# -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" -# - name: test-block-cycles -# run: | -# make clean -# make test TESTFLAGS+="-nrk \ -# -DLFS_BLOCK_CYCLES=1" -# - name: test-odd-block-count -# run: | -# make clean -# make test TESTFLAGS+="-nrk \ -# -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" -# - name: test-odd-block-size -# run: | -# make clean -# make test TESTFLAGS+="-nrk \ -# -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" + # test configurations + # normal+reentrant tests + - name: test-default + run: | + make clean + make test TESTFLAGS+="-nrk" + # NOR flash: read/prog = 1 block = 4KiB + - name: test-nor + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" + # SD/eMMC: read/prog = 512 block = 512 + - name: test-emmc + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" + # NAND flash: read/prog = 4KiB block = 32KiB + - name: test-nand + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" + # other extreme geometries that are useful for various corner cases + - name: test-no-intrinsics + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_NO_INTRINSICS" + - name: test-byte-writes + # it just takes too long to test byte-level writes when in qemu, + # should be plenty covered by the other configurations + if: ${{matrix.arch == 'x86_64'}} + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" + - name: test-block-cycles + run: | 
+ make clean + make test TESTFLAGS+="-nrk \ + -DLFS_BLOCK_CYCLES=1" + - name: test-odd-block-count + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" + - name: test-odd-block-size + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" - # upload coveragefor later coverage + # upload coverage for later coverage - name: upload-coverage - continue-on-error: true uses: actions/upload-artifact@v2 with: name: coverage @@ -149,7 +149,6 @@ jobs: # update results - name: results-code - continue-on-error: true run: | mkdir -p results make clean @@ -161,7 +160,6 @@ jobs: -DLFS_NO_ERROR" \ CODEFLAGS+="-o results/code-${{matrix.arch}}.csv" - name: results-code-readonly - continue-on-error: true run: | mkdir -p results make clean @@ -174,7 +172,6 @@ jobs: -DLFS_READONLY" \ CODEFLAGS+="-o results/code-${{matrix.arch}}-readonly.csv" - name: results-code-threadsafe - continue-on-error: true run: | mkdir -p results make clean @@ -187,7 +184,6 @@ jobs: -DLFS_THREADSAFE" \ CODEFLAGS+="-o results/code-${{matrix.arch}}-threadsafe.csv" - name: results-code-migrate - continue-on-error: true run: | mkdir -p results make clean @@ -200,7 +196,6 @@ jobs: -DLFS_MIGRATE" \ CODEFLAGS+="-o results/code-${{matrix.arch}}-migrate.csv" - name: upload-results - continue-on-error: true uses: actions/upload-artifact@v2 with: name: results @@ -208,28 +203,25 @@ jobs: # limit reporting to Thumb, otherwise there would be too many numbers # flying around for the results to be easily readable - name: collect-status - continue-on-error: true - if: matrix.arch == 'thumb' + if: ${{matrix.arch == 'thumb'}} run: | mkdir -p status - for f in results/code*.csv + for f in $(shopt -s nullglob ; echo results/code*.csv) do - [ -e "$f" ] || continue export STEP="results-code$( echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p')" export CONTEXT="results / code$( echo $f | sed -n 's/.*code-.*-\(.*\).csv/ (\1)/p')" export 
PREV="$(curl -sS \ "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \ - | jq -re "select(.sha != env.GITHUB_SHA) | .statuses[] + | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[] | select(.context == env.CONTEXT).description - | capture(\"Code size is (?<result>[0-9]+)\").result" \ + | capture("Code size is (?<result>[0-9]+)").result' \ || echo 0)" - echo $PREV export DESCRIPTION="$(./scripts/code.py -u $f -s | awk ' NR==2 {printf "Code size is %d B",$2} - NR==2 && ENVIRON["PREV"] != 0 { - printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/$2}')" + NR==2 && ENVIRON["PREV"]+0 != 0 { + printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}')" jq -n '{ state: "success", context: env.CONTEXT, @@ -240,8 +232,7 @@ jobs: echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p').json done - name: upload-status - continue-on-error: true - if: matrix.arch == 'thumb' + if: ${{matrix.arch == 'thumb'}} uses: actions/upload-artifact@v2 with: name: status @@ -250,7 +241,7 @@ jobs: # run under Valgrind to check for memory errors valgrind: - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 steps: - uses: actions/checkout@v2 - name: install @@ -264,14 +255,14 @@ jobs: sudo apt-get update -qq sudo apt-get install -qq valgrind valgrind --version -# # normal tests, we don't need to test all geometries -# - name: test-valgrind -# run: make test TESTFLAGS+="-k --valgrind" + # normal tests, we don't need to test all geometries + - name: test-valgrind + run: make test TESTFLAGS+="-k --valgrind" # self-host with littlefs-fuse for a fuzz-like test fuse: - runs-on: ubuntu-latest - if: "!endsWith(github.ref, '-prefix')" + runs-on: ubuntu-18.04 + if: ${{!endsWith(github.ref, '-prefix')}} steps: - uses: actions/checkout@v2 - name: install @@ -316,8 +307,8 @@ jobs: # test migration using littlefs-fuse migrate: - runs-on: ubuntu-latest - if: "!endsWith(github.ref, '-prefix')" + runs-on: ubuntu-18.04 + if: ${{!endsWith(github.ref, '-prefix')}} steps: - uses: actions/checkout@v2 - name: install 
@@ -383,9 +374,8 @@ jobs: # collect coverage info coverage: - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 needs: [test] - continue-on-error: true steps: - uses: actions/checkout@v2 - name: install @@ -393,7 +383,7 @@ jobs: sudo apt-get update -qq sudo apt-get install -qq python3 python3-pip lcov sudo pip3 install toml - # yes we continue-on-error on every step, continue-on-error + # yes we continue-on-error nearly every step, continue-on-error # at job level apparently still marks a job as failed, which isn't # what we want - uses: actions/download-artifact@v2 @@ -410,12 +400,10 @@ jobs: ./scripts/coverage.py results/coverage.info -o results/coverage.csv - name: upload-results uses: actions/upload-artifact@v2 - continue-on-error: true with: name: results path: results - name: collect-status - continue-on-error: true run: | mkdir -p status [ -e results/coverage.csv ] || exit 0 @@ -423,15 +411,14 @@ jobs: export CONTEXT="results / coverage" export PREV="$(curl -sS \ "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \ - | jq -re "select(.sha != env.GITHUB_SHA) | .statuses[] + | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[] | select(.context == env.CONTEXT).description - | capture(\"Coverage is (?<result>[0-9\\\\.]+)\").result" \ + | capture("Coverage is (?<result>[0-9\\.]+)").result' \ || echo 0)" export DESCRIPTION="$( - ./scripts/coverage.py -u results/coverage.csv -s \ - | awk -F '[ /%]+' ' + ./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' ' NR==2 {printf "Coverage is %.1f%% of %d lines",$4,$3} - NR==2 && ENVIRON["PREV"] != 0 { + NR==2 && ENVIRON["PREV"]+0 != 0 { printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')" jq -n '{ state: "success", @@ -442,7 +429,6 @@ jobs: | tee status/coverage.json - name: upload-status uses: actions/upload-artifact@v2 - continue-on-error: true with: name: status path: status |