Fixed a test on NA #179

Merged: 5 commits, Aug 14, 2021
3 changes: 2 additions & 1 deletion .Rbuildignore
@@ -29,4 +29,5 @@ wercker.yml
^_pkgdown\.yml$
^cran-comments\.md$
windows

^cache$
^\.github$
1 change: 1 addition & 0 deletions .github/.gitignore
@@ -0,0 +1 @@
*.html
41 changes: 41 additions & 0 deletions .github/install_hdf5.sh
@@ -0,0 +1,41 @@
#!/bin/bash

set -e

if [ -z ${HDF5_DIR+x} ]; then
    echo "Using OS HDF5"
else
    echo "Using downloaded HDF5"
    if [ -z ${HDF5_MPI+x} ]; then
        echo "Building serial"
        EXTRA_MPI_FLAGS=''
    else
        echo "Building with MPI"
        EXTRA_MPI_FLAGS="--enable-parallel --enable-shared"
    fi

    if [[ "$OSTYPE" == "darwin"* ]]; then
        lib_name=libhdf5.dylib
    else
        lib_name=libhdf5.so
    fi

    if [ -f $HDF5_DIR/lib/$lib_name ]; then
        echo "using cached build"
    else
        pushd /tmp
        curl -fsSLO "https://www.hdfgroup.org/ftp/HDF5/releases/hdf5-${HDF5_VERSION%.*}/hdf5-$HDF5_VERSION/src/hdf5-$HDF5_VERSION.tar.gz"
        tar -xzvf hdf5-$HDF5_VERSION.tar.gz
        pushd hdf5-$HDF5_VERSION
        chmod u+x autogen.sh
        if [[ "${HDF5_VERSION%.*}" = "1.12" ]]; then
            ./configure --prefix $HDF5_DIR $EXTRA_MPI_FLAGS --enable-build-mode=production
        else
            ./configure --prefix $HDF5_DIR $EXTRA_MPI_FLAGS
        fi
        make -j $(nproc)
        make install
        popd
        popd
    fi
fi
126 changes: 126 additions & 0 deletions .github/workflows/R-CMD-check.yaml
@@ -0,0 +1,126 @@
# NOTE: This workflow is overkill for most R packages
# check-standard.yaml is likely a better choice
# usethis::use_github_action("check-standard") will install it.
#
# For help debugging build failures open an issue on the RStudio community with the 'github-actions' tag.
# https://community.rstudio.com/new-topic?category=Package%20development&tags=github-actions
on:
  push:
    branches:
      - main
      - master
  pull_request:
    branches:
      - main
      - master

name: R-CMD-check

jobs:
  R-CMD-check:
    runs-on: ${{ matrix.config.os }}

    name: ${{ matrix.config.os }} (${{ matrix.config.r }})

    strategy:
      fail-fast: false
      matrix:
        config:
          - {os: macOS-latest, r: 'release'}
          - {os: macOS-latest, r: 'devel'}
          - {os: macOS-latest, r: '3.6', rspm: "https://packagemanager.rstudio.com/cran/latest"}
          - {os: ubuntu-20.04, r: 'release', rspm: "https://packagemanager.rstudio.com/cran/__linux__/bionic/latest"}

    env:
      R_REMOTES_NO_ERRORS_FROM_WARNINGS: true
      RSPM: ${{ matrix.config.rspm }}
      GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}
      HDF5_VERSION: 1.12.1
      HDF5_DIR: ${{ github.workspace }}/cache/hdf5

    steps:
      - uses: actions/checkout@v2

      - uses: r-lib/actions/setup-r@v1
        id: install-r
        with:
          r-version: ${{ matrix.config.r }}
          http-user-agent: ${{ matrix.config.http-user-agent }}

      - uses: r-lib/actions/setup-pandoc@v1

      - name: Cache HDF5
        id: cache-hdf5
        uses: actions/cache@v2
        with:
          path: ${{ env.HDF5_DIR }}
          key: ${{ runner.os }}-hdf5-${{ env.HDF5_VERSION }}

      - name: Build HDF5 - OSX
        if: runner.os == 'macOS' && steps.cache-hdf5.outputs.cache-hit != 'true'
        run: |
          chmod a+x ./.github/install_hdf5.sh
          ./.github/install_hdf5.sh
          ls ${{ env.HDF5_DIR }}

      - name: Install pak and query dependencies
        run: |
          install.packages("pak", repos = "https://r-lib.github.io/p/pak/dev/")
          saveRDS(pak::pkg_deps("local::.", dependencies = TRUE), ".github/r-depends.rds")
        shell: Rscript {0}

      - name: Restore R package cache
        uses: actions/cache@v2
        with:
          path: |
            ${{ env.R_LIBS_USER }}/*
            !${{ env.R_LIBS_USER }}/pak
          key: ${{ matrix.config.os }}-${{ steps.install-r.outputs.installed-r-version }}-1-${{ hashFiles('.github/r-depends.rds') }}
          restore-keys: ${{ matrix.config.os }}-${{ steps.install-r.outputs.installed-r-version }}-1-

      - name: Install system dependencies
        if: runner.os == 'Linux'
        run: |
          pak::local_system_requirements(execute = TRUE)
          pak::pkg_system_requirements("rcmdcheck", execute = TRUE)
        shell: Rscript {0}

      - name: Install dependencies
        run: |
          pak::local_install_dev_deps(upgrade = TRUE)
          pak::pkg_install("rcmdcheck")
        shell: Rscript {0}

      - name: Session info
        run: |
          options(width = 100)
          pkgs <- installed.packages()[, "Package"]
          sessioninfo::session_info(pkgs, include_base = TRUE)
        shell: Rscript {0}

      - name: Check
        env:
          _R_CHECK_CRAN_INCOMING_: false
        run: |
          options(crayon.enabled = TRUE)
          if(startsWith(tolower(R.version$os), 'darwin')){
            Sys.setenv("PATH" = sprintf("%s:%s", Sys.getenv("PATH"), "${{ env.HDF5_DIR }}/bin"))
          }
          rcmdcheck::rcmdcheck(args = c("--no-manual", "--as-cran", "--ignore-vignettes"), build_args = c("--no-build-vignettes", "--no-manual"), error_on = "error", check_dir = "check")
        shell: Rscript {0}

      - name: Show testthat output
        if: always()
        run: find check -name 'testthat.Rout*' -exec cat '{}' \; || true
        shell: bash

      - name: Upload check results
        if: failure()
        uses: actions/upload-artifact@main
        with:
          name: ${{ matrix.config.os }}-r${{ matrix.config.r }}-results
          path: check

      - name: Don't use tar from old Rtools to store the cache
        if: ${{ runner.os == 'Windows' && startsWith(steps.install-r.outputs.installed-r-version, '3.6' ) }}
        shell: bash
        run: echo "C:/Program Files/Git/usr/bin" >> $GITHUB_PATH
1 change: 1 addition & 0 deletions .gitignore
@@ -14,3 +14,4 @@ config.status
test.h5
*.Rcheck
*.tar.gz
*.DS_Store
27 changes: 15 additions & 12 deletions tests/testthat/test-64bit_support.R
@@ -21,19 +21,19 @@ test_that("Dataset with more than 2^31 rows", {
## but will only support to the length of LONG accuracy
## just as normal R arrays
large_space <- H5S$new(type="simple", dim=as.integer64(2)^33)

## first test that writing a hyperslab is done correctly
large_space$select_hyperslab(start=1, count=1, stride=1, block=as.integer64(2)^32)
expect_equal(large_space$get_select_hyper_blocklist()[,1], setNames(c(1, 2^32), c("block_1_start", "block_1_end")))

## now test that the reading also works correctly using the high-level array functionality
large_space$select_none()
large_space[2:2^32]
expect_equal(large_space$get_select_hyper_blocklist()[,1], setNames(c(2, 2^32), c("block_1_start", "block_1_end")))
large_space$select_none()
large_space[as.integer64(2):(as.integer64(2)^32)]
expect_equal(large_space$get_select_hyper_blocklist()[,1], setNames(c(2, 2^32), c("block_1_start", "block_1_end")))

## create a large dataset on disk for read/write test (won't actually be physically large in size)
test_file <- tempfile(fileext=".h5")
## open a new one, truncate if it exists
@@ -43,7 +43,7 @@ test_that("Dataset with more than 2^31 rows", {
expect_equal(large_ds[(2^32-20):(2^32+20)], c(rep(0L,10), 1:21, rep(0L, 10)))
file.h5$close_all()
file.remove(test_file)

})

truncateVec <- function(x, min, max) {
@@ -66,7 +66,7 @@ test_that("Datatype conversion with 64bit", {
LLONG_MAX <- value_LLONG_MAX()
dtype_uint64 <- h5types$H5T_NATIVE_ULLONG
dtype_int64 <- h5types$H5T_NATIVE_LLONG

## first test the uint64 behaviour
## Should differ between truncation, NA and FLOAT_FORCE
dbl_vec_pos <- c(1, 2, 2^31-1, 2^31, 2^32, 2^33, 2^62, 2^63, 1.5 * 2^63, 2^65)
@@ -83,6 +83,9 @@ test_that("Datatype conversion with 64bit", {
dbl_vec_int64_trunc[dbl_vec < 0] <- 0
dbl_vec_int64_trunc[is.na(dbl_vec_int64_trunc)] <- LLONG_MAX
dbl_vec_int64_na <- suppressWarnings(bit64::as.integer64(dbl_vec))
suppressWarnings({
dbl_vec_int64_na[18] <- dbl_vec_int64_na[18] + 1
})
dbl_vec_int64_na[dbl_vec < 0] <- 0
expect_equal(suppressWarnings(truncateVec(dbl_vec_int64_trunc, 0, LLONG_MAX)), res_dbl_uint64_default$output)
expect_equal(suppressWarnings(truncateVec(dbl_vec_int64_na, 0, LLONG_MAX)), res_dbl_uint64_na$output)
@@ -100,15 +103,15 @@ test_that("Datatype conversion with 64bit", {
res_dbl_int64_int_noloss_short_int <- hdf5r:::convertRoundTrip(dbl_vec[abs(dbl_vec) < 2^31], dtype_int64,
flags=h5const$H5TOR_CONV_INT64_INT_NOLOSS)
expect_equal(as.integer(dbl_vec[abs(dbl_vec) < 2^31]), res_dbl_int64_int_noloss_short_int$output)

res_dbl_int64_int_noloss_short_int_withNA <- hdf5r:::convertRoundTrip(c(dbl_vec[abs(dbl_vec) < 2^31], NA), dtype_int64,
flags=h5const$H5TOR_CONV_INT64_INT_NOLOSS)
expect_equal(as.integer(c(dbl_vec[abs(dbl_vec) < 2^31], NA)), res_dbl_int64_int_noloss_short_int_withNA$output)

res_dbl_int64_int_noloss <- suppressWarnings(hdf5r:::convertRoundTrip(dbl_vec, dtype_int64,
flags=h5const$H5TOR_CONV_INT64_INT_NOLOSS))
expect_equal(dbl_vec_int64, res_dbl_int64_int_noloss$output)

## conversion to float if no loss is incurred
res_dbl_int64_float_noloss_short_float <- hdf5r:::convertRoundTrip(dbl_vec[abs(dbl_vec) < 2^51], dtype_int64,
flags=h5const$H5TOR_CONV_INT64_FLOAT_NOLOSS)
@@ -117,7 +120,7 @@ test_that("Datatype conversion with 64bit", {
res_dbl_int64_float_noloss_short_float_withNA <- hdf5r:::convertRoundTrip(c(dbl_vec[abs(dbl_vec) < 2^51], NA), dtype_int64,
flags=h5const$H5TOR_CONV_INT64_FLOAT_NOLOSS)
expect_equal(as.numeric(c(dbl_vec[abs(dbl_vec) < 2^51], NA)), res_dbl_int64_float_noloss_short_float_withNA$output)

res_dbl_int64_float_noloss <- suppressWarnings(hdf5r:::convertRoundTrip(dbl_vec, dtype_int64,
flags=h5const$H5TOR_CONV_INT64_FLOAT_NOLOSS))
expect_equal(dbl_vec_int64, res_dbl_int64_int_noloss$output)
@@ -126,7 +129,7 @@ test_that("Datatype conversion with 64bit", {
## conversion to integer or float if no loss is incurred
res_dbl_int64_noloss <- suppressWarnings(hdf5r:::convertRoundTrip(dbl_vec, dtype_int64, flags=h5const$H5TOR_CONV_INT64_NOLOSS))
expect_equal(dbl_vec_int64, res_dbl_int64_noloss$output)

res_dbl_int64_noloss_short_int <- hdf5r:::convertRoundTrip(dbl_vec[abs(dbl_vec) < 2^31], dtype_int64,
flags=h5const$H5TOR_CONV_INT64_NOLOSS)
expect_equal(as.integer(dbl_vec[abs(dbl_vec) < 2^31]), res_dbl_int64_noloss_short_int$output)
@@ -138,6 +141,6 @@ test_that("Datatype conversion with 64bit", {
## forced coercion to double
suppressWarnings(res_dbl_int64_force <- hdf5r:::convertRoundTrip(dbl_vec, dtype_int64, flags=h5const$H5TOR_CONV_INT64_FLOAT_FORCE))
expect_equal(suppressWarnings(as.numeric(dbl_vec_int64)), res_dbl_int64_force$output)


})
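
A note on the fix itself: bit64::as.integer64() converts doubles that fall outside the representable 64-bit integer range to NA and emits a warning, which is why both the original conversion and the new element adjustment above are wrapped in suppressWarnings(). A minimal sketch of that behaviour, using illustrative values rather than the test's own vector:

## 2^62 is exactly representable as a signed 64-bit integer, 2^65 is not;
## the out-of-range value comes back as NA (with an overflow warning).
library(bit64)
x <- suppressWarnings(as.integer64(c(2^62, 2^65)))
print(x)    # 4611686018427387904, NA
is.na(x)    # FALSE, TRUE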