diff --git a/.github/workflows/build-accumulo.yml b/.github/workflows/build-accumulo.yml new file mode 100644 index 00000000000..c1e15ef3d43 --- /dev/null +++ b/.github/workflows/build-accumulo.yml @@ -0,0 +1,99 @@ +name: Build Accumulo snapshot and update DataWave to use + +on: + workflow_dispatch: + inputs: + accumuloBranch: + required: true + default: "2.1" + description: "Branch name to build. Will be used as image tag." + accumuloRepo: + required: true + default: "apache/accumulo" + description: "Accumulo Repo to use. Expected to be at Github. Example: apache/accumulo" + deployAccumulo: + required: true + default: "false" + description: "Set to false if this accumulo version has already been pushed to Github Packages" + +# Defines two custom environment variables for the workflow. These are used for the Container registry domain, and a name for the Docker image that this workflow builds. +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository.lowercase }} + ACCUMULO_JAVA_VERSION: '17' + DATAWAVE_JAVA_VERSION: '11' + JAVA_DISTRIBUTION: 'zulu' #This is the default on v1 of the action for 1.8 + USER_NAME: ${{ secrets.GHCR_WRITE_USER_NAME }} + ACCESS_TOKEN: ${{ secrets.GHCR_WRITE_ACCESS_TOKEN }} + +jobs: + build-and-deploy-accumulo: + runs-on: ubuntu-latest + # Sets the permissions granted to the `GITHUB_TOKEN` for the actions in this job. + permissions: + contents: read + packages: write + # + steps: + - name: Checkout DataWave + uses: actions/checkout@v4 + with: + path: datawave + + - name: Checkout Accumulo + uses: actions/checkout@v4 + id: accumuloCheckout + with: + repository: ${{ github.event.inputs.accumuloRepo }} + path: accumulo + ref: ${{ github.event.inputs.accumuloBranch }} + + - name: Set up JDK ${{env.ACCUMULO_JAVA_VERSION}} + uses: actions/setup-java@v4 + with: + distribution: ${{env.JAVA_DISTRIBUTION}} + java-version: ${{env.ACCUMULO_JAVA_VERSION}} + cache: 'maven' + - run: echo "ACCUMULO_JAVA=$JAVA_HOME" >> $GITHUB_ENV + - name: Set up JDK ${{env.DATAWAVE_JAVA_VERSION}} + uses: actions/setup-java@v4 + with: + distribution: ${{env.JAVA_DISTRIBUTION}} + java-version: ${{env.DATAWAVE_JAVA_VERSION}} + cache: 'maven' + - run: echo "DATAWAVE_JAVA=$JAVA_HOME" >> $GITHUB_ENV + + - name: Get Accumulo Version + id: get-accumulo-version + run: | + export JAVA_HOME="$ACCUMULO_JAVA" + cd "$GITHUB_WORKSPACE/accumulo" + mvn build-helper:parse-version versions:set -DgenerateBackupPoms=false -DnewVersion=\${parsedVersion.majorVersion}.\${parsedVersion.minorVersion}.\${parsedVersion.incrementalVersion}-dwv-$(git rev-parse --short HEAD) + export newVersion=$(mvn -q help:evaluate -DforceStdout -Dexpression=project.version) + echo accumuloVersion=$newVersion >> $GITHUB_OUTPUT + - name: Deploy Accumulo + if: ${{ github.event.inputs.deployAccumulo == 'true'}} + run: | + export JAVA_HOME="$ACCUMULO_JAVA" + cd "$GITHUB_WORKSPACE/accumulo" + mvn -DaltDeploymentRepository=github-datawave::https://maven.pkg.github.com/NationalSecurityAgency/datawave -V -B -e -ntp "-Dstyle.color=always" -DskipTests -T1C clean source:jar deploy -s "$GITHUB_WORKSPACE/datawave/.github/workflows/settings.xml" + - name: Log in to the Container registry + uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1 + with: + registry: ${{ env.REGISTRY }} + username: ${{ env.USER_NAME }} + password: ${{ env.ACCESS_TOKEN }} + + - name: Update DataWave Dependency Version + run: | + + export JAVA_HOME="$DATAWAVE_JAVA" + cd "$GITHUB_WORKSPACE/datawave" + mvn -s 
"$GITHUB_WORKSPACE/datawave/.github/workflows/settings.xml" versions:set-property -Dproperty=version.accumulo -DnewVersion=${{ steps.get-accumulo-version.outputs.accumuloVersion }} -DgenerateBackupPoms=false + - name: Build Web and Ingest Docker Images (Maven) + run: | + export JAVA_HOME="$DATAWAVE_JAVA" + cd "$GITHUB_WORKSPACE/datawave" + mvn -s "$GITHUB_WORKSPACE/datawave/.github/workflows/settings.xml" clean install -Prpm,kubernetes,assemble,deploy-ws -Ddist -Pdocker -DpushImage -Ddocker-release -DskipTests -Ddocker.image.accumulo.tag=${{ steps.get-accumulo-version.outputs.accumuloVersion }} + + diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml new file mode 100644 index 00000000000..33ff967db6d --- /dev/null +++ b/.github/workflows/build-images.yml @@ -0,0 +1,46 @@ +# +name: Create and publish a Docker image + +on: + push: + paths-ignore: ['*.md', 'CODEOWNERS', 'LICENSE'] + tags: + - "**" + branches: + - 'integration' + - 'release/version*' + - 'docker-images' + workflow_dispatch: + +# Defines two custom environment variables for the workflow. These are used for the Container registry domain, and a name for the Docker image that this workflow builds. +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository.lowercase }} + JAVA_VERSION: '11' + JAVA_DISTRIBUTION: 'zulu' #This is the default on v1 of the action for 1.8 + USER_NAME: ${{ secrets.GHCR_WRITE_USER_NAME }} + ACCESS_TOKEN: ${{ secrets.GHCR_WRITE_ACCESS_TOKEN }} + +jobs: + build-and-push-datawave-images: + runs-on: ubuntu-latest + steps: + - name: Log in to the Container registry + uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1 + with: + registry: ${{ env.REGISTRY }} + username: ${{ env.USER_NAME }} + password: ${{ env.ACCESS_TOKEN }} + - name: Checkout Code + uses: actions/checkout@v4 + - name: Set up JDK ${{env.JAVA_VERSION}} + uses: actions/setup-java@v4 + with: + distribution: ${{env.JAVA_DISTRIBUTION}} + java-version: ${{env.JAVA_VERSION}} + cache: 'maven' + - name: Build Web and Ingest Docker Images (Maven) + run: | + mvn -s $GITHUB_WORKSPACE/.github/workflows/settings.xml clean install -Prpm,kubernetes,assemble,deploy-ws -Ddist -Pdocker -DpushImage -Ddocker-release -DskipTests + + diff --git a/.github/workflows/microservice-build-image.yaml b/.github/workflows/microservice-build-image.yaml new file mode 100644 index 00000000000..fe9e199ce50 --- /dev/null +++ b/.github/workflows/microservice-build-image.yaml @@ -0,0 +1,41 @@ +# +name: Create and publish a Docker image + +on: + workflow_call: + secrets: + USER_NAME: + description: "User Name for maven pulls" + required: true + ACCESS_TOKEN: + description: "Access token for maven pulls" + required: true + + +jobs: + build-and-push-datawave-images: + runs-on: ubuntu-latest + steps: + - name: Log in to the Container registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ secrets.USER_NAME }} + password: ${{ secrets.ACCESS_TOKEN }} + - name: Checkout Code + uses: actions/checkout@v4 + - name: Set up JDK 11 + uses: actions/setup-java@v4 + with: + distribution: "zulu" + java-version: 11 + cache: 'maven' + - name: Build And Push Docker Image (Maven) + env: + MAVEN_OPTS: "-Dhttps.protocols=TLSv1.2 -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=WARN -Djava.awt.headless=true" + USER_NAME: ${{ secrets.USER_NAME }} + ACCESS_TOKEN: ${{ secrets.ACCESS_TOKEN }} + run: | + mvn -s $GITHUB_WORKSPACE/.github/workflows/settings.xml -V -B -e clean install -Pdocker,exec 
-Ddocker.image.prefix=ghcr.io/nationalsecurityagency/ -DpushImage + + diff --git a/.github/workflows/microservice-maven-tests.yaml b/.github/workflows/microservice-maven-tests.yaml new file mode 100644 index 00000000000..4f20d49c3b8 --- /dev/null +++ b/.github/workflows/microservice-maven-tests.yaml @@ -0,0 +1,72 @@ +name: Tests + +on: + workflow_call: + secrets: + USER_NAME: + description: "User Name for maven pulls" + required: true + ACCESS_TOKEN: + description: "Access token for maven pulls" + required: true + +env: + MAVEN_OPTS: "-Djansi.force=true -Dhttps.protocols=TLSv1.2 -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=WARN -Djava.awt.headless=true -XX:ThreadStackSize=1m" + +jobs: + # Runs the pom sorter and code formatter to ensure that the code + # is formatted and poms are sorted according to project rules. This + # will fail if the formatter makes any changes. + check-code-formatting: + runs-on: ubuntu-latest + steps: + - name: Checkout Code + uses: actions/checkout@v4 + - name: Set up JDK 11 + uses: actions/setup-java@v4 + with: + java-version: 11 + distribution: 'zulu' + - uses: actions/cache@v4 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-format-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven-format- + ${{ runner.os }}-maven- + - name: Format code + run: | + mvn -s $GITHUB_WORKSPACE/.github/workflows/settings.xml -V -B -e clean formatter:format sortpom:sort -Pautoformat + git status + git diff-index --quiet HEAD || (echo "Error! There are modified files after formatting." && false) + env: + MAVEN_OPTS: "-Dhttps.protocols=TLSv1.2 -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=WARN -Djava.awt.headless=true" + USER_NAME: ${{ secrets.USER_NAME }} + ACCESS_TOKEN: ${{ secrets.ACCESS_TOKEN }} + + # Build the code and run the unit/integration tests. 
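+  # Both jobs in this workflow restore ~/.m2/repository from the Actions cache, keyed on the
+  # hash of all pom.xml files; the restore-keys entries below fall back to progressively
+  # older caches when no exact key matches.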
+ build-and-test: + runs-on: ubuntu-latest + steps: + - name: Checkout Code + uses: actions/checkout@v4 + - name: Set up JDK 11 + uses: actions/setup-java@v4 + with: + java-version: 11 + distribution: 'zulu' + - uses: actions/cache@v4 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-build-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven-build- + ${{ runner.os }}-maven-format- + ${{ runner.os }}-maven- + - name: Build and Run Unit Tests + run: mvn -s $GITHUB_WORKSPACE/.github/workflows/settings.xml -V -B -e -Ddist clean verify + env: + MAVEN_OPTS: "-Dhttps.protocols=TLSv1.2 -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=WARN -Djava.awt.headless=true" + USER_NAME: ${{ secrets.USER_NAME }} + ACCESS_TOKEN: ${{ secrets.ACCESS_TOKEN }} + diff --git a/.github/workflows/settings.xml b/.github/workflows/settings.xml new file mode 100644 index 00000000000..d8be2eb498d --- /dev/null +++ b/.github/workflows/settings.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + github-datawave + ${env.USER_NAME} + ${env.ACCESS_TOKEN} + + + diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 2f4302f86bf..0e38efb41c1 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -6,9 +6,8 @@ on: branches: - 'integration' - 'release/version*' - - 'feature/accumulo-2.0' pull_request: - paths-ignore: ['*.md', 'CODEOWNERS', 'LICENSE'] + paths-ignore: ['*.md', 'CODEOWNERS', 'LICENSE', '.github/workflows/microservice*.yaml'] workflow_dispatch: env: @@ -24,19 +23,24 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Code - uses: actions/checkout@v3 - with: - submodules: 'recursive' + uses: actions/checkout@v4 - name: Set up JDK ${{env.JAVA_VERSION}} - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: ${{env.JAVA_DISTRIBUTION}} java-version: ${{env.JAVA_VERSION}} maven-version: 3.9.5 cache: 'maven' + - name: Extract branch name + shell: bash + run: echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT + id: extract_branch - name: Format code + env: + USER_NAME: ${{ secrets.USER_NAME }} + ACCESS_TOKEN: ${{ secrets.ACCESS_TOKEN }} run: | - mvn -V -B -e -ntp "-Dstyle.color=always" clean formatter:format sortpom:sort impsort:sort -Dmaven.build.cache.enabled=false -Pautoformat + mvn -s $GITHUB_WORKSPACE/.github/workflows/settings.xml -V -B -e -ntp "-Dstyle.color=always" clean formatter:format sortpom:sort impsort:sort -Dmaven.build.cache.enabled=false -Pautoformat git status git diff-index --quiet HEAD || (echo "Modified files found. 
Creating new commit with formatting fixes" && echo "diffs_found=true" >> "$GITHUB_ENV") - name: Commit Changes @@ -44,8 +48,10 @@ jobs: if [ "$diffs_found" = true ]; then git config --global user.name "GitHub Actions" git config --global user.email "datawave@github.com" - git commit -am "Formatting job fix" - git push + git pull origin ${{ steps.extract_branch.outputs.branch }} --rebase --autostash + git checkout -b ${{ steps.extract_branch.outputs.branch }} + git commit -am "GitHub Actions: Fix Formatting" + git push origin ${{ steps.extract_branch.outputs.branch }} else echo "Nothing to do" fi @@ -55,17 +61,20 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up JDK ${{env.JAVA_VERSION}} - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: ${{env.JAVA_DISTRIBUTION}} java-version: ${{env.JAVA_VERSION}} maven-version: 3.9.5 cache: 'maven' - name: Build and Run Unit Tests + env: + USER_NAME: ${{ secrets.USER_NAME }} + ACCESS_TOKEN: ${{ secrets.ACCESS_TOKEN }} run: | - RUN_TESTS="mvn -V -B -e -ntp "-Dstyle.color=always" -Pdev,examples,assemble,spotbugs -Dmaven.build.cache.enabled=false -Ddeploy -Ddist -T1C clean verify" + RUN_TESTS="mvn -s $GITHUB_WORKSPACE/.github/workflows/settings.xml -V -B -e -ntp "-Dstyle.color=always" -Pdev,examples,assemble,spotbugs -Dmaven.build.cache.enabled=false -Ddeploy -Ddist -T1C clean verify" $RUN_TESTS \ || { echo "***** TESTS FAILED. Attempting retry."; $RUN_TESTS; } \ || { echo "***** TESTS FAILED. Attempting final retry."; $RUN_TESTS; } @@ -80,15 +89,15 @@ jobs: # runs-on: ubuntu-latest # steps: # - name: Checkout Code - # uses: actions/checkout@v3 + # uses: actions/checkout@v4 # with: # submodules: 'recursive' # - name: Set up JDK ${{env.JAVA_VERSION}} - # uses: actions/setup-java@v3 + # uses: actions/setup-java@v4 # with: # distribution: ${{env.JAVA_DISTRIBUTION}} # java-version: ${{env.JAVA_VERSION}} - # - uses: actions/cache@v3 + # - uses: actions/cache@v4 # with: # path: ~/.m2/repository # key: ${{ runner.os }}-maven-build-${{ hashFiles('**/pom.xml') }} @@ -97,12 +106,18 @@ jobs: # ${{ runner.os }}-maven-format- # ${{ runner.os }}-maven- # - name: Build Project + # env: + # USER_NAME: ${{ secrets.USER_NAME }} + # ACCESS_TOKEN: ${{ secrets.ACCESS_TOKEN }} # run: | - # BUILD="mvn -V -B -e -Pdev,examples,assemble,spotbugs -Dmaven.build.cache.enabled=false -DskipServices -Ddeploy -Ddist -T1C -pl "-:config-service" clean install -DskipTests" + # BUILD="mvn -s $GITHUB_WORKSPACE/.github/workflows/settings.xml -V -B -e -Pdev,examples,assemble,spotbugs -Dmaven.build.cache.enabled=false -DskipServices -Ddeploy -Ddist -T1C -pl "-:config-service" clean install -DskipTests" # $BUILD # - name: Run Microservice Unit Tests + # env: + # USER_NAME: ${{ secrets.USER_NAME }} + # ACCESS_TOKEN: ${{ secrets.ACCESS_TOKEN }} # run: | - # RUN_TESTS="mvn -V -B -e -Dmaven.build.cache.enabled=false verify" + # RUN_TESTS="mvn -s $GITHUB_WORKSPACE/.github/workflows/settings.xml -V -B -e -Dmaven.build.cache.enabled=false verify" # cd microservices # $RUN_TESTS \ # || { echo "***** TESTS FAILED. 
Attempting retry."; $RUN_TESTS; } \ @@ -112,9 +127,9 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up JDK ${{env.JAVA_VERSION}} - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: ${{env.JAVA_DISTRIBUTION}} java-version: ${{env.JAVA_VERSION}} @@ -133,12 +148,154 @@ jobs: # Builds the quickstart docker image and run the query tests - name: Quickstart Query Tests env: - DW_DATAWAVE_BUILD_COMMAND: "mvn -B -V -e -ntp -Dstyle.color=always -Dmaven.build.cache.enabled=false -Pdev -Ddeploy -Dtar -DskipTests clean package" + DW_DATAWAVE_BUILD_COMMAND: "mvn -s $GITHUB_WORKSPACE/.github/workflows/settings.xml -B -V -e -ntp -Dstyle.color=always -Dmaven.build.cache.enabled=false -Pdev -Ddeploy -Dtar -DskipTests clean package" DOCKER_BUILD_OPTS: "--squash --force-rm" + USER_NAME: ${{ secrets.USER_NAME }} + ACCESS_TOKEN: ${{ secrets.ACCESS_TOKEN }} run: | - TAG=$(mvn -q -N -Dmaven.build.cache.enabled=false -Dexec.executable='echo' -Dexec.args='${project.version}' exec:exec) + TAG=$(mvn -s $GITHUB_WORKSPACE/.github/workflows/settings.xml -q -N -Dmaven.build.cache.enabled=false -Dexec.executable='echo' -Dexec.args='${project.version}' exec:exec) contrib/datawave-quickstart/docker/docker-build.sh ${TAG} --docker-opts "${DOCKER_BUILD_OPTS}" + compose-build-and-test-latest-snapshots: + runs-on: ubuntu-latest + steps: + - name: Free up some space + run: | + sudo rm -rf /usr/share/dotnet + sudo rm -rf /opt/ghc + sudo rm -rf /usr/local/share/boost + sudo rm -rf $AGENT_TOOLSDIRECTORY + - name: Checkout Code + uses: actions/checkout@v4 + with: + submodules: 'recursive' + - name: Set up JDK ${{env.JAVA_VERSION}} + uses: actions/setup-java@v4 + with: + distribution: ${{env.JAVA_DISTRIBUTION}} + java-version: ${{env.JAVA_VERSION}} + maven-version: 3.9.5 + cache: 'maven' + # Builds the quickstart and microservice docker images and runs a query test + - name: Docker Compose Query Tests + env: + USER_NAME: ${{ secrets.USER_NAME }} + ACCESS_TOKEN: ${{ secrets.ACCESS_TOKEN }} + run: | + # update datawave dependencies to use the latest snapshots + mvn -s $GITHUB_WORKSPACE/.github/workflows/settings.xml -B -V -e versions:update-properties versions:update-parent -DallowSnapshots=true -Dincludes=gov.nsa.* + + mvn -s $GITHUB_WORKSPACE/.github/workflows/settings.xml -B -V -e -Pcompose -Dmicroservice-docker -Dquickstart-docker -Ddeploy -Dtar -DskipTests clean install + + # free up some space so that we don't run out + docker system prune -f + mvn -s $GITHUB_WORKSPACE/.github/workflows/settings.xml -B -V -e -Pcompose -Dmicroservice-docker -Dquickstart-docker -Ddeploy -Dtar -DskipTests clean + + cd docker + ./bootstrap.sh + + attempt=0 + max_attempts=20 + while [ $attempt -lt $max_attempts ]; do + attempt=$((attempt+1)) + + echo "Starting docker compose (Attempt ${attempt}/${max_attempts})" + nohup docker compose up -d --no-recreate < /dev/null > compose.out 2>&1 & + sleep 60s + cat compose.out + + # check to see if the query service is running + QUERY="$(docker compose ps --status running --services | grep query || true)" + + if [ "$QUERY" == "query" ] ; then + echo "Docker compose started successfully" + break + elif [ $attempt -eq $max_attempts ] ; then + echo "Failed to start docker compose" + exit 1 + fi + done + + cd scripts + ./testAll.sh + + - name: Dump Logs + if: failure() + run: | + cd docker + docker compose logs + + compose-build-and-test: + runs-on: ubuntu-latest + steps: + - name: Free up some space + run: 
| + sudo rm -rf /usr/share/dotnet + sudo rm -rf /opt/ghc + sudo rm -rf /usr/local/share/boost + sudo rm -rf $AGENT_TOOLSDIRECTORY + - name: Checkout Code + uses: actions/checkout@v4 + with: + submodules: 'recursive' + - name: Set up JDK ${{env.JAVA_VERSION}} + uses: actions/setup-java@v4 + with: + distribution: ${{env.JAVA_DISTRIBUTION}} + java-version: ${{env.JAVA_VERSION}} + - uses: actions/cache@v4 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-build-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven-build- + ${{ runner.os }}-maven-format- + ${{ runner.os }}-maven- + # Builds the quickstart and microservice docker images and runs a query test + - name: Docker Compose Query Tests + env: + USER_NAME: ${{ secrets.USER_NAME }} + ACCESS_TOKEN: ${{ secrets.ACCESS_TOKEN }} + run: | + # set some bogus URLs to trigger dependency download via maven + DIST_URLS="-Durl.zookeeper=https://bogus.apache.org/zookeeper/zookeeper-3.7.2/apache-zookeeper-3.7.2-bin.tar.gz.tar.gz \ + -Durl.accumulo=https://bogus.apache.org/accumulo/2.1.3/accumulo-2.1.3-bin.tar.gz \ + -Durl.wildfly=https://bogus.jboss.org/wildfly/17.0.1.Final/wildfly-17.0.1.Final.tar.gz \ + -Durl.hadoop=https://bogus.apache.org/hadoop/common/hadoop-3.3.6/hadoop-3.3.6.tar.gz \ + -Durl.maven=https://bogus.apache.org/maven/maven-3/3.8.8/binaries/apache-maven-3.8.8-bin.tar.gz" + + mvn -s $GITHUB_WORKSPACE/.github/workflows/settings.xml -B -V -e -Pcompose -Dmicroservice-docker -Dquickstart-docker -Dquickstart-maven ${DIST_URLS} -Ddeploy -Dtar -DskipTests -Dmaven.build.cache.enabled=false clean install + # free up some space so that we don't run out + docker system prune -f + mvn -s $GITHUB_WORKSPACE/.github/workflows/settings.xml -B -V -e -Pcompose -Dmicroservice-docker -Dquickstart-docker -Dquickstart-maven ${DIST_URLS} -Ddeploy -Dtar -DskipTests -Dmaven.build.cache.enabled=false clean + cd docker + ./bootstrap.sh + attempt=0 + max_attempts=20 + while [ $attempt -lt $max_attempts ]; do + attempt=$((attempt+1)) + echo "Starting docker compose (Attempt ${attempt}/${max_attempts})" + nohup docker compose up -d --no-recreate < /dev/null > compose.out 2>&1 & + sleep 60s + cat compose.out + # check to see if the query service is running + QUERY="$(docker compose ps --status running --services | grep query || true)" + if [ "$QUERY" == "query" ] ; then + echo "Docker compose started successfully" + break + elif [ $attempt -eq $max_attempts ] ; then + echo "Failed to start docker compose" + exit 1 + fi + done + cd scripts + ./testAll.sh + - name: Dump Logs + if: failure() + run: | + cd docker + docker compose logs + # Here's an example of how you'd deploy the image to the github package registry. # We don't want to do this by default since packages on github cannot be deleted # or overwritten. 
So this could only be done for tags, however it seems the quickstart @@ -148,9 +305,11 @@ jobs: # IMAGE_REGISTRY: "docker.pkg.github.com" # IMAGE_USERNAME: "brianloss" # IMAGE_NAME: "datawave/quickstart" + # USER_NAME: ${{ secrets.USER_NAME }} + # ACCESS_TOKEN: ${{ secrets.ACCESS_TOKEN }} # run: | # # Set up env vars - # TAG=$(mvn -q -N -Dmaven.build.cache.enabled=false -Dexec.executable='echo' -Dexec.args='${project.version}' exec:exec) + # TAG=$(mvn -s $GITHUB_WORKSPACE/.github/workflows/settings.xml -q -N -Dmaven.build.cache.enabled=false -Dexec.executable='echo' -Dexec.args='${project.version}' exec:exec) # REMOTE_IMAGE_NAME="${IMAGE_REGISTRY}/${IMAGE_USERNAME}/${IMAGE_NAME}" # # Log in to the package registry # echo ${{ secrets.GITHUB_TOKEN }} | docker login docker.pkg.github.com --username ${GITHUB_ACTOR} --password-stdin @@ -158,4 +317,3 @@ jobs: # docker tag ${IMAGE_NAME}:${TAG} ${REMOTE_IMAGE_NAME}:${TAG} # docker images # docker push ${REMOTE_IMAGE_NAME}:${TAG} - diff --git a/.gitignore b/.gitignore index 9cff6e33f22..adb29ab2c20 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ !**/application/src/main/wildfly/overlay/bin !**/web-services/deploy/application/src/main/wildfly/overlay/bin !**/web-services/examples/http-client/bin +**/*.env **/*.iml **/*.orig **/*.prefs @@ -49,3 +50,5 @@ web-services/geoserver/geoserver-application/overlays/ services/sample_configuration/*-dev.yml /.metadata/ +/.dockerignore +/.maven-dockerignore diff --git a/.gitmodules b/.gitmodules index c5ed6ab8d72..26ad0ff918c 100644 --- a/.gitmodules +++ b/.gitmodules @@ -61,3 +61,30 @@ [submodule "microservices/microservice-service-parent"] path = microservices/microservice-service-parent url = git@github.com:NationalSecurityAgency/datawave-service-parent.git +[submodule "contrib/datawave-utils"] + path = contrib/datawave-utils + url = git@github.com:NationalSecurityAgency/datawave-utils.git +[submodule "microservices/starters/query"] + path = microservices/starters/query + url = git@github.com:NationalSecurityAgency/datawave-spring-boot-starter-query.git +[submodule "microservices/services/query"] + path = microservices/services/query + url = git@github.com:NationalSecurityAgency/datawave-query-service.git +[submodule "microservices/services/query-executor"] + path = microservices/services/query-executor + url = git@github.com:NationalSecurityAgency/datawave-query-executor-service.git +[submodule "microservices/services/modification"] + path = microservices/services/modification + url = git@github.com:NationalSecurityAgency/datawave-modification-service.git +[submodule "microservices/services/mapreduce-query"] + path = microservices/services/mapreduce-query + url = git@github.com:NationalSecurityAgency/datawave-mapreduce-query-service.git +[submodule "microservices/starters/cached-results"] + path = microservices/starters/cached-results + url = git@github.com:NationalSecurityAgency/datawave-spring-boot-starter-cached-results.git +[submodule "microservices/services/map"] + path = microservices/services/map + url = git@github.com:NationalSecurityAgency/datawave-map-service.git +[submodule "microservices/services/file-provider"] + path = microservices/services/file-provider + url = git@github.com:NationalSecurityAgency/datawave-file-provider-service.git diff --git a/.mvn/maven-build-cache-config.xml b/.mvn/maven-build-cache-config.xml index 8def3e68c94..549ab1212ed 100644 --- a/.mvn/maven-build-cache-config.xml +++ 
b/.mvn/maven-build-cache-config.xml
@@ -23,7 +23,7 @@
     -->
-        <enabled>true</enabled>
+        <enabled>false</enabled>
         <hashAlgorithm>SHA-256</hashAlgorithm>
         <validateXml>true</validateXml>
@@ -71,6 +71,22 @@
                 install + + + build + push + + + + + copy-resources + + + + + repackage + +
diff --git a/BUILDME.md b/BUILDME.md
index bd01e71a5be..fbd6744675f 100644
--- a/BUILDME.md
+++ b/BUILDME.md
@@ -1,5 +1,28 @@
 # Building Datawave
 
+## Generating a Github Repository access token
+
+In order to download datawave artifacts from the github package repository, you will need to set credentials in
+your maven `settings.xml` file.
+
+You should first create a classic personal access token on github [here](https://github.com/settings/tokens). Be
+sure to give the token at least the following permissions:
+ * `read:packages`
+
+Save the token value, and create a server entry for the github package repo in your maven `settings.xml` file, like so:
+```xml
+<servers>
+    <server>
+        <id>github-datawave</id>
+        <username>PUT_YOUR_GITHUB_USERNAME_HERE</username>
+        <password>PUT_YOUR_PERSONAL_ACCESS_TOKEN_HERE</password>
+    </server>
+</servers>
+```
+The id of the server matters, and should match what is used in the datawave parent pom.
+
+## Building Datawave
+
 To perform a full (non-release) 'dev' build without unit tests:
 
 ```bash
 mvn -Pdev,assemble,rpm -Ddeploy -Dtar -Ddist -DskipTests clean install
 ```
@@ -41,47 +64,38 @@ mvn -Pdev,assemble,rpm -Ddeploy -Dtar -Ddist -DskipTests clean install
 Datawave web services utilize several microservices at runtime (currently authorization and auditing, although
 that list will expand soon). Datawave depends on api modules for some of these services, and the dependencies are set in
-the parent pom (see `version.microservice.*` properties) to released versions. If you wish to build the microservices
+the parent pom (see `version.datawave.*` properties) to released versions. If you wish to build the microservices
 for some reason, you can simply add `-Dservices` to your maven build command.
 
 ### Releasing Microservices
 
 Each subdirectory under the `services` folder is treated as a separate project. Therefore if you wish to build a
 release for any of the services (or their APIs), change directory to the appropriate service and build and deploy
-the release with `mvn -Ddist clean deploy`. Note that due to licensing restrictions, we are currently unable to deploy
-to maven's central repository (though we hope to have that changed soon), so we are temporarily deploying to a branch
-in github. Therefore, to execute the deployment, you will need to set credentials in your maven `settings.xml` file.
-You should first create a personal access token on github [here](https://github.com/settings/tokens). Be sure to give
-the token at least the following permissions:
- * `repo:status`
- * `repo_deployment`
- * `public_repo`
- * `notifications`
- * `user:email`
-Save the token value, and add it to a `github` profile in your maven `settings.xml` file, like so:
+the release with `mvn -Ddist clean deploy`. We are currently deploying our artifacts to the github package repo.
+Therefore, to execute the deployment, you will need to set credentials in your maven `settings.xml` file.
+You should first create a classic personal access token on github [here](https://github.com/settings/tokens). 
Be +sure to give the token at least the following permissions: + * `write:packages` + * `delete:packages` + +Save the token value, and create a server entry for the github package repo in your maven `settings.xml` file, like so: ```xml - - github - - - - github - - PUT_YOUR_GITHUB_USERNAME_HERE - PUT_YOUR_PERSONAL_ACCESS_TOKEN_HERE - - - + + + github-datawave + PUT_YOUR_GITHUB_USERNAME_HERE + PUT_YOUR_PERSONAL_ACCESS_TOKEN_HERE + + ``` -The name of the profile doesn't actually matter. The important fact is that the specific maven properties -are defined. +The id of the server matters, and should match what is used in the datawave parent pom. Releases for individual services are generally tagged using the pattern `svc__`. For example, the authorization service API version 1.0 is tagged with `svc_authorization-api_1.0`. Note that simply building a new API or service release won't ensure that it is used anywhere. You will need to update build properties in either the datawave parent pom or within other service poms (for cross-service dependencies) to -ensure that the new version is used. Look for properties starting with `version.microservice.` to see what to update. +ensure that the new version is used. Look for properties starting with `version.datawave.` to see what to update. If you are updating an API module, you should be careful. In general, the associated service will need to be updated as well to support the API changes. The service should _add_ a new version of the API and continue to support the old version until it can be ensured that there are no more consumers of the old API. diff --git a/README.md b/README.md index 9fa8de8d554..2b154209b7b 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@

-[![Apache License][li]][ll] ![Build Status](https://github.com/NationalSecurityAgency/datawave/workflows/Tests/badge.svg) +[![Apache License][li]][ll] ![Build Status](https://github.com/NationalSecurityAgency/datawave/actions/workflows/tests.yml/badge.svg) DataWave is a Java-based ingest and query framework that leverages [Apache Accumulo](http://accumulo.apache.org/) to provide fast, secure access to your data. DataWave supports a wide variety of use cases, including but not limited to... @@ -56,5 +56,9 @@ mvn -Pdocker,dist -DskipMicroservices clean install -T 1C git submodule deinit --all ``` +### DataWave Microservices + +For more information about deploying the datawave quickstart and microservices, check out the [Docker Readme](docker/README.md#usage) + [li]: http://img.shields.io/badge/license-ASL-blue.svg [ll]: https://www.apache.org/licenses/LICENSE-2.0 diff --git a/checkstyle.xml b/checkstyle.xml index 8744f2ba1a0..6ac2c34afa1 100644 --- a/checkstyle.xml +++ b/checkstyle.xml @@ -2,12 +2,14 @@ + + - - - - - + + + + + diff --git a/common-test/pom.xml b/common-test/pom.xml index aef4f88263a..c340d1e48aa 100644 --- a/common-test/pom.xml +++ b/common-test/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave datawave-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-common-test ${project.artifactId} @@ -63,5 +63,10 @@ provided true + + org.junit.jupiter + junit-jupiter-engine + test + diff --git a/common-test/src/main/java/datawave/common/test/logging/TestLogCollector.java b/common-test/src/main/java/datawave/common/test/logging/TestLogCollector.java index 55b20c8c162..679753de40e 100644 --- a/common-test/src/main/java/datawave/common/test/logging/TestLogCollector.java +++ b/common-test/src/main/java/datawave/common/test/logging/TestLogCollector.java @@ -4,6 +4,7 @@ import java.io.IOException; import java.io.Writer; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import org.apache.log4j.bridge.FilterAdapter; @@ -27,7 +28,7 @@ */ public class TestLogCollector extends ExternalResource { - private List messages = new ArrayList<>(); + private List messages = Collections.synchronizedList(new ArrayList<>()); private List loggers; private Writer writer = new CharArrayWriter(); diff --git a/common-test/src/main/java/datawave/common/test/utils/ProcessUtils.java b/common-test/src/main/java/datawave/common/test/utils/ProcessUtils.java index c3b57af8df6..c8e3ea704f5 100644 --- a/common-test/src/main/java/datawave/common/test/utils/ProcessUtils.java +++ b/common-test/src/main/java/datawave/common/test/utils/ProcessUtils.java @@ -46,6 +46,8 @@ public static List buildApplicationCommandLine(String clzName, List/>} + s=${s//'"'/"} + printf -- %s "$s" +} + +function datawaveModification() { + + ! datawaveIsInstalled && info "DataWave Web is not installed. Try 'datawaveInstall'" && return 1 + ! datawaveWebIsRunning && info "DataWave Web is not running. Try 'datawaveWebStart'" && return 1 + + # Reset + + DW_QUERY_RESPONSE_BODY="" + DW_QUERY_RESPONSE_CODE="" + DW_QUERY_RESPONSE_TYPE="" + DW_QUERY_TOTAL_TIME="" + + # Both 'Content-Type: application/x-www-form-urlencoded' and 'Accept: application/json' + # added by default, but may be overridden, if needed, via --header,-H option + + DW_REQUEST_HEADERS="" + + # Defaults + + DW_MODIFICATION_COMMAND="INSERT" + DW_MODIFICATION_SERVICE="MutableMetadataUUIDService" + DW_MODIFICATION_VIZ="BAR&FOO" + DW_MODIFICATION_VERBOSE=false + + configureUserIdentity || return 1 + configureModification "$@" || return $? 
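+    # Note: the --write-out suffix below is what parseQueryResponse later splits on to
+    # recover the HTTP status code, total request time, and content type of the response.
+    # The DW_CURL_CERT/DW_CURL_KEY_RSA/DW_CURL_CA paths are assumed to have been set up
+    # by configureUserIdentity above.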
+ + local curlcmd="/usr/bin/curl \ + --silent --write-out 'HTTP_STATUS_CODE:%{http_code};TOTAL_TIME:%{time_total};CONTENT_TYPE:%{content_type}' \ + --insecure --cert "${DW_CURL_CERT}" --key "${DW_CURL_KEY_RSA}" --cacert "${DW_CURL_CA}" \ + --header 'Content-Type: application/xml;charset=UTF-8' --header 'Accept: application/xml' \ + ${DW_REQUEST_HEADERS} ${DW_CURL_DATA} -X PUT ${DW_MODIFICATION_URI}/${DW_MODIFICATION_SERVICE}/submit" + echo $curlcmd + + local response="$( eval "${curlcmd}" )" + local exitStatus=$? + + if [ "${exitStatus}" != "0" ] ; then + echo + error "Curl command exited with non-zero status: ${exitStatus}" + echo + return 1 + fi + + parseQueryResponse + prettyPrintResponse + printCurlSummary + + return 0 +} + +function configureModification() { + + while [ "${1}" != "" ]; do + case "${1}" in + --uuid | -u) + DW_MODIFICATION_UUID="${2}" + shift + ;; + --type | -t) + DW_MODIFICATION_UUID_TYPE="${2}" + shift + ;; + --field | -f) + DW_MODIFICATION_FIELD="${2}" + shift + ;; + --oldvalue | -o) + DW_MODIFICATION_OLD_VALUE="${2}" + shift + ;; + --newvalue | -n) + DW_MODIFICATION_NEW_VALUE="${2}" + shift + ;; + --visibility | --vis) + DW_MODIFICATION_VIZ="${2}" + shift + ;; + --command | -c) + DW_MODIFICATION_COMMAND="${2}" + shift + ;; + --header | -H) + DW_REQUEST_HEADERS="${DW_REQUEST_HEADERS} ${1} '${2}'" + shift + ;; + --help | -h) + modificationHelp && return 1 + ;; + --verbose | -v) + DW_MODIFICATION_VERBOSE=true + ;; + *) + error "Invalid argument passed to $( basename "$0" ): ${1}" && return 1 + esac + shift + done + + [ -z "${DW_MODIFICATION_UUID}" ] && error "Uuid is required" && return 1 + [ -z "${DW_MODIFICATION_UUID_TYPE}" ] && error "Uuid type (field) is required" && return 1 + [ -z "${DW_MODIFICATION_FIELD}" ] && error "Field is required" && return 1 + [ -z "${DW_MODIFICATION_VIZ}" ] && error "Visibility is required" && return 1 + BODY="${DW_MODIFICATION_UUID}${DW_MODIFICATION_UUID_TYPE}${DW_MODIFICATION_COMMAND}${DW_MODIFICATION_FIELD}${DW_MODIFICATION_NEW_VALUE}$( xmlencode ${DW_MODIFICATION_VIZ} )testUserINSERTTESTABCPUBLIC" + if [ "${DW_MODIFICATION_COMMAND}" == "INSERT" ] ; then + [ -z "${DW_MODIFICATION_NEW_VALUE}" ] && error "New field value is required" && return 1 + elif [ "${DW_MODIFICATION_COMMAND}" == "REPLACE" ] ; then + [ -z "${DW_MODIFICATION_NEW_VALUE}" ] && error "New field value is required" && return 1 + elif [ "${DW_MODIFICATION_COMMAND}" == "UPDATE" ] ; then + [ -z "${DW_MODIFICATION_NEW_VALUE}" ] && error "New field value is required" && return 1 + [ -z "${DW_MODIFICATION_OLD_VALUE}" ] && error "Old field value is required" && return 1 + BODY="${DW_MODIFICATION_UUID}${DW_MODIFICATION_UUID_TYPE}${DW_MODIFICATION_COMMAND}${DW_MODIFICATION_FIELD}${DW_MODIFICATION_NEW_VALUE}${DW_MODIFICATION_OLD_VALUE}$( xmlencode ${DW_MODIFICATION_VIZ} )testUserINSERTTESTABCPUBLIC" + elif [ "${DW_MODIFICATION_COMMAND}" == "DELETE" ] ; then + [ -z "${DW_MODIFICATION_OLD_VALUE}" ] && error "Old field value is required" && return 1 + BODY="${DW_MODIFICATION_UUID}${DW_MODIFICATION_UUID_TYPE}${DW_MODIFICATION_COMMAND}${DW_MODIFICATION_FIELD}${DW_MODIFICATION_OLD_VALUE}$( xmlencode ${DW_MODIFICATION_VIZ} )testUserINSERTTESTABCPUBLIC" + else + error "Command set to ${DW_MODIFICATION_COMMAND}. Command must be one of INSERT, UPDATE, DELETE, or REPLACE." 
&& return 1
+    fi
+
+    DW_CURL_DATA="-d '$BODY'"
+}
+
+function modificationHelp() {
+    echo
+    echo " The $( printGreen "datawaveModification" ) shell function allows you to submit modification requests on demand to DataWave's"
+    echo " REST API and to inspect the results. It automatically configures curl and sets"
+    echo " reasonable defaults for most required query parameters."
+    echo
+    echo " Assuming the following modification entries are in the datawave.metadata:"
+    echo "     REVIEW m:csv []"
+    echo "     REVIEW m:enwiki []"
+    echo "     REVIEW m:tvmaze []"
+    echo
+    echo " $( printGreen datawaveModification ) --uuid 09aa3d46-8aa0-49fb-8859-f3add48859b0 --type UUID --field REVIEW -c INSERT --newvalue 'I liked this one'"
+    echo " $( printGreen datawaveModification ) --uuid 09aa3d46-8aa0-49fb-8859-f3add48859b0 --type UUID --field REVIEW -c DELETE --oldvalue 'I liked this one'"
+    echo " $( printGreen datawaveModification ) --uuid 09aa3d46-8aa0-49fb-8859-f3add48859b0 --type UUID --field REVIEW -c REPLACE --newvalue 'I really liked this one'"
+    echo " $( printGreen datawaveModification ) --uuid 09aa3d46-8aa0-49fb-8859-f3add48859b0 --type UUID --field REVIEW -c UPDATE --oldvalue 'I liked this one' --newvalue 'I really liked this one'"
+    echo
+    echo " Required:"
+    echo
+    echo " $( printGreen "-u" ) | $( printGreen "--uuid" ) \"<uuid>\""
+    echo "    The event uuid"
+    echo
+    echo " $( printGreen "-t" ) | $( printGreen "--type" ) \"<type>\""
+    echo "    The event uuid type (field)"
+    echo
+    echo " $( printGreen "-f" ) | $( printGreen "--field" ) \"<field>\""
+    echo "    The field to modify"
+    echo
+    echo " Optional:"
+    echo
+    echo " $( printGreen "-c" ) | $( printGreen "--command" ) <command>"
+    echo "    The command must be one of INSERT, UPDATE, DELETE, or REPLACE. Defaults to ${DW_MODIFICATION_COMMAND}"
+    echo
+    echo " $( printGreen "-n" ) | $( printGreen "--newvalue" ) <value>"
+    echo "    The new value (required for the INSERT, UPDATE, and REPLACE commands)"
+    echo
+    echo " $( printGreen "-o" ) | $( printGreen "--oldvalue" ) <value>"
+    echo "    The old value (required for the UPDATE and DELETE commands)"
+    echo
+    echo " $( printGreen "--vis" ) | $( printGreen "--visibility" ) <visibility>"
+    echo "    Visibility expression to apply to the modified value. Defaults to '${DW_MODIFICATION_VIZ}'"
+    echo
+    echo " $( printGreen "-H" ) | $( printGreen "--header" ) \"HeaderName: HeaderValue\""
+    echo "    Adds specified name/value pair to the curl command as an HTTP request header"
+    echo "    Defaults: '$(printGreen "Content-Type"): application/xml;charset=UTF-8' and '$(printGreen "Accept"): application/xml'"
+    echo
+    echo " $( printGreen "-v" ) | $( printGreen "--verbose" )"
+    echo "    Display curl command. 
Otherwise, only query results and response metadata are displayed" + echo + echo " $( printGreen "-h" ) | $( printGreen "--help" )" + echo " Print this usage information and exit the script" + echo +} + +function listMutableFields() { + + # Reset + + DW_QUERY_RESPONSE_BODY="" + DW_QUERY_RESPONSE_CODE="" + DW_QUERY_RESPONSE_TYPE="" + DW_QUERY_TOTAL_TIME="" + DW_QUERY_EXTRA_PARAMS="" + + configureUserIdentity || return 1 + + local curlcmd="/usr/bin/curl \ + --silent --write-out 'HTTP_STATUS_CODE:%{http_code};TOTAL_TIME:%{time_total};CONTENT_TYPE:%{content_type}' \ + --insecure --cert "${DW_CURL_CERT}" --key "${DW_CURL_KEY_RSA}" --cacert "${DW_CURL_CA}" \ + -X GET ${DW_MODIFICATION_URI}/getMutableFieldList" + + local response="$( eval "${curlcmd}" )" + local exitStatus=$? + if [ "${exitStatus}" != "0" ] ; then + error "Curl command exited with non-zero status: ${exitStatus}" + echo + return 1 + fi + + parseQueryResponse + prettyPrintResponse + printCurlSummary +} + +function reloadMutableFieldCache() { + + local curlcmd="/usr/bin/curl \ + --silent --write-out 'HTTP_STATUS_CODE:%{http_code};TOTAL_TIME:%{time_total};CONTENT_TYPE:%{content_type}' \ + --insecure --cert "${DW_CURL_CERT}" --key "${DW_CURL_KEY_RSA}" --cacert "${DW_CURL_CA}" \ + -X GET ${DW_MODIFICATION_URI}/AccumuloTableCache/reload/datawave.metadata" + local response="$( eval "${curlcmd}" )" + local exitStatus=$? + + if [ "${exitStatus}" != "0" ] ; then + error "Curl command exited with non-zero status: ${exitStatus}. Failed to update table cache: ${dwtable}" + return 1 + fi + + parseQueryResponse + prettyPrintResponse + printCurlSummary + + local curlcmd="/usr/bin/curl \ + --silent --write-out 'HTTP_STATUS_CODE:%{http_code};TOTAL_TIME:%{time_total};CONTENT_TYPE:%{content_type}' \ + --insecure --cert "${DW_CURL_CERT}" --key "${DW_CURL_KEY_RSA}" --cacert "${DW_CURL_CA}" \ + -X GET ${DW_MODIFICATION_URI}/reloadCache" + + local response="$( eval "${curlcmd}" )" + local exitStatus=$? + + if [ "${exitStatus}" != "0" ] ; then + error "Curl command exited with non-zero status: ${exitStatus}. Failed to update mutable fields cache: ${dwtable}" + return 1 + fi + + parseQueryResponse + prettyPrintResponse + printCurlSummary +} + +function listModificationConfiguration() { + + # Reset + + DW_QUERY_RESPONSE_BODY="" + DW_QUERY_RESPONSE_CODE="" + DW_QUERY_RESPONSE_TYPE="" + DW_QUERY_TOTAL_TIME="" + DW_QUERY_EXTRA_PARAMS="" + + configureUserIdentity || return 1 + + local curlcmd="/usr/bin/curl \ + --silent --write-out 'HTTP_STATUS_CODE:%{http_code};TOTAL_TIME:%{time_total};CONTENT_TYPE:%{content_type}' \ + --insecure --cert "${DW_CURL_CERT}" --key "${DW_CURL_KEY_RSA}" --cacert "${DW_CURL_CA}" \ + -X GET ${DW_MODIFICATION_URI}/listConfigurations" + + local response="$( eval "${curlcmd}" )" + local exitStatus=$? 
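+    # A non-zero exit status here means curl itself failed (e.g., connection refused);
+    # HTTP-level errors are surfaced separately via the parsed status code.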
+ if [ "${exitStatus}" != "0" ] ; then + error "Curl command exited with non-zero status: ${exitStatus}" + echo + return 1 + fi + + parseQueryResponse + prettyPrintResponse + printCurlSummary +} + diff --git a/contrib/datawave-quickstart/bin/query.sh b/contrib/datawave-quickstart/bin/query.sh index 7e44656010c..d49aa64b4f1 100644 --- a/contrib/datawave-quickstart/bin/query.sh +++ b/contrib/datawave-quickstart/bin/query.sh @@ -131,21 +131,8 @@ function setQueryIdFromResponse() { } function prettyPrintJson() { - local PY=$( which python ) - if [ -n "${PY}" ] ; then - echo "${1}" | ${PY} -c 'from __future__ import print_function;import sys,json;data=json.loads(sys.stdin.read()); print(json.dumps(data, indent=2, sort_keys=True))' - local exitStatus=$? - echo - if [ "${exitStatus}" != "0" ] ; then - printRawResponse "${1}" - warn "Python encountered error. Printed response without formatting" - echo - fi - else - printRawResponse "${1}" - warn "Couldn't find python in your environment. Json response was printed without formatting" - echo - fi + PY_CMD='from __future__ import print_function; import sys,json; data=json.loads(sys.stdin.read()); print(json.dumps(data, indent=2, sort_keys=True))' + echo "${1}" | ( python3 -c "${PY_CMD}" 2>/dev/null || python2 -c "${PY_CMD}" 2>/dev/null ) || ( warn "Python encountered error. Printed response without formatting" && printRawResponse "${1}" ) } function printRawResponse() { @@ -400,7 +387,7 @@ function getNextPage() { function parseQueryResponse() { DW_QUERY_RESPONSE_BODY=$( echo ${response} | sed -e 's/HTTP_STATUS_CODE\:.*//g' ) DW_QUERY_RESPONSE_CODE=$( echo ${response} | tr -d '\n' | sed -e 's/.*HTTP_STATUS_CODE://' | sed -e 's/;TOTAL_TIME\:.*//' ) - DW_QUERY_RESPONSE_TYPE=$( echo ${response} | tr -d '\n' | sed -e 's/.*CONTENT_TYPE://' ) + DW_QUERY_RESPONSE_TYPE=$( echo ${response} | tr -d '\n' | sed -e 's/.*CONTENT_TYPE://' | sed -e 's/;.*//' ) DW_QUERY_TOTAL_TIME=$( echo ${response} | tr -d '\n' | sed -e 's/.*TOTAL_TIME://' | sed -e 's/;CONTENT_TYPE\:.*//' ) } diff --git a/contrib/datawave-quickstart/bin/services/accumulo/bootstrap.sh b/contrib/datawave-quickstart/bin/services/accumulo/bootstrap.sh index 7e785e9b46e..15d01f2ece7 100644 --- a/contrib/datawave-quickstart/bin/services/accumulo/bootstrap.sh +++ b/contrib/datawave-quickstart/bin/services/accumulo/bootstrap.sh @@ -15,13 +15,26 @@ DW_ACCUMULO_SERVICE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" # Zookeeper config # You may override DW_ZOOKEEPER_DIST_URI in your env ahead of time, and set as file:///path/to/file.tar.gz for local tarball, if needed -DW_ZOOKEEPER_DIST_URI="${DW_ZOOKEEPER_DIST_URI:-https://archive.apache.org/dist/zookeeper/zookeeper-3.7.1/apache-zookeeper-3.7.1-bin.tar.gz}" +# DW_ZOOKEEPER_DIST_URI should, if possible, be using https. There are potential security risks by using http. +DW_ZOOKEEPER_VERSION="3.7.2" +DW_ZOOKEEPER_DIST_URI="${DW_ZOOKEEPER_DIST_URI:-https://dlcdn.apache.org/zookeeper/zookeeper-${DW_ZOOKEEPER_VERSION}/apache-zookeeper-${DW_ZOOKEEPER_VERSION}-bin.tar.gz}" +# The sha512 checksum for the tarball. Value should be the hash value only and does not include the file name. Cannot be left blank. 
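+# For reference, the expected value can be regenerated locally with, e.g.:
+#   sha512sum apache-zookeeper-3.7.2-bin.tar.gz | awk '{print $1}'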
+DW_ZOOKEEPER_DIST_SHA512_CHECKSUM="${DW_ZOOKEEPER_DIST_SHA512_CHECKSUM:-6afbfc1afc8b9370281bd9862f37dbb1cb95ec54bb2ed4371831aa5c0f08cfee775050bd57ce5fc0836e61af27eed9f0076f54b98997dd0e15159196056e52ea}" # shellcheck disable=SC2154 # shellcheck disable=SC2034 -DW_ZOOKEEPER_DIST="$( downloadTarball "${DW_ZOOKEEPER_DIST_URI}" "${DW_ACCUMULO_SERVICE_DIR}" && echo "${tarball}" )" +DW_ZOOKEEPER_DIST="$( { downloadTarball "${DW_ZOOKEEPER_DIST_URI}" "${DW_ACCUMULO_SERVICE_DIR}" || downloadMavenTarball "datawave-parent" "gov.nsa.datawave.quickstart" "zookeeper" "${DW_ZOOKEEPER_VERSION}" "${DW_ACCUMULO_SERVICE_DIR}"; } && echo "${tarball}" )" DW_ZOOKEEPER_BASEDIR="zookeeper-install" DW_ZOOKEEPER_SYMLINK="zookeeper" +# You may override DW_BIND_HOST in your env ahead of time, if needed +DW_BIND_HOST="${DW_BIND_HOST:-localhost}" + +# If we are configured to bind to all interfaces, instead bind to the hostname +DW_ACCUMULO_BIND_HOST="${DW_ACCUMULO_BIND_HOST:-${DW_BIND_HOST}}" +if [ "$DW_ACCUMULO_BIND_HOST" == "0.0.0.0" ] ; then + DW_ACCUMULO_BIND_HOST="$(hostname)" +fi + # zoo.cfg... # shellcheck disable=SC2034 DW_ZOOKEEPER_CONF=" @@ -30,15 +43,20 @@ syncLimit=5 clientPort=2181 dataDir=${DW_CLOUD_DATA}/zookeeper maxClientCnxns=100 +4lw.commands.whitelist=ruok,wchs admin.serverPort=8089 admin.enableServer=false" # Accumulo config # You may override DW_ACCUMULO_DIST_URI in your env ahead of time, and set as file:///path/to/file.tar.gz for local tarball, if needed -DW_ACCUMULO_DIST_URI="${DW_ACCUMULO_DIST_URI:-http://archive.apache.org/dist/accumulo/2.1.1/accumulo-2.1.1-bin.tar.gz}" +# DW_ACCUMULO_DIST_URI should, if possible, be using https. There are potential security risks by using http. +DW_ACCUMULO_VERSION="2.1.3" +DW_ACCUMULO_DIST_URI="${DW_ACCUMULO_DIST_URI:-https://dlcdn.apache.org/accumulo/${DW_ACCUMULO_VERSION}/accumulo-${DW_ACCUMULO_VERSION}-bin.tar.gz}" +# The sha512 checksum for the tarball. Value should be the hash value only and does not include the file name. Cannot be left blank. 
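+# install.sh passes this value to verifyChecksum, which is expected to halt the install
+# if the downloaded tarball does not match.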
+DW_ACCUMULO_DIST_SHA512_CHECKSUM="${DW_ACCUMULO_DIST_SHA512_CHECKSUM:-1a27a144dc31f55ccc8e081b6c1bc6cc0362a8391838c53c166cb45291ff8f35867fd8e4729aa7b2c540f8b721f8c6953281bf589fc7fe320e4dc4d20b87abc4}" # shellcheck disable=SC2034 -DW_ACCUMULO_DIST="$( downloadTarball "${DW_ACCUMULO_DIST_URI}" "${DW_ACCUMULO_SERVICE_DIR}" && echo "${tarball}" )" +DW_ACCUMULO_DIST="$( { downloadTarball "${DW_ACCUMULO_DIST_URI}" "${DW_ACCUMULO_SERVICE_DIR}" || downloadMavenTarball "datawave-parent" "gov.nsa.datawave.quickstart" "accumulo" "${DW_ACCUMULO_VERSION}" "${DW_ACCUMULO_SERVICE_DIR}"; } && echo "${tarball}" )" DW_ACCUMULO_BASEDIR="accumulo-install" DW_ACCUMULO_SYMLINK="accumulo" DW_ACCUMULO_INSTANCE_NAME="my-instance-01" @@ -57,7 +75,7 @@ DW_ACCUMULO_VFS_DATAWAVE_DIR="/datawave/accumulo-vfs-classpath" # accumulo.properties (Format: ={}) DW_ACCUMULO_PROPERTIES="## Sets location in HDFS where Accumulo will store data -instance.volumes=${DW_HADOOP_DFS_URI}/accumulo +instance.volumes=${DW_HADOOP_DFS_URI_CLIENT}/accumulo ## Sets location of Zookeepers instance.zookeeper.host=localhost:2181 @@ -66,7 +84,7 @@ instance.zookeeper.host=localhost:2181 instance.secret=${DW_ACCUMULO_PASSWORD} ## Set to false if 'accumulo-util build-native' fails -tserver.memory.maps.native.enabled=true +tserver.memory.maps.native.enabled=false tserver.memory.maps.max=385M tserver.cache.data.size=64M tserver.cache.index.size=64M @@ -79,7 +97,7 @@ trace.password=${DW_ACCUMULO_PASSWORD}" if [ "${DW_ACCUMULO_VFS_DATAWAVE_ENABLED}" != false ] ; then DW_ACCUMULO_PROPERTIES="${DW_ACCUMULO_PROPERTIES} -general.vfs.context.classpath.datawave=${DW_HADOOP_DFS_URI}${DW_ACCUMULO_VFS_DATAWAVE_DIR}/.*.jar" +general.vfs.context.classpath.datawave=${DW_HADOOP_DFS_URI_CLIENT}${DW_ACCUMULO_VFS_DATAWAVE_DIR}/.*.jar" else DW_ACCUMULO_PROPERTIES="${DW_ACCUMULO_PROPERTIES} general.vfs.context.classpath.extlib=file://${ACCUMULO_HOME}/lib/ext/.*.jar" @@ -108,7 +126,7 @@ DW_ZOOKEEPER_CMD_FIND_ALL_PIDS="ps -ef | grep 'zookeeper.server.quorum.QuorumPee DW_ACCUMULO_CMD_START="( cd ${ACCUMULO_HOME}/bin && ./accumulo-cluster start )" DW_ACCUMULO_CMD_STOP="( cd ${ACCUMULO_HOME}/bin && ./accumulo-cluster stop )" -DW_ACCUMULO_CMD_FIND_ALL_PIDS="pgrep -d ' ' -f 'o.start.Main manager|o.start.Main tserver|o.start.Main monitor|o.start.Main gc|o.start.Main tracer'" +DW_ACCUMULO_CMD_FIND_ALL_PIDS="pgrep -u ${USER} -d ' ' -f 'o.start.Main manager|o.start.Main tserver|o.start.Main monitor|o.start.Main gc|o.start.Main tracer'" function accumuloIsRunning() { DW_ACCUMULO_PID_LIST="$(eval "${DW_ACCUMULO_CMD_FIND_ALL_PIDS}")" @@ -131,7 +149,7 @@ function accumuloStart() { fi eval "${DW_ACCUMULO_CMD_START}" echo - info "For detailed status visit 'http://localhost:9995' in your browser" + info "For detailed status visit 'http://${DW_ACCUMULO_BIND_HOST}:9995' in your browser" } function accumuloStop() { @@ -233,7 +251,7 @@ function accumuloUninstall() { } function accumuloInstall() { - "${DW_ACCUMULO_SERVICE_DIR}/install.sh" + "${DW_ACCUMULO_SERVICE_DIR}/install.sh" } function zookeeperIsInstalled() { @@ -284,15 +302,15 @@ function accumuloPidList() { } function accumuloDisplayBinaryInfo() { - echo "Source: ${DW_ACCUMULO_DIST_URI}" - local tarballName="$(basename "$DW_ACCUMULO_DIST_URI")" + echo "Source: ${DW_ACCUMULO_DIST}" + local tarballName="$(basename "$DW_ACCUMULO_DIST")" if [[ -f "${DW_ACCUMULO_SERVICE_DIR}/${tarballName}" ]]; then echo " Local: ${DW_ACCUMULO_SERVICE_DIR}/${tarballName}" else echo " Local: Not loaded" fi - echo "Source: ${DW_ZOOKEEPER_DIST_URI}" - 
tarballName="$(basename "$DW_ZOOKEEPER_DIST_URI")" + echo "Source: ${DW_ZOOKEEPER_DIST}" + tarballName="$(basename "$DW_ZOOKEEPER_DIST")" if [[ -f "${DW_ACCUMULO_SERVICE_DIR}/${tarballName}" ]]; then echo " Local: ${DW_ACCUMULO_SERVICE_DIR}/${tarballName}" else diff --git a/contrib/datawave-quickstart/bin/services/accumulo/install.sh b/contrib/datawave-quickstart/bin/services/accumulo/install.sh index 3fa5dc9d2ae..b73e5494045 100755 --- a/contrib/datawave-quickstart/bin/services/accumulo/install.sh +++ b/contrib/datawave-quickstart/bin/services/accumulo/install.sh @@ -14,6 +14,11 @@ source "${SERVICES_DIR}/hadoop/bootstrap.sh" hadoopIsInstalled || fatal "Accumulo requires that Hadoop be installed" +# If Accumulo is not installed, verify that the two checksums match before installing. +accumuloIsInstalled || verifyChecksum "${DW_ACCUMULO_DIST_URI}" "${DW_ACCUMULO_SERVICE_DIR}" "${DW_ACCUMULO_DIST_SHA512_CHECKSUM}" +# If Zookeeper is not installed, verify that the two checksums match before installing. +zookeeperIsInstalled || verifyChecksum "${DW_ZOOKEEPER_DIST_URI}" "${DW_ACCUMULO_SERVICE_DIR}" "${DW_ZOOKEEPER_DIST_SHA512_CHECKSUM}" + if zookeeperIsInstalled ; then info "ZooKeeper is already installed" else @@ -21,6 +26,8 @@ else mkdir "${DW_ACCUMULO_SERVICE_DIR}/${DW_ZOOKEEPER_BASEDIR}" || fatal "Failed to create ZooKeeper base directory" # Extract ZooKeeper, set symlink, and verify... tar xf "${DW_ACCUMULO_SERVICE_DIR}/${DW_ZOOKEEPER_DIST}" -C "${DW_ACCUMULO_SERVICE_DIR}/${DW_ZOOKEEPER_BASEDIR}" --strip-components=1 || fatal "Failed to extract ZooKeeper tarball" + #symlink the zookeeper jars if needed + ln -s ${DW_ACCUMULO_SERVICE_DIR}/${DW_ZOOKEEPER_BASEDIR}/lib/* ${DW_ACCUMULO_SERVICE_DIR}/${DW_ZOOKEEPER_BASEDIR} ( cd "${DW_CLOUD_HOME}" && ln -s "bin/services/accumulo/${DW_ZOOKEEPER_BASEDIR}" "${DW_ZOOKEEPER_SYMLINK}" ) || fatal "Failed to set ZooKeeper symlink" zookeeperIsInstalled || fatal "ZooKeeper was not installed" @@ -66,6 +73,11 @@ sed -i'' -e "s~\(ACCUMULO_TSERVER_OPTS=\).*$~\1\"${DW_ACCUMULO_TSERVER_OPTS}\"~g sed -i'' -e "s~\(export JAVA_HOME=\).*$~\1\"${JAVA_HOME}\"~g" "${DW_ACCUMULO_CONF_DIR}/accumulo-env.sh" sed -i'' -e "s~\(export ACCUMULO_MONITOR_OPTS=\).*$~\1\"\${POLICY} -Xmx2g -Xms512m\"~g" "${DW_ACCUMULO_CONF_DIR}/accumulo-env.sh" +# Update Accumulo bind host if it's not set to localhost +if [ "${DW_ACCUMULO_BIND_HOST}" != "localhost" ] ; then + sed -i'' -e "s/localhost/${DW_ACCUMULO_BIND_HOST}/g" ${DW_ACCUMULO_CONF_DIR}/cluster.yaml +fi + # Write zoo.cfg file using our settings in DW_ZOOKEEPER_CONF if [ -n "${DW_ZOOKEEPER_CONF}" ] ; then echo "${DW_ZOOKEEPER_CONF}" > "${DW_ZOOKEEPER_CONF_DIR}/zoo.cfg" || fatal "Failed to write zoo.cfg" diff --git a/contrib/datawave-quickstart/bin/services/datawave/bootstrap-ingest.sh b/contrib/datawave-quickstart/bin/services/datawave/bootstrap-ingest.sh index bd689ec71d1..8629602bc00 100644 --- a/contrib/datawave-quickstart/bin/services/datawave/bootstrap-ingest.sh +++ b/contrib/datawave-quickstart/bin/services/datawave/bootstrap-ingest.sh @@ -184,7 +184,7 @@ function datawaveIngestWikipedia() { [ ! 
-f "${wikipediaRawFile}" ] && error "File not found: ${wikipediaRawFile}" && return 1 local wikipediaHdfsFile="${DW_DATAWAVE_INGEST_HDFS_BASEDIR}/$( basename ${wikipediaRawFile} )" - local putFileCommand="hdfs dfs -copyFromLocal ${wikipediaRawFile} ${wikipediaHdfsFile}" + local putFileCommand="hdfs dfs -copyFromLocal -f ${wikipediaRawFile} ${wikipediaHdfsFile}" local inputFormat="datawave.ingest.wikipedia.WikipediaEventInputFormat" local jobCommand="${DW_DATAWAVE_INGEST_HOME}/bin/ingest/live-ingest.sh ${wikipediaHdfsFile} ${DW_DATAWAVE_INGEST_NUM_SHARDS} -inputFormat ${inputFormat} -data.name.override=wikipedia ${extraOpts}" @@ -211,7 +211,7 @@ function datawaveIngestCsv() { [ ! -f "${csvRawFile}" ] && error "File not found: ${csvRawFile}" && return 1 local csvHdfsFile="${DW_DATAWAVE_INGEST_HDFS_BASEDIR}/$( basename ${csvRawFile} )" - local putFileCommand="hdfs dfs -copyFromLocal ${csvRawFile} ${csvHdfsFile}" + local putFileCommand="hdfs dfs -copyFromLocal -f ${csvRawFile} ${csvHdfsFile}" local inputFormat="datawave.ingest.csv.mr.input.CSVFileInputFormat" local jobCommand="${DW_DATAWAVE_INGEST_HOME}/bin/ingest/live-ingest.sh ${csvHdfsFile} ${DW_DATAWAVE_INGEST_NUM_SHARDS} -inputFormat ${inputFormat} -data.name.override=mycsv ${extraOpts}" @@ -232,7 +232,7 @@ function datawaveIngestJson() { [ ! -f "${jsonRawFile}" ] && error "File not found: ${jsonRawFile}" && return 1 local jsonHdfsFile="${DW_DATAWAVE_INGEST_HDFS_BASEDIR}/$( basename ${jsonRawFile} )" - local putFileCommand="hdfs dfs -copyFromLocal ${jsonRawFile} ${jsonHdfsFile}" + local putFileCommand="hdfs dfs -copyFromLocal -f ${jsonRawFile} ${jsonHdfsFile}" local inputFormat="datawave.ingest.json.mr.input.JsonInputFormat" local jobCommand="${DW_DATAWAVE_INGEST_HOME}/bin/ingest/live-ingest.sh ${jsonHdfsFile} ${DW_DATAWAVE_INGEST_NUM_SHARDS} -inputFormat ${inputFormat} -data.name.override=myjson ${extraOpts}" @@ -347,3 +347,10 @@ function datawaveIngestTarballName() { local dwVersion="$(getDataWaveVersion)" echo "$( basename "${DW_DATAWAVE_INGEST_TARBALL/-\*-/-$dwVersion-}" )" } + +function datawaveIngestExamples() { + datawaveIngestWikipedia ${DW_DATAWAVE_INGEST_TEST_FILE_WIKI} + datawaveIngestJson ${DW_DATAWAVE_INGEST_TEST_FILE_JSON} + datawaveIngestCsv ${DW_DATAWAVE_INGEST_TEST_FILE_CSV} +} + diff --git a/contrib/datawave-quickstart/bin/services/datawave/bootstrap-web.sh b/contrib/datawave-quickstart/bin/services/datawave/bootstrap-web.sh index 65e243576de..a6606321d7f 100644 --- a/contrib/datawave-quickstart/bin/services/datawave/bootstrap-web.sh +++ b/contrib/datawave-quickstart/bin/services/datawave/bootstrap-web.sh @@ -1,7 +1,11 @@ # You may override DW_WILDFLY_DIST_URI in your env ahead of time, and set as file:///path/to/file.tar.gz for local tarball, if needed -DW_WILDFLY_DIST_URI="${DW_WILDFLY_DIST_URI:-https://download.jboss.org/wildfly/17.0.1.Final/wildfly-17.0.1.Final.tar.gz}" -DW_WILDFLY_DIST="$( downloadTarball "${DW_WILDFLY_DIST_URI}" "${DW_DATAWAVE_SERVICE_DIR}" && echo "${tarball}" )" +DW_WILDFLY_VERSION="17.0.1" +# DW_WILDFLY_DIST_URI should, if possible, be using https. There are potential security risks by using http. +DW_WILDFLY_DIST_URI="${DW_WILDFLY_DIST_URI:-https://download.jboss.org/wildfly/${DW_WILDFLY_VERSION}.Final/wildfly-${DW_WILDFLY_VERSION}.Final.tar.gz}" +# The sha512 checksum for the tarball. Value should be the hash value only and does not include the file name. Cannot be left blank. 
+DW_WILDFLY_DIST_SHA512_CHECKSUM="${DW_WILDFLY_DIST_SHA512_CHECKSUM:-fcbdff4bc275f478c3bf5f665a83e62468a920e58fcddeaa2710272dd0f1ce3154cdc371d5011763a6be24ae1a5e0bca0218cceea63543edb4b5cf22de60b485}" +DW_WILDFLY_DIST="$( { downloadTarball "${DW_WILDFLY_DIST_URI}" "${DW_DATAWAVE_SERVICE_DIR}" || downloadMavenTarball "datawave-parent" "gov.nsa.datawave.quickstart" "wildfly" "${DW_WILDFLY_VERSION}" "${DW_DATAWAVE_SERVICE_DIR}"; } && echo "${tarball}" )" DW_WILDFLY_BASEDIR="wildfly-install" DW_WILDFLY_SYMLINK="wildfly" @@ -173,8 +177,8 @@ function datawaveWebDisplayBinaryInfo() { else echo " Local: Not loaded" fi - echo "Source: ${DW_WILDFLY_DIST_URI}" - local tarballName="$(basename "$DW_WILDFLY_DIST_URI")" + echo "Source: ${DW_WILDFLY_DIST}" + local tarballName="$(basename "$DW_WILDFLY_DIST")" if [[ -f "${DW_DATAWAVE_SERVICE_DIR}/${tarballName}" ]]; then echo " Local: ${DW_DATAWAVE_SERVICE_DIR}/${tarballName}" else diff --git a/contrib/datawave-quickstart/bin/services/datawave/bootstrap.sh b/contrib/datawave-quickstart/bin/services/datawave/bootstrap.sh index 10bdde77661..898828f357f 100644 --- a/contrib/datawave-quickstart/bin/services/datawave/bootstrap.sh +++ b/contrib/datawave-quickstart/bin/services/datawave/bootstrap.sh @@ -30,7 +30,7 @@ source "${DW_DATAWAVE_SERVICE_DIR}/bootstrap-user.sh" DW_DATAWAVE_BUILD_PROFILE=${DW_DATAWAVE_BUILD_PROFILE:-dev} # Maven command -DW_DATAWAVE_BUILD_COMMAND="${DW_DATAWAVE_BUILD_COMMAND:-mvn -P${DW_DATAWAVE_BUILD_PROFILE} -Ddeploy -Dtar -Ddist -DskipTests -Dmaven.build.cache.enabled=false clean package --builder smart -T1.0C}" +DW_DATAWAVE_BUILD_COMMAND="${DW_DATAWAVE_BUILD_COMMAND:-mvn -P${DW_DATAWAVE_BUILD_PROFILE} -Ddeploy -Dtar -Ddist -DskipServices -DskipTests -Dmaven.build.cache.enabled=false clean package --builder smart -T1.0C}" # Home of any temp data and *.properties file overrides for this instance of DataWave @@ -137,10 +137,10 @@ function setBuildPropertyOverrides() { echo "WAREHOUSE_ACCUMULO_HOME=${ACCUMULO_HOME}" >> ${BUILD_PROPERTIES_FILE} echo "WAREHOUSE_INSTANCE_NAME=${DW_ACCUMULO_INSTANCE_NAME}" >> ${BUILD_PROPERTIES_FILE} - echo "WAREHOUSE_JOBTRACKER_NODE=${DW_HADOOP_RESOURCE_MANAGER_ADDRESS}" >> ${BUILD_PROPERTIES_FILE} + echo "WAREHOUSE_JOBTRACKER_NODE=${DW_HADOOP_RESOURCE_MANAGER_ADDRESS_CLIENT}" >> ${BUILD_PROPERTIES_FILE} echo "INGEST_ACCUMULO_HOME=${ACCUMULO_HOME}" >> ${BUILD_PROPERTIES_FILE} echo "INGEST_INSTANCE_NAME=${DW_ACCUMULO_INSTANCE_NAME}" >> ${BUILD_PROPERTIES_FILE} - echo "INGEST_JOBTRACKER_NODE=${DW_HADOOP_RESOURCE_MANAGER_ADDRESS}" >> ${BUILD_PROPERTIES_FILE} + echo "INGEST_JOBTRACKER_NODE=${DW_HADOOP_RESOURCE_MANAGER_ADDRESS_CLIENT}" >> ${BUILD_PROPERTIES_FILE} echo "BULK_INGEST_DATA_TYPES=${DW_DATAWAVE_INGEST_BULK_DATA_TYPES}" >> ${BUILD_PROPERTIES_FILE} echo "LIVE_INGEST_DATA_TYPES=${DW_DATAWAVE_INGEST_LIVE_DATA_TYPES}" >> ${BUILD_PROPERTIES_FILE} echo "PASSWORD=${DW_ACCUMULO_PASSWORD}" >> ${BUILD_PROPERTIES_FILE} @@ -168,11 +168,11 @@ function setBuildPropertyOverrides() { # # uncomment to enable environment passwords in the quickstart, and comment out above line # echo "accumulo.user.password=env:DW_ACCUMULO_PASSWORD" >> ${BUILD_PROPERTIES_FILE} - echo "cached.results.hdfs.uri=${DW_HADOOP_DFS_URI}" >> ${BUILD_PROPERTIES_FILE} - echo "type.metadata.hdfs.uri=${DW_HADOOP_DFS_URI}" >> ${BUILD_PROPERTIES_FILE} - echo "mapReduce.hdfs.uri=${DW_HADOOP_DFS_URI}" >> ${BUILD_PROPERTIES_FILE} - echo "bulkResults.hdfs.uri=${DW_HADOOP_DFS_URI}" >> ${BUILD_PROPERTIES_FILE} - echo "jboss.log.hdfs.uri=${DW_HADOOP_DFS_URI}" >> 
${BUILD_PROPERTIES_FILE} + echo "cached.results.hdfs.uri=${DW_HADOOP_DFS_URI_CLIENT}" >> ${BUILD_PROPERTIES_FILE} + echo "type.metadata.hdfs.uri=${DW_HADOOP_DFS_URI_CLIENT}" >> ${BUILD_PROPERTIES_FILE} + echo "mapReduce.hdfs.uri=${DW_HADOOP_DFS_URI_CLIENT}" >> ${BUILD_PROPERTIES_FILE} + echo "bulkResults.hdfs.uri=${DW_HADOOP_DFS_URI_CLIENT}" >> ${BUILD_PROPERTIES_FILE} + echo "jboss.log.hdfs.uri=${DW_HADOOP_DFS_URI_CLIENT}" >> ${BUILD_PROPERTIES_FILE} echo "lock.file.dir=${DW_DATAWAVE_INGEST_LOCKFILE_DIR}" >> ${BUILD_PROPERTIES_FILE} echo "server.keystore.password=${DW_ACCUMULO_PASSWORD}" >> ${BUILD_PROPERTIES_FILE} @@ -181,9 +181,10 @@ function setBuildPropertyOverrides() { echo "jboss.managed.executor.service.default.max.threads=${DW_WILDFLY_EE_DEFAULT_MAX_THREADS:-48}" >> ${BUILD_PROPERTIES_FILE} echo "hornetq.cluster.password=${DW_ACCUMULO_PASSWORD}" >> ${BUILD_PROPERTIES_FILE} echo "hornetq.system.password=${DW_ACCUMULO_PASSWORD}" >> ${BUILD_PROPERTIES_FILE} - echo "mapReduce.job.tracker=${DW_HADOOP_RESOURCE_MANAGER_ADDRESS}" >> ${BUILD_PROPERTIES_FILE} - echo "bulkResults.job.tracker=${DW_HADOOP_RESOURCE_MANAGER_ADDRESS}" >> ${BUILD_PROPERTIES_FILE} + echo "mapReduce.job.tracker=${DW_HADOOP_RESOURCE_MANAGER_ADDRESS_CLIENT}" >> ${BUILD_PROPERTIES_FILE} + echo "bulkResults.job.tracker=${DW_HADOOP_RESOURCE_MANAGER_ADDRESS_CLIENT}" >> ${BUILD_PROPERTIES_FILE} echo "EVENT_DISCARD_INTERVAL=0" >> ${BUILD_PROPERTIES_FILE} + echo "EVENT_DISCARD_FUTURE_INTERVAL=0" >> ${BUILD_PROPERTIES_FILE} echo "ingest.data.types=${DW_DATAWAVE_INGEST_LIVE_DATA_TYPES},${DW_DATAWAVE_INGEST_BULK_DATA_TYPES}" >> ${BUILD_PROPERTIES_FILE} echo "JOB_CACHE_REPLICATION=1" >> ${BUILD_PROPERTIES_FILE} echo "EDGE_DEFINITION_FILE=${DW_DATAWAVE_INGEST_EDGE_DEFINITIONS}" >> ${BUILD_PROPERTIES_FILE} diff --git a/contrib/datawave-quickstart/bin/services/datawave/ingest-examples/tvmaze-api-query.sh b/contrib/datawave-quickstart/bin/services/datawave/ingest-examples/tvmaze-api-query.sh index af1da19187e..69cf2d47fa5 100755 --- a/contrib/datawave-quickstart/bin/services/datawave/ingest-examples/tvmaze-api-query.sh +++ b/contrib/datawave-quickstart/bin/services/datawave/ingest-examples/tvmaze-api-query.sh @@ -38,10 +38,11 @@ TVMAZE_RESPONSE_STATUS=$( echo ${CURL_RESPONSE} | tr -d '\n' | sed -e 's/.*HTTP_ [ "${TVMAZE_RESPONSE_STATUS}" != "200" ] && error "api.tvmaze.com returned invalid response status: ${TVMAZE_RESPONSE_STATUS}" && exit 1 [ -z "${TVMAZE_RESPONSE_BODY}" ] && error "Response body is empty!" 
&& exit 1 +PY_CMD='from __future__ import print_function; import sys,json; data=json.loads(sys.stdin.read()); print(json.dumps(data, indent=2, sort_keys=True))' if [ "${PRETTY}" == true ] ; then - echo "${TVMAZE_RESPONSE_BODY}" | python -c 'from __future__ import print_function;import sys,json;data=json.loads(sys.stdin.read()); print(json.dumps(data, indent=2, sort_keys=True))' + echo "${TVMAZE_RESPONSE_BODY}" | ( python3 -c "${PY_CMD}" 2>/dev/null || python2 -c "${PY_CMD}" 2>/dev/null ) || ( warn "Unable to pretty print, Python not detected" && echo "${TVMAZE_RESPONSE_BODY}" ) else - echo "${TVMAZE_RESPONSE_BODY}" + echo "${TVMAZE_RESPONSE_BODY}" fi exit 0 \ No newline at end of file diff --git a/contrib/datawave-quickstart/bin/services/datawave/install-ingest.sh b/contrib/datawave-quickstart/bin/services/datawave/install-ingest.sh index ca3ab94af47..2ab3f06fa7b 100755 --- a/contrib/datawave-quickstart/bin/services/datawave/install-ingest.sh +++ b/contrib/datawave-quickstart/bin/services/datawave/install-ingest.sh @@ -75,9 +75,9 @@ else [ ! -d ${ACCUMULO_HOME}/lib/ext ] && fatal "Unable to update Accumulo classpath. ${ACCUMULO_HOME}/lib/ext does not exist!" info "Removing any existing jars from ${ACCUMULO_HOME}/lib/ext" rm -f ${ACCUMULO_HOME}/lib/ext/*.jar - info "Copying DataWave jars into ${ACCUMULO_HOME}/lib/ext" + info "Copying DataWave jars into ${ACCUMULO_HOME}/lib and ${ACCUMULO_HOME}/lib/ext" if [ -d ${DW_DATAWAVE_INGEST_HOME}/accumulo-warehouse/lib ]; then - cp ${DW_DATAWAVE_INGEST_HOME}/accumulo-warehouse/lib/*.jar ${ACCUMULO_HOME}/lib/ext > /dev/null 2>&1 + cp ${DW_DATAWAVE_INGEST_HOME}/accumulo-warehouse/lib/*.jar ${ACCUMULO_HOME}/lib > /dev/null 2>&1 fi if [ -d ${DW_DATAWAVE_INGEST_HOME}/accumulo-warehouse/lib/ext ]; then cp ${DW_DATAWAVE_INGEST_HOME}/accumulo-warehouse/lib/ext/*.jar ${ACCUMULO_HOME}/lib/ext > /dev/null 2>&1 @@ -159,13 +159,6 @@ function initializeDatawaveTables() { fi } -function ingestExampleData() { - # Ingest some canned, example data files - datawaveIngestWikipedia "${DW_DATAWAVE_INGEST_TEST_FILE_WIKI}" - datawaveIngestJson "${DW_DATAWAVE_INGEST_TEST_FILE_JSON}" - datawaveIngestCsv "${DW_DATAWAVE_INGEST_TEST_FILE_CSV}" -} - initializeDatawaveTables @@ -186,4 +179,4 @@ info "See \$DW_CLOUD_HOME/bin/services/datawave/bootstrap-ingest.sh to view/edit # Ingest raw data examples, if appropriate... -[ "${DW_REDEPLOY_IN_PROGRESS}" != true ] && [ "${DW_DATAWAVE_INGEST_TEST_SKIP}" == false ] && ingestExampleData +[ "${DW_REDEPLOY_IN_PROGRESS}" != true ] && [ "${DW_DATAWAVE_INGEST_TEST_SKIP}" == false ] && datawaveIngestExamples diff --git a/contrib/datawave-quickstart/bin/services/datawave/install-web.sh b/contrib/datawave-quickstart/bin/services/datawave/install-web.sh index 5435e9598e9..40c18b4aa31 100755 --- a/contrib/datawave-quickstart/bin/services/datawave/install-web.sh +++ b/contrib/datawave-quickstart/bin/services/datawave/install-web.sh @@ -10,6 +10,9 @@ source "${THIS_DIR}/bootstrap.sh" source "${SERVICES_DIR}/hadoop/bootstrap.sh" source "${SERVICES_DIR}/accumulo/bootstrap.sh" +# If Wildfly is not installed, verify that the two checksums match before installing. 
+datawaveWebIsInstalled || verifyChecksum "${DW_WILDFLY_DIST_URI}" "${DW_DATAWAVE_SERVICE_DIR}" "${DW_WILDFLY_DIST_SHA512_CHECKSUM}" + accumuloIsInstalled || fatal "DataWave Web requires that Accumulo be installed" datawaveWebIsInstalled && info "DataWave Web is already installed" && exit 1 diff --git a/contrib/datawave-quickstart/bin/services/datawave/test-web/responses/EventQueryJexlSyntax.test/JexlWildFieldedPage1.expected b/contrib/datawave-quickstart/bin/services/datawave/test-web/responses/EventQueryJexlSyntax.test/JexlWildFieldedPage1.expected new file mode 100644 index 00000000000..c9e1cf9dab6 --- /dev/null +++ b/contrib/datawave-quickstart/bin/services/datawave/test-web/responses/EventQueryJexlSyntax.test/JexlWildFieldedPage1.expected @@ -0,0 +1 @@ +{"Events":[{"Fields":[{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"1709323633673"},"columnVisibility":"PUBLIC","name":"LOAD_DATE","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"30"},"columnVisibility":"PUBLIC","name":"TERM_COUNT","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"2016-04-26T00:32:53Z"},"columnVisibility":"PUBLIC","name":"EVENT_DATE","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"f7fbd49088f28aeb92bf76157ed5f1056fe2f9c3"},"columnVisibility":"PUBLIC","name":"SHA1","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"PUBLIC"},"columnVisibility":"PUBLIC","name":"SECURITY_MARKING","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"VERTEX1"},"columnVisibility":"PUBLIC","name":"EDGE_VERTEX_TO","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"THIS SHALL NOT BE TOKENIZED"},"columnVisibility":"PUBLIC","name":"FIELDNAME1_NOT_TO_TOKENIZE","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"2016-04-26 03:00:00"},"columnVisibility":"PUBLIC","name":"PROCESSING_DATE","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"my.csv|5|0"},"columnVisibility":"PUBLIC","name":"ORIG_FILE","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"TXT2M20161170217518731.gz#152498541"},"columnVisibility":"PUBLIC","name":"CONTENT_URI","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"\nMore summary text 
\n"},"columnVisibility":"PUBLIC","name":"SUMMARY","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"2190"},"columnVisibility":"PUBLIC","name":"PROCESSED_SIZE","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"20160426_0/csv/1855zl.64utu.-nzzng8.1"},"columnVisibility":"PUBLIC","name":"RECORD_ID","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"MYFOO"},"columnVisibility":"PUBLIC","name":"FOO_FIELD","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"0016dd72-0000-827d-dd4d-001b2163ba09"},"columnVisibility":"PUBLIC","name":"UUID","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"ENGLISH"},"columnVisibility":"PUBLIC","name":"LANGUAGE","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"66f00045d715f5afa19ffc1c28c140cd01eac190"},"columnVisibility":"PUBLIC","name":"PARAGRAPH_CHECKSUM_SHA_1","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"718ef8731de49eceaf24ce149710d6f3f854d5f3"},"columnVisibility":"PUBLIC","name":"PARAGRAPH_CHECKSUM_SHA_1","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"d6bdac5ec34acde7d55611246280648de11d7a6b"},"columnVisibility":"PUBLIC","name":"PARAGRAPH_CHECKSUM_SHA_1","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"MYBAR"},"columnVisibility":"PUBLIC","name":"BAR_FIELD","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"MIME"},"columnVisibility":"PUBLIC","name":"PARENT_FILETYPE","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"7e604bd4cd397bec7a861405f8860cb45892d054a96be66e94ea9b62c118f8a6"},"columnVisibility":"PUBLIC","name":"SHA256","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"VERTEX9"},"columnVisibility":"PUBLIC","name":"EDGE_VERTEX_FROM","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"gggb.n2016117myaa.0000000179-att-1"},"columnVisibility":"PUBLIC","name":"EVENT_ID","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"MIME-BODY"},"columnVisibility":"PUBLIC","name":"FILE_TYPE","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"2188"},"columnVisibility":"PUBLIC","name":"ORIGINAL_SIZE","timestamp":1461628800000},{"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Value":{"type":"xs:string","value":"6c257114aaca09de9e9c3754baf1015h"},"columnVisibility":"PUBLIC","name":"MD5","timestamp":1461628800000}],"Markings":{"entry":[{"key":"columnVisibility","value":"PUBLIC"}]},"Metadata":{"DataType":"csv","InternalId":"1855zl.64utu.-nzzng8.1","Row":"20160
426_0","Table":"datawave.shard"}}],"Fields":["BAR_FIELD","CONTENT_URI","EDGE_VERTEX_FROM","EDGE_VERTEX_TO","EVENT_DATE","EVENT_ID","FIELDNAME1_NOT_TO_TOKENIZE","FILE_TYPE","FOO_FIELD","LANGUAGE","LOAD_DATE","MD5","ORIGINAL_SIZE","ORIG_FILE","PARAGRAPH_CHECKSUM_SHA_1","PARENT_FILETYPE","PROCESSED_SIZE","PROCESSING_DATE","RECORD_ID","SECURITY_MARKING","SHA1","SHA256","SUMMARY","TERM_COUNT","UUID"],"HasResults":true,"LogicName":"EventQuery","OperationTimeMS":2,"PageNumber":2,"PartialResults":false,"QueryId":"f5b1e1a5-19a9-497d-aac5-0fd5d00a2a7f","ReturnedEvents":1} \ No newline at end of file diff --git a/contrib/datawave-quickstart/bin/services/datawave/test-web/tests/EventQueryJexlSyntax.test b/contrib/datawave-quickstart/bin/services/datawave/test-web/tests/EventQueryJexlSyntax.test index 274c6896ae0..8d6da3e235d 100644 --- a/contrib/datawave-quickstart/bin/services/datawave/test-web/tests/EventQueryJexlSyntax.test +++ b/contrib/datawave-quickstart/bin/services/datawave/test-web/tests/EventQueryJexlSyntax.test @@ -106,4 +106,35 @@ configureTest \ "" \ 204 + +setCurlData query=$( urlencode "SUMMARY =~ '.*more.*' AND UUID == '0016dd72-0000-827d-dd4d-001b2163ba09'" ) \ + queryName=EventQueryTestJexlWild \ + begin=20160301 \ + end=20160501 \ + pagesize=1 \ + auths=PUBLIC \ + columnVisibility=PRIVATE \ + query.syntax=JEXL + +configureTest \ + CreateJexlWildFielded \ + "Creates a valid, fielded EventQuery w/ JEXL query syntax" \ + "--header 'Content-Type: application/x-www-form-urlencoded' ${DW_CURL_DATA} -X POST ${URI_ROOT}/Query/EventQuery/create" \ + "application/xml;charset=UTF-8" \ + 200 + +runTest --set-query-id + +################################################################ +# Get 1st page from /Query/${DW_QUERY_ID}/next, in JSON format this time + +configureTest \ + JexlWildFieldedPage1 \ + "Gets the 1st page of results for the CreateJexlWildFielded test in JSON format" \ + "--header 'Accept: application/json' -X GET ${URI_ROOT}/Query/${DW_QUERY_ID}/next" \ + application/json \ + 200 + +runTest + # This last test is executed by run.sh, as usual diff --git a/contrib/datawave-quickstart/bin/services/hadoop/bootstrap.sh b/contrib/datawave-quickstart/bin/services/hadoop/bootstrap.sh index f8f60d8f9c1..8cec3a75726 100644 --- a/contrib/datawave-quickstart/bin/services/hadoop/bootstrap.sh +++ b/contrib/datawave-quickstart/bin/services/hadoop/bootstrap.sh @@ -2,27 +2,41 @@ DW_HADOOP_SERVICE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +DW_HADOOP_VERSION="3.3.6" # You may override DW_HADOOP_DIST_URI in your env ahead of time, and set as file:///path/to/file.tar.gz for local tarball, if needed -DW_HADOOP_DIST_URI="${DW_HADOOP_DIST_URI:-http://archive.apache.org/dist/hadoop/common/hadoop-3.3.4/hadoop-3.3.4.tar.gz}" -DW_HADOOP_DIST="$( downloadTarball "${DW_HADOOP_DIST_URI}" "${DW_HADOOP_SERVICE_DIR}" && echo "${tarball}" )" +# DW_HADOOP_DIST_URI should use https wherever possible; plain http carries security risks. +DW_HADOOP_DIST_URI="${DW_HADOOP_DIST_URI:-https://dlcdn.apache.org/hadoop/common/hadoop-${DW_HADOOP_VERSION}/hadoop-${DW_HADOOP_VERSION}.tar.gz}" +# The SHA-512 checksum for the tarball. The value must be the hash only, without the file name, and cannot be left blank.
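The two new tests above exercise the web service's create-then-page flow: POST to /Query/EventQuery/create, then GET /Query/<id>/next for each page. Outside the test harness, the same flow can be driven directly with curl; a rough sketch follows, in which the base URL and the client-certificate paths are assumptions about a typical quickstart deployment, not values from this diff:

    # Create the query; the query id comes back in the XML response
    curl -sk --cert /path/to/client.pem --key /path/to/client.key \
         --header 'Content-Type: application/x-www-form-urlencoded' \
         --data 'queryName=EventQueryTestJexlWild' \
         --data 'begin=20160301' --data 'end=20160501' \
         --data 'pagesize=1' --data 'auths=PUBLIC' \
         --data 'columnVisibility=PRIVATE' --data 'query.syntax=JEXL' \
         --data-urlencode "query=SUMMARY =~ '.*more.*' AND UUID == '0016dd72-0000-827d-dd4d-001b2163ba09'" \
         -X POST "https://localhost:8443/DataWave/Query/EventQuery/create"

    # Page through the results as JSON, substituting the id parsed from the
    # create response; an earlier test in this file expects a 204 once the
    # results are exhausted
    curl -sk --cert /path/to/client.pem --key /path/to/client.key \
         --header 'Accept: application/json' \
         -X GET "https://localhost:8443/DataWave/Query/${QUERY_ID}/next"
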
+DW_HADOOP_DIST_SHA512_CHECKSUM="${DW_HADOOP_DIST_SHA512_CHECKSUM:-de3eaca2e0517e4b569a88b63c89fae19cb8ac6c01ff990f1ff8f0cc0f3128c8e8a23db01577ca562a0e0bb1b4a3889f8c74384e609cd55e537aada3dcaa9f8a}" +DW_HADOOP_DIST="$( { downloadTarball "${DW_HADOOP_DIST_URI}" "${DW_HADOOP_SERVICE_DIR}" || downloadMavenTarball "datawave-parent" "gov.nsa.datawave.quickstart" "hadoop" "${DW_HADOOP_VERSION}" "${DW_HADOOP_SERVICE_DIR}"; } && echo "${tarball}" )" DW_HADOOP_BASEDIR="hadoop-install" DW_HADOOP_SYMLINK="hadoop" -DW_HADOOP_DFS_URI="hdfs://localhost:9000" +# You may override DW_BIND_HOST in your env ahead of time, if needed +DW_BIND_HOST="${DW_BIND_HOST:-localhost}" + +DW_HADOOP_DFS_URI_SERVER="hdfs://${DW_BIND_HOST}:9000" +DW_HADOOP_DFS_URI_CLIENT="hdfs://${DW_BIND_HOST}:9000" DW_HADOOP_MR_INTER_DIR="/jobhist/inter" DW_HADOOP_MR_DONE_DIR="/jobhist/done" -DW_HADOOP_RESOURCE_MANAGER_ADDRESS="localhost:8050" +DW_HADOOP_RESOURCE_MANAGER_ADDRESS_SERVER="${DW_BIND_HOST}:8050" +DW_HADOOP_RESOURCE_MANAGER_ADDRESS_CLIENT="${DW_BIND_HOST}:8050" + +if [ "${DW_BIND_HOST}" == "0.0.0.0" ] ; then + DW_HADOOP_DFS_URI_CLIENT="hdfs://localhost:9000" + DW_HADOOP_RESOURCE_MANAGER_ADDRESS_CLIENT="localhost:8050" +fi HADOOP_HOME="${DW_CLOUD_HOME}/${DW_HADOOP_SYMLINK}" # core-site.xml (Format: {}) -DW_HADOOP_CORE_SITE_CONF="fs.defaultFS ${DW_HADOOP_DFS_URI} -hadoop.tmp.dir file:/${DW_CLOUD_DATA}/hadoop/tmp +DW_HADOOP_CORE_SITE_CONF="fs.defaultFS ${DW_HADOOP_DFS_URI_SERVER} +hadoop.tmp.dir file://${DW_CLOUD_DATA}/hadoop/tmp io.compression.codecs org.apache.hadoop.io.compress.GzipCodec" # hdfs-site.xml (Format: {}) DW_HADOOP_HDFS_SITE_CONF="dfs.namenode.name.dir file://${DW_CLOUD_DATA}/hadoop/nn -dfs.namenode.secondary.http-address localhost +dfs.namenode.secondary.http-address ${DW_BIND_HOST} dfs.namenode.checkpoint.dir file://${DW_CLOUD_DATA}/hadoop/nnchk dfs.datanode.data.dir file://${DW_CLOUD_DATA}/hadoop/dn dfs.datanode.handler.count 10 @@ -31,8 +45,8 @@ dfs.replication 1" DW_HADOOP_MR_HEAPDUMP_DIR="${DW_CLOUD_DATA}/heapdumps" # mapred-site.xml (Format: {}) -DW_HADOOP_MAPRED_SITE_CONF="mapreduce.jobhistory.address http://localhost:8020 -mapreduce.jobhistory.webapp.address http://localhost:8021 +DW_HADOOP_MAPRED_SITE_CONF="mapreduce.jobhistory.address ${DW_BIND_HOST}:8020 +mapreduce.jobhistory.webapp.address ${DW_BIND_HOST}:8021 mapreduce.jobhistory.intermediate-done-dir ${DW_HADOOP_MR_INTER_DIR} mapreduce.jobhistory.done-dir ${DW_HADOOP_MR_DONE_DIR} mapreduce.map.memory.mb 2048 @@ -45,12 +59,12 @@ mapreduce.map.env HADOOP_MAPRED_HOME=${HADOOP_HOME} mapreduce.reduce.env HADOOP_MAPRED_HOME=${HADOOP_HOME}" # yarn-site.xml (Format: {}) -DW_HADOOP_YARN_SITE_CONF="yarn.resourcemanager.scheduler.address localhost:8030 +DW_HADOOP_YARN_SITE_CONF="yarn.resourcemanager.scheduler.address ${DW_BIND_HOST}:8030 yarn.resourcemanager.scheduler.class org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler -yarn.resourcemanager.resource-tracker.address localhost:8025 -yarn.resourcemanager.address ${DW_HADOOP_RESOURCE_MANAGER_ADDRESS} -yarn.resourcemanager.admin.address localhost:8033 -yarn.resourcemanager.webapp.address localhost:8088 +yarn.resourcemanager.resource-tracker.address ${DW_BIND_HOST}:8025 +yarn.resourcemanager.address ${DW_HADOOP_RESOURCE_MANAGER_ADDRESS_SERVER} +yarn.resourcemanager.admin.address ${DW_BIND_HOST}:8033 +yarn.resourcemanager.webapp.address ${DW_BIND_HOST}:8088 yarn.nodemanager.local-dirs ${DW_CLOUD_DATA}/hadoop/yarn/local yarn.nodemanager.log-dirs ${DW_CLOUD_DATA}/hadoop/yarn/log 
yarn.nodemanager.aux-services mapreduce_shuffle @@ -58,7 +72,7 @@ yarn.nodemanager.pmem-check-enabled false yarn.nodemanager.vmem-check-enabled false yarn.nodemanager.resource.memory-mb 6144 yarn.app.mapreduce.am.resource.mb 1024 -yarn.log.server.url http://localhost:8070/jobhistory/logs" +yarn.log.server.url http://localhost:8021/jobhistory/logs" # capacity-scheduler.xml (Format: {}) DW_HADOOP_CAPACITY_SCHEDULER_CONF="yarn.scheduler.capacity.maximum-applications 10000 @@ -85,6 +99,7 @@ yarn.scheduler.capacity.node-locality-delay 40" export HADOOP_HOME export HADOOP_CONF_DIR="${HADOOP_HOME}/etc/hadoop" export HADOOP_LOG_DIR="${HADOOP_HOME}/logs" +export YARN_LOG_DIR="${DW_CLOUD_DATA}/hadoop/yarn/log" export HADOOP_YARN_HOME="${HADOOP_HOME}" export HADOOP_MAPRED_HOME="${HADOOP_HOME}" export HADOOP_PID_DIR="${DW_CLOUD_DATA}/hadoop/pids" @@ -95,7 +110,7 @@ export PATH=${HADOOP_HOME}/bin:$PATH DW_HADOOP_CMD_START="( cd ${HADOOP_HOME}/sbin && ./start-dfs.sh && ./start-yarn.sh && mapred --daemon start historyserver )" DW_HADOOP_CMD_STOP="( cd ${HADOOP_HOME}/sbin && mapred --daemon stop historyserver && ./stop-yarn.sh && ./stop-dfs.sh )" -DW_HADOOP_CMD_FIND_ALL_PIDS="pgrep -d ' ' -f 'proc_datanode|proc_namenode|proc_secondarynamenode|proc_nodemanager|proc_resourcemanager|mapreduce.v2.hs.JobHistoryServer'" +DW_HADOOP_CMD_FIND_ALL_PIDS="pgrep -u ${USER} -d ' ' -f 'proc_datanode|proc_namenode|proc_secondarynamenode|proc_nodemanager|proc_resourcemanager|mapreduce.v2.hs.JobHistoryServer'" function hadoopIsRunning() { DW_HADOOP_PID_LIST="$(eval "${DW_HADOOP_CMD_FIND_ALL_PIDS}")" @@ -105,7 +120,7 @@ function hadoopIsRunning() { function hadoopStart() { hadoopIsRunning && echo "Hadoop is already running" || eval "${DW_HADOOP_CMD_START}" echo - info "For detailed status visit 'http://localhost:50070/dfshealth.html#tab-overview' in your browser" + info "For detailed status visit 'http://localhost:9870/dfshealth.html#tab-overview' in your browser" # Wait for Hadoop to come out of safemode ${HADOOP_HOME}/bin/hdfs dfsadmin -safemode wait } @@ -191,7 +206,7 @@ function hadoopUninstall() { } function hadoopInstall() { - "${DW_HADOOP_SERVICE_DIR}"/install.sh + "${DW_HADOOP_SERVICE_DIR}"/install.sh } function hadoopPrintenv() { @@ -209,8 +224,8 @@ function hadoopPidList() { } function hadoopDisplayBinaryInfo() { - echo "Source: ${DW_HADOOP_DIST_URI}" - local tarballName="$(basename "$DW_HADOOP_DIST_URI")" + echo "Source: ${DW_HADOOP_DIST}" + local tarballName="$(basename "$DW_HADOOP_DIST")" if [[ -f "${DW_HADOOP_SERVICE_DIR}/${tarballName}" ]]; then echo " Local: ${DW_HADOOP_SERVICE_DIR}/${tarballName}" else diff --git a/contrib/datawave-quickstart/bin/services/hadoop/install.sh b/contrib/datawave-quickstart/bin/services/hadoop/install.sh index 8757ec4bdb9..b31338c09ab 100755 --- a/contrib/datawave-quickstart/bin/services/hadoop/install.sh +++ b/contrib/datawave-quickstart/bin/services/hadoop/install.sh @@ -36,6 +36,9 @@ Generate the password-less ssh key now?" source "${BIN_DIR}/env.sh" source "${THIS_DIR}/bootstrap.sh" +# If Hadoop is not yet installed, verify the downloaded tarball's SHA-512 checksum before installing.
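The DW_BIND_HOST server/client split earlier in this bootstrap is what lets the same scripts run on a workstation and inside a container: daemon-facing addresses may bind every interface, but client-facing addresses must stay routable, so they fall back to localhost. A brief illustration of the resulting values, derived directly from the logic above (the env.sh path is the one the Dockerfile later sources):

    # Default workstation setup: server and client addresses are identical
    export DW_BIND_HOST=localhost
    #   DW_HADOOP_DFS_URI_SERVER -> hdfs://localhost:9000
    #   DW_HADOOP_DFS_URI_CLIENT -> hdfs://localhost:9000

    # Container setup: daemons listen on every interface, but a client
    # cannot dial 0.0.0.0, so the client-side URI falls back to loopback
    export DW_BIND_HOST=0.0.0.0
    #   DW_HADOOP_DFS_URI_SERVER -> hdfs://0.0.0.0:9000
    #   DW_HADOOP_DFS_URI_CLIENT -> hdfs://localhost:9000

    # Re-evaluate the bootstraps with the chosen bind host
    source /opt/datawave/contrib/datawave-quickstart/bin/env.sh
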
+hadoopIsInstalled || verifyChecksum "${DW_HADOOP_DIST_URI}" "${DW_HADOOP_SERVICE_DIR}" "${DW_HADOOP_DIST_SHA512_CHECKSUM}" + hadoopIsInstalled && info "Hadoop is already installed" && exit 1 [ -f "${DW_HADOOP_SERVICE_DIR}/${DW_HADOOP_DIST}" ] || fatal "Hadoop tarball not found" diff --git a/contrib/datawave-quickstart/bin/services/maven/bootstrap.sh b/contrib/datawave-quickstart/bin/services/maven/bootstrap.sh index e638fd044f1..61fc360a58c 100644 --- a/contrib/datawave-quickstart/bin/services/maven/bootstrap.sh +++ b/contrib/datawave-quickstart/bin/services/maven/bootstrap.sh @@ -1,16 +1,18 @@ # Sourced by env.sh DW_MAVEN_SERVICE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +DW_MAVEN_VERSION="3.8.8" # You may override DW_MAVEN_DIST_URI in your env ahead of time, and set as file:///path/to/file.tar.gz for local tarball, if needed -DW_MAVEN_DIST_URI="${DW_MAVEN_DIST_URI:-https://archive.apache.org/dist/maven/maven-3/3.8.6/binaries/apache-maven-3.8.6-bin.tar.gz}" +DW_MAVEN_DIST_URI="${DW_MAVEN_DIST_URI:-https://dlcdn.apache.org/maven/maven-3/${DW_MAVEN_VERSION}/binaries/apache-maven-${DW_MAVEN_VERSION}-bin.tar.gz}" DW_MAVEN_DIST="$( basename "${DW_MAVEN_DIST_URI}" )" DW_MAVEN_BASEDIR="maven-install" DW_MAVEN_SYMLINK="maven" function bootstrapEmbeddedMaven() { - [ ! -f "${DW_MAVEN_SERVICE_DIR}/${DW_MAVEN_DIST}" ] \ - && info "Maven 3.x not detected. Attempting to bootstrap a dedicated install..." \ - && downloadTarball "${DW_MAVEN_DIST_URI}" "${DW_MAVEN_SERVICE_DIR}" + if [ ! -f "${DW_MAVEN_SERVICE_DIR}/${DW_MAVEN_DIST}" ]; then + info "Maven 3.x not detected. Attempting to bootstrap a dedicated install..." + DW_MAVEN_DIST="$( { downloadTarball "${DW_MAVEN_DIST_URI}" "${DW_MAVEN_SERVICE_DIR}" || downloadMavenTarball "datawave-parent" "gov.nsa.datawave.quickstart" "maven" "${DW_MAVEN_VERSION}" "${DW_MAVEN_SERVICE_DIR}"; } && echo "${tarball}" )" + fi export MAVEN_HOME="${DW_CLOUD_HOME}/${DW_MAVEN_SYMLINK}" export M2_HOME="${MAVEN_HOME}" @@ -105,8 +107,8 @@ function mavenPrintenv() { } function mavenDisplayBinaryInfo() { - echo "Source: ${DW_MAVEN_DIST_URI}" - local tarballName="$(basename "$DW_MAVEN_DIST_URI")" + echo "Source: ${DW_MAVEN_DIST}" + local tarballName="$(basename "$DW_MAVEN_DIST")" if [[ -f "${DW_MAVEN_SERVICE_DIR}/${tarballName}" ]]; then echo " Local: ${DW_MAVEN_SERVICE_DIR}/${tarballName}" else diff --git a/contrib/datawave-quickstart/bin/services/nifi/bootstrap.sh b/contrib/datawave-quickstart/bin/services/nifi/bootstrap.sh index cb7145f9707..dc369b1ddd8 100644 --- a/contrib/datawave-quickstart/bin/services/nifi/bootstrap.sh +++ b/contrib/datawave-quickstart/bin/services/nifi/bootstrap.sh @@ -3,7 +3,10 @@ DW_NIFI_SERVICE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" # You may override DW_NIFI_DIST_URI in your env ahead of time, and set as file:///path/to/file.tar.gz for local tarball, if needed -DW_NIFI_DIST_URI="${DW_NIFI_DIST_URI:-http://apache.claz.org/nifi/1.1.1/nifi-1.1.1-bin.tar.gz}" +# DW_NIFI_DIST_URI should use https wherever possible; plain http carries security risks. +DW_NIFI_DIST_URI="${DW_NIFI_DIST_URI:-https://dlcdn.apache.org/nifi/1.25.0/nifi-1.25.0-bin.zip}" +# The SHA-512 checksum for the archive. The value must be the hash only, without the file name, and cannot be left blank.
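Several bootstraps above now chain two fetch strategies: downloadTarball pulls the archive from its public URI, and downloadMavenTarball retries against a Maven repository when that fails (the Dockerfile later exports DW_MAVEN_REPOSITORY for exactly this). The helper's body is outside this diff; as a purely illustrative sketch, assuming the archives are published under the standard Maven repository layout, it might resemble:

    function downloadMavenTarball() {
        # Usage: downloadMavenTarball <project> <groupId> <artifactId> <version> <targetDir>
        # The first argument is unused in this sketch; it is kept only to mirror the call sites above
        local groupId="$2" artifactId="$3" version="$4" targetDir="$5"
        # Standard repo layout: dots in the groupId become path separators
        local groupPath="${groupId//.//}"
        # Callers read back the global 'tarball' variable, so set it rather than a local
        tarball="${artifactId}-${version}.tar.gz"
        local url="${DW_MAVEN_REPOSITORY:-https://maven.pkg.github.com/NationalSecurityAgency/datawave}/${groupPath}/${artifactId}/${version}/${tarball}"
        # GitHub Packages generally requires authenticated reads; add credentials via DW_WGET_OPTS if needed
        ( cd "${targetDir}" && wget ${DW_WGET_OPTS} -O "${tarball}" "${url}" )
    }
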
+DW_NIFI_DIST_SHA512_CHECKSUM="${DW_NIFI_DIST_SHA512_CHECKSUM:-3798e8923cfc9099b785ee2019e9a0fe8bcd36301946f19d21d414800ca6b7fedd1bbe28764fa446262a2f47b1c608651208c8d8790c73bea9ebd839f42dbab1}" DW_NIFI_DIST="$( downloadTarball "${DW_NIFI_DIST_URI}" "${DW_NIFI_SERVICE_DIR}" && echo "${tarball}" )" DW_NIFI_BASEDIR="nifi-install" DW_NIFI_SYMLINK="nifi" @@ -16,7 +19,7 @@ export PATH=${NIFI_HOME}/bin:$PATH DW_NIFI_CMD_START="( cd ${NIFI_HOME}/bin && ./nifi.sh start )" DW_NIFI_CMD_STOP="( cd ${NIFI_HOME}/bin && ./nifi.sh stop )" -DW_NIFI_CMD_FIND_ALL_PIDS="pgrep -d ' ' -f 'org.apache.nifi'" +DW_NIFI_CMD_FIND_ALL_PIDS="pgrep -u ${USER} -d ' ' -f 'org.apache.nifi'" function nifiIsRunning() { DW_NIFI_PID_LIST="$(eval "${DW_NIFI_CMD_FIND_ALL_PIDS}")" diff --git a/contrib/datawave-quickstart/bin/services/nifi/install.sh b/contrib/datawave-quickstart/bin/services/nifi/install.sh index 5e8edd4e6cf..92ccb6b0ea7 100755 --- a/contrib/datawave-quickstart/bin/services/nifi/install.sh +++ b/contrib/datawave-quickstart/bin/services/nifi/install.sh @@ -8,13 +8,16 @@ BIN_DIR="$( dirname "${SERVICES_DIR}" )" source "${BIN_DIR}/env.sh" source "${THIS_DIR}/bootstrap.sh" +# If NiFi is not yet installed, verify the downloaded archive's SHA-512 checksum before installing. +nifiIsInstalled || verifyChecksum "${DW_NIFI_DIST_URI}" "${DW_NIFI_SERVICE_DIR}" "${DW_NIFI_DIST_SHA512_CHECKSUM}" + nifiIsInstalled && info "NiFi is already installed" && exit 1 -[ ! -f "${DW_NIFI_SERVICE_DIR}/${DW_NIFI_DIST}" ] && fatal "NiFi tarball not found" +[ ! -f "${DW_NIFI_SERVICE_DIR}/${DW_NIFI_DIST}" ] && fatal "NiFi zip file not found" mkdir "${DW_NIFI_SERVICE_DIR}/${DW_NIFI_BASEDIR}" || fatal "Failed to create NiFi base directory" -tar xf "${DW_NIFI_SERVICE_DIR}/${DW_NIFI_DIST}" -C "${DW_NIFI_SERVICE_DIR}/${DW_NIFI_BASEDIR}" --strip-components=1 || fatal "Failed to extract NiFi tarball" +unzip "${DW_NIFI_SERVICE_DIR}/${DW_NIFI_DIST}" -d "${DW_NIFI_SERVICE_DIR}/${DW_NIFI_BASEDIR}" || fatal "Failed to extract NiFi zip file" $( cd "${DW_CLOUD_HOME}" && ln -s "bin/services/nifi/${DW_NIFI_BASEDIR}" "${DW_NIFI_SYMLINK}" ) || fatal "Failed to create NiFi symlink" nifiIsInstalled || fatal "NiFi was not installed" diff --git a/contrib/datawave-quickstart/docker/.dockerignore b/contrib/datawave-quickstart/docker/.dockerignore index 502d36b8395..c6bdfad356b 100644 --- a/contrib/datawave-quickstart/docker/.dockerignore +++ b/contrib/datawave-quickstart/docker/.dockerignore @@ -39,7 +39,7 @@ **/datawave-quickstart/bin/services/accumulo/accumulo-install **/datawave-quickstart/bin/services/datawave/datawave-ingest-* -**/datawave-quickstart/bin/services/datawave/datawave-web-* +**/datawave-quickstart/bin/services/datawave/datawave-webservice-* **/datawave-quickstart/bin/services/hadoop/hadoop-install **/datawave-quickstart/bin/services/nifi/nifi-install **/datawave-quickstart/bin/services/maven/maven-install diff --git a/contrib/datawave-quickstart/docker/.maven-dockerignore b/contrib/datawave-quickstart/docker/.maven-dockerignore new file mode 100644 index 00000000000..fb8d37ce358 --- /dev/null +++ b/contrib/datawave-quickstart/docker/.maven-dockerignore @@ -0,0 +1,59 @@ +# This .dockerignore gets symlinked temporarily from the datawave repo's +# root directory by docker-build.sh, since that directory is used as +# the build context for the image.
+ +# Therefore, any exclusions/exceptions added here should take that into account + +# Ignore any IDE and repo dirs/files + +**/.m2/** +**/.git/** +**/.idea/** +**/target/** +**/*.iml/** +**/.classpath/** +**/.project/** +**/.settings/** +**/logs/** + +# Ignore temporary .dockerignore symlink in the root directory... +.dockerignore +# ...but don't ignore the actual .dockerignore file +!**/datawave-quickstart/docker/.dockerignore + +# Below, we mostly just ignore environment-specific stuff that may have +# already been initialized under **/datawave-quickstart + +# Ignore any service install symlinks + +**/datawave-quickstart/accumulo/** +**/datawave-quickstart/datawave-ingest/** +**/datawave-quickstart/datawave-webservice/** +**/datawave-quickstart/hadoop/** +**/datawave-quickstart/nifi/** +**/datawave-quickstart/zookeeper/** +**/datawave-quickstart/java/** +**/datawave-quickstart/maven/** +**/datawave-quickstart/wildfly/** + +# Ignore any service install directories + +**/datawave-quickstart/bin/services/accumulo/accumulo-install/** +**/datawave-quickstart/bin/services/datawave/datawave-ingest-*/** +**/datawave-quickstart/bin/services/datawave/datawave-webservice-*/** +**/datawave-quickstart/bin/services/hadoop/hadoop-install/** +**/datawave-quickstart/bin/services/nifi/nifi-install/** +**/datawave-quickstart/bin/services/maven/maven-install/** +**/datawave-quickstart/bin/services/maven/apache-maven-install/** +**/datawave-quickstart/bin/services/java/jdk-8-linux-x64/** +**/datawave-quickstart/bin/services/datawave/wildfly-install/** +**/datawave-quickstart/bin/services/accumulo/zookeeper-install/** +**/datawave-quickstart/data/** +**/datawave-quickstart/build-properties/** + +# Make sure that we don't exclude the datawave tarballs, if they exist. +# The docker-build.sh script *should* ensure that these are built prior +# to the 'docker build ...' and that they are configured for the +# docker container's standard root directory, i.e., /opt/datawave + +!**/datawave-quickstart/bin/services/datawave/datawave-*.tar.gz diff --git a/contrib/datawave-quickstart/docker/Dockerfile b/contrib/datawave-quickstart/docker/Dockerfile index d936761d930..7396b0c7527 100644 --- a/contrib/datawave-quickstart/docker/Dockerfile +++ b/contrib/datawave-quickstart/docker/Dockerfile @@ -1,24 +1,42 @@ -FROM centos:centos7 +FROM rockylinux/rockylinux:8 ARG DATAWAVE_COMMIT_ID ARG DATAWAVE_BRANCH_NAME ARG DATAWAVE_JAVA_HOME +ARG DATAWAVE_BUILD_PROFILE +ARG DATAWAVE_SKIP_INGEST=false +ARG DATAWAVE_SKIP_TESTS=false +ARG DATAWAVE_MAVEN_REPO="https://maven.pkg.github.com/NationalSecurityAgency/datawave" + +ARG ACCUMULO_URL +ARG HADOOP_URL +ARG MAVEN_URL +ARG WILDFLY_URL +ARG ZOOKEEPER_URL USER root +ENV USER=root ENV HADOOP_IDENT_STRING=root ENV HDFS_NAMENODE_USER=root HDFS_DATANODE_USER=root ENV HDFS_SECONDARYNAMENODE_USER=root YARN_RESOURCEMANAGER_USER=root ENV YARN_NODEMANAGER_USER=root +# Bind services to all interfaces +ENV DW_BIND_HOST=0.0.0.0 + +# Bind Accumulo specifically to localhost +# This can be overridden at runtime to match the service name using DW_CONTAINER_HOST +ENV DW_ACCUMULO_BIND_HOST=localhost + # Build context should be the DataWave source root, minus .git and other dirs. See .dockerignore COPY . /opt/datawave # Install dependencies, configure password-less/zero-prompt SSH...
-RUN yum -y install gcc-c++ openssl openssh openssh-server openssh-clients openssl-libs which bc wget git java-11-openjdk-devel iproute && \ - yum clean all && \ +RUN dnf -y install gcc-c++ openssl python3 openssh openssh-server openssh-clients openssl-libs which bc wget git java-11-openjdk-devel iproute && \ + dnf clean all && \ ssh-keygen -q -N "" -t rsa -f ~/.ssh/id_rsa && \ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \ chmod 0600 ~/.ssh/authorized_keys && \ @@ -34,17 +52,21 @@ WORKDIR /opt/datawave # Create new Git repo and configure environment... -RUN rm -f .dockerignore && \ +RUN rm -f .dockerignore .maven-dockerignore && \ git init && \ git add . && \ git config user.email "root@localhost.local" && \ git config user.name "Root User" && \ git commit -m "Source Branch :: $DATAWAVE_BRANCH_NAME :: Source Commit :: $DATAWAVE_COMMIT_ID" && \ - echo "export DW_ACCUMULO_DIST_URI=file://$(find /opt/datawave/contrib/datawave-quickstart/bin/services/accumulo -name 'accumulo*.tar.gz')" >> ~/.bashrc && \ - echo "export DW_ZOOKEEPER_DIST_URI=file://$(find /opt/datawave/contrib/datawave-quickstart/bin/services/accumulo -name 'apache-zookeeper*.tar.gz')" >> ~/.bashrc && \ - echo "export DW_MAVEN_DIST_URI=file://$(find /opt/datawave/contrib/datawave-quickstart/bin/services/maven -name 'apache-maven*.tar.gz')" >> ~/.bashrc && \ - echo "export DW_HADOOP_DIST_URI=file://$(find /opt/datawave/contrib/datawave-quickstart/bin/services/hadoop -name 'hadoop*.tar.gz')" >> ~/.bashrc && \ - echo "export DW_WILDFLY_DIST_URI=file://$(find /opt/datawave/contrib/datawave-quickstart/bin/services/datawave -name 'wildfly*.tar.gz')" >> ~/.bashrc && \ + echo "export DW_ACCUMULO_DIST_URI=\"$ACCUMULO_URL\"" >> ~/.bashrc && \ + echo "export DW_HADOOP_DIST_URI=\"$HADOOP_URL\"" >> ~/.bashrc && \ + echo "export DW_MAVEN_DIST_URI=\"$MAVEN_URL\"" >> ~/.bashrc && \ + echo "export DW_WILDFLY_DIST_URI=\"$WILDFLY_URL\"" >> ~/.bashrc && \ + echo "export DW_ZOOKEEPER_DIST_URI=\"$ZOOKEEPER_URL\"" >> ~/.bashrc && \ + echo "export DW_DATAWAVE_BUILD_PROFILE=\"$DATAWAVE_BUILD_PROFILE\"" >> ~/.bashrc && \ + echo "export DW_DATAWAVE_INGEST_TEST_SKIP=\"$DATAWAVE_SKIP_INGEST\"" >> ~/.bashrc && \ + echo "export DW_MAVEN_REPOSITORY=\"$DATAWAVE_MAVEN_REPO\"" >> ~/.bashrc && \ + echo "export DW_WGET_OPTS=\"-q --no-check-certificate\"" >> ~/.bashrc && \ echo "export JAVA_HOME=\"$DATAWAVE_JAVA_HOME\"" >> ~/.bashrc && \ echo "export PATH=\$JAVA_HOME/bin:\$PATH" >> ~/.bashrc && \ echo "source /opt/datawave/contrib/datawave-quickstart/bin/env.sh" >> ~/.bashrc @@ -56,8 +78,7 @@ RUN rm -f .dockerignore && \ # stopped gracefully, and any cruft is purged. 
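Together, the DW_BIND_HOST=0.0.0.0 baked into the image and the runtime DW_CONTAINER_HOST override let one image run either stand-alone or on a shared Docker network, where peers reach Accumulo and Hadoop via the container's DNS name. A hypothetical invocation (the image tag, network, and container name are placeholders; the published ports match the EXPOSE list below, and the trailing flags are the ones datawave-bootstrap.sh parses):

    # Stand-alone: all services talk over loopback inside the container
    docker run -d -p 8443:8443 -p 9870:9870 -p 9995:9995 datawave/quickstart:latest

    # Composed: datawave-bootstrap.sh rewrites the Accumulo bind hosts and
    # generates Hadoop client configs to match the container's service name
    docker run -d --network dw-net --name dw-quickstart \
        -e DW_CONTAINER_HOST=dw-quickstart \
        datawave/quickstart:latest --accumulo --web
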
RUN /bin/bash -c "/usr/bin/nohup /usr/sbin/sshd -D &" && \ - /bin/bash -c "source ~/.bashrc && allInstall && datawaveStart && datawaveWebTest --blacklist-files QueryMetrics && allStop" && \ - echo "0.0.0.0" > contrib/datawave-quickstart/accumulo/conf/monitor && \ + /bin/bash -c "source ~/.bashrc && allInstall && [ $DATAWAVE_SKIP_TESTS == true ] || (datawaveStart && datawaveWebTest --blacklist-files QueryMetrics && allStop)" && \ rm -rf contrib/datawave-quickstart/datawave-ingest/logs/* && \ rm -rf contrib/datawave-quickstart/hadoop/logs/* && \ rm -rf contrib/datawave-quickstart/accumulo/logs/* && \ @@ -77,7 +98,7 @@ VOLUME ["/opt/datawave/contrib/datawave-quickstart/accumulo/logs"] VOLUME ["/opt/datawave/contrib/datawave-quickstart/wildfly/standalone/log"] VOLUME ["/opt/datawave/contrib/datawave-quickstart/datawave-ingest/logs"] -EXPOSE 8443 9995 50070 8088 9000 2181 +EXPOSE 8443 9995 9870 8088 9000 2181 WORKDIR /opt/datawave/contrib/datawave-quickstart diff --git a/contrib/datawave-quickstart/docker/datawave-bootstrap.sh b/contrib/datawave-quickstart/docker/datawave-bootstrap.sh index 73a9f7710b5..5cb868b5aff 100755 --- a/contrib/datawave-quickstart/docker/datawave-bootstrap.sh +++ b/contrib/datawave-quickstart/docker/datawave-bootstrap.sh @@ -2,9 +2,22 @@ source ~/.bashrc +# If DW_CONTAINER_HOST is defined update Accumulo and Hadoop bind hosts +if [ ! -z "${DW_CONTAINER_HOST}" ] && [ "${DW_CONTAINER_HOST}" != "localhost" ] ; then + # Update Accumulo bind hosts + sed -i'' -e "s/localhost/${DW_CONTAINER_HOST}/g" ${ACCUMULO_HOME}/conf/cluster.yaml + + # Create hadoop client configs + mkdir -p ${HADOOP_HOME}/client/conf + cp -r ${HADOOP_CONF_DIR}/*-site.xml ${HADOOP_HOME}/client/conf + sed -i'' -e "s/${DW_BIND_HOST}/${DW_CONTAINER_HOST}/g" ${HADOOP_HOME}/client/conf/*-site.xml +fi + START_AS_DAEMON=true +START_ACCUMULO=false START_WEB=false +START_TEST=false START_INGEST=false for arg in "$@" @@ -13,6 +26,9 @@ do --bash) START_AS_DAEMON=false ;; + --accumulo) + START_ACCUMULO=true + ;; --web) START_WEB=true ;; @@ -34,6 +50,8 @@ done [ "${START_INGEST}" == true ] && datawaveIngestStart +[ "${START_ACCUMULO}" == true ] && accumuloStart + [ "${START_WEB}" == true ] && datawaveWebStart [ "${START_WEB_DEBUG}" == true ] && datawaveWebStart --debug diff --git a/contrib/datawave-quickstart/docker/docker-build.sh b/contrib/datawave-quickstart/docker/docker-build.sh index 87ed6357db1..21066907340 100755 --- a/contrib/datawave-quickstart/docker/docker-build.sh +++ b/contrib/datawave-quickstart/docker/docker-build.sh @@ -187,10 +187,11 @@ function buildDockerImage() { info "Building Docker image: ${IMAGE_NAME}" - docker build ${docker_opts} -f ${THIS_DIR}/Dockerfile -t ${IMAGE_NAME} \ + docker build -m 8g ${docker_opts} -f ${THIS_DIR}/Dockerfile -t ${IMAGE_NAME} \ --build-arg DATAWAVE_COMMIT_ID=$( git rev-parse --verify HEAD ) \ --build-arg DATAWAVE_BRANCH_NAME=$( git rev-parse --abbrev-ref HEAD ) \ --build-arg DATAWAVE_JAVA_HOME="${DW_JAVA_HOME_OVERRIDE}" \ + --build-arg DATAWAVE_BUILD_PROFILE="${DW_DATAWAVE_BUILD_PROFILE}" \ ${DATAWAVE_SOURCE_DIR} || fatal "Docker image creation for DataWave Quickstart failed" } diff --git a/contrib/datawave-quickstart/docker/docker-run.sh b/contrib/datawave-quickstart/docker/docker-run.sh index 7bfbf3c2a9e..2fef63f0f9b 100755 --- a/contrib/datawave-quickstart/docker/docker-run.sh +++ b/contrib/datawave-quickstart/docker/docker-run.sh @@ -58,7 +58,7 @@ VOLUMES="-v ${DATA}:${V_DATA} -v ${M2REPO}:${V_M2REPO} -v ${HLOGS}:${V_HLOGS} -v # Set port mapping -PORTS="-p 
8443:8443 -p 50070:50070 -p 9995:9995" +PORTS="-p 8443:8443 -p 9870:9870 -p 9995:9995" # Interpret any remaining args as the CMD to pass in diff --git a/contrib/datawave-quickstart/docker/pom.xml b/contrib/datawave-quickstart/docker/pom.xml new file mode 100644 index 00000000000..de19a23c2d4 --- /dev/null +++ b/contrib/datawave-quickstart/docker/pom.xml @@ -0,0 +1,503 @@ + + + 4.0.0 + + gov.nsa.datawave + datawave-parent + 7.13.0-SNAPSHOT + ../../../pom.xml + + quickstart + pom + ${project.artifactId} + + accumulo-${version.accumulo.tar.gz}-bin.tar.gz + hadoop-${version.hadoop.tar.gz}.tar.gz + apache-maven-${version.maven.tar.gz}-bin.tar.gz + wildfly-${version.wildfly.tar.gz}.Final.tar.gz + apache-zookeeper-${version.zookeeper.tar.gz}-bin.tar.gz + false + https://dlcdn.apache.org/accumulo/${version.accumulo.tar.gz}/${dist.accumulo} + https://dlcdn.apache.org/hadoop/common/hadoop-${version.hadoop.tar.gz}/${dist.hadoop} + https://dlcdn.apache.org/maven/maven-3/${version.maven.tar.gz}/binaries/${dist.maven} + https://download.jboss.org/wildfly/${version.wildfly.tar.gz}.Final/${dist.wildfly} + https://dlcdn.apache.org/zookeeper/zookeeper-${version.zookeeper.tar.gz}/${dist.zookeeper} + + 2.1.3 + 3.3.6 + 3.8.8 + 17.0.1 + 3.7.2 + + + + gov.nsa.datawave + assemble-datawave + ${project.version} + pom + + + gov.nsa.datawave + assemble-datawave + ${project.version} + dist + tar.gz + + + gov.nsa.datawave.webservices + datawave-ws-deploy-application + ${project.version} + pom + + + gov.nsa.datawave.webservices + datawave-ws-deploy-application + ${project.version} + ${build.env} + tar.gz + + + + + + maven-clean-plugin + 3.2.0 + + + + ${project.basedir}/../../../ + + .maven-dockerignore + + + + ../bin/services/accumulo + + **/apache-zookeeper-*-bin.tar.gz + **/accumulo-*-bin.tar.gz + **/zookeeper-*.tar.gz + **/accumulo-*.tar.gz + + + + ../bin/services/datawave + + **/datawave-*-dist.tar.gz + **/datawave-ws-deploy-application-*.tar.gz + **/wildfly-*.tar.gz + + + + ../bin/services/hadoop + + **/hadoop-*.tar.gz + + + + ../bin/services/maven + + **/apache-maven-*-bin.tar.gz + **/maven-*.tar.gz + + + + + + + maven-dependency-plugin + + + copy-datawave-warehouse + + copy + + process-resources + + gov.nsa.datawave:assemble-datawave:${project.version}:tar.gz:dist + ${project.basedir}/../bin/services/datawave + + + + copy-datawave-webservice + + copy + + process-resources + + gov.nsa.datawave.webservices:datawave-ws-deploy-application:${project.version}:tar.gz:${build.env} + ${project.basedir}/../bin/services/datawave + + + + + + + maven-antrun-plugin + + + rename-datawave-warehouse + + run + + process-resources + + + + + + + + + + + + + + docker + + + quickstart-docker + + + + + + io.fabric8 + docker-maven-plugin + 0.40.1 + + true + + + ${docker.image.prefix}datawave/quickstart-${build.env} + + + 8589934592 + + none + ${project.basedir}/../../../ + ${project.basedir}/Dockerfile + + latest + ${project.version} + + + ${git.commit.id} + ${git.branch} + ${build.env} + ${skipIngest} + true + ${url.accumulo} + ${url.hadoop} + ${url.maven} + ${url.wildfly} + ${url.zookeeper} + + + + + + + + build + + build + + install + + + push + + push + + deploy + + + + + maven-resources-plugin + + + copy-docker-ignore + + copy-resources + + process-resources + + ${project.basedir}/../../../ + + + ${project.basedir} + + .maven-dockerignore + + + + + + + + + pl.project13.maven + git-commit-id-plugin + + false + + + + + + + quickstart-default + + + !quickstart-maven + + + + + + + com.googlecode.maven-download-plugin + 
download-maven-plugin + 1.6.8 + + + download-zookeeper + + wget + + + ${url.zookeeper} + ${project.build.outputDirectory} + + + + download-accumulo + + wget + + + ${url.accumulo} + ${project.build.outputDirectory} + + + + download-hadoop + + wget + + + ${url.hadoop} + ${project.build.outputDirectory} + + + + download-maven + + wget + + + ${url.maven} + ${project.build.outputDirectory} + + + + download-wildfly + + wget + + + ${url.wildfly} + ${project.build.outputDirectory} + + + + + + maven-resources-plugin + + + copy-accumulo-tarballs + + copy-resources + + process-resources + + ${project.basedir}/../bin/services/accumulo + + + ${project.build.outputDirectory} + + ${dist.zookeeper} + ${dist.accumulo} + + + + + + + copy-hadoop-tarballs + + copy-resources + + process-resources + + ${project.basedir}/../bin/services/hadoop + + + ${project.build.outputDirectory} + + ${dist.hadoop} + + + + + + + copy-maven-tarballs + + copy-resources + + process-resources + + ${project.basedir}/../bin/services/maven + + + ${project.build.outputDirectory} + + ${dist.maven} + + + + + + + copy-wildfly-tarball + + copy-resources + + process-resources + + ${project.basedir}/../bin/services/datawave + + + ${project.build.outputDirectory} + + ${dist.wildfly} + + + + + + + + + + + + quickstart-maven + + + quickstart-maven + + + + + gov.nsa.datawave.quickstart + accumulo + ${version.accumulo.tar.gz} + tar.gz + + + gov.nsa.datawave.quickstart + hadoop + ${version.hadoop.tar.gz} + tar.gz + + + gov.nsa.datawave.quickstart + maven + ${version.maven.tar.gz} + tar.gz + + + gov.nsa.datawave.quickstart + wildfly + ${version.wildfly.tar.gz} + tar.gz + + + gov.nsa.datawave.quickstart + zookeeper + ${version.zookeeper.tar.gz} + tar.gz + + + + + + maven-dependency-plugin + + + copy-accumulo-tarball + + copy + + process-resources + + gov.nsa.datawave.quickstart:accumulo:${version.accumulo.tar.gz}:tar.gz + ${project.basedir}/../bin/services/accumulo + + + + copy-hadoop-tarball + + copy + + process-resources + + gov.nsa.datawave.quickstart:hadoop:${version.hadoop.tar.gz}:tar.gz + ${project.basedir}/../bin/services/hadoop + + + + copy-maven-tarball + + copy + + process-resources + + gov.nsa.datawave.quickstart:maven:${version.maven.tar.gz}:tar.gz + ${project.basedir}/../bin/services/maven + + + + copy-wildfly-tarball + + copy + + process-resources + + gov.nsa.datawave.quickstart:wildfly:${version.wildfly.tar.gz}:tar.gz + ${project.basedir}/../bin/services/datawave + + + + copy-zookeeper-tarball + + copy + + process-resources + + gov.nsa.datawave.quickstart:zookeeper:${version.zookeeper.tar.gz}:tar.gz + ${project.basedir}/../bin/services/accumulo + + + + + + + + + diff --git a/contrib/datawave-utils b/contrib/datawave-utils new file mode 160000 index 00000000000..4348fc36a35 --- /dev/null +++ b/contrib/datawave-utils @@ -0,0 +1 @@ +Subproject commit 4348fc36a3519ca9e5d1b96ac47c3f0b64abe34b diff --git a/core/base-rest-responses b/core/base-rest-responses index f09536681ee..cb0f550615e 160000 --- a/core/base-rest-responses +++ b/core/base-rest-responses @@ -1 +1 @@ -Subproject commit f09536681eeefe27b0f3e5840966656b04a4e476 +Subproject commit cb0f550615ecd1d9fe1db7b17bb671db861301ff diff --git a/core/cached-results/pom.xml b/core/cached-results/pom.xml new file mode 100644 index 00000000000..9c5dbd86f17 --- /dev/null +++ b/core/cached-results/pom.xml @@ -0,0 +1,48 @@ + + + 4.0.0 + + gov.nsa.datawave.core + datawave-core-parent + 7.13.0-SNAPSHOT + + datawave-core-cached-results + ${project.artifactId} + + + + + 
gov.nsa.datawave.microservice + base-rest-responses + + + gov.nsa.datawave.microservice + query-api + + + jakarta.validation + jakarta.validation-api + + + + + gov.nsa.datawave.microservice + type-utils + + + gov.nsa.datawave.webservices + datawave-ws-client + ${project.version} + + + org.slf4j + * + + + log4j + log4j + + + + + diff --git a/web-services/query/src/main/java/datawave/webservice/query/cachedresults/CacheableQueryRowImpl.java b/core/cached-results/src/main/java/datawave/core/query/cachedresults/CacheableQueryRowImpl.java similarity index 99% rename from web-services/query/src/main/java/datawave/webservice/query/cachedresults/CacheableQueryRowImpl.java rename to core/cached-results/src/main/java/datawave/core/query/cachedresults/CacheableQueryRowImpl.java index 7fa25b368f3..ab4673fb274 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/cachedresults/CacheableQueryRowImpl.java +++ b/core/cached-results/src/main/java/datawave/core/query/cachedresults/CacheableQueryRowImpl.java @@ -1,4 +1,4 @@ -package datawave.webservice.query.cachedresults; +package datawave.core.query.cachedresults; import java.util.ArrayList; import java.util.Collection; @@ -20,6 +20,7 @@ import datawave.data.type.Type; import datawave.marking.MarkingFunctions; import datawave.marking.MarkingFunctionsFactory; +import datawave.webservice.query.cachedresults.CacheableQueryRow; import datawave.webservice.query.data.ObjectSizeOf; import datawave.webservice.query.util.TypedValue; @@ -44,8 +45,6 @@ public class CacheableQueryRowImpl extends CacheableQueryRow implements ObjectSi private String queryOrigin = null; private String resultOrigin = null; - private static final MarkingFunctions markingFunctions = MarkingFunctionsFactory.createMarkingFunctions(); - public void addColumn(String columnName, String columnValueString, Map markings, String columnVisibility, Long timestamp) { addColumn(columnName, new TypedValue(columnValueString), markings, columnVisibility, timestamp); } diff --git a/web-services/query/src/main/java/datawave/webservice/query/cachedresults/CacheableQueryRowReader.java b/core/cached-results/src/main/java/datawave/core/query/cachedresults/CacheableQueryRowReader.java similarity index 95% rename from web-services/query/src/main/java/datawave/webservice/query/cachedresults/CacheableQueryRowReader.java rename to core/cached-results/src/main/java/datawave/core/query/cachedresults/CacheableQueryRowReader.java index 7b13334b102..ddcbf921264 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/cachedresults/CacheableQueryRowReader.java +++ b/core/cached-results/src/main/java/datawave/core/query/cachedresults/CacheableQueryRowReader.java @@ -1,5 +1,6 @@ -package datawave.webservice.query.cachedresults; +package datawave.core.query.cachedresults; +import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.util.HashMap; @@ -8,20 +9,21 @@ import java.util.Set; import java.util.TreeSet; -import javax.sql.rowset.CachedRowSet; - import org.apache.log4j.Logger; import datawave.marking.MarkingFunctions; +import datawave.webservice.query.cachedresults.CacheableQueryRow; import datawave.webservice.query.result.event.ResponseObjectFactory; public class CacheableQueryRowReader { private static Logger log = Logger.getLogger(CacheableQueryRowReader.class); - public static CacheableQueryRow createRow(CachedRowSet cachedRowSet, Set fixedFieldsInEvent, ResponseObjectFactory responseObjectFactory) { + public static CacheableQueryRow 
createRow(ResultSet cachedRowSet, Set fixedFieldsInEvent, ResponseObjectFactory responseObjectFactory, + MarkingFunctions markingFunctions) { CacheableQueryRow cqfc = responseObjectFactory.getCacheableQueryRow(); + cqfc.setMarkingFunctions(markingFunctions); ResultSetMetaData metadata; try { diff --git a/web-services/cached-results/src/main/java/datawave/webservice/results/cached/CachedResultsParameters.java b/core/cached-results/src/main/java/datawave/core/query/cachedresults/CachedResultsQueryParameters.java similarity index 96% rename from web-services/cached-results/src/main/java/datawave/webservice/results/cached/CachedResultsParameters.java rename to core/cached-results/src/main/java/datawave/core/query/cachedresults/CachedResultsQueryParameters.java index 3c5cc6cef89..59d12f3f225 100644 --- a/web-services/cached-results/src/main/java/datawave/webservice/results/cached/CachedResultsParameters.java +++ b/core/cached-results/src/main/java/datawave/core/query/cachedresults/CachedResultsQueryParameters.java @@ -1,4 +1,4 @@ -package datawave.webservice.results.cached; +package datawave.core.query.cachedresults; import java.util.Arrays; import java.util.List; @@ -7,11 +7,10 @@ import com.google.common.base.Preconditions; +import datawave.microservice.query.QueryParameters; import datawave.validation.ParameterValidator; -import datawave.webservice.query.QueryParameters; - -public class CachedResultsParameters implements ParameterValidator { +public class CachedResultsQueryParameters implements ParameterValidator { public static final String QUERY_ID = "queryId"; public static final String ALIAS = "alias"; public static final String VIEW = "view"; diff --git a/core/common-util/pom.xml b/core/common-util/pom.xml new file mode 100644 index 00000000000..f7ee5e356f3 --- /dev/null +++ b/core/common-util/pom.xml @@ -0,0 +1,121 @@ + + + 4.0.0 + + gov.nsa.datawave.core + datawave-core-parent + 7.13.0-SNAPSHOT + + datawave-core-common-util + ${project.artifactId} + + + + + + + gov.nsa.datawave.core + datawave-core-connection-pool + ${project.version} + + + gov.nsa.datawave.microservice + accumulo-utils + + + gov.nsa.datawave.microservice + authorization-api + + + org.apache.accumulo + accumulo-core + provided + + + javax.servlet + servlet-api + + + + + + + + true + src/main/resources + + source-templates/** + + + + test-classes + true + src/test/resources + + + + + org.apache.maven.plugins + maven-jar-plugin + + + META-INF/beans.xml + META-INF/jboss-ejb3.xml + + + + + jboss + + jar + + + jboss + + + + + + + maven-resources-plugin + + + copy-templated-sources + + copy-resources + + validate + + ${project.build.directory}/generated-sources/templated-sources + + + src/main/resources/source-templates + true + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + 3.3.0 + + + add-source + + add-source + + generate-sources + + + target/generated-sources/templated-sources + + + + + + + + diff --git a/web-services/common-util/src/main/java/datawave/webservice/common/logging/ThreadConfigurableLogger.java b/core/common-util/src/main/java/datawave/core/common/logging/ThreadConfigurableLogger.java similarity index 99% rename from web-services/common-util/src/main/java/datawave/webservice/common/logging/ThreadConfigurableLogger.java rename to core/common-util/src/main/java/datawave/core/common/logging/ThreadConfigurableLogger.java index 0e65d99c07f..64a3fbe64e1 100644 --- a/web-services/common-util/src/main/java/datawave/webservice/common/logging/ThreadConfigurableLogger.java +++ 
b/core/common-util/src/main/java/datawave/core/common/logging/ThreadConfigurableLogger.java @@ -1,4 +1,4 @@ -package datawave.webservice.common.logging; +package datawave.core.common.logging; import java.util.HashMap; import java.util.Map; diff --git a/web-services/common-util/src/main/java/datawave/webservice/util/EnvProvider.java b/core/common-util/src/main/java/datawave/core/common/util/EnvProvider.java similarity index 91% rename from web-services/common-util/src/main/java/datawave/webservice/util/EnvProvider.java rename to core/common-util/src/main/java/datawave/core/common/util/EnvProvider.java index 99d2e40b6b8..2b08e99c545 100644 --- a/web-services/common-util/src/main/java/datawave/webservice/util/EnvProvider.java +++ b/core/common-util/src/main/java/datawave/core/common/util/EnvProvider.java @@ -1,9 +1,9 @@ -package datawave.webservice.util; +package datawave.core.common.util; import org.apache.commons.lang3.StringUtils; import org.apache.log4j.Logger; -import datawave.webservice.common.logging.ThreadConfigurableLogger; +import datawave.core.common.logging.ThreadConfigurableLogger; public class EnvProvider { diff --git a/core/common-util/src/main/java/datawave/core/query/cache/ResultsPage.java b/core/common-util/src/main/java/datawave/core/query/cache/ResultsPage.java new file mode 100644 index 00000000000..3107497359f --- /dev/null +++ b/core/common-util/src/main/java/datawave/core/query/cache/ResultsPage.java @@ -0,0 +1,45 @@ +package datawave.core.query.cache; + +import java.util.ArrayList; +import java.util.List; + +/** + * + */ +public class ResultsPage { + public enum Status { + NONE, PARTIAL, COMPLETE + } + + private List results; + private Status status; + + public ResultsPage() { + this(new ArrayList<>()); + } + + public ResultsPage(List results) { + this(results, Status.COMPLETE); + } + + public ResultsPage(List results, Status status) { + this.results = results; + this.status = status; + } + + public Status getStatus() { + return status; + } + + public void setStatus(Status status) { + this.status = status; + } + + public List getResults() { + return results; + } + + public void setResults(List results) { + this.results = results; + } +} diff --git a/web-services/common-util/src/main/java/datawave/security/authorization/DatawavePrincipal.java b/core/common-util/src/main/java/datawave/security/authorization/DatawavePrincipal.java similarity index 97% rename from web-services/common-util/src/main/java/datawave/security/authorization/DatawavePrincipal.java rename to core/common-util/src/main/java/datawave/security/authorization/DatawavePrincipal.java index 8e798e79be6..1c43eb931da 100644 --- a/web-services/common-util/src/main/java/datawave/security/authorization/DatawavePrincipal.java +++ b/core/common-util/src/main/java/datawave/security/authorization/DatawavePrincipal.java @@ -17,7 +17,7 @@ import javax.xml.bind.annotation.XmlType; import datawave.security.authorization.DatawaveUser.UserType; -import datawave.security.util.DnUtils; +import datawave.security.util.ProxiedEntityUtils; /** * A {@link Principal} that represents a set of proxied {@link DatawaveUser}s. 
diff --git a/web-services/common-util/src/main/java/datawave/security/authorization/DatawavePrincipal.java b/core/common-util/src/main/java/datawave/security/authorization/DatawavePrincipal.java
similarity index 97%
rename from web-services/common-util/src/main/java/datawave/security/authorization/DatawavePrincipal.java
rename to core/common-util/src/main/java/datawave/security/authorization/DatawavePrincipal.java
index 8e798e79be6..1c43eb931da 100644
--- a/web-services/common-util/src/main/java/datawave/security/authorization/DatawavePrincipal.java
+++ b/core/common-util/src/main/java/datawave/security/authorization/DatawavePrincipal.java
@@ -17,7 +17,7 @@
 import javax.xml.bind.annotation.XmlType;
 
 import datawave.security.authorization.DatawaveUser.UserType;
-import datawave.security.util.DnUtils;
+import datawave.security.util.ProxiedEntityUtils;
 
 /**
  * A {@link Principal} that represents a set of proxied {@link DatawaveUser}s. For example, this proxied user could represent a GUI server acting on behalf of a
@@ -108,14 +108,17 @@ static protected List orderProxiedUsers(List datawav
         return users;
     }
 
+    @Override
     public Collection getProxiedUsers() {
         return Collections.unmodifiableCollection(this.proxiedUsers);
     }
 
+    @Override
     public DatawaveUser getPrimaryUser() {
         return primaryUser;
     }
 
+    @Override
     public Collection> getAuthorizations() {
         // @formatter:off
         return Collections.unmodifiableCollection(
@@ -125,6 +128,7 @@ public Collection> getAuthorizations() {
         // @formatter:on
     }
 
+    @Override
     public String[] getDNs() {
         // @formatter:off
         return DatawavePrincipal.orderProxiedUsers(this.proxiedUsers).stream()
@@ -144,14 +148,16 @@ public String getName() {
         return this.username;
     }
 
+    @Override
     public String getShortName() {
-        return DnUtils.getShortName(getPrimaryUser().getName());
+        return ProxiedEntityUtils.getShortName(getPrimaryUser().getName());
     }
 
     public SubjectIssuerDNPair getUserDN() {
         return getPrimaryUser().getDn();
     }
 
+    @Override
     public List getProxyServers() {
         // @formatter:off
diff --git a/core/common-util/src/main/resources/META-INF/beans.xml b/core/common-util/src/main/resources/META-INF/beans.xml
new file mode 100644
index 00000000000..4ca201f8ff2
--- /dev/null
+++ b/core/common-util/src/main/resources/META-INF/beans.xml
@@ -0,0 +1,9 @@
+
+
+
+
\ No newline at end of file
diff --git a/core/common-util/src/main/resources/META-INF/jboss-ejb3.xml b/core/common-util/src/main/resources/META-INF/jboss-ejb3.xml
new file mode 100644
index 00000000000..8cf49db8c87
--- /dev/null
+++ b/core/common-util/src/main/resources/META-INF/jboss-ejb3.xml
@@ -0,0 +1,16 @@
+
+
+
+
+
+
+ *
+ datawave
+
+
+
+
\ No newline at end of file
diff --git a/web-services/common-util/src/main/resources/source-templates/datawave/security/authorization/package-info.java b/core/common-util/src/main/resources/source-templates/datawave/security/authorization/package-info.java
similarity index 99%
rename from web-services/common-util/src/main/resources/source-templates/datawave/security/authorization/package-info.java
rename to core/common-util/src/main/resources/source-templates/datawave/security/authorization/package-info.java
index 7eb19db2923..6cc9445d7c2 100644
--- a/web-services/common-util/src/main/resources/source-templates/datawave/security/authorization/package-info.java
+++ b/core/common-util/src/main/resources/source-templates/datawave/security/authorization/package-info.java
@@ -4,4 +4,3 @@
 import javax.xml.bind.annotation.XmlNs;
 import javax.xml.bind.annotation.XmlNsForm;
 import javax.xml.bind.annotation.XmlSchema;
-
diff --git a/web-services/common-util/src/test/java/datawave/webservice/util/EnvProviderTest.java b/core/common-util/src/test/java/datawave/core/common/util/EnvProviderTest.java
similarity index 95%
rename from web-services/common-util/src/test/java/datawave/webservice/util/EnvProviderTest.java
rename to core/common-util/src/test/java/datawave/core/common/util/EnvProviderTest.java
index bab9edeeda9..b792f0721e2 100644
--- a/web-services/common-util/src/test/java/datawave/webservice/util/EnvProviderTest.java
+++ b/core/common-util/src/test/java/datawave/core/common/util/EnvProviderTest.java
@@ -1,4 +1,4 @@
-package datawave.webservice.util;
+package datawave.core.common.util;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
diff --git a/core/common/pom.xml b/core/common/pom.xml
new file mode 100644
index 00000000000..77522fd9a10
--- /dev/null
+++ b/core/common/pom.xml
@@ -0,0 +1,79 @@
+
+
+
4.0.0 + + gov.nsa.datawave.core + datawave-core-parent + 7.13.0-SNAPSHOT + + datawave-core-common + ${project.artifactId} + + + + + org.apache.accumulo + accumulo-core + ${version.accumulo} + + + org.slf4j + * + + + commons-logging + commons-logging + + + junit + junit + + + log4j + log4j + + + javax.servlet + servlet-api + + + + + org.springframework + spring-core + ${version.spring} + + + + + + gov.nsa.datawave.webservices + datawave-ws-client + ${project.version} + + + org.apache.accumulo + accumulo-core + + + org.springframework + spring-core + provided + + + junit + junit + test + + + org.junit.jupiter + junit-jupiter-engine + test + + + org.junit.vintage + junit-vintage-engine + test + + + diff --git a/web-services/common/src/main/java/datawave/webservice/common/audit/PrivateAuditConstants.java b/core/common/src/main/java/datawave/core/common/audit/PrivateAuditConstants.java similarity index 80% rename from web-services/common/src/main/java/datawave/webservice/common/audit/PrivateAuditConstants.java rename to core/common/src/main/java/datawave/core/common/audit/PrivateAuditConstants.java index c11f969b166..1cffc3e0177 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/audit/PrivateAuditConstants.java +++ b/core/common/src/main/java/datawave/core/common/audit/PrivateAuditConstants.java @@ -1,6 +1,7 @@ -package datawave.webservice.common.audit; +package datawave.core.common.audit; -import javax.ws.rs.core.MultivaluedMap; +import java.util.List; +import java.util.Map; /** * Constants marking private parameters that are computed internally and then added at runtime to the incoming query parameters for the purposes of passing them @@ -15,7 +16,7 @@ public class PrivateAuditConstants { public static final String USER_DN = PREFIX + "userDn"; public static final String SELECTORS = PREFIX + "selectors"; - public static void stripPrivateParameters(MultivaluedMap queryParameters) { + public static void stripPrivateParameters(Map> queryParameters) { queryParameters.entrySet().removeIf(entry -> entry.getKey().startsWith(PREFIX)); } } diff --git a/core/common/src/main/java/datawave/core/common/edgedictionary/EdgeDictionaryProvider.java b/core/common/src/main/java/datawave/core/common/edgedictionary/EdgeDictionaryProvider.java new file mode 100644 index 00000000000..b473fbb8051 --- /dev/null +++ b/core/common/src/main/java/datawave/core/common/edgedictionary/EdgeDictionaryProvider.java @@ -0,0 +1,9 @@ +package datawave.core.common.edgedictionary; + +import datawave.microservice.query.Query; +import datawave.webservice.dictionary.edge.EdgeDictionaryBase; +import datawave.webservice.dictionary.edge.MetadataBase; + +public interface EdgeDictionaryProvider { + EdgeDictionaryBase> getEdgeDictionary(Query settings, String metadataTableName); +} diff --git a/web-services/common/src/main/java/datawave/webservice/common/extjs/ExtJsResponse.java b/core/common/src/main/java/datawave/core/common/extjs/ExtJsResponse.java similarity index 95% rename from web-services/common/src/main/java/datawave/webservice/common/extjs/ExtJsResponse.java rename to core/common/src/main/java/datawave/core/common/extjs/ExtJsResponse.java index e9760dcc039..d92feee8cbb 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/extjs/ExtJsResponse.java +++ b/core/common/src/main/java/datawave/core/common/extjs/ExtJsResponse.java @@ -1,4 +1,4 @@ -package datawave.webservice.common.extjs; +package datawave.core.common.extjs; import java.util.List; diff --git a/core/connection-pool/pom.xml 
b/core/connection-pool/pom.xml new file mode 100644 index 00000000000..9c3ed01fa4b --- /dev/null +++ b/core/connection-pool/pom.xml @@ -0,0 +1,161 @@ + + + 4.0.0 + + gov.nsa.datawave.core + datawave-core-parent + 7.13.0-SNAPSHOT + + datawave-core-connection-pool + ${project.artifactId} + + 2.1.8 + + + + + dnsjava + dnsjava + ${version.dnsjava} + + + + + + com.fasterxml.jackson.module + jackson-module-jaxb-annotations + + + commons-configuration + commons-configuration + + + commons-lang + commons-lang + + + dnsjava + dnsjava + + + gov.nsa.datawave + datawave-in-memory-accumulo + + + gov.nsa.datawave.microservice + accumulo-api + + + junit + junit + + + org.apache.curator + curator-recipes + + + org.apache.curator + curator-test + + + org.easymock + easymock + + + com.fasterxml.woodstox + woodstox-core + provided + + + org.apache.commons + commons-configuration2 + provided + + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + provided + + + org.powermock + powermock-reflect + test + + + + ${project.artifactId} + + + true + src/main/resources + + source-templates/** + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + META-INF/beans.xml + META-INF/jboss-ejb3.xml + + + + + jboss + + jar + + + jboss + + + + + + + maven-resources-plugin + + + copy-templated-sources + + copy-resources + + validate + + ${project.build.directory}/generated-sources/templated-sources + + + src/main/resources/source-templates + true + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + 3.3.0 + + + add-source + + add-source + + generate-sources + + + target/generated-sources/templated-sources + + + + + + + + diff --git a/core/connection-pool/src/main/java/datawave/core/common/cache/AccumuloTableCache.java b/core/connection-pool/src/main/java/datawave/core/common/cache/AccumuloTableCache.java new file mode 100644 index 00000000000..674dfe3183c --- /dev/null +++ b/core/connection-pool/src/main/java/datawave/core/common/cache/AccumuloTableCache.java @@ -0,0 +1,28 @@ +package datawave.core.common.cache; + +import java.util.List; + +import org.apache.accumulo.core.client.security.tokens.PasswordToken; + +import datawave.accumulo.inmemory.InMemoryInstance; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.common.result.TableCacheDescription; + +/** + * Object that caches data from Accumulo tables. 
+ */
+public interface AccumuloTableCache extends AutoCloseable {
+
+    String MOCK_USERNAME = "";
+    PasswordToken MOCK_PASSWORD = new PasswordToken(new byte[0]);
+
+    void setConnectionFactory(AccumuloConnectionFactory connectionFactory);
+
+    InMemoryInstance getInstance();
+
+    void submitReloadTasks();
+
+    void reloadTableCache(String tableName);
+
+    List getTableCaches();
+}
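The interface above extracts the container-managed bean that follows into a plain Java API, so the old @PostConstruct/@Schedule/@PreDestroy lifecycle now belongs to the caller. A minimal wiring sketch, assuming a caller-supplied scheduler and connection factory (both illustrative, not part of this diff):

    // Caller-managed lifecycle replacing the old EJB container hooks (sketch).
    AccumuloTableCacheProperties props = new AccumuloTableCacheProperties()
                    .withZookeepers("zk1:2181")                  // illustrative connect string
                    .withTableNames(List.of("DatawaveMetadata")) // illustrative table list
                    .withPoolName("WAREHOUSE")
                    .withReloadInterval(TimeUnit.HOURS.toMillis(24));
    AccumuloTableCache cache = new AccumuloTableCacheImpl(props);
    cache.setConnectionFactory(connectionFactory); // reload tasks are skipped until this is set
    scheduler.scheduleAtFixedRate(cache::submitReloadTasks, 1, 60, TimeUnit.SECONDS); // stands in for @Schedule
    // ... on shutdown:
    cache.close(); // replaces the old @PreDestroy stop()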
diff --git a/web-services/common/src/main/java/datawave/webservice/common/cache/AccumuloTableCache.java b/core/connection-pool/src/main/java/datawave/core/common/cache/AccumuloTableCacheImpl.java
similarity index 50%
rename from web-services/common/src/main/java/datawave/webservice/common/cache/AccumuloTableCache.java
rename to core/connection-pool/src/main/java/datawave/core/common/cache/AccumuloTableCacheImpl.java
index 9ffe3cf48b8..582e72c4e17 100644
--- a/web-services/common/src/main/java/datawave/webservice/common/cache/AccumuloTableCache.java
+++ b/core/connection-pool/src/main/java/datawave/core/common/cache/AccumuloTableCacheImpl.java
@@ -1,4 +1,4 @@
-package datawave.webservice.common.cache;
+package datawave.core.common.cache;
 
 import java.util.ArrayList;
 import java.util.Date;
@@ -6,125 +6,78 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingDeque;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 
-import javax.annotation.PostConstruct;
-import javax.annotation.PreDestroy;
-import javax.annotation.Resource;
-import javax.annotation.security.DeclareRoles;
-import javax.annotation.security.RolesAllowed;
-import javax.annotation.security.RunAs;
-import javax.ejb.EJBException;
-import javax.ejb.LocalBean;
-import javax.ejb.Lock;
-import javax.ejb.LockType;
-import javax.ejb.Schedule;
-import javax.ejb.Singleton;
-import javax.ejb.Startup;
-import javax.enterprise.concurrent.ManagedExecutorService;
-import javax.inject.Inject;
-import javax.interceptor.Interceptors;
-import javax.jms.Destination;
-import javax.jms.JMSContext;
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.curator.framework.CuratorFramework;
 import org.apache.curator.framework.recipes.shared.SharedCountListener;
 import org.apache.curator.framework.recipes.shared.SharedCountReader;
 import org.apache.curator.framework.state.ConnectionState;
-import org.apache.deltaspike.core.api.config.ConfigProperty;
-import org.apache.deltaspike.core.api.exclude.Exclude;
 import org.apache.log4j.Logger;
-import org.jboss.resteasy.annotations.GZIP;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 import datawave.accumulo.inmemory.InMemoryInstance;
-import datawave.annotation.Required;
-import datawave.configuration.DatawaveEmbeddedProjectStageHolder;
-import datawave.interceptor.RequiredInterceptor;
-import datawave.webservice.common.connection.AccumuloConnectionFactory;
-import datawave.webservice.common.exception.DatawaveWebApplicationException;
-import datawave.webservice.common.result.AccumuloTableCacheStatus;
-import datawave.webservice.query.exception.QueryException;
-import datawave.webservice.result.VoidResponse;
+import datawave.core.common.connection.AccumuloConnectionFactory;
+import datawave.core.common.result.TableCacheDescription;
 
 /**
  * Object that caches data from Accumulo tables.
  */
-@Path("/Common/AccumuloTableCache")
-@RunAs("InternalUser")
-@RolesAllowed({"AuthorizedUser", "AuthorizedQueryServer", "AuthorizedServer", "InternalUser", "Administrator", "JBossAdministrator"})
-@DeclareRoles({"AuthorizedUser", "AuthorizedQueryServer", "AuthorizedServer", "InternalUser", "Administrator", "JBossAdministrator"})
-@LocalBean
-@Startup
-// tells the container to initialize on startup
-@Singleton
-// this is a singleton bean in the container
-@Lock(LockType.READ)
-@Exclude(ifProjectStage = DatawaveEmbeddedProjectStageHolder.DatawaveEmbedded.class)
-public class AccumuloTableCache {
-
+public class AccumuloTableCacheImpl implements AccumuloTableCache {
     private final Logger log = Logger.getLogger(this.getClass());
 
-    @Inject
-    private JMSContext jmsContext;
-
-    @Resource(mappedName = "java:/topic/AccumuloTableCache")
-    private Destination cacheTopic;
-
-    @Resource
-    private ManagedExecutorService executorService;
-
-    @Inject
-    private AccumuloTableCacheConfiguration accumuloTableCacheConfiguration;
-
-    @Inject
-    @ConfigProperty(name = "dw.cacheCoordinator.evictionReaperIntervalSeconds", defaultValue = "30")
-    private int evictionReaperIntervalInSeconds;
-    @Inject
-    @ConfigProperty(name = "dw.cacheCoordinator.numLocks", defaultValue = "300")
-    private int numLocks;
-    @Inject
-    @ConfigProperty(name = "dw.cacheCoordinator.maxRetries", defaultValue = "10")
-    private int maxRetries;
-
-    public static final String MOCK_USERNAME = "";
-    public static final PasswordToken MOCK_PASSWORD = new PasswordToken(new byte[0]);
-
+    private final ExecutorService executorService;
+    private final AccumuloTableCacheProperties accumuloTableCacheProperties;
     private InMemoryInstance instance;
     private Map details;
    private List cacheCoordinators;
     private boolean connectionFactoryProvided = false;
 
-    public AccumuloTableCache() {
-        log.debug("Called AccumuloTableCacheBean and accumuloTableCacheConfiguration = " + accumuloTableCacheConfiguration);
+    public AccumuloTableCacheImpl(ExecutorService executorService, AccumuloTableCacheProperties accumuloTableCacheProperties) {
+        log.debug("Called AccumuloTableCacheImpl with accumuloTableCacheConfiguration = " + accumuloTableCacheProperties);
+        this.executorService = executorService;
+        this.accumuloTableCacheProperties = accumuloTableCacheProperties;
+        setup();
+    }
+
+    public AccumuloTableCacheImpl(AccumuloTableCacheProperties accumuloTableCacheProperties) {
+        this(getThreadPoolExecutor(accumuloTableCacheProperties), accumuloTableCacheProperties);
     }
 
-    @PostConstruct
-    private void setup() {
-        log.debug("accumuloTableCacheConfiguration was setup as: " + accumuloTableCacheConfiguration);
+    private static ExecutorService getThreadPoolExecutor(AccumuloTableCacheProperties accumuloTableCacheProperties) {
+        return new ThreadPoolExecutor(Math.max(accumuloTableCacheProperties.getTableNames().size() / 2, 1),
+                        Math.max(accumuloTableCacheProperties.getTableNames().size(), 1), 5, TimeUnit.MINUTES, new LinkedBlockingDeque<>(),
+                        new ThreadFactoryBuilder().setNameFormat("TableCacheReloader %d").build());
+    }
 
+    public void setup() {
+        log.debug("accumuloTableCacheConfiguration was setup as: " + accumuloTableCacheProperties);
         instance = new InMemoryInstance();
         details = new HashMap<>();
         cacheCoordinators = new ArrayList<>();
-        String zookeepers = accumuloTableCacheConfiguration.getZookeepers();
+        String zookeepers = accumuloTableCacheProperties.getZookeepers();
+
+        for (String tableName : accumuloTableCacheProperties.getTableNames()) {
+            BaseTableCache detail = new
BaseTableCache(); + detail.setTableName(tableName); + detail.setConnectionPoolName(accumuloTableCacheProperties.getPoolName()); + detail.setReloadInterval(accumuloTableCacheProperties.getReloadInterval()); - for (Entry entry : accumuloTableCacheConfiguration.getCaches().entrySet()) { - final String tableName = entry.getKey(); - TableCache detail = entry.getValue(); detail.setInstance(instance); - final SharedCacheCoordinator cacheCoordinator = new SharedCacheCoordinator(tableName, zookeepers, evictionReaperIntervalInSeconds, numLocks, - maxRetries); + final SharedCacheCoordinator cacheCoordinator = new SharedCacheCoordinator(tableName, zookeepers, + accumuloTableCacheProperties.getEvictionReaperIntervalInSeconds(), accumuloTableCacheProperties.getNumLocks(), + accumuloTableCacheProperties.getMaxRetries()); cacheCoordinators.add(cacheCoordinator); try { cacheCoordinator.start(); } catch (Exception e) { - throw new EJBException("Error starting AccumuloTableCache", e); + throw new RuntimeException("Error starting AccumuloTableCache", e); } try { @@ -165,24 +118,24 @@ public void countHasChanged(SharedCountReader sharedCount, int newCount) throws throw new RuntimeException("table:" + tableName + " Unable to create shared counters: " + e.getMessage(), e); } detail.setWatcher(cacheCoordinator); - details.put(entry.getKey(), entry.getValue()); - + details.put(tableName, detail); } } public void setConnectionFactory(AccumuloConnectionFactory connectionFactory) { - for (Entry entry : accumuloTableCacheConfiguration.getCaches().entrySet()) { + for (Entry entry : details.entrySet()) { TableCache detail = entry.getValue(); detail.setConnectionFactory(connectionFactory); } connectionFactoryProvided = true; } + @Override public InMemoryInstance getInstance() { return this.instance; } - @Schedule(hour = "*", minute = "*", second = "1", persistent = false) + @Override public void submitReloadTasks() { if (!connectionFactoryProvided) { log.trace("NOT submitting reload tasks since our connection factory hasn't been provided yet."); @@ -217,8 +170,8 @@ public void submitReloadTasks() { } } - @PreDestroy - public void stop() { + @Override + public void close() { for (Entry entry : details.entrySet()) { Future ref = entry.getValue().getReference(); if (null != ref) @@ -230,47 +183,25 @@ public void stop() { } /** - * JBossAdministrator or Administrator credentials required. + * Reload a table cache * * @param tableName * the name of the table for which the cached version is to be reloaded - * @return datawave.webservice.result.VoidResponse - * @RequestHeader X-ProxiedEntitiesChain use when proxying request for user - * @RequestHeader X-ProxiedIssuersChain required when using X-ProxiedEntitiesChain, specify one issuer DN per subject DN listed in X-ProxiedEntitiesChain - * @RequestHeader query-session-id session id value used for load balancing purposes. 
query-session-id can be placed in the request in a Cookie header or as - * a query parameter - * @ResponseHeader X-OperationTimeInMS time spent on the server performing the operation, does not account for network or result serialization - * - * @HTTP 200 success - * @HTTP 404 queries not found using {@code id} - * @HTTP 500 internal server error */ - @GET - @Path("/reload/{tableName}") - @Produces({"application/xml", "text/xml", "application/json", "text/yaml", "text/x-yaml", "application/x-yaml", "application/x-protobuf", - "application/x-protostuff"}) - @GZIP - @Interceptors(RequiredInterceptor.class) - public VoidResponse reloadCache(@Required("tableName") @PathParam("tableName") String tableName) { - VoidResponse response = new VoidResponse(); + @Override + public void reloadTableCache(String tableName) { if (null == details.get(tableName)) { - return response; + return; } + log.debug("Reloading table cache for " + tableName); // send an eviction notice to the cluster try { details.get(tableName).getWatcher().incrementCounter(tableName); } catch (Exception e) { - response.addException(new QueryException(e).getBottomQueryException()); - throw new DatawaveWebApplicationException(e, response); - } - try { - this.sendCacheReloadMessage(tableName); - } catch (Exception e) { - log.error("Unable to send message about cache reload"); + throw new RuntimeException(e); } handleReload(tableName); handleReloadTypeMetadata(tableName); - return response; } private void handleReloadTypeMetadata(String tableName) { @@ -291,26 +222,13 @@ private void handleReload(String tableName) { } /** - * JBossAdministrator or Administrator credentials required. - * - * @return datawave.webservice.common.result.AccumuloTableCacheStatus - * @RequestHeader X-ProxiedEntitiesChain use when proxying request for user - * @RequestHeader X-ProxiedIssuersChain required when using X-ProxiedEntitiesChain, specify one issuer DN per subject DN listed in X-ProxiedEntitiesChain - * @RequestHeader query-session-id session id value used for load balancing purposes. 
query-session-id can be placed in the request in a Cookie header or as - * a query parameter - * @ResponseHeader X-OperationTimeInMS time spent on the server performing the operation, does not account for network or result serialization - * - * @HTTP 200 success + * Get the table caches */ - @GET - @Path("/") - @Produces({"application/xml", "text/xml", "application/json", "text/yaml", "text/x-yaml", "application/x-yaml", "application/x-protobuf", - "application/x-protostuff", "text/html"}) - @GZIP - public AccumuloTableCacheStatus getStatus() { - AccumuloTableCacheStatus response = new AccumuloTableCacheStatus(); + @Override + public List getTableCaches() { + List tableCaches = new ArrayList<>(); for (Entry entry : details.entrySet()) { - datawave.webservice.common.result.TableCache t = new datawave.webservice.common.result.TableCache(); + TableCacheDescription t = new TableCacheDescription(); t.setTableName(entry.getValue().getTableName()); t.setConnectionPoolName(entry.getValue().getConnectionPoolName()); t.setAuthorizations(entry.getValue().getAuths()); @@ -318,14 +236,8 @@ public AccumuloTableCacheStatus getStatus() { t.setMaxRows(entry.getValue().getMaxRows()); t.setLastRefresh(entry.getValue().getLastRefresh()); t.setCurrentlyRefreshing((entry.getValue().getReference() != null)); - response.getCaches().add(t); + tableCaches.add(t); } - return response; - } - - private void sendCacheReloadMessage(String tableName) { - log.warn("table:" + tableName + " sending cache reload message about table " + tableName); - - jmsContext.createProducer().send(cacheTopic, tableName); + return tableCaches; } } diff --git a/core/connection-pool/src/main/java/datawave/core/common/cache/AccumuloTableCacheProperties.java b/core/connection-pool/src/main/java/datawave/core/common/cache/AccumuloTableCacheProperties.java new file mode 100644 index 00000000000..3be5b90b666 --- /dev/null +++ b/core/connection-pool/src/main/java/datawave/core/common/cache/AccumuloTableCacheProperties.java @@ -0,0 +1,129 @@ +package datawave.core.common.cache; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +public class AccumuloTableCacheProperties { + private String zookeepers = null; + private List tableNames = new ArrayList<>(); + private String poolName; + private long reloadInterval; + private int evictionReaperIntervalInSeconds; + private int numLocks; + private int maxRetries; + private long tableCacheReloadTaskLease = TimeUnit.MINUTES.toMillis(10); + private TimeUnit tableCacheReloadTaskLeaseTimeUnit = TimeUnit.MILLISECONDS; + + public String getZookeepers() { + return zookeepers; + } + + public AccumuloTableCacheProperties withZookeepers(String zookeepers) { + this.zookeepers = zookeepers; + return this; + } + + public List getTableNames() { + return tableNames; + } + + public AccumuloTableCacheProperties withTableNames(List tableNames) { + this.tableNames = tableNames; + return this; + } + + public String getPoolName() { + return poolName; + } + + public AccumuloTableCacheProperties withPoolName(String poolName) { + this.poolName = poolName; + return this; + } + + public long getReloadInterval() { + return reloadInterval; + } + + public AccumuloTableCacheProperties withReloadInterval(long reloadInterval) { + this.reloadInterval = reloadInterval; + return this; + } + + public int getEvictionReaperIntervalInSeconds() { + return evictionReaperIntervalInSeconds; + } + + public AccumuloTableCacheProperties withEvictionReaperIntervalInSeconds(int evictionReaperIntervalInSeconds) 
{ + this.evictionReaperIntervalInSeconds = evictionReaperIntervalInSeconds; + return this; + } + + public int getNumLocks() { + return numLocks; + } + + public AccumuloTableCacheProperties withNumLocks(int numLocks) { + this.numLocks = numLocks; + return this; + } + + public int getMaxRetries() { + return maxRetries; + } + + public AccumuloTableCacheProperties withMaxRetries(int maxRetries) { + this.maxRetries = maxRetries; + return this; + } + + public void setZookeepers(String zookeepers) { + this.zookeepers = zookeepers; + } + + public void setTableNames(List tableNames) { + this.tableNames = tableNames; + } + + public void setPoolName(String poolName) { + this.poolName = poolName; + } + + public void setReloadInterval(long reloadInterval) { + this.reloadInterval = reloadInterval; + } + + public void setEvictionReaperIntervalInSeconds(int evictionReaperIntervalInSeconds) { + this.evictionReaperIntervalInSeconds = evictionReaperIntervalInSeconds; + } + + public void setNumLocks(int numLocks) { + this.numLocks = numLocks; + } + + public void setMaxRetries(int maxRetries) { + this.maxRetries = maxRetries; + } + + public long getTableCacheReloadTaskLease() { + return tableCacheReloadTaskLease; + } + + public void setTableCacheReloadTaskLease(long tableCacheReloadTaskLease) { + this.tableCacheReloadTaskLease = tableCacheReloadTaskLease; + } + + public long getTableCacheReloadTaskLeaseMillis() { + return tableCacheReloadTaskLeaseTimeUnit.toMillis(tableCacheReloadTaskLease); + } + + public TimeUnit getTableCacheReloadTaskLeaseTimeUnit() { + return tableCacheReloadTaskLeaseTimeUnit; + } + + public void setTableCacheReloadTaskLeaseTimeUnit(TimeUnit tableCacheReloadTaskLeaseTimeUnit) { + this.tableCacheReloadTaskLeaseTimeUnit = tableCacheReloadTaskLeaseTimeUnit; + } + +} diff --git a/web-services/common/src/main/java/datawave/webservice/common/cache/BaseTableCache.java b/core/connection-pool/src/main/java/datawave/core/common/cache/BaseTableCache.java similarity index 96% rename from web-services/common/src/main/java/datawave/webservice/common/cache/BaseTableCache.java rename to core/connection-pool/src/main/java/datawave/core/common/cache/BaseTableCache.java index 2f90cfa77ce..e662b70f4fb 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/cache/BaseTableCache.java +++ b/core/connection-pool/src/main/java/datawave/core/common/cache/BaseTableCache.java @@ -1,4 +1,4 @@ -package datawave.webservice.common.cache; +package datawave.core.common.cache; import java.io.Serializable; import java.util.Date; @@ -33,8 +33,7 @@ import datawave.accumulo.inmemory.InMemoryAccumuloClient; import datawave.accumulo.inmemory.InMemoryInstance; -import datawave.webservice.common.connection.AccumuloConnectionFactory; -import datawave.webservice.common.connection.AccumuloConnectionFactory.Priority; +import datawave.core.common.connection.AccumuloConnectionFactory; import datawave.webservice.common.connection.WrappedAccumuloClient; public class BaseTableCache implements Serializable, TableCache { @@ -171,7 +170,7 @@ public Boolean call() throws Exception { String tempTableName = tableName + "Temp"; try { Map map = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - accumuloClient = connectionFactory.getClient(connectionPoolName, Priority.ADMIN, map); + accumuloClient = connectionFactory.getClient(null, null, connectionPoolName, AccumuloConnectionFactory.Priority.ADMIN, map); if (accumuloClient instanceof WrappedAccumuloClient) { accumuloClient = ((WrappedAccumuloClient) 
accumuloClient).getReal(); } diff --git a/web-services/common/src/main/java/datawave/webservice/common/cache/SharedBoolean.java b/core/connection-pool/src/main/java/datawave/core/common/cache/SharedBoolean.java similarity index 98% rename from web-services/common/src/main/java/datawave/webservice/common/cache/SharedBoolean.java rename to core/connection-pool/src/main/java/datawave/core/common/cache/SharedBoolean.java index 1e6bd7b9644..ec2965f76a4 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/cache/SharedBoolean.java +++ b/core/connection-pool/src/main/java/datawave/core/common/cache/SharedBoolean.java @@ -3,7 +3,7 @@ * To change this template file, choose Tools | Templates * and open the template in the editor. */ -package datawave.webservice.common.cache; +package datawave.core.common.cache; import java.io.Closeable; import java.io.IOException; diff --git a/web-services/common/src/main/java/datawave/webservice/common/cache/SharedBooleanListener.java b/core/connection-pool/src/main/java/datawave/core/common/cache/SharedBooleanListener.java similarity index 90% rename from web-services/common/src/main/java/datawave/webservice/common/cache/SharedBooleanListener.java rename to core/connection-pool/src/main/java/datawave/core/common/cache/SharedBooleanListener.java index fa20bd7228b..e9300b7072b 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/cache/SharedBooleanListener.java +++ b/core/connection-pool/src/main/java/datawave/core/common/cache/SharedBooleanListener.java @@ -3,7 +3,7 @@ * To change this template file, choose Tools | Templates * and open the template in the editor. */ -package datawave.webservice.common.cache; +package datawave.core.common.cache; import org.apache.curator.framework.state.ConnectionStateListener; diff --git a/web-services/common/src/main/java/datawave/webservice/common/cache/SharedBooleanReader.java b/core/connection-pool/src/main/java/datawave/core/common/cache/SharedBooleanReader.java similarity index 89% rename from web-services/common/src/main/java/datawave/webservice/common/cache/SharedBooleanReader.java rename to core/connection-pool/src/main/java/datawave/core/common/cache/SharedBooleanReader.java index 6703b03e73b..31721333f42 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/cache/SharedBooleanReader.java +++ b/core/connection-pool/src/main/java/datawave/core/common/cache/SharedBooleanReader.java @@ -3,7 +3,7 @@ * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/ -package datawave.webservice.common.cache; +package datawave.core.common.cache; import org.apache.curator.framework.listen.Listenable; diff --git a/web-services/common/src/main/java/datawave/webservice/common/cache/SharedCacheCoordinator.java b/core/connection-pool/src/main/java/datawave/core/common/cache/SharedCacheCoordinator.java similarity index 97% rename from web-services/common/src/main/java/datawave/webservice/common/cache/SharedCacheCoordinator.java rename to core/connection-pool/src/main/java/datawave/core/common/cache/SharedCacheCoordinator.java index 363da12ef16..ac322004e26 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/cache/SharedCacheCoordinator.java +++ b/core/connection-pool/src/main/java/datawave/core/common/cache/SharedCacheCoordinator.java @@ -1,4 +1,4 @@ -package datawave.webservice.common.cache; +package datawave.core.common.cache; import java.io.IOException; import java.io.Serializable; @@ -11,10 +11,6 @@ import java.util.Timer; import java.util.TimerTask; -import javax.annotation.PostConstruct; -import javax.annotation.PreDestroy; -import javax.inject.Inject; - import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.CuratorFrameworkFactory; import org.apache.curator.framework.recipes.cache.ChildData; @@ -31,17 +27,14 @@ import org.apache.curator.framework.state.ConnectionStateListener; import org.apache.curator.retry.BoundedExponentialBackoffRetry; import org.apache.curator.utils.ZKPaths; -import org.apache.deltaspike.core.api.config.ConfigProperty; +import org.apache.log4j.Logger; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.ZKUtil; import org.apache.zookeeper.data.Stat; -import org.jboss.logging.Logger; import com.google.common.base.Preconditions; -import datawave.common.util.ArgumentChecker; - /** * Coordinates operations on a shared cache. That is, this coordinates operations where an in-memory cache may be running on multiple servers and each in-memory * cache is using a shared backing store (e.g., shared filesystem, Accumulo, etc). 
There are helper methods to handle distributed locking, notification of @@ -58,7 +51,7 @@ public interface EvictionCallback { private static final String LIVE_SERVERS = "/liveServers"; private static final long EVICT_MESSAGE_TIMEOUT = 60 * 1000L; - private Logger log = Logger.getLogger(getClass()); + private static final Logger log = Logger.getLogger(SharedCacheCoordinator.class); private transient CuratorFramework curatorClient; private String localName; private String serverIdentifierPath; @@ -98,12 +91,7 @@ public interface EvictionCallback { * @param numLocks * number of locks */ - @Inject - public SharedCacheCoordinator(@ConfigProperty(name = "dw.cache.coordinator.namespace") String namespace, - @ConfigProperty(name = "dw.warehouse.zookeepers") String zookeeperConnectionString, - @ConfigProperty(name = "dw.cacheCoordinator.evictionReaperIntervalSeconds", defaultValue = "30") int evictionReaperIntervalInSeconds, - @ConfigProperty(name = "dw.cacheCoordinator.numLocks", defaultValue = "300") int numLocks, - @ConfigProperty(name = "dw.cacheCoordinator.maxRetries", defaultValue = "10") int maxRetries) { + public SharedCacheCoordinator(String namespace, String zookeeperConnectionString, int evictionReaperIntervalInSeconds, int numLocks, int maxRetries) { ArgumentChecker.notNull(namespace, zookeeperConnectionString); locks = new HashMap<>(); @@ -130,7 +118,6 @@ public SharedCacheCoordinator(@ConfigProperty(name = "dw.cache.coordinator.names evictionReaper = new Timer("cache-eviction-reaper-" + namespace, true); } - @PostConstruct public void start() { curatorClient.start(); @@ -284,7 +271,6 @@ private void restartTriStates() { } } - @PreDestroy public void stop() { evictionReaper.cancel(); @@ -748,4 +734,18 @@ protected void reapEvictions() { log.warn("Error cleaning up eviction notices: " + e.getMessage(), e); } } + + public static class ArgumentChecker { + private static final String NULL_ARG_MSG = "argument was null"; + + public static final void notNull(final Object arg1) { + if (arg1 == null) + throw new IllegalArgumentException(NULL_ARG_MSG + ":Is null- arg1? true"); + } + + public static final void notNull(final Object arg1, final Object arg2) { + if (arg1 == null || arg2 == null) + throw new IllegalArgumentException(NULL_ARG_MSG + ":Is null- arg1? " + (arg1 == null) + " arg2? 
" + (arg2 == null)); + } + } } diff --git a/web-services/common/src/main/java/datawave/webservice/common/cache/SharedTriState.java b/core/connection-pool/src/main/java/datawave/core/common/cache/SharedTriState.java similarity index 99% rename from web-services/common/src/main/java/datawave/webservice/common/cache/SharedTriState.java rename to core/connection-pool/src/main/java/datawave/core/common/cache/SharedTriState.java index f1aa73e7f36..e98be8527ef 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/cache/SharedTriState.java +++ b/core/connection-pool/src/main/java/datawave/core/common/cache/SharedTriState.java @@ -1,4 +1,4 @@ -package datawave.webservice.common.cache; +package datawave.core.common.cache; import java.io.Closeable; import java.io.IOException; diff --git a/web-services/common/src/main/java/datawave/webservice/common/cache/SharedTriStateListener.java b/core/connection-pool/src/main/java/datawave/core/common/cache/SharedTriStateListener.java similarity index 85% rename from web-services/common/src/main/java/datawave/webservice/common/cache/SharedTriStateListener.java rename to core/connection-pool/src/main/java/datawave/core/common/cache/SharedTriStateListener.java index 78ec9012cc7..363277c4db4 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/cache/SharedTriStateListener.java +++ b/core/connection-pool/src/main/java/datawave/core/common/cache/SharedTriStateListener.java @@ -1,4 +1,4 @@ -package datawave.webservice.common.cache; +package datawave.core.common.cache; import org.apache.curator.framework.state.ConnectionStateListener; diff --git a/web-services/common/src/main/java/datawave/webservice/common/cache/SharedTriStateReader.java b/core/connection-pool/src/main/java/datawave/core/common/cache/SharedTriStateReader.java similarity index 82% rename from web-services/common/src/main/java/datawave/webservice/common/cache/SharedTriStateReader.java rename to core/connection-pool/src/main/java/datawave/core/common/cache/SharedTriStateReader.java index 02101aeb2f0..019e28af88d 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/cache/SharedTriStateReader.java +++ b/core/connection-pool/src/main/java/datawave/core/common/cache/SharedTriStateReader.java @@ -1,4 +1,4 @@ -package datawave.webservice.common.cache; +package datawave.core.common.cache; import org.apache.curator.framework.listen.Listenable; diff --git a/web-services/common/src/main/java/datawave/webservice/common/cache/TableCache.java b/core/connection-pool/src/main/java/datawave/core/common/cache/TableCache.java similarity index 90% rename from web-services/common/src/main/java/datawave/webservice/common/cache/TableCache.java rename to core/connection-pool/src/main/java/datawave/core/common/cache/TableCache.java index 808f50726ef..5296146c1d5 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/cache/TableCache.java +++ b/core/connection-pool/src/main/java/datawave/core/common/cache/TableCache.java @@ -1,4 +1,4 @@ -package datawave.webservice.common.cache; +package datawave.core.common.cache; import java.io.Serializable; import java.util.Date; @@ -6,7 +6,7 @@ import java.util.concurrent.Future; import datawave.accumulo.inmemory.InMemoryInstance; -import datawave.webservice.common.connection.AccumuloConnectionFactory; +import datawave.core.common.connection.AccumuloConnectionFactory; public interface TableCache extends Callable, Serializable { diff --git 
a/web-services/common/src/main/java/datawave/webservice/common/connection/AccumuloClientPool.java b/core/connection-pool/src/main/java/datawave/core/common/connection/AccumuloClientPool.java similarity index 88% rename from web-services/common/src/main/java/datawave/webservice/common/connection/AccumuloClientPool.java rename to core/connection-pool/src/main/java/datawave/core/common/connection/AccumuloClientPool.java index 978880fd932..2032b783f19 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/connection/AccumuloClientPool.java +++ b/core/connection-pool/src/main/java/datawave/core/common/connection/AccumuloClientPool.java @@ -1,4 +1,4 @@ -package datawave.webservice.common.connection; +package datawave.core.common.connection; import java.util.ArrayList; import java.util.Arrays; @@ -34,9 +34,9 @@ public AccumuloClient borrowObject(Map trackingMap) throws Except Long threadId = Thread.currentThread().getId(); AccumuloClient o; try { - trackingMap.put("connection.state.start", Long.valueOf(System.currentTimeMillis()).toString()); - trackingMap.put("state", AccumuloConnectionFactory.State.WAITING.toString()); - trackingMap.put("thread.name", Thread.currentThread().getName()); + trackingMap.put(AccumuloConnectionFactory.START_TIME, Long.valueOf(System.currentTimeMillis()).toString()); + trackingMap.put(AccumuloConnectionFactory.STATE, AccumuloConnectionFactory.State.WAITING.toString()); + trackingMap.put(AccumuloConnectionFactory.THREAD_NAME, Thread.currentThread().getName()); threadToTrackingMapMap.put(threadId, trackingMap); o = super.borrowObject(); log.debug(System.currentTimeMillis() + " thread: " + threadId + " borrowed connector: " + o); @@ -47,8 +47,8 @@ public AccumuloClient borrowObject(Map trackingMap) throws Except // connection being moved from the threadToTrackingMapMap to the connectorToTrackingMapMap if (o != null) { - trackingMap.put("connection.state.start", Long.valueOf(System.currentTimeMillis()).toString()); - trackingMap.put("state", AccumuloConnectionFactory.State.CONNECTED.toString()); + trackingMap.put(AccumuloConnectionFactory.START_TIME, Long.valueOf(System.currentTimeMillis()).toString()); + trackingMap.put(AccumuloConnectionFactory.STATE, AccumuloConnectionFactory.State.CONNECTED.toString()); connectorToTrackingMapMap.put(o, trackingMap); } diff --git a/web-services/common/src/main/java/datawave/webservice/common/connection/AccumuloClientPoolFactory.java b/core/connection-pool/src/main/java/datawave/core/common/connection/AccumuloClientPoolFactory.java similarity index 97% rename from web-services/common/src/main/java/datawave/webservice/common/connection/AccumuloClientPoolFactory.java rename to core/connection-pool/src/main/java/datawave/core/common/connection/AccumuloClientPoolFactory.java index 0b7b246b3b2..bbc192ee193 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/connection/AccumuloClientPoolFactory.java +++ b/core/connection-pool/src/main/java/datawave/core/common/connection/AccumuloClientPoolFactory.java @@ -1,4 +1,4 @@ -package datawave.webservice.common.connection; +package datawave.core.common.connection; import org.apache.accumulo.core.client.Accumulo; import org.apache.accumulo.core.client.AccumuloClient; diff --git a/web-services/common/src/main/java/datawave/webservice/common/connection/AccumuloConnectionFactory.java b/core/connection-pool/src/main/java/datawave/core/common/connection/AccumuloConnectionFactory.java similarity index 52% rename from 
web-services/common/src/main/java/datawave/webservice/common/connection/AccumuloConnectionFactory.java
rename to core/connection-pool/src/main/java/datawave/core/common/connection/AccumuloConnectionFactory.java
index 66a652baba9..80d67e6184d 100644
--- a/web-services/common/src/main/java/datawave/webservice/common/connection/AccumuloConnectionFactory.java
+++ b/core/connection-pool/src/main/java/datawave/core/common/connection/AccumuloConnectionFactory.java
@@ -1,11 +1,26 @@
-package datawave.webservice.common.connection;
+package datawave.core.common.connection;
 
+import java.util.Collection;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.clientImpl.ClientContext;
 
-public interface AccumuloConnectionFactory {
+import datawave.core.common.result.ConnectionPool;
+import datawave.webservice.common.connection.WrappedAccumuloClient;
+
+public interface AccumuloConnectionFactory extends AutoCloseable {
+
+    String USER_DN = "user.dn";
+    String PROXY_SERVERS = "proxyServers";
+    String REQUEST_LOCATION = "request.location";
+    String START_TIME = "connection.state.start";
+    String STATE = "state";
+    String THREAD_NAME = "thread.name";
+    String QUERY_USER = "query.user";
+    String QUERY_ID = "query.id";
+    String QUERY = "query.query";
 
     enum Priority {
@@ -17,17 +32,10 @@ enum State {
         WAITING, CONNECTED
     }
 
-    /**
-     * @param poolName
-     *            the name of the pool to query
-     * @return name of the user used in the connection pools
-     */
-    String getConnectionUserName(String poolName);
-
     /**
      * Gets a connection from the pool with the assigned priority
      *
-     * Deprecated in 2.2.3, use {@link #getClient(Priority, Map)}
+     * Deprecated in 2.2.3, use {@link #getClient(String, Collection, String, Priority, Map)}
      *
      * @param priority
      *            the connection's Priority
@@ -35,9 +43,9 @@ enum State {
      *            the tracking map
      * @return accumulo connection
      * @throws Exception
-     *             if there are issues
+     *             on failure
      */
-    AccumuloClient getClient(Priority priority, Map trackingMap) throws Exception;
+    AccumuloClient getClient(String userDN, Collection proxyServers, Priority priority, Map trackingMap) throws Exception;
 
     /**
      * Gets a connection from the named pool with the assigned priority
@@ -50,20 +58,47 @@ enum State {
      *            the tracking map
      * @return Accumulo connection
      * @throws Exception
-     *             if there are issues
+     *             on failure
      */
-    AccumuloClient getClient(String poolName, Priority priority, Map trackingMap) throws Exception;
+    AccumuloClient getClient(String userDN, Collection proxyServers, String poolName, Priority priority, Map trackingMap)
+                    throws Exception;
 
     /**
     * Returns the connection to the pool with the associated priority.
     *
     * @param client
-     *            The client to return
+     *            The connection to return
     * @throws Exception
-     *             if there are issues
+     *             on failure
     */
     void returnClient(AccumuloClient client) throws Exception;
 
+    /**
+     * Return a report of the current connection factory usage
+     */
+    String report();
+
+    /**
+     * Get a description of the current pools
+     *
+     * @return A list of connection pools
+     */
+    List getConnectionPools();
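Reviewer note: the getClient overloads now carry the caller's userDN and proxyServers directly (recorded in the tracking map for non-ADMIN priorities), replacing the removed getConnectionUserName lookup. A minimal borrow/return sketch against the new signature, mirroring the BaseTableCache change further down; the pool name, DN, and proxy list here are illustrative:

    // Borrow a client from a named pool and always return it to the factory (sketch).
    Map<String,String> trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace());
    AccumuloClient client = connectionFactory.getClient("cn=query user", proxyServers, "WAREHOUSE",
                    AccumuloConnectionFactory.Priority.NORMAL, trackingMap);
    try {
        // ... use the client ...
    } finally {
        connectionFactory.returnClient(client); // finds whichever pool the client came from
    }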
+
+    /**
+     * Get the current connection usage percentage. This can be used for balancing purposes.
+     *
+     * @return The usage percentage (0 - 100)
+     */
+    int getConnectionUsagePercent();
+
+    /**
+     * Get a tracking map to be used in the getConnection calls
+     *
+     * @param stackTrace
+     *            The caller's stack trace
+     * @return A map representation
+     */
     Map getTrackingMap(StackTraceElement[] stackTrace);
 
     /**
diff --git a/core/connection-pool/src/main/java/datawave/core/common/connection/AccumuloConnectionFactoryImpl.java b/core/connection-pool/src/main/java/datawave/core/common/connection/AccumuloConnectionFactoryImpl.java
new file mode 100644
index 00000000000..8c77dd34688
--- /dev/null
+++ b/core/connection-pool/src/main/java/datawave/core/common/connection/AccumuloConnectionFactoryImpl.java
@@ -0,0 +1,381 @@
+package datawave.core.common.connection;
+
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.AccumuloClient;
+import org.apache.accumulo.core.client.admin.SecurityOperations;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.util.Pair;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.mutable.MutableInt;
+import org.apache.log4j.Logger;
+
+import datawave.accumulo.inmemory.InMemoryAccumuloClient;
+import datawave.core.common.cache.AccumuloTableCache;
+import datawave.core.common.result.Connection;
+import datawave.core.common.result.ConnectionPool;
+import datawave.core.common.result.ConnectionPoolProperties;
+import datawave.core.common.result.ConnectionPoolsProperties;
+import datawave.webservice.common.connection.WrappedAccumuloClient;
+
+/**
+ * An Accumulo connection factory
+ */
+public class AccumuloConnectionFactoryImpl implements AccumuloConnectionFactory {
+
+    private Logger log = Logger.getLogger(this.getClass());
+
+    private final AccumuloTableCache cache;
+    private final ConnectionPoolsProperties connectionPoolsConfiguration;
+
+    private Map> pools;
+
+    private String defaultPoolName = null;
+
+    private static AccumuloConnectionFactoryImpl factory = null;
+
+    public static AccumuloConnectionFactory getInstance(AccumuloTableCache cache, ConnectionPoolsProperties config) {
+        if (factory == null) {
+            synchronized (AccumuloConnectionFactoryImpl.class) {
+                if (factory == null) {
+                    setFactory(new AccumuloConnectionFactoryImpl(cache, config));
+                }
+            }
+        }
+        return factory;
+    }
+
+    private AccumuloConnectionFactoryImpl(AccumuloTableCache cache, ConnectionPoolsProperties config) {
+        this.cache = cache;
+        this.connectionPoolsConfiguration = config;
+        log.info("Initializing AccumuloConnectionFactoryImpl with " + config.getDefaultPool() + " and " + config.getPoolNames());
+        init();
+    }
+
+    public void init() {
+        this.pools = new HashMap<>();
+
+        if (this.connectionPoolsConfiguration == null) {
+            log.error("connectionPoolsConfiguration was null - aborting init()");
+            return;
+        }
+        HashMap> instances = new HashMap<>();
+        this.defaultPoolName = connectionPoolsConfiguration.getDefaultPool();
+        for (Entry entry : connectionPoolsConfiguration.getPools().entrySet()) {
+            Map p = new HashMap<>();
+            ConnectionPoolProperties conf = entry.getValue();
+            p.put(Priority.ADMIN, createConnectionPool(conf, conf.getAdminPriorityPoolSize()));
+            p.put(Priority.HIGH, createConnectionPool(conf, conf.getHighPriorityPoolSize()));
+
p.put(Priority.NORMAL, createConnectionPool(conf, conf.getNormalPriorityPoolSize())); + p.put(Priority.LOW, createConnectionPool(conf, conf.getLowPriorityPoolSize())); + this.pools.put(entry.getKey(), Collections.unmodifiableMap(p)); + try { + setupMockAccumuloUser(conf, p.get(AccumuloConnectionFactory.Priority.NORMAL), instances); + } catch (Exception e) { + log.error("Error configuring mock accumulo user for AccumuloConnectionFactoryBean.", e); + } + + // Initialize the distributed tracing system. This needs to be done once at application startup. Since + // it is tied to Accumulo connections, we do it here in this singleton bean. + String appName = "datawave_ws"; + try { + appName = System.getProperty("app", "datawave_ws"); + } catch (SecurityException e) { + log.warn("Unable to retrieve system property \"app\": " + e.getMessage()); + } + } + + cache.setConnectionFactory(this); + } + + private AccumuloClientPool createConnectionPool(ConnectionPoolProperties conf, int limit) { + AccumuloClientPoolFactory factory = new AccumuloClientPoolFactory(conf.getUsername(), conf.getPassword(), conf.getZookeepers(), conf.getInstance()); + AccumuloClientPool pool = new AccumuloClientPool(factory); + pool.setTestOnBorrow(true); + pool.setTestOnReturn(true); + pool.setMaxTotal(limit); + pool.setMaxIdle(-1); + + try { + pool.addObject(); + } catch (Exception e) { + log.error("Error pre-populating connection pool", e); + } + + return pool; + } + + private void setupMockAccumuloUser(ConnectionPoolProperties conf, AccumuloClientPool pool, HashMap> instances) + throws Exception { + AccumuloClient c = null; + try { + c = pool.borrowObject(new HashMap<>()); + + Pair pair = instances.get(cache.getInstance().getInstanceID()); + String user = "root"; + PasswordToken password = new PasswordToken(new byte[0]); + if (pair != null && user.equals(pair.getFirst())) + password = pair.getSecond(); + SecurityOperations security = new InMemoryAccumuloClient(user, cache.getInstance()).securityOperations(); + Set users = security.listLocalUsers(); + if (!users.contains(conf.getUsername())) { + security.createLocalUser(conf.getUsername(), new PasswordToken(conf.getPassword())); + security.changeUserAuthorizations(conf.getUsername(), c.securityOperations().getUserAuthorizations(conf.getUsername())); + } else { + PasswordToken newPassword = new PasswordToken(conf.getPassword()); + // If we're changing root's password, and trying to change then keep track of that. If we have multiple instances + // that specify mismatching passwords, then throw an error. 
+ if (user.equals(conf.getUsername())) { + if (pair != null && !newPassword.equals(pair.getSecond())) + throw new IllegalStateException( + "Invalid AccumuloConnectionFactoryBean configuration--multiple pools are configured with different root passwords!"); + instances.put(cache.getInstance().getInstanceID(), new Pair<>(conf.getUsername(), newPassword)); + } + // match root's password on mock to the password on the actual Accumulo instance + security.changeLocalUserPassword(conf.getUsername(), newPassword); + } + } finally { + pool.returnObject(c); + } + } + + private static void setFactory(AccumuloConnectionFactoryImpl factory) { + AccumuloConnectionFactoryImpl.factory = factory; + } + + @Override + public void close() { + synchronized (AccumuloConnectionFactoryImpl.class) { + setFactory(null); + for (Entry> entry : this.pools.entrySet()) { + for (Entry poolEntry : entry.getValue().entrySet()) { + try { + poolEntry.getValue().close(); + } catch (Exception e) { + log.error("Error closing Accumulo Connection Pool: " + e); + } + } + } + } + } + + /** + * Gets a connection from the pool with the assigned priority + * + * Deprecated in 2.2.3, use getConnection(UserContext context, String poolName, Priority priority, {@code Map trackingMap)} + * + * @param priority + * the connection's Priority + * @return accumulo connection + * @throws Exception + */ + @Override + public AccumuloClient getClient(final String userDN, final Collection proxyServers, Priority priority, Map trackingMap) + throws Exception { + return getClient(userDN, proxyServers, defaultPoolName, priority, trackingMap); + } + + /** + * Gets a connection from the named pool with the assigned priority + * + * @param cpn + * the name of the pool to retrieve the connection from + * @param priority + * the priority of the connection + * @param tm + * the tracking map + * @return Accumulo connection + * @throws Exception + */ + @Override + public AccumuloClient getClient(final String userDN, final Collection proxyServers, final String cpn, final Priority priority, + final Map tm) throws Exception { + final Map trackingMap = (tm != null) ? tm : new HashMap<>(); + final String poolName = (cpn != null) ? 
cpn : defaultPoolName; + + if (!priority.equals(Priority.ADMIN)) { + if (userDN != null) + trackingMap.put(USER_DN, userDN); + if (proxyServers != null) + trackingMap.put(PROXY_SERVERS, StringUtils.join(proxyServers, " -> ")); + } + log.info("Getting pool from " + poolName + " for priority " + priority); + log.info("Pools = " + pools); + log.info("Pools.get(poolName) = " + pools.get(poolName)); + AccumuloClientPool pool = pools.get(poolName).get(priority); + AccumuloClient c = pool.borrowObject(trackingMap); + AccumuloClient mock = new InMemoryAccumuloClient(pool.getFactory().getUsername(), cache.getInstance()); + WrappedAccumuloClient wrappedAccumuloClient = new WrappedAccumuloClient(c, mock); + if (connectionPoolsConfiguration.getClientConfiguration(poolName) != null) { + wrappedAccumuloClient.setClientConfig(connectionPoolsConfiguration.getClientConfiguration(poolName).getConfiguration()); + } + String classLoaderContext = System.getProperty("dw.accumulo.classLoader.context"); + if (classLoaderContext != null) { + wrappedAccumuloClient.setScannerClassLoaderContext(classLoaderContext); + } + String timeout = System.getProperty("dw.accumulo.scan.batch.timeout.seconds"); + if (timeout != null) { + wrappedAccumuloClient.setScanBatchTimeoutSeconds(Long.parseLong(timeout)); + } + return wrappedAccumuloClient; + } + + /** + * Returns the connection to the pool with the associated priority. + * + * @param client + * The connection to return + * @throws Exception + */ + @Override + public void returnClient(AccumuloClient client) throws Exception { + if (client instanceof WrappedAccumuloClient) { + WrappedAccumuloClient wrappedAccumuloClient = (WrappedAccumuloClient) client; + wrappedAccumuloClient.clearScannerClassLoaderContext(); + client = wrappedAccumuloClient.getReal(); + } + for (Entry> entry : this.pools.entrySet()) { + for (Entry poolEntry : entry.getValue().entrySet()) { + if (poolEntry.getValue().connectorCameFromHere(client)) { + poolEntry.getValue().returnObject(client); + log.info("Returning connection to pool " + entry.getKey() + " for priority " + poolEntry.getKey()); + return; + } + } + } + log.info("returnConnection called with connection that did not come from any AccumuloConnectionPool"); + } + + @Override + public String report() { + StringBuilder buf = new StringBuilder(); + for (Entry> entry : this.pools.entrySet()) { + buf.append("**** ").append(entry.getKey()).append(" ****\n"); + buf.append("ADMIN: ").append(entry.getValue().get(Priority.ADMIN)).append("\n"); + buf.append("HIGH: ").append(entry.getValue().get(Priority.HIGH)).append("\n"); + buf.append("NORMAL: ").append(entry.getValue().get(Priority.NORMAL)).append("\n"); + buf.append("LOW: ").append(entry.getValue().get(Priority.LOW)).append("\n"); + } + + return buf.toString(); + } + + /** + * Returns metrics for the AccumuloConnectionFactory + * + * @return list of ConnectionPool (connection pool metrics) + */ + @Override + public List getConnectionPools() { + ArrayList connectionPools = new ArrayList<>(); + + Set exclude = new HashSet<>(); + exclude.add("connection.state.start"); + exclude.add("state"); + exclude.add("request.location"); + + SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS"); + + for (Entry> entry : this.pools.entrySet()) { + for (Entry entry2 : entry.getValue().entrySet()) { + String poolName = entry.getKey(); + Priority priority = entry2.getKey(); + AccumuloClientPool p = entry2.getValue(); + + Long now = System.currentTimeMillis(); + MutableInt maxActive = new MutableInt(); + 
+                MutableInt numActive = new MutableInt();
+                MutableInt maxIdle = new MutableInt();
+                MutableInt numIdle = new MutableInt();
+                MutableInt numWaiting = new MutableInt();
+                // getConnectionPoolStats will collect the tracking maps and maxActive, numActive, maxIdle, numIdle while synchronized
+                // to ensure consistency between the GenericObjectPool and the tracking maps
+                List<Map<String,String>> requestingConnectionsMap = p.getConnectionPoolStats(maxActive, numActive, maxIdle, numIdle, numWaiting);
+
+                ConnectionPool poolInfo = new ConnectionPool();
+                poolInfo.setPriority(priority.name());
+                poolInfo.setMaxActive(maxActive.toInteger());
+                poolInfo.setNumActive(numActive.toInteger());
+                poolInfo.setNumWaiting(numWaiting.toInteger());
+                poolInfo.setMaxIdle(maxIdle.toInteger());
+                poolInfo.setNumIdle(numIdle.toInteger());
+                poolInfo.setPoolName(poolName);
+
+                List<Connection> requestingConnections = new ArrayList<>();
+                for (Map<String,String> m : requestingConnectionsMap) {
+                    Connection c = new Connection();
+                    String state = m.get("state");
+                    if (state != null) {
+                        c.setState(state);
+                    }
+                    String requestLocation = m.get("request.location");
+                    if (requestLocation != null) {
+                        c.setRequestLocation(requestLocation);
+                    }
+                    String stateStart = m.get("connection.state.start");
+                    if (stateStart != null) {
+                        Long stateStartLong = Long.valueOf(stateStart);
+                        c.setTimeInState((now - stateStartLong));
+                        Date stateStartDate = new Date(stateStartLong);
+                        c.addProperty("connection.state.start", sdf.format(stateStartDate));
+                    }
+                    for (Entry<String,String> e : m.entrySet()) {
+                        if (!exclude.contains(e.getKey())) {
+                            c.addProperty(e.getKey(), e.getValue());
+                        }
+                    }
+                    requestingConnections.add(c);
+                }
+                Collections.sort(requestingConnections);
+                poolInfo.setConnectionRequests(requestingConnections);
+                connectionPools.add(poolInfo);
+            }
+        }
+        return connectionPools;
+    }
+
+    @Override
+    public int getConnectionUsagePercent() {
+        double maxPercentage = 0.0;
+        for (Entry<String,Map<Priority,AccumuloClientPool>> entry : pools.entrySet()) {
+            for (Entry<Priority,AccumuloClientPool> poolEntry : entry.getValue().entrySet()) {
+                // Don't include ADMIN priority connections when computing a usage percentage
+                if (Priority.ADMIN.equals(poolEntry.getKey()))
+                    continue;
+
+                MutableInt maxActive = new MutableInt();
+                MutableInt numActive = new MutableInt();
+                MutableInt numWaiting = new MutableInt();
+                MutableInt unused = new MutableInt();
+                poolEntry.getValue().getConnectionPoolStats(maxActive, numActive, unused, unused, numWaiting);
+
+                double percentage = (numActive.doubleValue() + numWaiting.doubleValue()) / maxActive.doubleValue();
+                if (percentage > maxPercentage) {
+                    maxPercentage = percentage;
+                }
+            }
+        }
+        return (int) (maxPercentage * 100);
+    }
+
+    @Override
+    public Map<String,String> getTrackingMap(StackTraceElement[] stackTrace) {
+        HashMap<String,String> trackingMap = new HashMap<>();
+        if (stackTrace != null) {
+            StackTraceElement ste = stackTrace[1];
+            trackingMap.put(REQUEST_LOCATION, ste.getClassName() + "." + ste.getMethodName() + ":" + ste.getLineNumber());
+        }
+
+        return trackingMap;
+    }
+}
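Taken together, getClient/returnClient above define the factory's borrow-and-return contract. The following stand-alone sketch shows how a caller is expected to use it; the pool name, DNs, and surrounding class are illustrative assumptions, not part of this patch:

import java.util.List;
import java.util.Map;

import org.apache.accumulo.core.client.AccumuloClient;

import datawave.core.common.connection.AccumuloConnectionFactory;

class ConnectionFactoryUsageSketch {
    void runWithPooledClient(AccumuloConnectionFactory connectionFactory) throws Exception {
        // getTrackingMap records the call site under the REQUEST_LOCATION key
        Map<String,String> trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace());
        AccumuloClient client = null;
        try {
            // hypothetical pool name and DNs; getClient blocks until a pooled client is available
            client = connectionFactory.getClient("cn=example user", List.of("cn=example proxy"), "WAREHOUSE",
                            AccumuloConnectionFactory.Priority.NORMAL, trackingMap);
            // ... use the client; scanners pick up any configured classloader context ...
        } finally {
            if (client != null) {
                // unwraps the WrappedAccumuloClient and returns the underlying client to its pool
                connectionFactory.returnClient(client);
            }
        }
    }
}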
diff --git a/web-services/common-util/src/main/java/datawave/webservice/common/result/AccumuloTableCacheStatus.java b/core/connection-pool/src/main/java/datawave/core/common/result/AccumuloTableCacheStatus.java
similarity index 92%
rename from web-services/common-util/src/main/java/datawave/webservice/common/result/AccumuloTableCacheStatus.java
rename to core/connection-pool/src/main/java/datawave/core/common/result/AccumuloTableCacheStatus.java
index db7bc955185..ac7a0f52798 100644
--- a/web-services/common-util/src/main/java/datawave/webservice/common/result/AccumuloTableCacheStatus.java
+++ b/core/connection-pool/src/main/java/datawave/core/common/result/AccumuloTableCacheStatus.java
@@ -1,4 +1,4 @@
-package datawave.webservice.common.result;
+package datawave.core.common.result;
 
 import java.util.LinkedList;
 import java.util.List;
@@ -24,7 +24,7 @@ public class AccumuloTableCacheStatus extends BaseResponse implements HtmlProvid
 
     @XmlElementWrapper(name = "TableCaches")
     @XmlElement(name = "TableCache")
-    private List<TableCache> caches = new LinkedList<>();
+    private List<TableCacheDescription> caches = new LinkedList<>();
 
     @Override
     public String getTitle() {
@@ -57,7 +57,7 @@ public String getMainContent() {
         builder.append("");
"); builder.append(""); builder.append(""); - for (TableCache cache : caches) { + for (TableCacheDescription cache : caches) { builder.append(""); builder.append(""); builder.append(""); @@ -72,7 +72,7 @@ public String getMainContent() { return builder.toString(); } - public List getCaches() { + public List getCaches() { return caches; } } diff --git a/web-services/common-util/src/main/java/datawave/webservice/common/result/Connection.java b/core/connection-pool/src/main/java/datawave/core/common/result/Connection.java similarity index 98% rename from web-services/common-util/src/main/java/datawave/webservice/common/result/Connection.java rename to core/connection-pool/src/main/java/datawave/core/common/result/Connection.java index daf21c74e4b..ad234db4cb1 100644 --- a/web-services/common-util/src/main/java/datawave/webservice/common/result/Connection.java +++ b/core/connection-pool/src/main/java/datawave/core/common/result/Connection.java @@ -1,4 +1,4 @@ -package datawave.webservice.common.result; +package datawave.core.common.result; import java.io.Serializable; import java.util.Set; diff --git a/web-services/common-util/src/main/java/datawave/webservice/common/result/ConnectionFactoryResponse.java b/core/connection-pool/src/main/java/datawave/core/common/result/ConnectionFactoryResponse.java similarity index 99% rename from web-services/common-util/src/main/java/datawave/webservice/common/result/ConnectionFactoryResponse.java rename to core/connection-pool/src/main/java/datawave/core/common/result/ConnectionFactoryResponse.java index 12a518b9000..613d79697f8 100644 --- a/web-services/common-util/src/main/java/datawave/webservice/common/result/ConnectionFactoryResponse.java +++ b/core/connection-pool/src/main/java/datawave/core/common/result/ConnectionFactoryResponse.java @@ -1,4 +1,4 @@ -package datawave.webservice.common.result; +package datawave.core.common.result; import java.text.NumberFormat; import java.util.LinkedList; diff --git a/web-services/common-util/src/main/java/datawave/webservice/common/result/ConnectionPool.java b/core/connection-pool/src/main/java/datawave/core/common/result/ConnectionPool.java similarity index 98% rename from web-services/common-util/src/main/java/datawave/webservice/common/result/ConnectionPool.java rename to core/connection-pool/src/main/java/datawave/core/common/result/ConnectionPool.java index d9b66d0623e..fc453a9ef8d 100644 --- a/web-services/common-util/src/main/java/datawave/webservice/common/result/ConnectionPool.java +++ b/core/connection-pool/src/main/java/datawave/core/common/result/ConnectionPool.java @@ -1,4 +1,4 @@ -package datawave.webservice.common.result; +package datawave.core.common.result; import java.io.Serializable; import java.util.List; diff --git a/core/connection-pool/src/main/java/datawave/core/common/result/ConnectionPoolClientProperties.java b/core/connection-pool/src/main/java/datawave/core/common/result/ConnectionPoolClientProperties.java new file mode 100644 index 00000000000..7bd1a77136d --- /dev/null +++ b/core/connection-pool/src/main/java/datawave/core/common/result/ConnectionPoolClientProperties.java @@ -0,0 +1,25 @@ +package datawave.core.common.result; + +import org.apache.log4j.Logger; + +import datawave.webservice.common.connection.AccumuloClientConfiguration; + +/** + * The configuration for the connection pool clients of the form derived from properties as follows: + * + * dw.{pool}.client.{tableName}.consistency = IMMEDIATE|EVENTUAL dw.{pool}.client.{tableName}.{hintName} = {hintValue} + * + */ +public 
+public class ConnectionPoolClientProperties {
+
+    private static final Logger log = Logger.getLogger(ConnectionPoolClientProperties.class);
+    protected AccumuloClientConfiguration config = new AccumuloClientConfiguration();
+
+    public AccumuloClientConfiguration getConfiguration() {
+        return config;
+    }
+
+    public void setConfiguration(AccumuloClientConfiguration config) {
+        this.config = config;
+    }
+}
diff --git a/core/connection-pool/src/main/java/datawave/core/common/result/ConnectionPoolProperties.java b/core/connection-pool/src/main/java/datawave/core/common/result/ConnectionPoolProperties.java
new file mode 100644
index 00000000000..980a0a75af3
--- /dev/null
+++ b/core/connection-pool/src/main/java/datawave/core/common/result/ConnectionPoolProperties.java
@@ -0,0 +1,77 @@
+package datawave.core.common.result;
+
+public class ConnectionPoolProperties {
+    protected String username;
+    protected String password;
+    protected String instance;
+    protected String zookeepers;
+    protected int lowPriorityPoolSize;
+    protected int normalPriorityPoolSize;
+    protected int highPriorityPoolSize;
+    protected int adminPriorityPoolSize;
+
+    public String getUsername() {
+        return username;
+    }
+
+    public String getPassword() {
+        return password;
+    }
+
+    public String getInstance() {
+        return instance;
+    }
+
+    public String getZookeepers() {
+        return zookeepers;
+    }
+
+    public int getLowPriorityPoolSize() {
+        return lowPriorityPoolSize;
+    }
+
+    public int getNormalPriorityPoolSize() {
+        return normalPriorityPoolSize;
+    }
+
+    public int getHighPriorityPoolSize() {
+        return highPriorityPoolSize;
+    }
+
+    public int getAdminPriorityPoolSize() {
+        return adminPriorityPoolSize;
+    }
+
+    public void setUsername(String username) {
+        this.username = username;
+    }
+
+    public void setPassword(String password) {
+        this.password = password;
+    }
+
+    public void setInstance(String instance) {
+        this.instance = instance;
+    }
+
+    public void setZookeepers(String zookeepers) {
+        this.zookeepers = zookeepers;
+    }
+
+    public void setLowPriorityPoolSize(int lowPriorityPoolSize) {
+        this.lowPriorityPoolSize = lowPriorityPoolSize;
+    }
+
+    public void setNormalPriorityPoolSize(int normalPriorityPoolSize) {
+        this.normalPriorityPoolSize = normalPriorityPoolSize;
+    }
+
+    public void setHighPriorityPoolSize(int highPriorityPoolSize) {
+        this.highPriorityPoolSize = highPriorityPoolSize;
+    }
+
+    public void setAdminPriorityPoolSize(int adminPriorityPoolSize) {
+        this.adminPriorityPoolSize = adminPriorityPoolSize;
+    }
+
+}
diff --git a/core/connection-pool/src/main/java/datawave/core/common/result/ConnectionPoolsProperties.java b/core/connection-pool/src/main/java/datawave/core/common/result/ConnectionPoolsProperties.java
new file mode 100644
index 00000000000..d57fc39d3d8
--- /dev/null
+++ b/core/connection-pool/src/main/java/datawave/core/common/result/ConnectionPoolsProperties.java
@@ -0,0 +1,49 @@
+package datawave.core.common.result;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class ConnectionPoolsProperties {
+    protected String defaultPool;
+    protected Map<String,ConnectionPoolProperties> pools = new HashMap<>();
+    protected Map<String,ConnectionPoolClientProperties> configs = new HashMap<>();
+
+    public String getDefaultPool() {
+        return defaultPool;
+    }
+
+    public Map<String,ConnectionPoolProperties> getPools() {
+        return Collections.unmodifiableMap(pools);
+    }
+
+    public ConnectionPoolProperties getConfiguration(String pool) {
+        return pools.get(pool);
+    }
+
+    public List<String> getPoolNames() {
+        return Collections.unmodifiableList(new ArrayList<>(pools.keySet()));
+    }
+
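+    /**
+     * @return an unmodifiable view of all per-pool client configurations, keyed by pool name
+     */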
+    public Map<String,ConnectionPoolClientProperties> getClientConfiguration() {
+        return Collections.unmodifiableMap(configs);
+    }
+
+    public ConnectionPoolClientProperties getClientConfiguration(String pool) {
+        return configs.get(pool);
+    }
+
+    public void setDefaultPool(String defaultPool) {
+        this.defaultPool = defaultPool;
+    }
+
+    public void setPools(Map<String,ConnectionPoolProperties> pools) {
+        this.pools = pools;
+    }
+
+    public void setClientConfiguration(Map<String,ConnectionPoolClientProperties> configs) {
+        this.configs = configs;
+    }
+}
diff --git a/web-services/common-util/src/main/java/datawave/webservice/common/result/ConnectionProperty.java b/core/connection-pool/src/main/java/datawave/core/common/result/ConnectionProperty.java
similarity index 97%
rename from web-services/common-util/src/main/java/datawave/webservice/common/result/ConnectionProperty.java
rename to core/connection-pool/src/main/java/datawave/core/common/result/ConnectionProperty.java
index eb09d1bca66..937aeea7e07 100644
--- a/web-services/common-util/src/main/java/datawave/webservice/common/result/ConnectionProperty.java
+++ b/core/connection-pool/src/main/java/datawave/core/common/result/ConnectionProperty.java
@@ -1,4 +1,4 @@
-package datawave.webservice.common.result;
+package datawave.core.common.result;
 
 import java.io.Serializable;
diff --git a/web-services/common-util/src/main/java/datawave/webservice/common/result/TableCache.java b/core/connection-pool/src/main/java/datawave/core/common/result/TableCacheDescription.java
similarity index 95%
rename from web-services/common-util/src/main/java/datawave/webservice/common/result/TableCache.java
rename to core/connection-pool/src/main/java/datawave/core/common/result/TableCacheDescription.java
index bf2502a8a32..ced34faed6c 100644
--- a/web-services/common-util/src/main/java/datawave/webservice/common/result/TableCache.java
+++ b/core/connection-pool/src/main/java/datawave/core/common/result/TableCacheDescription.java
@@ -1,4 +1,4 @@
-package datawave.webservice.common.result;
+package datawave.core.common.result;
 
 import java.io.Serializable;
 import java.util.Date;
@@ -10,8 +10,7 @@
 
 @XmlRootElement
 @XmlAccessorType(XmlAccessType.NONE)
-public class TableCache implements Serializable {
-
+public class TableCacheDescription implements Serializable {
     private static final long serialVersionUID = 1L;
 
     @XmlAttribute
diff --git a/core/connection-pool/src/main/java/datawave/core/query/runner/AccumuloConnectionRequestMap.java b/core/connection-pool/src/main/java/datawave/core/query/runner/AccumuloConnectionRequestMap.java
new file mode 100644
index 00000000000..6e346f431d6
--- /dev/null
+++ b/core/connection-pool/src/main/java/datawave/core/query/runner/AccumuloConnectionRequestMap.java
@@ -0,0 +1,105 @@
+package datawave.core.query.runner;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.accumulo.core.util.Pair;
+import org.apache.log4j.Logger;
+
+import datawave.core.common.connection.AccumuloConnectionFactory;
+
+/**
+ * For storing a map of queryId to Thread that is requesting an AccumuloConnection
+ */
+public class AccumuloConnectionRequestMap {
+
+    private static Logger log = Logger.getLogger(AccumuloConnectionRequestMap.class);
+
+    /**
+     * This maps the query-id to a pair containing the tracking map (see the AccumuloConnectionFactory) and the thread handling the request
+     */
+    private Map<String,List<Pair<Map<String,String>,Thread>>> connectionThreadMap = new HashMap<>();
+
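+    /**
+     * Interrupts any threads waiting on an Accumulo connection for the given query id, provided the caller's DN
+     * matches the DN recorded in the tracking map when the request began.
+     *
+     * @return true if at least one pending request was interrupted
+     */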
+    public boolean cancelConnectionRequest(String id, String userDn) {
+        // this call checks that the Principal used for the connection request and the connection cancel are the same
+        // if query is waiting for an accumulo connection in create or reset, then interrupt it
+        boolean connectionRequestCanceled = false;
+        synchronized (connectionThreadMap) {
+            List<Pair<Map<String,String>,Thread>> connectionRequestPairs = connectionThreadMap.get(id);
+            if (connectionRequestPairs != null) {
+                for (Pair<Map<String,String>,Thread> connectionRequestPair : connectionRequestPairs) {
+                    try {
+                        if (connectionRequestPair != null && connectionRequestPair.getFirst() != null) {
+                            String connectionRequestPrincipalName = connectionRequestPair.getFirst().get(AccumuloConnectionFactory.USER_DN);
+                            String connectionCancelPrincipalName = userDn;
+                            if (connectionRequestPrincipalName.equals(connectionCancelPrincipalName)) {
+                                connectionRequestPair.getSecond().interrupt();
+                                connectionRequestCanceled = true;
+                            }
+                        }
+                    } catch (Exception e) {
+                        log.error(e.getMessage(), e);
+                    }
+                }
+            }
+        }
+        return connectionRequestCanceled;
+    }
+
+    public boolean adminCancelConnectionRequest(String id) {
+        // it is assumed that admin status is already checked, so this call does not check the calling Principals
+        // if query is waiting for an accumulo connection in create or reset, then interrupt it
+        boolean connectionRequestCanceled = false;
+        List<Pair<Map<String,String>,Thread>> connectionRequestPairs = connectionThreadMap.get(id);
+        if (connectionRequestPairs != null) {
+            for (Pair<Map<String,String>,Thread> connectionRequestPair : connectionRequestPairs) {
+                try {
+                    if (connectionRequestPair != null && connectionRequestPair.getFirst() != null) {
+                        connectionRequestPair.getSecond().interrupt();
+                        connectionRequestCanceled = true;
+                    }
+                } catch (Exception e) {
+                    log.error(e.getMessage(), e);
+                }
+            }
+        }
+
+        return connectionRequestCanceled;
+    }
+
+    public void requestBegin(String id, String userDN, Map<String,String> trackingMap) {
+        synchronized (connectionThreadMap) {
+            List<Pair<Map<String,String>,Thread>> connectionRequestPairs = connectionThreadMap.get(id);
+            if (connectionRequestPairs == null) {
+                connectionRequestPairs = new ArrayList<>();
+                connectionThreadMap.put(id, connectionRequestPairs);
+            }
+            Pair<Map<String,String>,Thread> connectionRequestPair = new Pair<>(trackingMap, Thread.currentThread());
+            if (userDN != null && trackingMap != null)
+                trackingMap.put(AccumuloConnectionFactory.USER_DN, userDN);
+            connectionRequestPairs.add(connectionRequestPair);
+        }
+    }
+
+    public void requestEnd(String id) {
+        synchronized (connectionThreadMap) {
+            List<Pair<Map<String,String>,Thread>> connectionRequestPairs = connectionThreadMap.get(id);
+            Thread t = Thread.currentThread();
+            Iterator<Pair<Map<String,String>,Thread>> it = connectionRequestPairs.iterator();
+            boolean found = false;
+            while (!found && it.hasNext()) {
+                Pair<Map<String,String>,Thread> connectionRequestPair = it.next();
+                if (connectionRequestPair.getSecond().equals(t)) {
+                    it.remove();
+                    found = true;
+                }
+            }
+            if (connectionRequestPairs.isEmpty()) {
+                connectionThreadMap.remove(id);
+            }
+        }
+    }
+}
diff --git a/core/connection-pool/src/main/resources/META-INF/beans.xml b/core/connection-pool/src/main/resources/META-INF/beans.xml
new file mode 100644
index 00000000000..4ca201f8ff2
--- /dev/null
+++ b/core/connection-pool/src/main/resources/META-INF/beans.xml
@@ -0,0 +1,9 @@
+
+
+
\ No newline at end of file
diff --git a/core/connection-pool/src/main/resources/META-INF/jboss-ejb3.xml b/core/connection-pool/src/main/resources/META-INF/jboss-ejb3.xml
new file mode 100644
index 00000000000..8cf49db8c87
--- /dev/null
+++ b/core/connection-pool/src/main/resources/META-INF/jboss-ejb3.xml
@@ -0,0 +1,16 @@
+
+
+
+
+
+
+ *
+ datawave
+
+
+
+
\ No newline at end of file
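The request map just added is what lets a blocked connection request be cancelled from another thread. A minimal sketch of the intended lifecycle, with an invented query id and DN (the surrounding class is illustrative, not part of this patch):

import java.util.HashMap;
import java.util.Map;

import datawave.core.query.runner.AccumuloConnectionRequestMap;

class ConnectionRequestLifecycleSketch {
    private final AccumuloConnectionRequestMap requests = new AccumuloConnectionRequestMap();

    void borrowWithCancellationWindow() {
        Map<String,String> trackingMap = new HashMap<>();
        // register this thread before blocking on the pool; requestBegin also stamps the
        // caller's DN into the tracking map so cancelConnectionRequest can verify identity
        requests.requestBegin("query-123", "cn=example user", trackingMap);
        try {
            // ... block on AccumuloConnectionFactory.getClient(...) here; a concurrent
            // cancelConnectionRequest("query-123", "cn=example user") or
            // adminCancelConnectionRequest("query-123") interrupts this thread ...
        } finally {
            requests.requestEnd("query-123"); // always deregister, even after an interrupt
        }
    }
}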
diff --git a/web-services/security/src/main/resources/source-templates/datawave/security/cache/package-info.java b/core/connection-pool/src/main/resources/source-templates/datawave/core/common/result/package-info.java
similarity index 76%
rename from web-services/security/src/main/resources/source-templates/datawave/security/cache/package-info.java
rename to core/connection-pool/src/main/resources/source-templates/datawave/core/common/result/package-info.java
index c364a3b87ec..366079fbbb5 100644
--- a/web-services/security/src/main/resources/source-templates/datawave/security/cache/package-info.java
+++ b/core/connection-pool/src/main/resources/source-templates/datawave/core/common/result/package-info.java
@@ -1,7 +1,6 @@
 @XmlSchema(namespace="${datawave.webservice.namespace}", elementFormDefault=XmlNsForm.QUALIFIED, xmlns={@XmlNs(prefix = "", namespaceURI = "${datawave.webservice.namespace}")})
-package datawave.security.cache;
+package datawave.core.common.result;
 
 import javax.xml.bind.annotation.XmlNs;
 import javax.xml.bind.annotation.XmlNsForm;
-import javax.xml.bind.annotation.XmlSchema;
-
+import javax.xml.bind.annotation.XmlSchema;
\ No newline at end of file
diff --git a/web-services/common/src/test/java/datawave/webservice/common/cache/SharedCacheCoordinatorTest.java b/core/connection-pool/src/test/java/datawave/core/common/cache/SharedCacheCoordinatorTest.java
similarity index 96%
rename from web-services/common/src/test/java/datawave/webservice/common/cache/SharedCacheCoordinatorTest.java
rename to core/connection-pool/src/test/java/datawave/core/common/cache/SharedCacheCoordinatorTest.java
index f38395e070a..d466442cab6 100644
--- a/web-services/common/src/test/java/datawave/webservice/common/cache/SharedCacheCoordinatorTest.java
+++ b/core/connection-pool/src/test/java/datawave/core/common/cache/SharedCacheCoordinatorTest.java
@@ -1,4 +1,4 @@
-package datawave.webservice.common.cache;
+package datawave.core.common.cache;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -124,8 +124,9 @@ public void stateChanged(CuratorFramework client, ConnectionState newState) {
                 break;
             Thread.sleep(200L);
         }
-        assertEquals("Client never reconnected.", ConnectionState.RECONNECTED, state[0]);
+        // unfortunately curator does not always propagate the RECONNECTED state to the listener
+        // assertEquals("Client never reconnected.", ConnectionState.RECONNECTED, state[0]);
         newCount = 42;
         oldCount = counter.getVersionedValue();
         counter.trySetCount(oldCount, newCount);
@@ -181,7 +182,7 @@ public void stateChanged(CuratorFramework client, ConnectionState newState) {}
         testingZooKeeperServer.kill();
 
-        for (int i = 0; i < 15; ++i) {
+        for (int i = 0; i < 20; ++i) {
             if (ConnectionState.LOST.equals(state[0]))
                 break;
             Thread.sleep(3000L);
@@ -190,7 +191,7 @@ public void stateChanged(CuratorFramework client, ConnectionState newState) {}
 
         testingZooKeeperServer.restart();
 
-        for (int i = 0; i < 15; ++i) {
+        for (int i = 0; i < 20; ++i) {
             if (ConnectionState.RECONNECTED.equals(state[0]))
                 break;
             Thread.sleep(3000L);
diff --git a/web-services/common/src/test/java/datawave/webservice/common/connection/AccumuloConnectionFactoryTest.java b/core/connection-pool/src/test/java/datawave/core/common/connection/AccumuloConnectionFactoryTest.java
similarity index 68%
rename from web-services/common/src/test/java/datawave/webservice/common/connection/AccumuloConnectionFactoryTest.java
rename to
core/connection-pool/src/test/java/datawave/core/common/connection/AccumuloConnectionFactoryTest.java index 3d516b01f5b..8d891c689e7 100644 --- a/web-services/common/src/test/java/datawave/webservice/common/connection/AccumuloConnectionFactoryTest.java +++ b/core/connection-pool/src/test/java/datawave/core/common/connection/AccumuloConnectionFactoryTest.java @@ -1,6 +1,5 @@ -package datawave.webservice.common.connection; +package datawave.core.common.connection; -import static org.easymock.MockType.STRICT; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; @@ -14,10 +13,12 @@ import org.apache.accumulo.core.client.AccumuloClient; import org.apache.commons.pool2.PooledObject; import org.apache.commons.pool2.impl.DefaultPooledObject; +import org.apache.log4j.Logger; import org.easymock.EasyMock; import org.easymock.EasyMockRunner; import org.easymock.EasyMockSupport; import org.easymock.Mock; +import org.easymock.MockType; import org.easymock.TestSubject; import org.junit.After; import org.junit.Before; @@ -25,30 +26,27 @@ import org.junit.runner.RunWith; import org.powermock.reflect.Whitebox; -import com.google.common.collect.Lists; - import datawave.accumulo.inmemory.InMemoryInstance; -import datawave.webservice.common.cache.AccumuloTableCache; -import datawave.webservice.common.connection.AccumuloConnectionFactory.Priority; -import datawave.webservice.common.connection.config.ConnectionPoolConfiguration; -import datawave.webservice.common.connection.config.ConnectionPoolsConfiguration; +import datawave.core.common.cache.AccumuloTableCache; +import datawave.core.common.result.ConnectionPoolProperties; +import datawave.core.common.result.ConnectionPoolsProperties; +import datawave.webservice.common.connection.WrappedAccumuloClient; @RunWith(EasyMockRunner.class) public class AccumuloConnectionFactoryTest extends EasyMockSupport { - @TestSubject - private AccumuloConnectionFactoryBean bean = createMockBuilder(AccumuloConnectionFactoryBean.class) - .addMockedMethods("getCurrentUserDN", "getCurrentProxyServers").createStrictMock(); - - @Mock(type = STRICT) + @Mock(type = MockType.STRICT) private AccumuloTableCache cache; private InMemoryInstance instance = new InMemoryInstance(); - @Mock(type = STRICT) + @TestSubject + private AccumuloConnectionFactoryImpl factory = createMockBuilder(AccumuloConnectionFactoryImpl.class).createStrictMock(); + + @Mock(type = MockType.STRICT) private WrappedAccumuloClient warehouseClient; - @Mock(type = STRICT) + @Mock(type = MockType.STRICT) private WrappedAccumuloClient metricsClient; @Before @@ -63,19 +61,17 @@ public void setup() throws Exception { warehouseFactory.setClient(warehouseClient); metricsFactory.setClient(metricsClient); - Map configs = new HashMap<>(); + Map configs = new HashMap<>(); configs.put("WAREHOUSE", null); configs.put("METRICS", null); - ConnectionPoolsConfiguration conf = new ConnectionPoolsConfiguration(); + ConnectionPoolsProperties conf = new ConnectionPoolsProperties(); Whitebox.setInternalState(conf, "defaultPool", "WAREHOUSE"); - Whitebox.setInternalState(conf, "poolNames", Lists.newArrayList("WAREHOUSE", "METRICS")); Whitebox.setInternalState(conf, "pools", configs); - String defaultPoolName = conf.getDefaultPool(); - HashMap> pools = new HashMap<>(); + HashMap> pools = new HashMap<>(); MyAccumuloClientPool warehousePool = new MyAccumuloClientPool(warehouseFactory); MyAccumuloClientPool metricsPool = new MyAccumuloClientPool(metricsFactory); - for 
(Entry entry : conf.getPools().entrySet()) { + for (Entry entry : conf.getPools().entrySet()) { AccumuloClientPool acp = null; switch (entry.getKey()) { case "METRICS": @@ -87,16 +83,18 @@ public void setup() throws Exception { default: fail("Unknown pool name " + entry.getKey()); } - Map p = new HashMap<>(); - p.put(Priority.ADMIN, acp); - p.put(Priority.HIGH, acp); - p.put(Priority.NORMAL, acp); - p.put(Priority.LOW, acp); + Map p = new HashMap<>(); + p.put(AccumuloConnectionFactory.Priority.ADMIN, acp); + p.put(AccumuloConnectionFactory.Priority.HIGH, acp); + p.put(AccumuloConnectionFactory.Priority.NORMAL, acp); + p.put(AccumuloConnectionFactory.Priority.LOW, acp); pools.put(entry.getKey(), Collections.unmodifiableMap(p)); } - Whitebox.setInternalState(bean, ConnectionPoolsConfiguration.class, conf); - Whitebox.setInternalState(bean, "defaultPoolName", defaultPoolName); - Whitebox.setInternalState(bean, "pools", pools); + Whitebox.setInternalState(factory, "log", Logger.getLogger(AccumuloConnectionFactoryImpl.class)); + Whitebox.setInternalState(factory, ConnectionPoolsProperties.class, conf); + Whitebox.setInternalState(factory, "defaultPoolName", conf.getDefaultPool()); + Whitebox.setInternalState(factory, "pools", pools); + Whitebox.setInternalState(factory, "cache", cache); } @After @@ -108,10 +106,8 @@ public void cleanup() { public void testGetConnection() throws Exception { resetAll(); EasyMock.expect(cache.getInstance()).andReturn(instance); - EasyMock.expect(bean.getCurrentUserDN()).andReturn(null); - EasyMock.expect(bean.getCurrentProxyServers()).andReturn(null); replayAll(); - AccumuloClient con = bean.getClient(Priority.HIGH, new HashMap<>()); + AccumuloClient con = factory.getClient(null, null, AccumuloConnectionFactory.Priority.HIGH, new HashMap<>()); verifyAll(); assertNotNull(con); assertEquals(warehouseClient, ((WrappedAccumuloClient) con).getReal()); @@ -122,10 +118,8 @@ public void testGetConnection() throws Exception { public void testGetWarehouseConnection() throws Exception { resetAll(); EasyMock.expect(cache.getInstance()).andReturn(new InMemoryInstance()); - EasyMock.expect(bean.getCurrentUserDN()).andReturn(null); - EasyMock.expect(bean.getCurrentProxyServers()).andReturn(null); replayAll(); - AccumuloClient con = bean.getClient("WAREHOUSE", Priority.HIGH, new HashMap<>()); + AccumuloClient con = factory.getClient(null, null, "WAREHOUSE", AccumuloConnectionFactory.Priority.HIGH, new HashMap<>()); verifyAll(); assertNotNull(con); assertEquals(warehouseClient, ((WrappedAccumuloClient) con).getReal()); @@ -136,10 +130,8 @@ public void testGetContextConnection() throws Exception { System.setProperty("dw.accumulo.classLoader.context", "alternateContext"); resetAll(); EasyMock.expect(cache.getInstance()).andReturn(new InMemoryInstance()); - EasyMock.expect(bean.getCurrentUserDN()).andReturn(null); - EasyMock.expect(bean.getCurrentProxyServers()).andReturn(null); replayAll(); - AccumuloClient con = bean.getClient("WAREHOUSE", Priority.HIGH, new HashMap<>()); + AccumuloClient con = factory.getClient(null, null, "WAREHOUSE", AccumuloConnectionFactory.Priority.HIGH, new HashMap<>()); verifyAll(); assertNotNull(con); assertEquals(warehouseClient, ((WrappedAccumuloClient) con).getReal()); @@ -150,10 +142,8 @@ public void testGetContextConnection() throws Exception { public void testGetMetricsConnection() throws Exception { resetAll(); EasyMock.expect(cache.getInstance()).andReturn(new InMemoryInstance()); - EasyMock.expect(bean.getCurrentUserDN()).andReturn(null); - 
EasyMock.expect(bean.getCurrentProxyServers()).andReturn(null); replayAll(); - AccumuloClient con = bean.getClient("METRICS", Priority.HIGH, new HashMap<>()); + AccumuloClient con = factory.getClient(null, null, "METRICS", AccumuloConnectionFactory.Priority.HIGH, new HashMap<>()); verifyAll(); assertNotNull(con); assertEquals(metricsClient, ((WrappedAccumuloClient) con).getReal()); diff --git a/web-services/common/src/test/java/datawave/webservice/common/curator/TestSharedCacheCoordinator.java b/core/connection-pool/src/test/java/datawave/core/common/curator/TestSharedCacheCoordinator.java similarity index 98% rename from web-services/common/src/test/java/datawave/webservice/common/curator/TestSharedCacheCoordinator.java rename to core/connection-pool/src/test/java/datawave/core/common/curator/TestSharedCacheCoordinator.java index 33386d31cf6..8d7a7d7dea6 100644 --- a/web-services/common/src/test/java/datawave/webservice/common/curator/TestSharedCacheCoordinator.java +++ b/core/connection-pool/src/test/java/datawave/core/common/curator/TestSharedCacheCoordinator.java @@ -1,4 +1,4 @@ -package datawave.webservice.common.curator; +package datawave.core.common.curator; import java.io.IOException; import java.io.Serializable; @@ -23,16 +23,16 @@ import org.apache.curator.framework.recipes.shared.SharedCountListener; import org.apache.curator.retry.ExponentialBackoffRetry; import org.apache.curator.utils.ZKPaths; +import org.apache.log4j.Logger; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.ZKUtil; import org.apache.zookeeper.data.Stat; -import org.jboss.logging.Logger; import com.google.common.base.Preconditions; -import datawave.common.util.ArgumentChecker; -import datawave.webservice.common.cache.SharedBoolean; -import datawave.webservice.common.cache.SharedBooleanListener; +import datawave.core.common.cache.SharedBoolean; +import datawave.core.common.cache.SharedBooleanListener; +import datawave.core.common.cache.SharedCacheCoordinator.ArgumentChecker; /** * Coordinates operations on a shared cache. 
That is, this coordinates operations where an in-memory cache may be running on multiple servers and each in-memory diff --git a/web-services/common-util/src/test/java/datawave/common/result/ConnectionPoolTest.java b/core/connection-pool/src/test/java/datawave/core/common/result/ConnectionPoolTest.java similarity index 94% rename from web-services/common-util/src/test/java/datawave/common/result/ConnectionPoolTest.java rename to core/connection-pool/src/test/java/datawave/core/common/result/ConnectionPoolTest.java index 33af8b795f0..6c5171908ac 100644 --- a/web-services/common-util/src/test/java/datawave/common/result/ConnectionPoolTest.java +++ b/core/connection-pool/src/test/java/datawave/core/common/result/ConnectionPoolTest.java @@ -1,4 +1,4 @@ -package datawave.common.result; +package datawave.core.common.result; import java.util.Iterator; import java.util.LinkedList; @@ -9,8 +9,7 @@ import org.junit.Before; import org.junit.Test; -import datawave.webservice.common.result.ConnectionPool; -import datawave.webservice.common.result.ConnectionPool.Priority; +import datawave.core.common.result.ConnectionPool.Priority; /** * diff --git a/core/connection-pool/src/test/resources/log4j.properties b/core/connection-pool/src/test/resources/log4j.properties new file mode 100644 index 00000000000..cacd01b436c --- /dev/null +++ b/core/connection-pool/src/test/resources/log4j.properties @@ -0,0 +1,6 @@ +log4j.rootCategory=INFO, CONSOLE + +log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender +log4j.appender.CONSOLE.Threshold=INFO +log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout +log4j.appender.CONSOLE.layout.ConversionPattern=%-5p [%C{1}:%M] %m%n diff --git a/core/in-memory-accumulo b/core/in-memory-accumulo index 362d6dccd62..8a9d2f46d20 160000 --- a/core/in-memory-accumulo +++ b/core/in-memory-accumulo @@ -1 +1 @@ -Subproject commit 362d6dccd62d40235ae2cac45f3225ecb0c2a65f +Subproject commit 8a9d2f46d2012d4493baff5e8dc9f08f45f746d5 diff --git a/core/map-reduce/pom.xml b/core/map-reduce/pom.xml new file mode 100644 index 00000000000..35454964475 --- /dev/null +++ b/core/map-reduce/pom.xml @@ -0,0 +1,37 @@ + + + 4.0.0 + + gov.nsa.datawave.core + datawave-core-parent + 7.13.0-SNAPSHOT + + datawave-core-map-reduce + ${project.artifactId} + + + gov.nsa.datawave.core + datawave-core-common-util + ${project.version} + + + gov.nsa.datawave.core + datawave-core-query + ${project.version} + + + gov.nsa.datawave.microservice + mapreduce-query-api + + + gov.nsa.datawave.webservices + datawave-ws-client + ${project.version} + + + org.jboss.resteasy + resteasy-jaxrs + provided + + + diff --git a/core/map-reduce/src/main/java/datawave/core/mapreduce/bulkresults/map/ApplicationContextAwareMapper.java b/core/map-reduce/src/main/java/datawave/core/mapreduce/bulkresults/map/ApplicationContextAwareMapper.java new file mode 100644 index 00000000000..5a451a0b437 --- /dev/null +++ b/core/map-reduce/src/main/java/datawave/core/mapreduce/bulkresults/map/ApplicationContextAwareMapper.java @@ -0,0 +1,64 @@ +package datawave.core.mapreduce.bulkresults.map; + +import java.io.IOException; + +import org.apache.hadoop.mapreduce.Mapper; +import org.apache.log4j.Logger; +import org.springframework.context.ApplicationContext; +import org.springframework.context.annotation.AnnotationConfigApplicationContext; +import org.springframework.context.support.ClassPathXmlApplicationContext; +import org.springframework.core.io.ClassPathResource; +import org.springframework.core.io.support.ResourcePropertySource; + +public 
class ApplicationContextAwareMapper<KEYIN,VALUEIN,KEYOUT,VALUEOUT> extends Mapper<KEYIN,VALUEIN,KEYOUT,VALUEOUT> {
+
+    private static Logger log = Logger.getLogger(ApplicationContextAwareMapper.class);
+
+    public static final String SPRING_CONFIG_LOCATIONS = "spring.config.locations";
+    public static final String SPRING_CONFIG_BASE_PACKAGES = "spring.config.base-packages";
+    public static final String SPRING_CONFIG_STARTING_CLASS = "spring.config.starting-class";
+
+    protected ApplicationContext applicationContext;
+
+    /**
+     * Create a Spring Application Context
+     *
+     * @param contextPath
+     *            a possibly comma-separated list of Spring config file locations
+     * @param basePackages
+     *            a possibly comma-separated list of base packages to scan
+     * @param startingClass
+     *            the annotated starting class to be processed
+     */
+    protected void setApplicationContext(String contextPath, String basePackages, String startingClass) {
+        AnnotationConfigApplicationContext annotationApplicationContext = new AnnotationConfigApplicationContext();
+
+        try {
+            annotationApplicationContext.getEnvironment().getPropertySources()
+                            .addLast(new ResourcePropertySource(new ClassPathResource("application.properties")));
+        } catch (IOException e) {
+            log.error("application.properties could not be loaded", e);
+            throw new RuntimeException(e);
+        }
+
+        if (basePackages != null && !basePackages.isEmpty()) {
+            annotationApplicationContext.scan(basePackages.split(","));
+        }
+
+        if (startingClass != null && !startingClass.isEmpty()) {
+            try {
+                annotationApplicationContext.register(Class.forName(startingClass));
+            } catch (ClassNotFoundException e) {
+                throw new RuntimeException("Could not find starting class: " + startingClass, e);
+            }
+        }
+
+        annotationApplicationContext.refresh();
+
+        if (contextPath != null && !contextPath.isEmpty()) {
+            this.applicationContext = new ClassPathXmlApplicationContext(contextPath.split(","), annotationApplicationContext);
+        } else {
+            this.applicationContext = annotationApplicationContext;
+        }
+    }
+}
diff --git a/web-services/map-reduce/src/main/java/datawave/webservice/mr/bulkresults/map/BulkResultsFileOutputMapper.java b/core/map-reduce/src/main/java/datawave/core/mapreduce/bulkresults/map/BulkResultsFileOutputMapper.java
similarity index 90%
rename from web-services/map-reduce/src/main/java/datawave/webservice/mr/bulkresults/map/BulkResultsFileOutputMapper.java
rename to core/map-reduce/src/main/java/datawave/core/mapreduce/bulkresults/map/BulkResultsFileOutputMapper.java
index 66651fafe78..1ce75bf2cd1 100644
--- a/web-services/map-reduce/src/main/java/datawave/webservice/mr/bulkresults/map/BulkResultsFileOutputMapper.java
+++ b/core/map-reduce/src/main/java/datawave/core/mapreduce/bulkresults/map/BulkResultsFileOutputMapper.java
@@ -1,4 +1,4 @@
-package datawave.webservice.mr.bulkresults.map;
+package datawave.core.mapreduce.bulkresults.map;
 
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
@@ -22,14 +22,14 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.log4j.Logger;
-import org.jboss.weld.environment.se.Weld;
 import org.springframework.util.Assert;
 
-import datawave.webservice.query.Query;
-import datawave.webservice.query.cache.ResultsPage;
-import datawave.webservice.query.exception.EmptyObjectException;
-import datawave.webservice.query.logic.QueryLogic;
-import datawave.webservice.query.logic.QueryLogicTransformer;
+import datawave.core.query.cache.ResultsPage;
+import datawave.core.query.exception.EmptyObjectException;
+import datawave.core.query.logic.QueryLogic;
+import
datawave.core.query.logic.QueryLogicTransformer; +import datawave.microservice.mapreduce.bulkresults.map.SerializationFormat; +import datawave.microservice.query.Query; import datawave.webservice.result.BaseQueryResponse; import datawave.webservice.util.ProtostuffMessageBodyWriter; @@ -61,17 +61,9 @@ public class BulkResultsFileOutputMapper extends ApplicationContextAwareMapper entries = new HashMap<>(); private Map> responseClassMap = new HashMap<>(); private SerializationFormat format = SerializationFormat.XML; - private Weld weld; @Override protected void setup(org.apache.hadoop.mapreduce.Mapper.Context context) throws IOException, InterruptedException { - if (System.getProperty("ignore.weld.startMain") == null) { - System.setProperty("com.sun.jersey.server.impl.cdi.lookupExtensionInBeanManager", "true"); // Disable CDI extensions in Jersey libs - - weld = new Weld("STATIC_INSTANCE"); - weld.initialize(); - } - super.setup(context); Query query; try { @@ -84,8 +76,8 @@ protected void setup(org.apache.hadoop.mapreduce.Mapper.Con } final Configuration configuration = context.getConfiguration(); - this.setApplicationContext(configuration.get(SPRING_CONFIG_LOCATIONS)); - + this.setApplicationContext(configuration.get(SPRING_CONFIG_LOCATIONS), configuration.get(SPRING_CONFIG_BASE_PACKAGES), + configuration.get(SPRING_CONFIG_STARTING_CLASS)); String logicName = context.getConfiguration().get(QUERY_LOGIC_NAME); QueryLogic logic = (QueryLogic) super.applicationContext.getBean(logicName); @@ -98,10 +90,6 @@ protected void setup(org.apache.hadoop.mapreduce.Mapper.Con @Override protected void cleanup(Context context) throws IOException, InterruptedException { super.cleanup(context); - - if (weld != null) { - weld.shutdown(); - } } @Override diff --git a/web-services/map-reduce/src/main/java/datawave/webservice/mr/bulkresults/map/BulkResultsTableOutputMapper.java b/core/map-reduce/src/main/java/datawave/core/mapreduce/bulkresults/map/BulkResultsTableOutputMapper.java similarity index 82% rename from web-services/map-reduce/src/main/java/datawave/webservice/mr/bulkresults/map/BulkResultsTableOutputMapper.java rename to core/map-reduce/src/main/java/datawave/core/mapreduce/bulkresults/map/BulkResultsTableOutputMapper.java index f840aaae9d7..4f965dc1728 100644 --- a/web-services/map-reduce/src/main/java/datawave/webservice/mr/bulkresults/map/BulkResultsTableOutputMapper.java +++ b/core/map-reduce/src/main/java/datawave/core/mapreduce/bulkresults/map/BulkResultsTableOutputMapper.java @@ -1,4 +1,4 @@ -package datawave.webservice.mr.bulkresults.map; +package datawave.core.mapreduce.bulkresults.map; import java.io.IOException; import java.util.Collections; @@ -12,13 +12,16 @@ import org.apache.accumulo.core.data.Mutation; import org.apache.accumulo.core.data.Value; import org.apache.accumulo.core.security.ColumnVisibility; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; +import org.springframework.util.Assert; -import datawave.webservice.query.Query; -import datawave.webservice.query.cache.ResultsPage; -import datawave.webservice.query.exception.EmptyObjectException; -import datawave.webservice.query.logic.QueryLogic; -import datawave.webservice.query.logic.QueryLogicTransformer; +import datawave.core.query.cache.ResultsPage; +import datawave.core.query.exception.EmptyObjectException; +import datawave.core.query.logic.QueryLogic; +import datawave.core.query.logic.QueryLogicTransformer; +import datawave.microservice.mapreduce.bulkresults.map.SerializationFormat; +import 
datawave.microservice.query.Query; import datawave.webservice.result.BaseQueryResponse; public class BulkResultsTableOutputMapper extends ApplicationContextAwareMapper { @@ -47,9 +50,17 @@ protected void setup(org.apache.hadoop.mapreduce.Mapper throw new RuntimeException("Error instantiating query impl class " + context.getConfiguration().get(BulkResultsFileOutputMapper.QUERY_IMPL_CLASS), e); } - QueryLogic logic = (QueryLogic) super.applicationContext.getBean(QUERY_LOGIC_NAME); - t = logic.getEnrichedTransformer(query); + final Configuration configuration = context.getConfiguration(); + + this.setApplicationContext(configuration.get(SPRING_CONFIG_LOCATIONS), configuration.get(SPRING_CONFIG_BASE_PACKAGES), + configuration.get(SPRING_CONFIG_STARTING_CLASS)); + String logicName = context.getConfiguration().get(QUERY_LOGIC_NAME); + + QueryLogic logic = (QueryLogic) super.applicationContext.getBean(logicName); + t = logic.getEnrichedTransformer(query); + Assert.notNull(logic.getMarkingFunctions()); + Assert.notNull(logic.getResponseObjectFactory()); this.tableName = new Text(context.getConfiguration().get(TABLE_NAME)); this.format = SerializationFormat.valueOf(context.getConfiguration().get(BulkResultsFileOutputMapper.RESULT_SERIALIZATION_FORMAT)); diff --git a/core/metrics-reporter b/core/metrics-reporter index 8089d53feb5..992378d6294 160000 --- a/core/metrics-reporter +++ b/core/metrics-reporter @@ -1 +1 @@ -Subproject commit 8089d53feb585c4a77a07f26d2a449afa28dca1c +Subproject commit 992378d62946730d2ee799606276adca9522e050 diff --git a/core/modification/pom.xml b/core/modification/pom.xml new file mode 100644 index 00000000000..3f6e8cf6313 --- /dev/null +++ b/core/modification/pom.xml @@ -0,0 +1,53 @@ + + + 4.0.0 + + gov.nsa.datawave.core + datawave-core-parent + 7.13.0-SNAPSHOT + + datawave-core-modification + ${project.artifactId} + + + + gov.nsa.datawave + datawave-query-core + ${project.version} + + + gov.nsa.datawave.core + datawave-core-common + ${project.version} + + + gov.nsa.datawave.core + datawave-core-common-util + ${project.version} + + + gov.nsa.datawave.core + datawave-core-connection-pool + ${project.version} + + + gov.nsa.datawave.microservice + base-rest-responses + + + junit + junit + test + + + org.junit.jupiter + junit-jupiter-engine + test + + + org.junit.vintage + junit-vintage-engine + test + + + diff --git a/core/modification/src/main/java/datawave/modification/DatawaveModificationException.java b/core/modification/src/main/java/datawave/modification/DatawaveModificationException.java new file mode 100644 index 00000000000..baa24c156e4 --- /dev/null +++ b/core/modification/src/main/java/datawave/modification/DatawaveModificationException.java @@ -0,0 +1,30 @@ +package datawave.modification; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import datawave.webservice.query.exception.QueryException; + +public class DatawaveModificationException extends RuntimeException { + + private List exceptions = new ArrayList<>(); + + public DatawaveModificationException(QueryException qe) { + super(qe); + exceptions.add(qe); + } + + public DatawaveModificationException(String msg, QueryException qe) { + super(msg, qe); + exceptions.add(qe); + } + + public void addException(QueryException e) { + exceptions.add(e); + } + + public List getExceptions() { + return Collections.unmodifiableList(exceptions); + } +} diff --git a/core/modification/src/main/java/datawave/modification/ModificationService.java 
b/core/modification/src/main/java/datawave/modification/ModificationService.java
new file mode 100644
index 00000000000..0668dd4ae28
--- /dev/null
+++ b/core/modification/src/main/java/datawave/modification/ModificationService.java
@@ -0,0 +1,142 @@
+package datawave.modification;
+
+import static java.util.Map.Entry;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.apache.accumulo.core.client.AccumuloClient;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.log4j.Logger;
+
+import datawave.core.common.connection.AccumuloConnectionFactory;
+import datawave.modification.cache.ModificationCache;
+import datawave.modification.configuration.ModificationConfiguration;
+import datawave.modification.configuration.ModificationServiceConfiguration;
+import datawave.modification.query.ModificationQueryService;
+import datawave.security.authorization.DatawaveUser;
+import datawave.security.authorization.ProxiedUserDetails;
+import datawave.webservice.modification.ModificationRequestBase;
+import datawave.webservice.query.exception.BadRequestQueryException;
+import datawave.webservice.query.exception.DatawaveErrorCode;
+import datawave.webservice.query.exception.QueryException;
+import datawave.webservice.query.exception.UnauthorizedQueryException;
+import datawave.webservice.result.VoidResponse;
+import datawave.webservice.results.modification.ModificationConfigurationResponse;
+
+public class ModificationService {
+
+    private static final Logger log = Logger.getLogger(ModificationService.class);
+
+    private final AccumuloConnectionFactory connectionFactory;
+
+    private final ModificationCache cache;
+
+    private final ModificationQueryService.ModificationQueryServiceFactory queryServiceFactory;
+
+    private final ModificationConfiguration modificationConfiguration;
+
+    public ModificationService(ModificationConfiguration modificationConfiguration, ModificationCache cache, AccumuloConnectionFactory connectionFactory,
+                    ModificationQueryService.ModificationQueryServiceFactory queryServiceFactory) {
+        this.modificationConfiguration = modificationConfiguration;
+        this.cache = cache;
+        this.connectionFactory = connectionFactory;
+        this.queryServiceFactory = queryServiceFactory;
+    }
+
+    /**
+     * Returns a list of the Modification service names and their configurations
+     *
+     * @return a list of datawave.webservice.results.modification.ModificationConfigurationResponse
+     */
+    public List<ModificationConfigurationResponse> listConfigurations() {
+        List<ModificationConfigurationResponse> configs = new ArrayList<>();
+        for (Entry<String,ModificationServiceConfiguration> entry : this.modificationConfiguration.getConfigurations().entrySet()) {
+            ModificationConfigurationResponse r = new ModificationConfigurationResponse();
+            r.setName(entry.getKey());
+            r.setRequestClass(entry.getValue().getRequestClass().getName());
+            r.setDescription(entry.getValue().getDescription());
+            r.setAuthorizedRoles(entry.getValue().getAuthorizedRoles());
+            configs.add(r);
+        }
+        return configs;
+    }
+
+    /**
+     * Execute a Modification service with the given name and runtime parameters
+     *
+     * @param userDetails
+     *            The proxied user list
+     * @param modificationServiceName
+     *            Name of the modification service configuration
+     * @param request
+     *            a request of the type specified for this service in the listConfigurations response
+     * @return datawave.webservice.result.VoidResponse
+     */
+    public VoidResponse submit(ProxiedUserDetails userDetails, String modificationServiceName, ModificationRequestBase request) {
+        VoidResponse response = new VoidResponse();
+
+        // Find out who/what called this method
+        DatawaveUser primaryUser = userDetails.getPrimaryUser();
+        String userDn = primaryUser.getDn().subjectDN();
+        Collection<String> proxyServers = userDetails.getProxiedUsers().stream().map(u -> u.getDn().subjectDN()).collect(Collectors.toList());
+        Collection<String> userRoles = primaryUser.getRoles();
+        Set<Authorizations> cbAuths = userDetails.getProxiedUsers().stream().map(u -> new Authorizations(u.getAuths().toArray(new String[0])))
+                        .collect(Collectors.toSet());
+
+        AccumuloClient client = null;
+        AccumuloConnectionFactory.Priority priority;
+        try {
+            // Get the Modification Service from the configuration
+            ModificationServiceConfiguration service = modificationConfiguration.getConfiguration(modificationServiceName);
+            if (!request.getClass().equals(service.getRequestClass())) {
+                BadRequestQueryException qe = new BadRequestQueryException(DatawaveErrorCode.INVALID_REQUEST_CLASS,
+                                MessageFormat.format("Requires: {0} but got {1}", service.getRequestClass().getName(), request.getClass().getName()));
+                throw new DatawaveModificationException(qe);
+            }
+
+            priority = service.getPriority();
+
+            // Ensure that the user is in the list of authorized roles
+            if (null != service.getAuthorizedRoles()) {
+                boolean authorized = !Collections.disjoint(userRoles, service.getAuthorizedRoles());
+                if (!authorized) {
+                    // Then the user does not have any of the authorized roles
+                    UnauthorizedQueryException qe = new UnauthorizedQueryException(DatawaveErrorCode.JOB_EXECUTION_UNAUTHORIZED,
+                                    MessageFormat.format("Requires one of: {0}", service.getAuthorizedRoles()));
+                    throw new DatawaveModificationException(qe);
+                }
+            }
+
+            // Process the modification
+            Map<String,String> trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace());
+            client = connectionFactory.getClient(userDn, proxyServers, modificationConfiguration.getPoolName(), priority, trackingMap);
+            service.setQueryServiceFactory(queryServiceFactory);
+            log.info("Processing modification request from user=" + userDetails.getShortName() + ": \n" + request);
+            service.process(client, request, cache.getCachedMutableFieldList(), cbAuths, userDetails);
+        } catch (DatawaveModificationException e) {
+            throw e;
+        } catch (Exception e) {
+            QueryException qe = new QueryException(DatawaveErrorCode.MODIFICATION_ERROR, e);
+            log.error(qe);
+            throw new DatawaveModificationException(qe);
+        } finally {
+            if (null != client) {
+                try {
+                    connectionFactory.returnClient(client);
+                } catch (Exception e) {
+                    log.error("Error returning connection", e);
+                }
+            }
+        }
+
+        return response;
+    }
+
+}
diff --git a/warehouse/query-core/src/main/java/datawave/webservice/modification/MutableMetadataHandler.java b/core/modification/src/main/java/datawave/modification/MutableMetadataHandler.java
similarity index 94%
rename from warehouse/query-core/src/main/java/datawave/webservice/modification/MutableMetadataHandler.java
rename to core/modification/src/main/java/datawave/modification/MutableMetadataHandler.java
index fd360a166ca..1a2bd92fce6 100644
--- a/warehouse/query-core/src/main/java/datawave/webservice/modification/MutableMetadataHandler.java
+++ b/core/modification/src/main/java/datawave/modification/MutableMetadataHandler.java
@@ -1,4 +1,4 @@
-package datawave.webservice.modification;
+package datawave.modification;
 
 import
java.util.ArrayList; import java.util.Arrays; @@ -15,7 +15,6 @@ import java.util.concurrent.TimeUnit; import javax.annotation.Nullable; -import javax.ws.rs.core.MultivaluedMap; import org.apache.accumulo.core.client.AccumuloClient; import org.apache.accumulo.core.client.AccumuloException; @@ -38,33 +37,37 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.io.Text; import org.apache.log4j.Logger; -import org.jboss.resteasy.specimpl.MultivaluedMapImpl; import com.google.common.base.Function; import com.google.common.collect.HashMultimap; import com.google.common.collect.Iterators; import com.google.common.collect.Multimap; +import datawave.core.common.connection.AccumuloConnectionFactory; import datawave.core.iterators.FieldIndexDocumentFilter; import datawave.data.ColumnFamilyConstants; import datawave.data.type.Type; import datawave.ingest.protobuf.Uid; import datawave.ingest.protobuf.Uid.List.Builder; import datawave.marking.MarkingFunctions; +import datawave.microservice.query.DefaultQueryParameters; +import datawave.microservice.query.QueryPersistence; +import datawave.modification.configuration.ModificationServiceConfiguration; +import datawave.modification.query.ModificationQueryService; import datawave.query.data.parsers.DatawaveKey; import datawave.query.data.parsers.DatawaveKey.KeyType; import datawave.query.util.MetadataHelper; import datawave.query.util.MetadataHelperFactory; +import datawave.security.authorization.ProxiedUserDetails; import datawave.security.util.ScannerHelper; import datawave.util.TextUtil; import datawave.util.time.DateHelper; -import datawave.webservice.common.connection.AccumuloConnectionFactory; +import datawave.webservice.modification.DefaultModificationRequest; +import datawave.webservice.modification.EventIdentifier; +import datawave.webservice.modification.ModificationOperation; +import datawave.webservice.modification.ModificationRequestBase; import datawave.webservice.modification.ModificationRequestBase.MODE; -import datawave.webservice.modification.configuration.ModificationServiceConfiguration; -import datawave.webservice.query.QueryParametersImpl; -import datawave.webservice.query.QueryPersistence; import datawave.webservice.query.result.event.EventBase; -import datawave.webservice.query.runner.QueryExecutorBean; import datawave.webservice.result.BaseQueryResponse; import datawave.webservice.result.EventQueryResponseBase; import datawave.webservice.result.GenericResponse; @@ -301,12 +304,14 @@ public Class getRequestClass() { // Default the insert history option to true so that the call remains backwards compatible. 
@Override public void process(AccumuloClient client, ModificationRequestBase request, Map<String,Set<String>> mutableFieldList, Set<Authorizations> userAuths, - String user) throws Exception { - this.process(client, request, mutableFieldList, userAuths, user, false, true); + ProxiedUserDetails userDetails) throws Exception { + this.process(client, request, mutableFieldList, userAuths, userDetails, false, true); } public void process(AccumuloClient client, ModificationRequestBase request, Map<String,Set<String>> mutableFieldList, Set<Authorizations> userAuths, - String user, boolean purgeIndex, boolean insertHistory) throws Exception { + ProxiedUserDetails userDetails, boolean purgeIndex, boolean insertHistory) throws Exception { + + String user = userDetails.getShortName(); DefaultModificationRequest mr = DefaultModificationRequest.class.cast(request); @@ -703,7 +708,6 @@ protected void delete(MultiTableBatchWriter writer, AccumuloClient client, Set findMatchingEventUuid(String uuid, String uuidType, Set<Authorizations> userAuths, ModificationOperation operation) - throws Exception { - + protected EventBase findMatchingEventUuid(String uuid, String uuidType, Set<Authorizations> userAuths, ModificationOperation operation, + ProxiedUserDetails userDetails) throws Exception { String field = operation.getFieldName(); String columnVisibility = operation.getColumnVisibility(); @@ -1008,8 +1011,7 @@ protected EventBase findMatchingEventUuid(String uuid, String uuidType, Set String logicName = "LuceneUUIDEventQuery"; EventBase e = null; - QueryExecutorBean queryService = this.getQueryService(); - + ModificationQueryService queryService = this.getQueryService(userDetails); String id = null; HashSet<String> auths = new HashSet<>(); for (Authorizations a : userAuths) @@ -1019,11 +1021,10 @@ protected EventBase findMatchingEventUuid(String uuid, String uuidType, Set expiration = new Date(expiration.getTime() + (1000 * 60 * 60 * 24)); try { - MultivaluedMap<String,String> paramsMap = new MultivaluedMapImpl<>(); - paramsMap.putAll(QueryParametersImpl.paramsToMap(logicName, query.toString(), "Query to find matching records for metadata modification", - columnVisibility, new Date(0), new Date(), StringUtils.join(auths, ','), expiration, 2, -1, null, QueryPersistence.TRANSIENT, null, - queryOptions.toString(), false)); - GenericResponse<String> createResponse = queryService.createQuery(logicName, paramsMap); + GenericResponse<String> createResponse = queryService.createQuery(logicName, + DefaultQueryParameters.paramsToMap(logicName, query.toString(), "Query to find matching records for metadata modification", + columnVisibility, new Date(0), new Date(), StringUtils.join(auths, ','), expiration, 2, -1, null, + QueryPersistence.TRANSIENT, null, queryOptions.toString(), false)); id = createResponse.getResult(); BaseQueryResponse response = queryService.next(id); @@ -1120,29 +1121,38 @@ protected static class FieldIndexIterable implements Iterable<Key>, AutoCloseable public FieldIndexIterable(AccumuloClient client, String shardTable, String eventUid, String datatype, Set<Authorizations> userAuths, List<Range> ranges) throws TableNotFoundException { - scanner = ScannerHelper.createBatchScanner(client, shardTable, userAuths, ranges.size()); - scanner.setRanges(ranges); - Map<String,String> options = new HashMap<>(); - options.put(FieldIndexDocumentFilter.DATA_TYPE_OPT, datatype); - options.put(FieldIndexDocumentFilter.EVENT_UID_OPT, eventUid); - IteratorSetting settings = new IteratorSetting(100, FieldIndexDocumentFilter.class, options); - scanner.addScanIterator(settings); + if (!ranges.isEmpty()) { + scanner = ScannerHelper.createBatchScanner(client, shardTable, userAuths, ranges.size()); + scanner.setRanges(ranges); + Map<String,String> options = new HashMap<>(); + options.put(FieldIndexDocumentFilter.DATA_TYPE_OPT, datatype); + options.put(FieldIndexDocumentFilter.EVENT_UID_OPT, eventUid); + IteratorSetting settings = new IteratorSetting(100, FieldIndexDocumentFilter.class, options); + scanner.addScanIterator(settings); + } } @Override public Iterator<Key> iterator() { - return Iterators.transform(scanner.iterator(), new Function<Entry<Key,Value>,Key>() { - @Nullable - @Override - public Key apply(@Nullable Entry<Key,Value> keyValueEntry) { - return keyValueEntry.getKey(); - } - }); + if (scanner != null) { + return Iterators.transform(scanner.iterator(), new Function<Entry<Key,Value>,Key>() { + @Nullable + @Override + public Key apply(@Nullable Entry<Key,Value> keyValueEntry) { + return keyValueEntry.getKey(); + } + }); + } else { + List<Key> list = Collections.emptyList(); + return list.iterator(); + } } @Override public void close() throws Exception { - scanner.close(); + if (scanner != null) { + scanner.close(); + } } } diff --git a/warehouse/query-core/src/main/java/datawave/webservice/modification/MutableMetadataUUIDHandler.java b/core/modification/src/main/java/datawave/modification/MutableMetadataUUIDHandler.java similarity index 92% rename from warehouse/query-core/src/main/java/datawave/webservice/modification/MutableMetadataUUIDHandler.java rename to core/modification/src/main/java/datawave/modification/MutableMetadataUUIDHandler.java index fc4a32ba8ba..23e28276552 100644 --- a/warehouse/query-core/src/main/java/datawave/webservice/modification/MutableMetadataUUIDHandler.java +++ b/core/modification/src/main/java/datawave/modification/MutableMetadataUUIDHandler.java @@ -1,4 +1,4 @@ -package datawave.webservice.modification; +package datawave.modification; import java.util.ArrayList; import java.util.Collections; @@ -16,14 +16,20 @@ import org.apache.log4j.Logger; import datawave.query.util.MetadataHelper; -import datawave.webservice.common.exception.BadRequestException; +import datawave.security.authorization.ProxiedUserDetails; +import datawave.webservice.modification.DefaultModificationRequest; +import datawave.webservice.modification.DefaultUUIDModificationRequest; +import datawave.webservice.modification.EventIdentifier; +import datawave.webservice.modification.ModificationEvent; +import datawave.webservice.modification.ModificationOperation; import datawave.webservice.modification.ModificationOperation.OPERATIONMODE; +import datawave.webservice.modification.ModificationOperationImpl; +import datawave.webservice.modification.ModificationRequestBase; import datawave.webservice.modification.ModificationRequestBase.MODE; import datawave.webservice.query.exception.DatawaveErrorCode; import datawave.webservice.query.exception.QueryException; import datawave.webservice.query.result.event.EventBase; import datawave.webservice.query.result.event.FieldBase; -import datawave.webservice.result.VoidResponse; /** * Class that handles requests for modification requests (INSERT, UPDATE, DELETE, REPLACE) for metadata. From a DefaultUUIDModificationRequest it performs
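Backing up to the FieldIndexIterable change above: Accumulo's BatchScanner.setRanges rejects an empty ranges collection, hence the new guard that skips scanner creation entirely and the null-tolerant iterator() and close(). The same pattern in isolation, as a hedged sketch; the class below is illustrative, not part of this patch:

import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import com.google.common.collect.Iterators;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.data.Key;

public class EmptySafeKeyIterable implements Iterable<Key>, AutoCloseable {
    private BatchScanner scanner; // stays null when there were no ranges to scan

    @Override
    public Iterator<Key> iterator() {
        if (scanner == null) {
            List<Key> empty = Collections.emptyList();
            return empty.iterator(); // nothing to scan: hand back an empty iterator
        }
        // project Entry<Key,Value> down to Key, as the patched iterator() does
        return Iterators.transform(scanner.iterator(), Entry::getKey);
    }

    @Override
    public void close() {
        if (scanner != null) { // tolerate the scanner never having been created
            scanner.close();
        }
    }
}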
@@ -207,8 +213,10 @@ public void ResetValues() { @Override public void process(AccumuloClient client, ModificationRequestBase request, Map> mutableFieldList, Set userAuths, - String user) throws BadRequestException, AccumuloException, AccumuloSecurityException, TableNotFoundException, ExecutionException { - VoidResponse response = new VoidResponse(); + ProxiedUserDetails userDetails) + throws DatawaveModificationException, AccumuloException, AccumuloSecurityException, TableNotFoundException, ExecutionException { + String user = userDetails.getShortName(); + ArrayList exceptions = new ArrayList<>(); MetadataHelper mHelper = getMetadataHelper(client); @@ -246,8 +254,7 @@ public void process(AccumuloClient client, ModificationRequestBase request, Map< } // perform the lookupUUID - EventBase> idEvent = findMatchingEventUuid(event.getId(), event.getIdType(), userAuths, operation); - + EventBase> idEvent = findMatchingEventUuid(event.getId(), event.getIdType(), userAuths, operation, userDetails); // extract contents from lookupUUID necessary for modification List> fields = idEvent.getFields(); if (operation.getOldFieldValue() != null) @@ -321,14 +328,7 @@ else if (f.getName().equalsIgnoreCase(event.getIdType()) && fieldCount < 1 && co if (log != null) log.trace("Submitting request to MutableMetadataHandler from MutableMetadataUUIDHandler: " + modReq); - // make sure user isn't null or empty - if (eventUser == null || eventUser.equals("")) { - if (log != null) - log.trace("No user provided for event. Using caller: " + user); - super.process(client, modReq, mutableFieldList, userAuths, user); - } else { - super.process(client, modReq, mutableFieldList, userAuths, event.getUser()); - } + super.process(client, modReq, mutableFieldList, userAuths, userDetails); } } // log exceptions that occur for each modification request. 
Let as many requests work as possible before returning @@ -353,12 +353,16 @@ else if (f.getName().equalsIgnoreCase(event.getIdType()) && fieldCount < 1 && co // If any errors occurred, return them in the response to the user if (!exceptions.isEmpty()) { - for (Exception e : exceptions) { - QueryException qe = new QueryException(DatawaveErrorCode.MODIFICATION_ERROR, e); - response.addException(qe.getBottomQueryException()); + if (exceptions.size() == 1) { + throw new DatawaveModificationException(new QueryException(DatawaveErrorCode.MODIFICATION_ERROR, exceptions.get(0))); + } else { + DatawaveModificationException exception = new DatawaveModificationException(new QueryException(DatawaveErrorCode.MODIFICATION_ERROR)); + for (Exception e : exceptions) { + QueryException qe = new QueryException(DatawaveErrorCode.MODIFICATION_ERROR, e); + exception.addException(qe); + } + throw exception; } - QueryException e = new QueryException(DatawaveErrorCode.MODIFICATION_ERROR); - throw new BadRequestException(e, response); } } diff --git a/core/modification/src/main/java/datawave/modification/cache/ModificationCache.java b/core/modification/src/main/java/datawave/modification/cache/ModificationCache.java new file mode 100644 index 00000000000..2258be9e061 --- /dev/null +++ b/core/modification/src/main/java/datawave/modification/cache/ModificationCache.java @@ -0,0 +1,120 @@ +package datawave.modification.cache; + +import static datawave.core.common.connection.AccumuloConnectionFactory.Priority; + +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import org.apache.accumulo.core.client.AccumuloClient; +import org.apache.accumulo.core.client.BatchScanner; +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Range; +import org.apache.accumulo.core.data.Value; +import org.apache.hadoop.io.Text; +import org.apache.log4j.Logger; + +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.modification.configuration.ModificationConfiguration; +import datawave.security.util.ScannerHelper; + +public class ModificationCache { + private static Logger log = Logger.getLogger(ModificationCache.class); + + private static final Text MODIFICATION_COLUMN = new Text("m"); + + private Map> cache = new HashMap<>(); + + private final AccumuloConnectionFactory connectionFactory; + + private ModificationConfiguration modificationConfiguration; + + public ModificationCache(AccumuloConnectionFactory connectionFactory, ModificationConfiguration modificationConfiguration) { + this.connectionFactory = connectionFactory; + this.modificationConfiguration = modificationConfiguration; + if (modificationConfiguration != null) { + reloadMutableFieldCache(); + } else { + log.error("modificationConfiguration was null"); + } + } + + /** + * Reload the cache + */ + public void reloadMutableFieldCache() { + Map> cache = new HashMap<>(); + AccumuloClient client = null; + BatchScanner s = null; + try { + Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); + log.trace("getting mutable list from table " + this.modificationConfiguration.getTableName()); + log.trace("modificationConfiguration.getPoolName() = " + modificationConfiguration.getPoolName()); + client = connectionFactory.getClient(null, null, modificationConfiguration.getPoolName(), Priority.ADMIN, trackingMap); + log.trace("got connection"); + s = 
ScannerHelper.createBatchScanner(client, this.modificationConfiguration.getTableName(), + Collections.singleton(client.securityOperations().getUserAuthorizations(client.whoami())), 8); + s.setRanges(Collections.singleton(new Range())); + s.fetchColumnFamily(MODIFICATION_COLUMN); + for (Entry e : s) { + // Field name is in the row and datatype is in the colq. + String datatype = e.getKey().getColumnQualifier().toString(); + log.trace("datatype = " + datatype); + String fieldName = e.getKey().getRow().toString(); + log.trace("fieldname = " + fieldName); + if (null == cache.get(datatype)) + cache.put(datatype, new HashSet<>()); + cache.get(datatype).add(fieldName); + } + log.trace("cache size = " + cache.size()); + for (Entry> e : cache.entrySet()) { + log.trace("datatype = " + e.getKey() + ", fieldcount = " + e.getValue().size()); + } + // now atomically replace the cache + this.cache = cache; + } catch (Exception e) { + log.error("Error during initialization of ModificationCacheBean", e); + throw new RuntimeException("Error during initialization of ModificationCacheBean", e); + } finally { + if (null != s) + s.close(); + try { + connectionFactory.returnClient(client); + } catch (Exception e) { + log.error("Error returning connection to pool", e); + } + } + } + + /** + * List the mutable fields in the cache + */ + public String listMutableFields() { + return cache.toString(); + } + + /** + * Check to see if field for specified datatype is mutable + * + * @param datatype + * @param field + * name of field + * @return true if field is mutable for the given datatype + */ + public boolean isFieldMutable(String datatype, String field) { + log.trace("datatype = " + datatype + ", field = " + field); + return cache.get(datatype).contains(field); + } + + public Map> getCachedMutableFieldList() { + log.trace("cache = " + cache); + return Collections.unmodifiableMap(cache); + } + + public ModificationConfiguration getModificationConfiguration() { + return modificationConfiguration; + } +} diff --git a/warehouse/query-core/src/main/java/datawave/webservice/modification/configuration/ModificationConfiguration.java b/core/modification/src/main/java/datawave/modification/configuration/ModificationConfiguration.java similarity index 95% rename from warehouse/query-core/src/main/java/datawave/webservice/modification/configuration/ModificationConfiguration.java rename to core/modification/src/main/java/datawave/modification/configuration/ModificationConfiguration.java index bd23b94c27a..218ec2becea 100644 --- a/warehouse/query-core/src/main/java/datawave/webservice/modification/configuration/ModificationConfiguration.java +++ b/core/modification/src/main/java/datawave/modification/configuration/ModificationConfiguration.java @@ -1,4 +1,4 @@ -package datawave.webservice.modification.configuration; +package datawave.modification.configuration; import java.util.Map; diff --git a/warehouse/query-core/src/main/java/datawave/webservice/modification/configuration/ModificationServiceConfiguration.java b/core/modification/src/main/java/datawave/modification/configuration/ModificationServiceConfiguration.java similarity index 70% rename from warehouse/query-core/src/main/java/datawave/webservice/modification/configuration/ModificationServiceConfiguration.java rename to core/modification/src/main/java/datawave/modification/configuration/ModificationServiceConfiguration.java index 271d087a9fa..ccc5acf828a 100644 --- 
a/warehouse/query-core/src/main/java/datawave/webservice/modification/configuration/ModificationServiceConfiguration.java +++ b/core/modification/src/main/java/datawave/modification/configuration/ModificationServiceConfiguration.java @@ -1,4 +1,4 @@ -package datawave.webservice.modification.configuration; +package datawave.modification.configuration; import java.util.List; import java.util.Map; @@ -7,9 +7,10 @@ import org.apache.accumulo.core.client.AccumuloClient; import org.apache.accumulo.core.security.Authorizations; -import datawave.webservice.common.connection.AccumuloConnectionFactory; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.modification.query.ModificationQueryService; +import datawave.security.authorization.ProxiedUserDetails; import datawave.webservice.modification.ModificationRequestBase; -import datawave.webservice.query.runner.QueryExecutorBean; public abstract class ModificationServiceConfiguration { @@ -17,7 +18,7 @@ public abstract class ModificationServiceConfiguration { protected String description = null; protected List authorizedRoles = null; - protected QueryExecutorBean queryService = null; + protected ModificationQueryService.ModificationQueryServiceFactory queryServiceFactory = null; protected List securityMarkingExemptFields = null; public String getDescription() { @@ -47,14 +48,18 @@ public void setSecurityMarkingExemptFields(List securityMarkingExemptFie /** * Handle to query service in case the modification service needs to run queries. * - * @return RemoteQueryExecutor + * @return ModificationQueryService */ - public QueryExecutorBean getQueryService() { - return queryService; + public ModificationQueryService getQueryService(ProxiedUserDetails userDetails) { + return queryServiceFactory.createService(userDetails); } - public void setQueryService(QueryExecutorBean queryService) { - this.queryService = queryService; + public ModificationQueryService.ModificationQueryServiceFactory getQueryServiceFactory() { + return queryServiceFactory; + } + + public void setQueryServiceFactory(ModificationQueryService.ModificationQueryServiceFactory queryServiceFactory) { + this.queryServiceFactory = queryServiceFactory; } /** @@ -74,13 +79,13 @@ public void setQueryService(QueryExecutorBean queryService) { * map of datatype to set of fields that are mutable * @param userAuths * authorizations of user making the call - * @param user - * user identifier + * @param userDetails + * user details * @throws Exception * if there is an issue */ public abstract void process(AccumuloClient client, ModificationRequestBase request, Map> mutableFieldList, - Set userAuths, String user) throws Exception; + Set userAuths, ProxiedUserDetails userDetails) throws Exception; /** * diff --git a/core/modification/src/main/java/datawave/modification/query/ModificationQueryService.java b/core/modification/src/main/java/datawave/modification/query/ModificationQueryService.java new file mode 100644 index 00000000000..8cbba14ca47 --- /dev/null +++ b/core/modification/src/main/java/datawave/modification/query/ModificationQueryService.java @@ -0,0 +1,21 @@ +package datawave.modification.query; + +import java.util.List; +import java.util.Map; + +import datawave.query.exceptions.DatawaveQueryException; +import datawave.security.authorization.ProxiedUserDetails; +import datawave.webservice.result.BaseQueryResponse; +import datawave.webservice.result.GenericResponse; + +public interface ModificationQueryService { + GenericResponse createQuery(String logicName, Map> 
paramsToMap) throws DatawaveQueryException; + + BaseQueryResponse next(String id) throws DatawaveQueryException; + + void close(String id) throws DatawaveQueryException; + + public interface ModificationQueryServiceFactory { + ModificationQueryService createService(ProxiedUserDetails userDetails); + } +} diff --git a/core/pom.xml b/core/pom.xml index a3c2e6119dd..ebb7175043e 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -4,13 +4,20 @@ gov.nsa.datawave datawave-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT gov.nsa.datawave.core datawave-core-parent pom ${project.artifactId} + cached-results + common + common-util + connection-pool + map-reduce + modification + query utils @@ -18,11 +25,6 @@ scm:git:ssh://git@fixme/core.git HEAD - - 4.13.2 - 5.8.2 - 5.8.2 - @@ -31,24 +33,6 @@ ${version.junit} test - - org.junit.jupiter - junit-jupiter-api - ${version.junit.jupiter} - test - - - org.junit.jupiter - junit-jupiter-engine - ${version.junit.jupiter} - test - - - org.junit.vintage - junit-vintage-engine - ${version.junit.vintage} - test - @@ -66,11 +50,6 @@ junit-jupiter-engine test - - org.junit.vintage - junit-vintage-engine - test - @@ -90,6 +69,18 @@ true random + + + org.junit.jupiter + junit-jupiter-engine + ${version.junit.bom} + + + org.junit.vintage + junit-vintage-engine + ${version.junit.bom} + + diff --git a/core/query/pom.xml b/core/query/pom.xml new file mode 100644 index 00000000000..a80897693ac --- /dev/null +++ b/core/query/pom.xml @@ -0,0 +1,102 @@ + + + 4.0.0 + + gov.nsa.datawave.core + datawave-core-parent + 7.13.0-SNAPSHOT + + datawave-core-query + ${project.artifactId} + + + + gov.nsa.datawave.core + datawave-core-cached-results + ${project.version} + + + gov.nsa.datawave.core + datawave-core-common + ${project.version} + + + gov.nsa.datawave.core + datawave-core-common-util + ${project.version} + + + gov.nsa.datawave.core + datawave-core-connection-pool + ${project.version} + + + gov.nsa.datawave.microservice + audit-api + + + gov.nsa.datawave.microservice + base-rest-responses + + + gov.nsa.datawave.microservice + query-metric-api + + + gov.nsa.datawave.webservices + datawave-ws-client + ${project.version} + + + org.slf4j + * + + + log4j + log4j + + + + + junit + junit + test + + + org.junit.jupiter + junit-jupiter-engine + test + + + org.junit.vintage + junit-vintage-engine + test + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + META-INF/beans.xml + META-INF/jboss-ejb3.xml + + + + + jboss + + jar + + + jboss + + + + + + + + diff --git a/web-services/query/src/main/java/datawave/webservice/query/cachedresults/CacheableLogic.java b/core/query/src/main/java/datawave/core/query/cachedresults/CacheableLogic.java similarity index 51% rename from web-services/query/src/main/java/datawave/webservice/query/cachedresults/CacheableLogic.java rename to core/query/src/main/java/datawave/core/query/cachedresults/CacheableLogic.java index 85a5fb82972..bb145dac89d 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/cachedresults/CacheableLogic.java +++ b/core/query/src/main/java/datawave/core/query/cachedresults/CacheableLogic.java @@ -1,15 +1,13 @@ -package datawave.webservice.query.cachedresults; - -import java.util.List; +package datawave.core.query.cachedresults; +import datawave.webservice.query.cachedresults.CacheableQueryRow; import datawave.webservice.query.exception.QueryException; public interface CacheableLogic { - List writeToCache(Object o) throws QueryException; + CacheableQueryRow writeToCache(Object o) throws QueryException; // CachedRowSet 
is passed pointing to the current row // This method must create the objects that will later be passed to createResponse - List<Object> readFromCache(List<CacheableQueryRow> row); - + Object readFromCache(CacheableQueryRow row); } diff --git a/core/query/src/main/java/datawave/core/query/configuration/CheckpointableQueryConfiguration.java b/core/query/src/main/java/datawave/core/query/configuration/CheckpointableQueryConfiguration.java new file mode 100644 index 00000000000..8a243afcbaa --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/configuration/CheckpointableQueryConfiguration.java @@ -0,0 +1,12 @@ +package datawave.core.query.configuration; + +public interface CheckpointableQueryConfiguration { + + /** + * Create an instance of this configuration suitable for a checkpoint. Basically, ensure that everything required to continue execution of the query + * post-create is copied. + * + * @return The configuration + */ + GenericQueryConfiguration checkpoint(); +} diff --git a/core/query/src/main/java/datawave/core/query/configuration/GenericQueryConfiguration.java b/core/query/src/main/java/datawave/core/query/configuration/GenericQueryConfiguration.java new file mode 100644 index 00000000000..18945b1f88a --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/configuration/GenericQueryConfiguration.java @@ -0,0 +1,374 @@ +package datawave.core.query.configuration; + +import java.io.Serializable; +import java.nio.charset.StandardCharsets; +import java.util.Collection; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +import org.apache.accumulo.core.client.AccumuloClient; +import org.apache.accumulo.core.client.BatchScanner; +import org.apache.accumulo.core.client.ScannerBase; +import org.apache.accumulo.core.security.Authorizations; + +import com.google.common.collect.Iterators; + +import datawave.core.common.util.EnvProvider; +import datawave.core.query.logic.BaseQueryLogic; +import datawave.microservice.query.Query; +import datawave.util.TableName; + +/** + * A basic query configuration object that contains the information needed to run a query. + * + * Provides some "expected" default values for parameters. This configuration object also encapsulates iterators and their options that would be set on a + * {@link BatchScanner}.
+ * + */ +public class GenericQueryConfiguration implements Serializable { + // is this execution expected to be checkpointable (changes how we allocate ranges to scanners) + private boolean checkpointable = false; + + private transient AccumuloClient client = null; + + // This is just used for (de)serialization + private Set auths = Collections.emptySet(); + + private Set authorizations = Collections.singleton(Authorizations.EMPTY); + + private Query query = null; + + // Leave in a top-level query for backwards-compatibility purposes + private String queryString = null; + + private Date beginDate = null; + private Date endDate = null; + + // The max number of next + seek calls made by the underlying iterators + private Long maxWork = -1L; + + protected int baseIteratorPriority = 100; + + // Table name + private String tableName = TableName.SHARD; + + private Collection queries = Collections.emptyList(); + + private transient Iterator queriesIter = Collections.emptyIterator(); + protected boolean bypassAccumulo; + + // use a value like 'env:PASS' to pull from the environment + private String accumuloPassword = ""; + + private String connPoolName; + + // Whether or not this query emits every result or performs some kind of result reduction + protected boolean reduceResults = false; + + // either IMMEDIATE or EVENTUAL + private Map tableConsistencyLevels = new HashMap<>(); + // provides default scan hints + // NOTE: accumulo reserves the execution hint name 'meta' + // NOTE: datawave reserves the execution hint name 'expansion' for index expansion + private Map> tableHints = new HashMap<>(); + + /** + * Empty default constructor + */ + public GenericQueryConfiguration() { + + } + + /** + * Pulls the table name, max query results, and max rows to scan from the provided argument + * + * @param configuredLogic + * A pre-configured BaseQueryLogic to initialize the Configuration with + */ + public GenericQueryConfiguration(BaseQueryLogic configuredLogic) { + this(configuredLogic.getConfig()); + } + + @SuppressWarnings("CopyConstructorMissesField") + public GenericQueryConfiguration(GenericQueryConfiguration other) { + copyFrom(other); + } + + /** + * Deeply copies over all fields from the given {@link GenericQueryConfiguration} to this {@link GenericQueryConfiguration}. + * + * @param other + * the {@link GenericQueryConfiguration} to copy values from + */ + public void copyFrom(GenericQueryConfiguration other) { + this.setQuery(other.getQuery()); + this.setCheckpointable(other.isCheckpointable()); + this.setBaseIteratorPriority(other.getBaseIteratorPriority()); + this.setBypassAccumulo(other.getBypassAccumulo()); + this.setAccumuloPassword(other.getAccumuloPassword()); + this.setConnPoolName(other.getConnPoolName()); + this.setAuthorizations(other.getAuthorizations()); + this.setBeginDate(other.getBeginDate()); + this.setClient(other.getClient()); + this.setEndDate(other.getEndDate()); + this.setMaxWork(other.getMaxWork()); + this.setQueries(other.getQueries()); + // copying the query iterators can cause issues if the query is running. 
+ // this.setQueriesIter(other.getQueriesIter()); + this.setQueryString(other.getQueryString()); + this.setTableName(other.getTableName()); + this.setReduceResults(other.isReduceResults()); + this.setTableConsistencyLevels(other.getTableConsistencyLevels()); + this.setTableHints(other.getTableHints()); + } + + public Collection getQueries() { + return queries; + } + + public void setQueries(Collection queries) { + this.queries = queries; + } + + /** + * Return the configured {@code Iterator} + * + * @return An iterator of query ranges + */ + public Iterator getQueriesIter() { + if ((queriesIter == null || !queriesIter.hasNext()) && queries != null) { + return Iterators.unmodifiableIterator(queries.iterator()); + } else { + return Iterators.unmodifiableIterator(this.queriesIter); + } + } + + /** + * Set the queries to be run. + * + * @param queriesIter + * An iterator of query ranges + */ + public void setQueriesIter(Iterator queriesIter) { + this.queriesIter = queriesIter; + } + + public boolean isCheckpointable() { + return checkpointable; + } + + public void setCheckpointable(boolean checkpointable) { + this.checkpointable = checkpointable; + } + + public AccumuloClient getClient() { + return client; + } + + public void setClient(AccumuloClient client) { + this.client = client; + } + + public Query getQuery() { + return query; + } + + public void setQuery(Query query) { + this.query = query; + } + + public void setQueryString(String query) { + this.queryString = query; + } + + public String getQueryString() { + return queryString; + } + + public Set getAuths() { + if (auths == null && authorizations != null) { + auths = authorizations.stream().flatMap(a -> a.getAuthorizations().stream()).map(b -> new String(b, StandardCharsets.UTF_8)) + .collect(Collectors.toSet()); + } + return auths; + } + + public void setAuths(Set auths) { + this.auths = auths; + this.authorizations = null; + getAuthorizations(); + } + + public Set getAuthorizations() { + if (authorizations == null && auths != null) { + authorizations = Collections + .singleton(new Authorizations(auths.stream().map(a -> a.getBytes(StandardCharsets.UTF_8)).collect(Collectors.toList()))); + } + return authorizations; + } + + public void setAuthorizations(Set authorizations) { + this.authorizations = authorizations; + this.auths = null; + getAuths(); + } + + public int getBaseIteratorPriority() { + return baseIteratorPriority; + } + + public void setBaseIteratorPriority(final int baseIteratorPriority) { + this.baseIteratorPriority = baseIteratorPriority; + } + + public Date getBeginDate() { + return beginDate; + } + + public void setBeginDate(Date beginDate) { + this.beginDate = beginDate; + } + + public Date getEndDate() { + return endDate; + } + + public void setEndDate(Date endDate) { + this.endDate = endDate; + } + + public Long getMaxWork() { + return maxWork; + } + + public void setMaxWork(Long maxWork) { + this.maxWork = maxWork; + } + + public String getTableName() { + return tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public boolean getBypassAccumulo() { + return bypassAccumulo; + } + + public void setBypassAccumulo(boolean bypassAccumulo) { + this.bypassAccumulo = bypassAccumulo; + } + + /** + * @return - the accumulo password + */ + public String getAccumuloPassword() { + return this.accumuloPassword; + } + + public boolean isReduceResults() { + return reduceResults; + } + + public void setReduceResults(boolean reduceResults) { + this.reduceResults = reduceResults; + } 
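One consequence of the auths/authorizations pair above: the String form exists for (de)serialization while the Authorizations form is what scanners need; each setter clears the other representation and the getters rebuild it lazily. A small round-trip illustration (hedged sketch only; class names as introduced in this patch):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.apache.accumulo.core.security.Authorizations;
import datawave.core.query.configuration.GenericQueryConfiguration;

public class AuthsRoundTrip {
    public static void main(String[] args) {
        GenericQueryConfiguration config = new GenericQueryConfiguration();
        // set the serializable String form...
        config.setAuths(new HashSet<>(Arrays.asList("FOO", "BAR")));
        // ...and the Authorizations form is derived on demand by the getter
        Set<Authorizations> authorizations = config.getAuthorizations();
        System.out.println(authorizations); // a single Authorizations holding FOO and BAR
    }
}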
+ + /** + * Sets configured password for accumulo access + * + * @param password + * the password used to connect to accumulo + */ + public void setAccumuloPassword(String password) { + this.accumuloPassword = EnvProvider.resolve(password); + } + + public String getConnPoolName() { + return connPoolName; + } + + public void setConnPoolName(String connPoolName) { + this.connPoolName = connPoolName; + } + + public Map getTableConsistencyLevels() { + return tableConsistencyLevels; + } + + public void setTableConsistencyLevels(Map tableConsistencyLevels) { + this.tableConsistencyLevels = tableConsistencyLevels; + } + + public Map> getTableHints() { + return tableHints; + } + + public void setTableHints(Map> tableHints) { + this.tableHints = tableHints; + } + + /** + * Checks for non-null, sane values for the configured values + * + * @return True if all of the encapsulated values have legitimate values, otherwise false + */ + public boolean canRunQuery() { + // Ensure we were given connector and authorizations + if (null == this.getClient() || null == this.getAuthorizations()) { + return false; + } + + // Ensure valid dates + if (null == this.getBeginDate() || null == this.getEndDate() || endDate.before(beginDate)) { + return false; + } + + // A non-empty table was given + if (null == getTableName() || this.getTableName().isEmpty()) { + return false; + } + + // At least one QueryData was provided + if (null == this.getQueriesIter()) { + return false; + } + + return true; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + GenericQueryConfiguration that = (GenericQueryConfiguration) o; + return isCheckpointable() == that.isCheckpointable() && getBaseIteratorPriority() == that.getBaseIteratorPriority() + && getBypassAccumulo() == that.getBypassAccumulo() && Objects.equals(getAuthorizations(), that.getAuthorizations()) + && Objects.equals(getQuery(), that.getQuery()) && Objects.equals(getQueryString(), that.getQueryString()) + && Objects.equals(getBeginDate(), that.getBeginDate()) && Objects.equals(getEndDate(), that.getEndDate()) + && Objects.equals(getMaxWork(), that.getMaxWork()) && Objects.equals(getTableName(), that.getTableName()) + && Objects.equals(getQueries(), that.getQueries()) && Objects.equals(getAccumuloPassword(), that.getAccumuloPassword()) + && Objects.equals(getConnPoolName(), that.getConnPoolName()) && Objects.equals(isReduceResults(), that.isReduceResults()); + } + + @Override + public int hashCode() { + return Objects.hash(isCheckpointable(), getAuthorizations(), getQuery(), getQueryString(), getBeginDate(), getEndDate(), getMaxWork(), + getBaseIteratorPriority(), getTableName(), getQueries(), getBypassAccumulo(), getConnPoolName(), getAccumuloPassword(), + isReduceResults()); + } +} diff --git a/core/query/src/main/java/datawave/core/query/configuration/QueryData.java b/core/query/src/main/java/datawave/core/query/configuration/QueryData.java new file mode 100644 index 00000000000..a5a4a868a1a --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/configuration/QueryData.java @@ -0,0 +1,556 @@ +package datawave.core.query.configuration; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; + +import 
org.apache.accumulo.core.client.IteratorSetting; +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Range; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; +import org.apache.hadoop.io.Text; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.ObjectCodec; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import com.fasterxml.jackson.databind.deser.std.StdDeserializer; +import com.fasterxml.jackson.databind.ser.std.StdSerializer; + +/** + * Class to encapsulate all required information to run a query. + */ +public class QueryData implements ResultContext, Externalizable { + private String tableName; + private String query; + @JsonSerialize(using = RangeListSerializer.class) + @JsonDeserialize(using = RangeListDeserializer.class) + private Collection ranges = new HashSet<>(); + private Collection columnFamilies = new HashSet<>(); + @JsonDeserialize(using = IteratorSettingListDeserializer.class) + private List settings = new ArrayList<>(); + @JsonSerialize(using = KeySerializer.class) + @JsonDeserialize(using = KeyDeserializer.class) + private Key lastResult; + private boolean rebuildHashCode = true; + private int hashCode = -1; + boolean finished = false; + + public QueryData() { + // empty constructor + } + + /** + * Full constructor + * + * @param tableName + * the table name + * @param query + * the query string + * @param ranges + * a collection of ranges + * @param columnFamilies + * a collection of column families + * @param settings + * a list of IteratorSetting + */ + public QueryData(String tableName, String query, Collection ranges, Collection columnFamilies, List settings) { + this.tableName = tableName; + this.query = query; + this.ranges = ranges; + this.columnFamilies = columnFamilies; + this.settings = settings; + } + + /** + * Copy constructor + * + * @param other + * another instance of QueryData + */ + public QueryData(QueryData other) { + this.tableName = other.tableName; + this.query = other.query; + this.ranges = new HashSet<>(other.ranges); + this.columnFamilies = new HashSet<>(other.columnFamilies); + this.settings = new ArrayList<>(other.settings); + this.hashCode = other.hashCode; + this.rebuildHashCode = other.rebuildHashCode; + this.lastResult = other.lastResult; + this.finished = other.finished; + } + + // builder style methods + + public QueryData withTableName(String tableName) { + this.tableName = tableName; + resetHashCode(); + return this; + } + + public QueryData withQuery(String query) { + this.query = query; + resetHashCode(); + return this; + } + + public QueryData withRanges(Collection ranges) { + this.ranges = ranges; + resetHashCode(); + return this; + } + + public QueryData withColumnFamilies(Collection columnFamilies) { + this.columnFamilies = columnFamilies; + resetHashCode(); + return this; + } + + public QueryData withSettings(List settings) { + this.settings = settings; + resetHashCode(); + return this; + } + + public void setSettings(List settings) { + this.settings.clear(); + if (settings != null) { + 
this.settings.addAll(settings); + } + resetHashCode(); + } + + public List getSettings() { + return settings; + } + + public void setQuery(String query) { + this.query = query; + resetHashCode(); + } + + public String getQuery() { + return query; + } + + public String getTableName() { + return tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + resetHashCode(); + } + + public Collection getRanges() { + if (isFinished()) { + return Collections.emptySet(); + } else if (lastResult != null) { + List newRanges = new ArrayList<>(); + for (Range range : ranges) { + if (range.contains(lastResult)) { + newRanges.add(new Range(lastResult, false, range.getEndKey(), range.isEndKeyInclusive())); + } else { + newRanges.add(range); + } + } + return newRanges; + } + return ranges; + } + + public Collection getColumnFamilies() { + return columnFamilies; + } + + public void setColumnFamilies(Collection columnFamilies) { + this.columnFamilies.clear(); + if (columnFamilies != null) { + this.columnFamilies.addAll(columnFamilies); + } + resetHashCode(); + } + + public void addColumnFamily(String cf) { + this.columnFamilies.add(cf); + resetHashCode(); + } + + public void addColumnFamily(Text cf) { + this.columnFamilies.add(cf.toString()); + resetHashCode(); + } + + public void setRanges(Collection ranges) { + this.ranges.clear(); + if (null != ranges) { + this.ranges.addAll(ranges); + } + resetHashCode(); + } + + public void addRange(Range range) { + this.ranges.add(range); + resetHashCode(); + } + + public void addIterator(IteratorSetting cfg) { + this.settings.add(cfg); + resetHashCode(); + } + + public void setLastResult(Key result) { + this.lastResult = result; + if (this.lastResult == null) { + this.finished = true; + } + resetHashCode(); + } + + public boolean isFinished() { + return this.finished; + } + + public Key getLastResult() { + return lastResult; + } + + @Override + public String toString() { + // @formatter:off + return new StringBuilder() + .append("Query: '").append(this.query) + .append("', Ranges: ").append(this.ranges) + .append(", lastResult: ").append(this.lastResult) + .append(", Settings: ").append(this.settings) + .toString(); + // @formatter:on + } + + @Override + public void writeExternal(ObjectOutput out) throws IOException { + out.writeUTF(tableName); + out.writeInt(settings.size()); + for (IteratorSetting setting : settings) { + setting.write(out); + } + if (query != null) { + out.writeBoolean(true); + out.writeUTF(query); + } else { + out.writeBoolean(false); + } + out.writeInt(ranges.size()); + for (Range range : ranges) { + range.write(out); + } + out.writeInt(columnFamilies.size()); + for (String cf : columnFamilies) { + out.writeUTF(cf); + } + if (lastResult != null) { + out.writeBoolean(true); + lastResult.write(out); + } else { + out.writeBoolean(false); + } + out.writeBoolean(finished); + } + + @Override + public void readExternal(ObjectInput in) throws IOException { + tableName = in.readUTF(); + settings.clear(); + int count = in.readInt(); + for (int i = 0; i < count; i++) { + settings.add(new IteratorSetting(in)); + } + boolean exists = in.readBoolean(); + if (exists) { + query = in.readUTF(); + } + ranges.clear(); + count = in.readInt(); + for (int i = 0; i < count; i++) { + Range range = new Range(); + range.readFields(in); + ranges.add(range); + } + count = in.readInt(); + for (int i = 0; i < count; i++) { + columnFamilies.add(in.readUTF()); + } + exists = in.readBoolean(); + if (exists) { + lastResult = new Key(); + 
lastResult.readFields(in); + } + finished = in.readBoolean(); + } + + public QueryData(ObjectInput in) throws IOException { + readExternal(in); + } + + @Override + public int hashCode() { + if (rebuildHashCode) { + // @formatter:off + hashCode = new HashCodeBuilder() + .append(tableName) + .append(query) + .append(ranges) + .append(columnFamilies) + .append(settings) + .append(lastResult) + .append(finished) + .hashCode(); + rebuildHashCode = false; + // @formatter:on + } + return hashCode; + } + + public boolean equals(Object o) { + if (o instanceof QueryData) { + QueryData other = (QueryData) o; + // @formatter:off + return new EqualsBuilder() + .append(tableName, other.tableName) + .append(query, other.query) + .append(ranges, other.ranges) + .append(columnFamilies, other.columnFamilies) + .append(settings, other.settings) + .append(lastResult, other.lastResult) + .append(finished, other.finished) + .isEquals(); + // @formatter:on + } + return false; + } + + /** + * Method to reset the hashcode when an internal variable is updated + */ + private void resetHashCode() { + rebuildHashCode = true; + } + + /** + * A json deserializer for a list of IteratorSetting which handles the json deserialization issues. The accumulo IteratorSetting does not have a default + * constructor. + */ + public static class IteratorSettingListDeserializer extends StdDeserializer> { + private ObjectMapper mapper = new ObjectMapper(); + + public IteratorSettingListDeserializer() { + this(null); + } + + public IteratorSettingListDeserializer(Class valueClass) { + super(valueClass); + } + + @Override + public List deserialize(JsonParser parser, DeserializationContext deserializer) throws IOException, JsonProcessingException { + List list = new ArrayList<>(); + ObjectCodec codec = parser.getCodec(); + JsonNode node = codec.readTree(parser); + + for (int i = 0; i < node.size(); i++) { + list.add(getIteratorSetting(node.get(i))); + } + + return list; + } + + private IteratorSetting getIteratorSetting(JsonNode node) throws JsonProcessingException { + int priority = -1; + String name = null; + String iteratorClass = null; + Map options = null; + JsonNode child = node.get("priority"); + if (child != null) { + priority = child.asInt(); + } + child = node.get("name"); + if (child != null) { + name = child.asText(); + } + child = node.get("iteratorClass"); + if (child != null) { + iteratorClass = child.asText(); + } + child = node.get("options"); + if (child == null) { + child = node.get("properties"); + } + if (child != null) { + options = mapper.treeToValue(child, HashMap.class); + } + IteratorSetting setting = new IteratorSetting(priority, name, iteratorClass); + if (options != null) { + setting.addOptions(options); + } + return setting; + } + } + + /** + * A json deserializer for a list of Range which handles the json deserialization issues. The accumulo Range and Key classes do not have appropriate + * setters. + */ + public static class RangeListSerializer extends StdSerializer> { + private ObjectMapper mapper = new ObjectMapper(); + + public RangeListSerializer() { + this(null); + } + + public RangeListSerializer(Class> type) { + super(type); + } + + @Override + public void serialize(Collection ranges, JsonGenerator jgen, SerializerProvider provider) throws IOException { + jgen.writeStartArray(ranges == null ? 
0 : ranges.size()); + if (ranges != null) { + for (Range range : ranges) { + serialize(range, jgen, provider); + } + } + jgen.writeEndArray(); + } + + public void serialize(Range range, JsonGenerator jgen, SerializerProvider provider) throws IOException { + jgen.writeStartObject(); + if (range.getStartKey() != null) { + jgen.writeFieldName("startKey"); + new KeySerializer().serialize(range.getStartKey(), jgen, provider); + } + jgen.writeBooleanField("startKeyInclusive", range.isStartKeyInclusive()); + if (range.getEndKey() != null) { + jgen.writeFieldName("endKey"); + new KeySerializer().serialize(range.getEndKey(), jgen, provider); + } + jgen.writeBooleanField("endKeyInclusive", range.isEndKeyInclusive()); + jgen.writeEndObject(); + } + } + + /** + * A json deserializer for a list of Range which handles the json deserialization issues. The accumulo Range and Key classes do not have appropriate + * setters. + */ + public static class RangeListDeserializer extends StdDeserializer<Collection<Range>> { + public RangeListDeserializer() { + this(null); + } + + public RangeListDeserializer(Class<?> valueClass) { + super(valueClass); + } + + @Override + public Collection<Range> deserialize(JsonParser parser, DeserializationContext deserializer) throws IOException { + ObjectCodec codec = parser.getCodec(); + JsonNode node = codec.readTree(parser); + return deserialize(node); + } + + public Collection<Range> deserialize(JsonNode node) throws IOException { + Collection<Range> list = new ArrayList<>(); + for (int i = 0; i < node.size(); i++) { + list.add(getRange(node.get(i))); + } + return list; + } + + private Range getRange(JsonNode node) throws IOException { + JsonNode start = node.get("startKey"); + JsonNode startInclusive = node.get("startKeyInclusive"); + JsonNode end = node.get("endKey"); + JsonNode endInclusive = node.get("endKeyInclusive"); + return new Range(getKey(start), startInclusive.asBoolean(), getKey(end), endInclusive.asBoolean()); + } + + private Key getKey(JsonNode node) throws IOException { + return new KeyDeserializer().deserialize(node); + } + } + + /** + * A json serializer for a Key which handles the json serialization issues. The accumulo Range and Key classes do not have appropriate + * setters. + */ + public static class KeySerializer extends StdSerializer<Key> { + public KeySerializer() { + this(null); + } + + public KeySerializer(Class<Key> type) { + super(type); + } + + @Override + public void serialize(Key key, JsonGenerator jgen, SerializerProvider provider) throws IOException { + jgen.writeStartObject(); + jgen.writeBinaryField("row", key.getRowData().getBackingArray()); + jgen.writeBinaryField("cf", key.getColumnFamilyData().getBackingArray()); + jgen.writeBinaryField("cq", key.getColumnQualifierData().getBackingArray()); + jgen.writeBinaryField("cv", key.getColumnVisibility().getBytes()); + jgen.writeNumberField("ts", key.getTimestamp()); + jgen.writeBooleanField("d", key.isDeleted()); + jgen.writeEndObject(); + } + } + + /** + * A json deserializer for a Key which handles the json deserialization issues. The accumulo Range and Key classes do not have appropriate + * setters.
+ */ + public static class KeyDeserializer extends StdDeserializer { + public KeyDeserializer() { + this(null); + } + + public KeyDeserializer(Class type) { + super(type); + } + + @Override + public Key deserialize(JsonParser parser, DeserializationContext deserializer) throws IOException, JsonProcessingException { + ObjectCodec codec = parser.getCodec(); + JsonNode node = codec.readTree(parser); + return deserialize(node); + } + + public Key deserialize(JsonNode node) throws IOException { + if (node == null) { + return null; + } + JsonNode row = node.get("row"); + JsonNode cf = node.get("cf"); + JsonNode cq = node.get("cq"); + JsonNode cv = node.get("cv"); + JsonNode ts = node.get("ts"); + JsonNode d = node.get("d"); + return new Key(row.binaryValue(), cf.binaryValue(), cq.binaryValue(), cv.binaryValue(), ts.longValue(), d.booleanValue()); + } + } +} diff --git a/core/query/src/main/java/datawave/core/query/configuration/Result.java b/core/query/src/main/java/datawave/core/query/configuration/Result.java new file mode 100644 index 00000000000..37930e50c54 --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/configuration/Result.java @@ -0,0 +1,109 @@ +package datawave.core.query.configuration; + +import java.util.Iterator; +import java.util.Map; + +import javax.annotation.Nullable; + +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Value; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; + +import com.google.common.base.Function; +import com.google.common.base.Predicate; +import com.google.common.collect.Iterators; +import com.google.common.collect.Maps; + +public class Result implements Map.Entry { + private final T context; + private final Key key; + private Value value; + + public Result(Key k, Value v) { + this(null, k, v); + } + + public Result(T context, Key k, Value v) { + this.context = context; + this.key = k; + this.value = v; + } + + public T getContext() { + return context; + } + + @Override + public Key getKey() { + return key; + } + + @Override + public Value getValue() { + return value; + } + + @Override + public Value setValue(Value value) { + throw new UnsupportedOperationException("This value is immutable"); + } + + @Override + public boolean equals(Object o) { + if (o instanceof Result) { + Result other = (Result) o; + return new EqualsBuilder().append(context, other.context).append(key, other.key).append(value, other.value).isEquals(); + } + return false; + } + + @Override + public int hashCode() { + return new HashCodeBuilder().append(context).append(key).append(value).toHashCode(); + } + + public Map.Entry returnKeyValue() { + Map.Entry entry = (key == null ? null : Maps.immutableEntry(key, value)); + if (context != null) { + context.setLastResult(entry == null ? 
null : entry.getKey()); + } + return entry; + } + + public static Iterator> keyValueIterator(Iterator it) { + return Iterators.filter(Iterators.transform(it, new Function>() { + @Override + public Map.Entry apply(@Nullable Result input) { + if (input == null) { + return null; + } + return input.returnKeyValue(); + } + }), new Predicate>() { + @Override + public boolean apply(@Nullable Map.Entry keyValueEntry) { + return keyValueEntry != null; + } + }); + } + + public static Iterator resultIterator(final ResultContext context, Iterator> it) { + return Iterators.filter(Iterators.transform(it, new Function,Result>() { + @Nullable + @Override + public Result apply(@Nullable Map.Entry keyValueEntry) { + if (keyValueEntry == null) { + return null; + } + return new Result(context, keyValueEntry.getKey(), keyValueEntry.getValue()); + } + }), new Predicate() { + + @Override + public boolean apply(@Nullable Result result) { + return result != null; + } + }); + } +} diff --git a/core/query/src/main/java/datawave/core/query/configuration/ResultContext.java b/core/query/src/main/java/datawave/core/query/configuration/ResultContext.java new file mode 100644 index 00000000000..72cc4e5ea05 --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/configuration/ResultContext.java @@ -0,0 +1,15 @@ +package datawave.core.query.configuration; + +import org.apache.accumulo.core.data.Key; + +public interface ResultContext { + /** + * Set the last result returned. Setting a result of null denotes this scan is finished. + * + * @param result + * The last result + */ + void setLastResult(Key result); + + boolean isFinished(); +} diff --git a/web-services/query/src/main/java/datawave/webservice/query/dashboard/DashboardFields.java b/core/query/src/main/java/datawave/core/query/dashboard/DashboardFields.java similarity index 98% rename from web-services/query/src/main/java/datawave/webservice/query/dashboard/DashboardFields.java rename to core/query/src/main/java/datawave/core/query/dashboard/DashboardFields.java index 60a423ecf7d..fdf21e7f221 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/dashboard/DashboardFields.java +++ b/core/query/src/main/java/datawave/core/query/dashboard/DashboardFields.java @@ -1,4 +1,4 @@ -package datawave.webservice.query.dashboard; +package datawave.core.query.dashboard; import java.util.Arrays; import java.util.List; diff --git a/web-services/query/src/main/java/datawave/webservice/query/dashboard/DashboardSummary.java b/core/query/src/main/java/datawave/core/query/dashboard/DashboardSummary.java similarity index 99% rename from web-services/query/src/main/java/datawave/webservice/query/dashboard/DashboardSummary.java rename to core/query/src/main/java/datawave/core/query/dashboard/DashboardSummary.java index 43490002463..ca650cde5e8 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/dashboard/DashboardSummary.java +++ b/core/query/src/main/java/datawave/core/query/dashboard/DashboardSummary.java @@ -1,4 +1,4 @@ -package datawave.webservice.query.dashboard; +package datawave.core.query.dashboard; import java.util.Date; import java.util.Objects; diff --git a/web-services/query/src/main/java/datawave/webservice/query/exception/EmptyObjectException.java b/core/query/src/main/java/datawave/core/query/exception/EmptyObjectException.java similarity index 81% rename from web-services/query/src/main/java/datawave/webservice/query/exception/EmptyObjectException.java rename to 
core/query/src/main/java/datawave/core/query/exception/EmptyObjectException.java index a85d2bae3cf..4aba8872adf 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/exception/EmptyObjectException.java +++ b/core/query/src/main/java/datawave/core/query/exception/EmptyObjectException.java @@ -1,4 +1,4 @@ -package datawave.webservice.query.exception; +package datawave.core.query.exception; // used when a transformer gets a non-null empty object // and the TransformIterator should call next instead of returning null diff --git a/web-services/query/src/main/java/datawave/webservice/query/iterator/DatawaveTransformIterator.java b/core/query/src/main/java/datawave/core/query/iterator/DatawaveTransformIterator.java similarity index 91% rename from web-services/query/src/main/java/datawave/webservice/query/iterator/DatawaveTransformIterator.java rename to core/query/src/main/java/datawave/core/query/iterator/DatawaveTransformIterator.java index 3e667c77720..05e658369ee 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/iterator/DatawaveTransformIterator.java +++ b/core/query/src/main/java/datawave/core/query/iterator/DatawaveTransformIterator.java @@ -1,4 +1,4 @@ -package datawave.webservice.query.iterator; +package datawave.core.query.iterator; import java.util.Iterator; @@ -6,8 +6,8 @@ import org.apache.commons.collections4.iterators.TransformIterator; import org.apache.log4j.Logger; -import datawave.webservice.query.exception.EmptyObjectException; -import datawave.webservice.query.logic.Flushable; +import datawave.core.query.exception.EmptyObjectException; +import datawave.core.query.logic.Flushable; public class DatawaveTransformIterator extends TransformIterator { diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/AbstractQueryLogicTransformer.java b/core/query/src/main/java/datawave/core/query/logic/AbstractQueryLogicTransformer.java similarity index 93% rename from web-services/query/src/main/java/datawave/webservice/query/logic/AbstractQueryLogicTransformer.java rename to core/query/src/main/java/datawave/core/query/logic/AbstractQueryLogicTransformer.java index 132829b9280..70418539fc1 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/AbstractQueryLogicTransformer.java +++ b/core/query/src/main/java/datawave/core/query/logic/AbstractQueryLogicTransformer.java @@ -1,8 +1,8 @@ -package datawave.webservice.query.logic; +package datawave.core.query.logic; import java.util.List; -import datawave.webservice.query.cache.ResultsPage; +import datawave.core.query.cache.ResultsPage; import datawave.webservice.result.BaseQueryResponse; public abstract class AbstractQueryLogicTransformer implements QueryLogicTransformer { diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/BaseQueryLogic.java b/core/query/src/main/java/datawave/core/query/logic/BaseQueryLogic.java similarity index 78% rename from web-services/query/src/main/java/datawave/webservice/query/logic/BaseQueryLogic.java rename to core/query/src/main/java/datawave/core/query/logic/BaseQueryLogic.java index 3b8728c7fcd..d52daead974 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/BaseQueryLogic.java +++ b/core/query/src/main/java/datawave/core/query/logic/BaseQueryLogic.java @@ -1,6 +1,6 @@ -package datawave.webservice.query.logic; +package datawave.core.query.logic; -import java.security.Principal; +import java.util.Collection; import java.util.Collections; import java.util.Iterator; import 
java.util.List; @@ -15,12 +15,14 @@ import org.springframework.beans.factory.annotation.Required; import datawave.audit.SelectorExtractor; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.core.query.iterator.DatawaveTransformIterator; import datawave.marking.MarkingFunctions; +import datawave.microservice.query.Query; +import datawave.security.authorization.ProxiedUserDetails; import datawave.security.authorization.UserOperations; import datawave.webservice.common.audit.Auditor.AuditType; -import datawave.webservice.query.Query; -import datawave.webservice.query.configuration.GenericQueryConfiguration; -import datawave.webservice.query.iterator.DatawaveTransformIterator; +import datawave.webservice.common.connection.AccumuloClientConfiguration; import datawave.webservice.query.result.event.ResponseObjectFactory; public abstract class BaseQueryLogic implements QueryLogic { @@ -32,20 +34,24 @@ public abstract class BaseQueryLogic implements QueryLogic { private Map dnResultLimits = null; private Map systemFromResultLimits = null; protected long maxResults = -1L; + protected int maxConcurrentTasks = -1; protected ScannerBase scanner; @SuppressWarnings("unchecked") protected Iterator iterator = (Iterator) Collections.emptyList().iterator(); private int maxPageSize = 0; private long pageByteTrigger = 0; private boolean collectQueryMetrics = true; - private String _connPoolName; private Set authorizedDNs; - protected Principal principal; - protected RoleManager roleManager; + + protected ProxiedUserDetails currentUser; + protected ProxiedUserDetails serverUser; + + protected Set requiredRoles; protected MarkingFunctions markingFunctions; protected ResponseObjectFactory responseObjectFactory; protected SelectorExtractor selectorExtractor; protected ResponseEnricherBuilder responseEnricherBuilder = null; + protected AccumuloClientConfiguration clientConfig = null; public static final String BYPASS_ACCUMULO = "rfile.debug"; @@ -54,30 +60,28 @@ public BaseQueryLogic() { } public BaseQueryLogic(BaseQueryLogic other) { - // Generic Query Config variables - setTableName(other.getTableName()); - setMaxWork(other.getMaxWork()); - setMaxResults(other.getMaxResults()); - setBaseIteratorPriority(other.getBaseIteratorPriority()); - setBypassAccumulo(other.getBypassAccumulo()); - setAccumuloPassword(other.getAccumuloPassword()); - - // Other variables + // copy base config variables + this.baseConfig = new GenericQueryConfiguration(other.getConfig()); + + // copy other variables setMaxResults(other.maxResults); setMarkingFunctions(other.getMarkingFunctions()); setResponseObjectFactory(other.getResponseObjectFactory()); setLogicName(other.getLogicName()); setLogicDescription(other.getLogicDescription()); setAuditType(other.getAuditType(null)); + this.dnResultLimits = other.dnResultLimits; + this.systemFromResultLimits = other.systemFromResultLimits; this.scanner = other.scanner; this.iterator = other.iterator; setMaxPageSize(other.getMaxPageSize()); setPageByteTrigger(other.getPageByteTrigger()); setCollectQueryMetrics(other.getCollectQueryMetrics()); - setConnPoolName(other.getConnPoolName()); - setPrincipal(other.getPrincipal()); - setRoleManager(other.getRoleManager()); + this.authorizedDNs = other.authorizedDNs; + setRequiredRoles(other.getRequiredRoles()); setSelectorExtractor(other.getSelectorExtractor()); + setCurrentUser(other.getCurrentUser()); + setServerUser(other.getServerUser()); setResponseEnricherBuilder(other.getResponseEnricherBuilder()); } @@ -112,12 
+116,28 @@ public void setResponseObjectFactory(ResponseObjectFactory responseObjectFactory this.responseObjectFactory = responseObjectFactory; } - public Principal getPrincipal() { - return principal; + public ProxiedUserDetails getCurrentUser() { + return currentUser; + } + + public void setCurrentUser(ProxiedUserDetails currentUser) { + this.currentUser = currentUser; } - public void setPrincipal(Principal principal) { - this.principal = principal; + public ProxiedUserDetails getServerUser() { + return serverUser; + } + + public void setServerUser(ProxiedUserDetails serverUser) { + this.serverUser = serverUser; + } + + public Set getRequiredRoles() { + return requiredRoles; + } + + public void setRequiredRoles(Set requiredRoles) { + this.requiredRoles = requiredRoles; } @Override @@ -130,6 +150,11 @@ public long getMaxResults() { return this.maxResults; } + @Override + public int getMaxConcurrentTasks() { + return this.maxConcurrentTasks; + } + @Override @Deprecated public long getMaxRowsToScan() { @@ -151,6 +176,11 @@ public void setMaxResults(long maxResults) { this.maxResults = maxResults; } + @Override + public void setMaxConcurrentTasks(int maxConcurrentTasks) { + this.maxConcurrentTasks = maxConcurrentTasks; + } + @Override @Deprecated public void setMaxRowsToScan(long maxRowsToScan) { @@ -206,7 +236,8 @@ public final QueryLogicTransformer getEnrichedTransformer(Query settings) { .withConfig(getConfig()) .withMarkingFunctions(getMarkingFunctions()) .withResponseObjectFactory(responseObjectFactory) - .withPrincipal(getPrincipal()) + .withCurrentUser(getCurrentUser()) + .withServerUser(getServerUser()) .build(); //@formatter:on transformer.setResponseEnricher(enricher); @@ -294,33 +325,21 @@ public void setCollectQueryMetrics(boolean collectQueryMetrics) { this.collectQueryMetrics = collectQueryMetrics; } - public RoleManager getRoleManager() { - return roleManager; - } - - public void setRoleManager(RoleManager roleManager) { - this.roleManager = roleManager; - } - /** {@inheritDoc} */ @Override public String getConnPoolName() { - return _connPoolName; + return getConfig().getConnPoolName(); } /** {@inheritDoc} */ @Override public void setConnPoolName(final String connPoolName) { - _connPoolName = connPoolName; - } - - public boolean canRunQuery() { - return this.canRunQuery(this.getPrincipal()); + getConfig().setConnPoolName(connPoolName); } /** {@inheritDoc} */ - public boolean canRunQuery(Principal principal) { - return this.roleManager == null || this.roleManager.canRunQuery(this, principal); + public boolean canRunQuery(Collection userRoles) { + return this.requiredRoles == null || userRoles.containsAll(requiredRoles); } @Override @@ -414,4 +433,30 @@ public UserOperations getUserOperations() { // null implies that the local user operations/principal is to be used for auths. 
return null; } + + @Override + public void setClientConfig(AccumuloClientConfiguration clientConfig) { + this.clientConfig = clientConfig; + } + + @Override + public AccumuloClientConfiguration getClientConfig() { + return clientConfig; + } + + public Map getTableConsistencyLevels() { + return getConfig().getTableConsistencyLevels(); + } + + public void setTableConsistencyLevels(Map consistencyLevels) { + getConfig().setTableConsistencyLevels(consistencyLevels); + } + + public Map> getTableHints() { + return getConfig().getTableHints(); + } + + public void setTableHints(Map> hints) { + getConfig().setTableHints(hints); + } } diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/BaseQueryLogicTransformer.java b/core/query/src/main/java/datawave/core/query/logic/BaseQueryLogicTransformer.java similarity index 92% rename from web-services/query/src/main/java/datawave/webservice/query/logic/BaseQueryLogicTransformer.java rename to core/query/src/main/java/datawave/core/query/logic/BaseQueryLogicTransformer.java index b552892f20a..d7928802e41 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/BaseQueryLogicTransformer.java +++ b/core/query/src/main/java/datawave/core/query/logic/BaseQueryLogicTransformer.java @@ -1,4 +1,4 @@ -package datawave.webservice.query.logic; +package datawave.core.query.logic; import datawave.marking.MarkingFunctions; diff --git a/core/query/src/main/java/datawave/core/query/logic/CheckpointableQueryLogic.java b/core/query/src/main/java/datawave/core/query/logic/CheckpointableQueryLogic.java new file mode 100644 index 00000000000..3fefb160178 --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/logic/CheckpointableQueryLogic.java @@ -0,0 +1,60 @@ +package datawave.core.query.logic; + +import java.util.List; + +import org.apache.accumulo.core.client.AccumuloClient; + +import datawave.core.query.configuration.GenericQueryConfiguration; + +public interface CheckpointableQueryLogic { + + /** + * This will allow us to check if a query logic is actually checkpointable. Even if the query logic supports it, the caller may have to tell the query logic + * that it is going to be checkpointed. + * + * @return true if checkpointable + */ + boolean isCheckpointable(); + + /** + * This will tell the query logic that it is going to be checkpointed. + * + * @param checkpointable + * true if this query logic is to be treated as checkpointable + */ + void setCheckpointable(boolean checkpointable); + + /** + * This can be called at any point to get a checkpoint such that this query logic instance can be torn down to be rebuilt later. + * + * @param queryKey + * - the query key to include in the checkpoint + * @return The query checkpoints + */ + List checkpoint(QueryKey queryKey); + + /** + * This can be called at any point to update a checkpoint with its updated state. This will be called periodically while pulling results for a query task + * handling a previously returned checkpoint. + * + * @param checkpoint + * the checkpoint to update + * @return The updated checkpoint + */ + QueryCheckpoint updateCheckpoint(QueryCheckpoint checkpoint); + + /** + * Implementations use the configuration to set up execution of a portion of their query. getTransformIterator should be used to get the partial results if + * any.
+ * + * @param client + * - The accumulo connector + * @param config + * - The query configuration + * @param checkpoint + * - the checkpoint + * @throws Exception + * on failure + */ + void setupQuery(AccumuloClient client, GenericQueryConfiguration config, QueryCheckpoint checkpoint) throws Exception; + +} diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/DelegatingQueryLogic.java b/core/query/src/main/java/datawave/core/query/logic/DelegatingQueryLogic.java similarity index 85% rename from web-services/query/src/main/java/datawave/webservice/query/logic/DelegatingQueryLogic.java rename to core/query/src/main/java/datawave/core/query/logic/DelegatingQueryLogic.java index 1f090e9c3df..4a32a3324c2 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/DelegatingQueryLogic.java +++ b/core/query/src/main/java/datawave/core/query/logic/DelegatingQueryLogic.java @@ -1,6 +1,5 @@ -package datawave.webservice.query.logic; +package datawave.core.query.logic; -import java.security.Principal; import java.util.Collection; import java.util.Iterator; import java.util.List; @@ -12,12 +11,14 @@ import org.apache.commons.collections4.iterators.TransformIterator; import datawave.audit.SelectorExtractor; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.query.configuration.GenericQueryConfiguration; import datawave.marking.MarkingFunctions; +import datawave.microservice.query.Query; +import datawave.security.authorization.ProxiedUserDetails; import datawave.security.authorization.UserOperations; import datawave.webservice.common.audit.Auditor; -import datawave.webservice.common.connection.AccumuloConnectionFactory; -import datawave.webservice.query.Query; -import datawave.webservice.query.configuration.GenericQueryConfiguration; +import datawave.webservice.common.connection.AccumuloClientConfiguration; import datawave.webservice.query.exception.QueryException; import datawave.webservice.query.result.event.ResponseObjectFactory; @@ -227,16 +228,6 @@ public void setCollectQueryMetrics(boolean collectQueryMetrics) { delegate.setCollectQueryMetrics(collectQueryMetrics); } - @Override - public void setRoleManager(RoleManager roleManager) { - delegate.setRoleManager(roleManager); - } - - @Override - public RoleManager getRoleManager() { - return delegate.getRoleManager(); - } - @Override public Set getOptionalQueryParameters() { return delegate.getOptionalQueryParameters(); @@ -252,26 +243,6 @@ public String getConnPoolName() { return delegate.getConnPoolName(); } - @Override - public boolean canRunQuery(Principal principal) { - return delegate.canRunQuery(principal); - } - - @Override - public boolean canRunQuery() { - return delegate.canRunQuery(); - } - - @Override - public void setPrincipal(Principal principal) { - delegate.setPrincipal(principal); - } - - @Override - public Principal getPrincipal() { - return delegate.getPrincipal(); - } - @Override public MarkingFunctions getMarkingFunctions() { return delegate.getMarkingFunctions(); @@ -352,6 +323,51 @@ public void validate(Map> parameters) throws IllegalArgument delegate.validate(parameters); } + @Override + public int getMaxConcurrentTasks() { + return delegate.getMaxConcurrentTasks(); + } + + @Override + public void setMaxConcurrentTasks(int maxConcurrentTasks) { + delegate.setMaxConcurrentTasks(maxConcurrentTasks); + } + + @Override + public boolean canRunQuery(Collection userRoles) { + return delegate.canRunQuery(userRoles); + } + + @Override + public void 
setRequiredRoles(Set requiredRoles) { + delegate.setRequiredRoles(requiredRoles); + } + + @Override + public Set getRequiredRoles() { + return delegate.getRequiredRoles(); + } + + @Override + public ProxiedUserDetails getCurrentUser() { + return delegate.getCurrentUser(); + } + + @Override + public void setCurrentUser(ProxiedUserDetails currentUser) { + delegate.setCurrentUser(currentUser); + } + + @Override + public ProxiedUserDetails getServerUser() { + return delegate.getServerUser(); + } + + @Override + public void setServerUser(ProxiedUserDetails serverUser) { + delegate.setServerUser(serverUser); + } + @Override public UserOperations getUserOperations() { return delegate.getUserOperations(); @@ -361,4 +377,14 @@ public UserOperations getUserOperations() { public void preInitialize(Query settings, Set queryAuths) { delegate.preInitialize(settings, queryAuths); } + + @Override + public void setClientConfig(AccumuloClientConfiguration config) { + delegate.setClientConfig(config); + } + + @Override + public AccumuloClientConfiguration getClientConfig() { + return delegate.getClientConfig(); + } } diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/Flushable.java b/core/query/src/main/java/datawave/core/query/logic/Flushable.java similarity index 84% rename from web-services/query/src/main/java/datawave/webservice/query/logic/Flushable.java rename to core/query/src/main/java/datawave/core/query/logic/Flushable.java index 0f1ce5f216f..9b803ed0f29 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/Flushable.java +++ b/core/query/src/main/java/datawave/core/query/logic/Flushable.java @@ -1,6 +1,6 @@ -package datawave.webservice.query.logic; +package datawave.core.query.logic; -import datawave.webservice.query.exception.EmptyObjectException; +import datawave.core.query.exception.EmptyObjectException; public interface Flushable { diff --git a/core/query/src/main/java/datawave/core/query/logic/QueryCheckpoint.java b/core/query/src/main/java/datawave/core/query/logic/QueryCheckpoint.java new file mode 100644 index 00000000000..7322fbb1fd9 --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/logic/QueryCheckpoint.java @@ -0,0 +1,80 @@ +package datawave.core.query.logic; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Collection; + +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; + +import datawave.core.query.configuration.QueryData; + +/** + * A query checkpoint will be very different depending on the query logic. It is expected that whatever the query state is can be encoded in a map of + * properties. 
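A minimal sketch of the pause-and-resume flow that CheckpointableQueryLogic, QueryCheckpoint, and QueryKey enable, assuming a logic that implements both QueryLogic and CheckpointableQueryLogic; the pool and logic names are hypothetical and error handling is omitted:

    import java.util.List;

    import org.apache.accumulo.core.client.AccumuloClient;

    import datawave.core.query.configuration.GenericQueryConfiguration;
    import datawave.core.query.logic.CheckpointableQueryLogic;
    import datawave.core.query.logic.QueryCheckpoint;
    import datawave.core.query.logic.QueryKey;

    class CheckpointFlowSketch {
        static void pauseAndResume(CheckpointableQueryLogic logic, CheckpointableQueryLogic rebuilt, AccumuloClient client,
                        GenericQueryConfiguration config, String queryId) throws Exception {
            // Tell the logic up front that it will be checkpointed.
            logic.setCheckpointable(true);

            // Capture the current state so this instance can be torn down.
            QueryKey key = new QueryKey("pool1", queryId, "EventQuery");
            List<QueryCheckpoint> checkpoints = logic.checkpoint(key);

            // Later, possibly on another node: a freshly built logic resumes one task per
            // checkpoint, and partial results are pulled through its TransformIterator.
            rebuilt.setCheckpointable(true);
            rebuilt.setupQuery(client, config, checkpoints.get(0));
        }
    }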
+ */ +public class QueryCheckpoint implements Serializable { + private static final long serialVersionUID = -9201879510622137934L; + + private final QueryKey queryKey; + private final Collection queries; + + public QueryCheckpoint(String queryPool, String queryId, String queryLogic, Collection queries) { + this(new QueryKey(queryPool, queryId, queryLogic), queries); + } + + public QueryCheckpoint(QueryKey queryKey) { + this.queryKey = queryKey; + this.queries = null; + } + + public QueryCheckpoint(QueryKey queryKey, Collection queries) { + this.queryKey = queryKey; + this.queries = queries; + } + + public QueryCheckpoint(QueryCheckpoint checkpoint) { + this.queryKey = new QueryKey(checkpoint.queryKey.toString()); + this.queries = new ArrayList<>(checkpoint.queries.size()); + for (QueryData query : checkpoint.queries) { + this.queries.add(new QueryData(query)); + } + } + + /** + * Get the query key + * + * @return the query key + */ + public QueryKey getQueryKey() { + return queryKey; + } + + /** + * Get the QueryData objects representing the state of the query. + * + * @return The QueryData objects representing the query checkpoint + */ + public Collection getQueries() { + return queries; + } + + @Override + public String toString() { + return getQueryKey() + ": " + getQueries(); + } + + @Override + public boolean equals(Object o) { + if (o instanceof QueryCheckpoint) { + QueryCheckpoint other = (QueryCheckpoint) o; + return new EqualsBuilder().append(getQueryKey(), other.getQueryKey()).append(getQueries(), other.getQueries()).isEquals(); + } + return false; + } + + @Override + public int hashCode() { + return new HashCodeBuilder().append(getQueryKey()).append(getQueries()).toHashCode(); + } +} diff --git a/core/query/src/main/java/datawave/core/query/logic/QueryKey.java b/core/query/src/main/java/datawave/core/query/logic/QueryKey.java new file mode 100644 index 00000000000..c2e5e199f98 --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/logic/QueryKey.java @@ -0,0 +1,104 @@ +package datawave.core.query.logic; + +import java.io.Serializable; + +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + +public class QueryKey implements Serializable { + private static final long serialVersionUID = -2589618312956104322L; + + public static final String QUERY_ID_PREFIX = "Q-"; + public static final String POOL_PREFIX = "P-"; + public static final String LOGIC_PREFIX = "L-"; + + @JsonProperty + private String queryPool; + @JsonProperty + private String queryId; + @JsonProperty + private String queryLogic; + + /** + * Default constructor for deserialization + */ + public QueryKey() {} + + /** + * This method is to allow deserialization of the toKey() or toString() value used when this is in a map. + * + * @param value + * The toString() from a task key + */ + public QueryKey(String value) { + String[] parts = StringUtils.split(value, '.'); + for (String part : parts) { + setPart(part); + } + } + + protected void setPart(String part) { + if (part.startsWith(QUERY_ID_PREFIX)) { + queryId = part.substring(QUERY_ID_PREFIX.length()); + } else if (part.startsWith(POOL_PREFIX)) { + queryPool = part.substring(POOL_PREFIX.length()); + } else if (part.startsWith(LOGIC_PREFIX)) { + queryLogic = part.substring(LOGIC_PREFIX.length()); + } + } + + @JsonCreator + public
QueryKey(@JsonProperty("queryPool") String queryPool, @JsonProperty("queryId") String queryId, @JsonProperty("queryLogic") String queryLogic) { + this.queryPool = queryPool; + this.queryId = queryId; + this.queryLogic = queryLogic; + } + + public String getQueryPool() { + return queryPool; + } + + public String getQueryId() { + return queryId; + } + + public String getQueryLogic() { + return queryLogic; + } + + public static String toUUIDKey(String queryId) { + return QUERY_ID_PREFIX + queryId; + } + + public String toUUIDKey() { + return toUUIDKey(queryId); + } + + public String toKey() { + return toUUIDKey() + '.' + POOL_PREFIX + queryPool + '.' + LOGIC_PREFIX + queryLogic; + } + + @Override + public String toString() { + return toKey(); + } + + @Override + public boolean equals(Object o) { + if (o instanceof QueryKey) { + QueryKey other = (QueryKey) o; + return new EqualsBuilder().append(getQueryPool(), other.getQueryPool()).append(getQueryId(), other.getQueryId()) + .append(getQueryLogic(), other.getQueryLogic()).isEquals(); + } + return false; + } + + @Override + public int hashCode() { + return new HashCodeBuilder().append(getQueryPool()).append(getQueryId()).append(getQueryLogic()).toHashCode(); + } +} diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/QueryLogic.java b/core/query/src/main/java/datawave/core/query/logic/QueryLogic.java similarity index 88% rename from web-services/query/src/main/java/datawave/webservice/query/logic/QueryLogic.java rename to core/query/src/main/java/datawave/core/query/logic/QueryLogic.java index 872fbf2e519..a85ddfb3f29 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/QueryLogic.java +++ b/core/query/src/main/java/datawave/core/query/logic/QueryLogic.java @@ -1,6 +1,5 @@ -package datawave.webservice.query.logic; +package datawave.core.query.logic; -import java.security.Principal; import java.util.Collection; import java.util.List; import java.util.Map; @@ -11,15 +10,16 @@ import org.apache.commons.collections4.iterators.TransformIterator; import datawave.audit.SelectorExtractor; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.query.cache.ResultsPage; +import datawave.core.query.configuration.GenericQueryConfiguration; import datawave.marking.MarkingFunctions; +import datawave.microservice.query.Query; +import datawave.security.authorization.ProxiedUserDetails; import datawave.security.authorization.UserOperations; import datawave.validation.ParameterValidator; import datawave.webservice.common.audit.Auditor.AuditType; -import datawave.webservice.common.connection.AccumuloConnectionFactory; -import datawave.webservice.query.Query; -import datawave.webservice.query.QueryImpl; -import datawave.webservice.query.cache.ResultsPage; -import datawave.webservice.query.configuration.GenericQueryConfiguration; +import datawave.webservice.common.connection.AccumuloClientConfiguration; import datawave.webservice.query.exception.DatawaveErrorCode; import datawave.webservice.query.exception.QueryException; import datawave.webservice.query.result.event.ResponseObjectFactory; @@ -29,7 +29,7 @@ public interface QueryLogic extends Iterable, Cloneable, ParameterValidator /** * A mechanism to get the normalized query without actually setting up the query. This can be called without having to call initialize. - + *

* The default implementation is to return the query string as the normalized query * * @param client @@ -65,7 +65,6 @@ String getPlan(AccumuloClient client, Query settings, Set runtim GenericQueryConfiguration initialize(AccumuloClient client, Query settings, Set runtimeQueryAuthorizations) throws Exception; /** - * * @param settings * - query settings (query, begin date, end date, etc.) * @return list of selectors used in the Query @@ -105,6 +104,10 @@ String getPlan(AccumuloClient client, Query settings, Set runtim QueryLogicTransformer getEnrichedTransformer(Query settings); + default ResultPostprocessor getResultPostprocessor(GenericQueryConfiguration config) { + return new ResultPostprocessor.IdentityResultPostprocessor(); + } + default String getResponseClass(Query query) throws QueryException { try { QueryLogicTransformer t = this.getEnrichedTransformer(query); @@ -136,7 +139,9 @@ default String getResponseClass(Query query) throws QueryException { */ void close(); - /** @return the tableName */ + /** + * @return the tableName + */ String getTableName(); /** @@ -144,6 +149,11 @@ default String getResponseClass(Query query) throws QueryException { */ long getMaxResults(); + /** + * @return max number of concurrent tasks to run for this query + */ + int getMaxConcurrentTasks(); + /** * @return the results of getMaxWork */ @@ -184,6 +194,12 @@ default String getResponseClass(Query query) throws QueryException { */ void setMaxResults(long maxResults); + /** + * @param maxConcurrentTasks + * max number of concurrent tasks to run for this query + */ + void setMaxConcurrentTasks(int maxConcurrentTasks); + /** * @param maxRowsToScan * This is now deprecated and setMaxWork should be used instead. This is equivalent to setMaxWork. @@ -268,10 +284,6 @@ default String getResponseClass(Query query) throws QueryException { */ void setCollectQueryMetrics(boolean collectQueryMetrics); - void setRoleManager(RoleManager roleManager); - - RoleManager getRoleManager(); - /** * List of parameters that can be used in the 'params' parameter to Query/create * @@ -285,23 +297,23 @@ default String getResponseClass(Query query) throws QueryException { */ void setConnPoolName(String connPoolName); - /** @return the connPoolName */ + /** + * @return the connPoolName + */ String getConnPoolName(); /** - * Check that the user has one of the required roles principal my be null when there is no intent to control access to QueryLogic + * Check that the user has one of the required roles. userRoles may be null when there is no intent to control access to QueryLogic * - * @param principal - * the principal + * @param userRoles + * The user's roles * @return true/false */ - boolean canRunQuery(Principal principal); + boolean canRunQuery(Collection userRoles); - boolean canRunQuery(); // uses member Principal + void setRequiredRoles(Set requiredRoles); - void setPrincipal(Principal principal); - - Principal getPrincipal(); + Set getRequiredRoles(); MarkingFunctions getMarkingFunctions(); @@ -439,9 +451,34 @@ default long getResultLimit(Query settings) { * it to filter this call as well. Most query logics will not implement this. * * @param settings + * query settings * @param userAuthorizations + * a set of user authorizations */ default void preInitialize(Query settings, Set userAuthorizations) { // noop } + + /** + * Set a client configuration for scanner hints and consistency. 
+ * + * @param config + */ + void setClientConfig(AccumuloClientConfiguration config); + + /** + * Get the client configuration + * + * @return client configuration + */ + AccumuloClientConfiguration getClientConfig(); + + ProxiedUserDetails getCurrentUser(); + + void setCurrentUser(ProxiedUserDetails currentUser); + + ProxiedUserDetails getServerUser(); + + void setServerUser(ProxiedUserDetails serverUser); + } diff --git a/core/query/src/main/java/datawave/core/query/logic/QueryLogicFactory.java b/core/query/src/main/java/datawave/core/query/logic/QueryLogicFactory.java new file mode 100644 index 00000000000..d2b0fa3031e --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/logic/QueryLogicFactory.java @@ -0,0 +1,29 @@ +package datawave.core.query.logic; + +import java.util.List; + +import datawave.security.authorization.ProxiedUserDetails; +import datawave.webservice.query.exception.QueryException; + +public interface QueryLogicFactory { + + /** + * + * @param name + * name of query logic + * @param currentUser + * the current user + * @return new instance of QueryLogic class + * @throws IllegalArgumentException + * if query logic name does not exist + * @throws QueryException + * if query not available for user's roles + * @throws CloneNotSupportedException + * if the query logic object failed to clone + */ + QueryLogic getQueryLogic(String name, ProxiedUserDetails currentUser) throws QueryException, IllegalArgumentException, CloneNotSupportedException; + + QueryLogic getQueryLogic(String name) throws QueryException, IllegalArgumentException, CloneNotSupportedException; + + List> getQueryLogicList(); +} diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/QueryLogicTransformer.java b/core/query/src/main/java/datawave/core/query/logic/QueryLogicTransformer.java similarity index 86% rename from web-services/query/src/main/java/datawave/webservice/query/logic/QueryLogicTransformer.java rename to core/query/src/main/java/datawave/core/query/logic/QueryLogicTransformer.java index 7e5c36f2c94..2ffed854293 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/QueryLogicTransformer.java +++ b/core/query/src/main/java/datawave/core/query/logic/QueryLogicTransformer.java @@ -1,9 +1,9 @@ -package datawave.webservice.query.logic; +package datawave.core.query.logic; import org.apache.commons.collections4.Transformer; -import datawave.webservice.query.cache.ResultsPage; -import datawave.webservice.query.exception.EmptyObjectException; +import datawave.core.query.cache.ResultsPage; +import datawave.core.query.exception.EmptyObjectException; import datawave.webservice.result.BaseQueryResponse; public interface QueryLogicTransformer extends Transformer { diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/ResponseEnricher.java b/core/query/src/main/java/datawave/core/query/logic/ResponseEnricher.java similarity index 79% rename from web-services/query/src/main/java/datawave/webservice/query/logic/ResponseEnricher.java rename to core/query/src/main/java/datawave/core/query/logic/ResponseEnricher.java index f79abab728b..5d39b014929 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/ResponseEnricher.java +++ b/core/query/src/main/java/datawave/core/query/logic/ResponseEnricher.java @@ -1,4 +1,4 @@ -package datawave.webservice.query.logic; +package datawave.core.query.logic; import datawave.webservice.result.BaseQueryResponse; diff --git 
a/web-services/query/src/main/java/datawave/webservice/query/logic/ResponseEnricherBuilder.java b/core/query/src/main/java/datawave/core/query/logic/ResponseEnricherBuilder.java similarity index 59% rename from web-services/query/src/main/java/datawave/webservice/query/logic/ResponseEnricherBuilder.java rename to core/query/src/main/java/datawave/core/query/logic/ResponseEnricherBuilder.java index bb9a0407346..8983a9b0d48 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/ResponseEnricherBuilder.java +++ b/core/query/src/main/java/datawave/core/query/logic/ResponseEnricherBuilder.java @@ -1,9 +1,8 @@ -package datawave.webservice.query.logic; - -import java.security.Principal; +package datawave.core.query.logic; +import datawave.core.query.configuration.GenericQueryConfiguration; import datawave.marking.MarkingFunctions; -import datawave.webservice.query.configuration.GenericQueryConfiguration; +import datawave.security.authorization.ProxiedUserDetails; import datawave.webservice.query.result.event.ResponseObjectFactory; public interface ResponseEnricherBuilder { @@ -13,7 +12,9 @@ public interface ResponseEnricherBuilder { public ResponseEnricherBuilder withResponseObjectFactory(ResponseObjectFactory factory); - public ResponseEnricherBuilder withPrincipal(Principal principal); + public ResponseEnricherBuilder withCurrentUser(ProxiedUserDetails user); + + public ResponseEnricherBuilder withServerUser(ProxiedUserDetails user); public ResponseEnricher build(); } diff --git a/core/query/src/main/java/datawave/core/query/logic/ResultPostprocessor.java b/core/query/src/main/java/datawave/core/query/logic/ResultPostprocessor.java new file mode 100644 index 00000000000..a68620aa619 --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/logic/ResultPostprocessor.java @@ -0,0 +1,24 @@ +package datawave.core.query.logic; + +import java.util.List; + +/** + * Result Postprocessors are needed by the query microservices for certain query logics which need their results manipulated in some way. An example would be + * the CountingShardQueryLogic, which needs its events combined into a single event representing the final count for the query. Other query logics may have + * other uses for postprocessing aside from reducing/combining results. + */ +public interface ResultPostprocessor { + /** + * The apply method is called each time a result is added to the list. 
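A minimal implementation sketch, assuming purely for illustration that each raw result is a Number carrying a partial count, in the spirit of the CountingShardQueryLogic example above (the List<Object> signature is also assumed):

    import java.util.List;

    import datawave.core.query.logic.ResultPostprocessor;

    public class SummingResultPostprocessor implements ResultPostprocessor {
        @Override
        public void apply(List<Object> results) {
            // Collapse all accumulated partial counts into a single total.
            if (results.size() > 1) {
                long total = 0L;
                for (Object result : results) {
                    total += ((Number) result).longValue(); // assumed result shape
                }
                results.clear();
                results.add(total);
            }
        }
    }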
+ * + * @param results + * The results to be returned to the user + */ + void apply(List results); + + class IdentityResultPostprocessor implements ResultPostprocessor { + public void apply(List results) { + // do nothing + } + } +} diff --git a/core/query/src/main/java/datawave/core/query/logic/WritesQueryMetrics.java b/core/query/src/main/java/datawave/core/query/logic/WritesQueryMetrics.java new file mode 100644 index 00000000000..d8ecf4a2313 --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/logic/WritesQueryMetrics.java @@ -0,0 +1,24 @@ +package datawave.core.query.logic; + +import datawave.microservice.querymetric.BaseQueryMetric; + +public interface WritesQueryMetrics { + + void writeQueryMetrics(BaseQueryMetric metric); + + public boolean hasMetrics(); + + public long getSourceCount(); + + public long getNextCount(); + + public long getSeekCount(); + + public long getYieldCount(); + + public long getDocRanges(); + + public long getFiRanges(); + + public void resetMetrics(); +} diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/WritesResultCardinalities.java b/core/query/src/main/java/datawave/core/query/logic/WritesResultCardinalities.java similarity index 67% rename from web-services/query/src/main/java/datawave/webservice/query/logic/WritesResultCardinalities.java rename to core/query/src/main/java/datawave/core/query/logic/WritesResultCardinalities.java index 6d990992a1c..57bb4388ad6 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/WritesResultCardinalities.java +++ b/core/query/src/main/java/datawave/core/query/logic/WritesResultCardinalities.java @@ -1,4 +1,4 @@ -package datawave.webservice.query.logic; +package datawave.core.query.logic; public interface WritesResultCardinalities { diff --git a/core/query/src/main/java/datawave/core/query/logic/composite/CompositeLogicException.java b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeLogicException.java new file mode 100644 index 00000000000..97db9d0f61c --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeLogicException.java @@ -0,0 +1,100 @@ +package datawave.core.query.logic.composite; + +import java.util.Collection; +import java.util.Collections; +import java.util.Map; + +import datawave.webservice.query.exception.QueryException; + +public class CompositeLogicException extends RuntimeException { + + public CompositeLogicException(String message, String logicName, Exception exception) { + super(getMessage(message, Collections.singletonMap(logicName, exception)), getRaisedQueryException(exception)); + } + + public CompositeLogicException(String message, Map exceptions) { + super(getMessage(message, exceptions), getCause(exceptions.values())); + if (exceptions.size() > 1) { + exceptions.values().forEach(this::addSuppressed); + } + } + + /** + * Return the cause to use, prioritizing the first {@link QueryException} instance that we see. In the case where the {@link QueryException} is found to be + * the cause or further nested in the stack of an {@link Exception}, a {@link CompositeRaisedQueryException} will be returned with the query exception's + * error code, and the original exception as the cause. This is necessary to ensure the error code is passed to query metrics. 
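A minimal usage sketch of the constructors above, with hypothetical logic names (the single-argument QueryException constructor is assumed):

    import java.util.HashMap;
    import java.util.Map;

    import datawave.core.query.logic.composite.CompositeLogicException;
    import datawave.webservice.query.exception.QueryException;

    class CompositeFailureSketch {
        static void reportFailures() {
            Map<String, Exception> failures = new HashMap<>();
            failures.put("EventQuery", new RuntimeException("tablet server unavailable"));
            failures.put("ErrorEventQuery", new RuntimeException(new QueryException("bad term")));
            // The message lists every logic with its error; with more than one failure the
            // individual exceptions are also attached as suppressed exceptions, and the
            // cause prefers a failure carrying a QueryException so its error code reaches
            // query metrics.
            throw new CompositeLogicException("Failed to initialize query logics", failures);
        }
    }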
+ */ + private static Exception getCause(Collection exceptions) { + if (exceptions.size() == 1) { + return exceptions.iterator().next(); + } + Exception cause = null; + for (Exception exception : exceptions) { + // Establish the initial cause as the first seen exception. + if (cause == null) { + cause = getRaisedQueryException(exception); + // If the first cause we see is a QueryException, there's nothing further to do. + if (cause instanceof QueryException) { + return cause; + } + // If a subsequent exception is a or contains a QueryException in its stack, return it with the query exception error code available at the root + // exception. + } else if (hasQueryExceptionInStack(exception)) { + return getRaisedQueryException(exception); + } + } + return cause; + } + + /** + * Return whether the given throwable contains at least one {@link QueryException} in its stack trace (including itself). + */ + private static boolean hasQueryExceptionInStack(Throwable throwable) { + return getFirstQueryExceptionInStack(throwable) != null; + } + + /** + * Return the given exception with query exception's error code (if present) available at the root exception. This means one of the following cases will + * occur: + *
+ * <ul>
+ * <li>The exception is not a {@link QueryException} and no {@link QueryException} exists in the exception's stack: The exception will be returned.</li>
+ * <li>The exception is a {@link QueryException}: The exception will be returned.</li>
+ * <li>The exception is not a {@link QueryException}, but a {@link QueryException} exists in the exception's stack. A {@link CompositeRaisedQueryException}
+ * will be returned with the error code of the first {@link QueryException} found in the stack, and the original exception as its cause.</li>
+ * </ul>
+ */ + private static Exception getRaisedQueryException(Exception exception) { + if (exception instanceof QueryException) { + return exception; + } else { + // TODO - should we fetch the top-most or bottom-most query exception in the stack? + QueryException queryException = getFirstQueryExceptionInStack(exception); + if (queryException != null) { + return new CompositeRaisedQueryException(exception, queryException.getErrorCode()); + } else { + return exception; + } + } + } + + /** + * Return the first {@link QueryException} found in the stack, or null if none were found. + */ + private static QueryException getFirstQueryExceptionInStack(Throwable throwable) { + if (throwable != null) { + if (throwable instanceof QueryException) { + return (QueryException) throwable; + } else { + return getFirstQueryExceptionInStack(throwable.getCause()); + } + } + return null; + } + + private static String getMessage(String message, Map exceptions) { + StringBuilder builder = new StringBuilder(); + builder.append(message).append(":"); + exceptions.forEach((key, value) -> builder.append('\n').append(key).append(": ").append(value.getMessage())); + return builder.toString(); + } +} diff --git a/core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryCheckpoint.java b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryCheckpoint.java new file mode 100644 index 00000000000..623248635cd --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryCheckpoint.java @@ -0,0 +1,22 @@ +package datawave.core.query.logic.composite; + +import java.io.Serializable; + +import datawave.core.query.logic.QueryCheckpoint; + +public class CompositeQueryCheckpoint extends QueryCheckpoint implements Serializable { + protected String delegateQueryLogic; + + public CompositeQueryCheckpoint(String delegateQueryLogic, QueryCheckpoint checkpoint) { + super(checkpoint); + this.delegateQueryLogic = delegateQueryLogic; + } + + public String getDelegateQueryLogic() { + return delegateQueryLogic; + } + + public void setDelegateQueryLogic(String delegateQueryLogic) { + this.delegateQueryLogic = delegateQueryLogic; + } +} diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryConfiguration.java b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryConfiguration.java similarity index 81% rename from web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryConfiguration.java rename to core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryConfiguration.java index e7bf6cc3a98..3931adc67a5 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryConfiguration.java +++ b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryConfiguration.java @@ -1,14 +1,16 @@ -package datawave.webservice.query.logic.composite; +package datawave.core.query.logic.composite; import java.io.Serializable; +import java.util.HashMap; +import java.util.Map; -import datawave.webservice.query.Query; -import datawave.webservice.query.QueryImpl; -import datawave.webservice.query.configuration.GenericQueryConfiguration; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.microservice.query.Query; +import datawave.microservice.query.QueryImpl; public class CompositeQueryConfiguration extends GenericQueryConfiguration implements Serializable { - private Query query = null; + private 
Map configs = new HashMap<>(); // Specifies whether all queries must succeed initialization private boolean allMustInitialize = false; @@ -18,7 +20,7 @@ public class CompositeQueryConfiguration extends GenericQueryConfiguration imple public CompositeQueryConfiguration() { super(); - query = new QueryImpl(); + setQuery(new QueryImpl()); } /** @@ -82,12 +84,16 @@ public static CompositeQueryConfiguration create(CompositeQueryLogic compositeQu return config; } - public Query getQuery() { - return query; + public GenericQueryConfiguration getConfig(String logicName) { + return configs != null ? configs.get(logicName) : null; } - public void setQuery(Query query) { - this.query = query; + public Map getConfigs() { + return configs; + } + + public void setConfigs(Map configs) { + this.configs = configs; } public boolean isAllMustInitialize() { diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryLogic.java b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryLogic.java similarity index 67% rename from web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryLogic.java rename to core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryLogic.java index 2416ef19ab1..e4a6c022328 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryLogic.java +++ b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryLogic.java @@ -1,6 +1,5 @@ -package datawave.webservice.query.logic.composite; +package datawave.core.query.logic.composite; -import java.security.Principal; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -17,25 +16,29 @@ import org.apache.accumulo.core.client.AccumuloClient; import org.apache.accumulo.core.security.Authorizations; -import org.apache.commons.collections4.functors.NOPTransformer; import org.apache.commons.collections4.iterators.TransformIterator; import org.apache.log4j.Logger; import com.google.common.base.Joiner; +import com.google.common.collect.Iterables; import datawave.audit.SelectorExtractor; +import datawave.core.common.connection.AccumuloConnectionFactory.Priority; +import datawave.core.query.cache.ResultsPage; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.core.query.exception.EmptyObjectException; +import datawave.core.query.logic.BaseQueryLogic; +import datawave.core.query.logic.CheckpointableQueryLogic; +import datawave.core.query.logic.QueryCheckpoint; +import datawave.core.query.logic.QueryKey; +import datawave.core.query.logic.QueryLogic; +import datawave.core.query.logic.QueryLogicTransformer; +import datawave.core.query.logic.filtered.FilteredQueryLogic; +import datawave.microservice.authorization.util.AuthorizationsUtil; +import datawave.microservice.query.Query; import datawave.security.authorization.AuthorizationException; -import datawave.security.authorization.DatawavePrincipal; +import datawave.security.authorization.ProxiedUserDetails; import datawave.security.authorization.UserOperations; -import datawave.security.util.WSAuthorizationsUtil; -import datawave.webservice.common.connection.AccumuloConnectionFactory.Priority; -import datawave.webservice.query.Query; -import datawave.webservice.query.cache.ResultsPage; -import datawave.webservice.query.configuration.GenericQueryConfiguration; -import datawave.webservice.query.exception.EmptyObjectException; -import 
datawave.webservice.query.logic.BaseQueryLogic; -import datawave.webservice.query.logic.QueryLogic; -import datawave.webservice.query.logic.QueryLogicTransformer; import datawave.webservice.query.result.event.EventBase; import datawave.webservice.result.BaseResponse; @@ -45,10 +48,9 @@ * object. If configured to run sequentially, then the execution will terminate after the first query that returns results. Query logics will be sorted by their * configured name. */ -public class CompositeQueryLogic extends BaseQueryLogic { +public class CompositeQueryLogic extends BaseQueryLogic implements CheckpointableQueryLogic { private class QueryLogicHolder extends Thread { - private GenericQueryConfiguration config; private String logicName; private QueryLogic logic; private TransformIterator transformIterator; @@ -84,14 +86,6 @@ public void setLogic(QueryLogic logic) { this.logic = logic; } - public GenericQueryConfiguration getConfig() { - return config; - } - - public void setConfig(GenericQueryConfiguration config) { - this.config = config; - } - public void setTransformIterator(TransformIterator transformIterator) { this.transformIterator = transformIterator; } @@ -115,7 +109,7 @@ public void setSettings(Query settings) { public void run() { long resultCount = 0L; - log.trace("Starting thread: " + this.getName()); + log.debug("Starting thread: " + this.getName()); if (!started) { startLatch.countDown(); @@ -142,19 +136,17 @@ public void run() { // special logic to deal with intermediate results if (last instanceof EventBase && ((EventBase) last).isIntermediateResult()) { - resetPageProcessingStartTime(); // reset the page processing time to avoid getting spammed with these + resetPageProcessingStartTime(); // let the RunningQuery handle timeouts for long-running queries - if (isLongRunningQuery()) { - last = null; - } - } - - if (last != null) { + log.debug(Thread.currentThread().getName() + ": received intermediate result"); + } else { results.add(last); resultCount++; log.debug(Thread.currentThread().getName() + ": Added result to queue"); } + } else { + log.debug(Thread.currentThread().getName() + ": Got null result"); } } catch (InterruptedException e) { // if this was on purpose, then just log and the loop will naturally exit @@ -176,7 +168,7 @@ public void run() { if (success) { completionLatch.countDown(); } - log.trace("Finished thread: " + this.getName() + " with success = " + success); + log.debug("Finished thread: " + this.getName() + " with success = " + success); } } @@ -213,26 +205,26 @@ public CompositeQueryLogic(CompositeQueryLogic other) { throw new RuntimeException(e); } } - setPrincipal(other.getPrincipal()); + setCurrentUser(other.getCurrentUser()); } public Set updateRuntimeAuthorizationsAndQueryAuths(QueryLogic logic, Query settings) throws AuthorizationException { - Set requestedAuths = new HashSet<>(WSAuthorizationsUtil.splitAuths(settings.getQueryAuthorizations())); + Set requestedAuths = new HashSet<>(AuthorizationsUtil.splitAuths(settings.getQueryAuthorizations())); // determine the valid authorizations for this call to be the user's auths for this logic - DatawavePrincipal principal = (DatawavePrincipal) logic.getPrincipal(); - DatawavePrincipal queryPrincipal = principal; + ProxiedUserDetails currentUser = logic.getCurrentUser(); + ProxiedUserDetails queryUser = currentUser; UserOperations userOperations = getUserOperations(); if (userOperations != null) { - principal = userOperations.getRemoteUser(principal); + currentUser = 
userOperations.getRemoteUser(currentUser); } - logic.preInitialize(settings, WSAuthorizationsUtil.buildAuthorizations(Collections.singleton(requestedAuths))); + logic.preInitialize(settings, AuthorizationsUtil.buildAuthorizations(Collections.singleton(requestedAuths))); if (logic.getUserOperations() != null) { - queryPrincipal = logic.getUserOperations().getRemoteUser(queryPrincipal); + queryUser = logic.getUserOperations().getRemoteUser(queryUser); } // get the valid auths from the query user - Collection validAuths = queryPrincipal.getPrimaryUser().getAuths(); + Collection validAuths = queryUser.getPrimaryUser().getAuths(); Set validRequestedAuths = new HashSet<>(requestedAuths); validRequestedAuths.retainAll(validAuths); String validQueryAuthorizations = Joiner.on(',').join(validRequestedAuths); @@ -241,9 +233,9 @@ public Set updateRuntimeAuthorizationsAndQueryAuths(QueryLogic downgradedAuths = WSAuthorizationsUtil.getDowngradedAuthorizations(validQueryAuthorizations, principal, queryPrincipal); + Set downgradedAuths = AuthorizationsUtil.getDowngradedAuthorizations(validQueryAuthorizations, currentUser, queryUser); if (log.isTraceEnabled()) { - log.trace("Principal auths for user " + principal.getPrimaryUser().getCommonName() + " are " + principal.getPrimaryUser().getAuths()); + log.trace("Principal auths for user " + currentUser.getPrimaryUser().getCommonName() + " are " + currentUser.getPrimaryUser().getAuths()); log.trace("Query principal auths for " + logic.getLogicName() + " are " + validAuths); log.trace("Requested auths were " + requestedAuths + " of which the valid query auths are " + validQueryAuthorizations); log.trace("Downgraded auths are " + downgradedAuths); @@ -257,47 +249,65 @@ public GenericQueryConfiguration initialize(AccumuloClient client, Query setting StringBuilder logicQueryStringBuilder = new StringBuilder(); if (!getInitializedLogics().isEmpty()) { logicQueryStringBuilder.append(getConfig().getQueryString()); - } else { - logicQueryStringBuilder.append("CompositeQueryLogic: "); } Map exceptions = new HashMap<>(); if (!getUninitializedLogics().isEmpty()) { + Map configs = new HashMap<>(); for (Map.Entry> next : getUninitializedLogics().entrySet()) { String logicName = next.getKey(); QueryLogic logic = next.getValue(); GenericQueryConfiguration config = null; + + // start the next query logic plan expression + if (logicQueryStringBuilder.length() > 0) { + logicQueryStringBuilder.append(" || "); + } + + logicQueryStringBuilder.append("( "); + logicQueryStringBuilder.append("( logic = '").append(logicName).append("' )"); + try { // duplicate the settings for this query Query settingsCopy = settings.duplicate(settings.getQueryName() + " -> " + logicName); + // ensure we use the same query id + settingsCopy.setId(settings.getId()); + // update the query auths and runtime query authorizations for this logic runtimeQueryAuthorizations = updateRuntimeAuthorizationsAndQueryAuths(logic, settingsCopy); config = logic.initialize(client, settingsCopy, runtimeQueryAuthorizations); - if (logicQueryStringBuilder.length() > 0) { - logicQueryStringBuilder.append(" || "); - } - logicQueryStringBuilder.append("( ( logic = '").append(logicName).append("' )"); - logicQueryStringBuilder.append(" && ").append(config.getQueryString()).append(" )"); - QueryLogicHolder holder = new QueryLogicHolder(logicName, logic); - holder.setConfig(config); - holder.setSettings(settingsCopy); - holder.setMaxResults(logic.getMaxResults()); - logicState.put(logicName, holder); - - // if doing sequential 
execution, then stop since we have one initialized - if (isShortCircuitExecution()) { - break; + + // only add this query logic to the initialized logic states if it was not simply filtered out + if (logic instanceof FilteredQueryLogic && ((FilteredQueryLogic) logic).isFiltered()) { + log.info("Dropping " + logic.getLogicName() + " as it was filtered out"); + logicQueryStringBuilder.append(" && ").append("( filtered = true )"); + } else { + logicQueryStringBuilder.append(" && ").append(config.getQueryString()); + QueryLogicHolder holder = new QueryLogicHolder(logicName, logic); + holder.setSettings(settingsCopy); + holder.setMaxResults(logic.getResultLimit(settingsCopy)); + configs.put(logicName, config); + logicState.put(logicName, holder); + + // if doing sequential execution, then stop since we have one initialized + if (isShortCircuitExecution()) { + break; + } } } catch (Exception e) { exceptions.put(logicName, e); log.error("Failed to initialize " + logic.getClass().getName(), e); + logicQueryStringBuilder.append(" && ").append("( failure = '").append(e.getMessage()).append("' )"); failedQueryLogics.put(logicName, logic); } finally { queryLogics.remove(next.getKey()); } + + // close out the query plan expression + logicQueryStringBuilder.append(" )"); } // if something failed initialization @@ -332,6 +342,7 @@ public GenericQueryConfiguration initialize(AccumuloClient client, Query setting final String compositeQueryString = logicQueryStringBuilder.toString(); CompositeQueryConfiguration config = getConfig(); + config.setConfigs(configs); config.setQueryString(compositeQueryString); config.setClient(client); config.setQuery(settings); @@ -378,9 +389,12 @@ public String getPlan(AccumuloClient client, Query settings, Set public void setupQuery(GenericQueryConfiguration configuration) throws Exception { int count = 0; + CompositeQueryConfiguration compositeConfig = (CompositeQueryConfiguration) configuration; + for (QueryLogicHolder holder : logicState.values()) { if (!holder.wasStarted()) { - holder.getLogic().setupQuery(holder.getConfig()); + GenericQueryConfiguration config = compositeConfig != null ? 
compositeConfig.getConfig(holder.getLogicName()) : null; + holder.getLogic().setupQuery(config); TransformIterator transformIterator = holder.getLogic().getTransformIterator(holder.getSettings()); holder.setTransformIterator(transformIterator); count++; @@ -415,22 +429,25 @@ public void setConnectionPriority(String priority) { */ @Override public synchronized QueryLogicTransformer getTransformer(Query settings) { - ResultsPage emptyList = new ResultsPage(); - Class responseClass = null; - List delegates = new ArrayList<>(); - for (QueryLogic logic : getQueryLogics().values()) { - QueryLogicTransformer t = logic.getTransformer(settings); - delegates.add(t); - BaseResponse refResponse = t.createResponse(emptyList); - if (null == responseClass) { - responseClass = refResponse.getClass(); - } else { - if (!responseClass.equals(refResponse.getClass())) { - throw new RuntimeException("All query logics must use transformers that return the same object type"); + if (this.transformer == null) { + ResultsPage emptyList = new ResultsPage(); + Class responseClass = null; + List delegates = new ArrayList<>(); + for (QueryLogic logic : getQueryLogics().values()) { + QueryLogicTransformer t = logic.getTransformer(settings); + delegates.add(t); + BaseResponse refResponse = t.createResponse(emptyList); + if (null == responseClass) { + responseClass = refResponse.getClass(); + } else { + if (!responseClass.equals(refResponse.getClass())) { + throw new RuntimeException("All query logics must use transformers that return the same object type: " + responseClass + " vs " + + refResponse.getClass()); + } } } + this.transformer = new CompositeQueryLogicTransformer(delegates); } - this.transformer = new CompositeQueryLogicTransformer(delegates); return this.transformer; } @@ -441,9 +458,13 @@ public Iterator iterator() { @Override public TransformIterator getTransformIterator(Query settings) { - // The objects put into the pageQueue have already been transformed. - // We will iterate over the pagequeue with the No-Op transformer - return new TransformIterator(results.iterator(), NOPTransformer.nopTransformer()); + if (isCheckpointable()) { + return Iterables.getOnlyElement(queryLogics.values()).getTransformIterator(settings); + } else { + // The objects put into the pageQueue have already been transformed. + // CompositeQueryLogicTransformer will iterate over the pageQueue with no change to the objects + return new TransformIterator(results.iterator(), getTransformer(settings)); + } } @Override @@ -542,10 +563,10 @@ public UserOperations getUserOperations() { } @Override - public boolean canRunQuery(Principal principal) { + public boolean canRunQuery(Collection userRoles) { // user can run this composite query if they can run at least one of the configured query logics for (Map.Entry> entry : getUninitializedLogics().entrySet()) { - if (!entry.getValue().canRunQuery(principal)) { + if (!entry.getValue().canRunQuery(userRoles)) { queryLogics.remove(entry.getKey()); } } @@ -583,21 +604,83 @@ public Set getExampleQueries() { return params.isEmpty() ? null : params; } - /** - * We can run the query if we can and at least of one of the children can. 
- * - * @return canRunQuery - */ @Override - public boolean canRunQuery() { - if (super.canRunQuery()) { - for (QueryLogic logic : getQueryLogics().values()) { - if (logic.canRunQuery()) { - return true; - } + public boolean isCheckpointable() { + boolean checkpointable = true; + for (QueryLogicHolder logicHolder : logicState.values()) { + QueryLogic logic = logicHolder.getLogic(); + if (!(logic instanceof CheckpointableQueryLogic && ((CheckpointableQueryLogic) logic).isCheckpointable())) { + checkpointable = false; + break; + } + } + return checkpointable; + } + + public void setCheckpointable(boolean checkpointable) { + for (QueryLogicHolder queryLogicHolder : logicState.values()) { + QueryLogic queryLogic = queryLogicHolder.getLogic(); + if (queryLogic instanceof CheckpointableQueryLogic) { + ((CheckpointableQueryLogic) queryLogic).setCheckpointable(checkpointable); + } else { + throw new UnsupportedOperationException("Cannot set checkpointable for a query logic that is not checkpointable."); } } - return false; + } + + @Override + public List checkpoint(QueryKey queryKey) { + if (!isCheckpointable()) { + throw new UnsupportedOperationException("Cannot checkpoint a query that is not checkpointable. Try calling setCheckpointable(true) first."); + } + + List checkpoints = new ArrayList<>(); + for (Map.Entry entry : logicState.entrySet()) { + for (QueryCheckpoint checkpoint : ((CheckpointableQueryLogic) entry.getValue().getLogic()).checkpoint(queryKey)) { + checkpoints.add(new CompositeQueryCheckpoint(entry.getKey(), checkpoint)); + } + } + return checkpoints; + } + + @Override + public QueryCheckpoint updateCheckpoint(QueryCheckpoint checkpoint) { + if (!isCheckpointable() || !(checkpoint instanceof CompositeQueryCheckpoint)) { + throw new UnsupportedOperationException("Cannot update a non-composite query checkpoint with the composite query logic."); + } + + CompositeQueryCheckpoint compositeCheckpoint = (CompositeQueryCheckpoint) checkpoint; + + CheckpointableQueryLogic logic = (CheckpointableQueryLogic) queryLogics.get(compositeCheckpoint.getDelegateQueryLogic()); + if (logic == null) { + throw new UnsupportedOperationException( + "Cannot update query checkpoint because delegate query logic [" + compositeCheckpoint.getDelegateQueryLogic() + "] does not exist"); + } + + return logic.updateCheckpoint(checkpoint); + } + + @Override + public void setupQuery(AccumuloClient client, GenericQueryConfiguration config, QueryCheckpoint checkpoint) throws Exception { + if (!isCheckpointable() || !(checkpoint instanceof CompositeQueryCheckpoint) || !(config instanceof CompositeQueryConfiguration)) { + throw new UnsupportedOperationException("Cannot setup a non-composite query checkpoint with the composite query logic."); + } + + CompositeQueryConfiguration compositeConfig = (CompositeQueryConfiguration) config; + + CompositeQueryCheckpoint compositeCheckpoint = (CompositeQueryCheckpoint) checkpoint; + + CheckpointableQueryLogic logic = (CheckpointableQueryLogic) queryLogics.get(compositeCheckpoint.getDelegateQueryLogic()); + if (logic == null) { + throw new UnsupportedOperationException( + "Cannot update query checkpoint because delegate query logic [" + compositeCheckpoint.getDelegateQueryLogic() + "] does not exist"); + } + + // we are setting up a checkpoint, with a single query data, against a single query logic, so just keep the one we need + queryLogics.clear(); + queryLogics.put(compositeCheckpoint.getDelegateQueryLogic(), (BaseQueryLogic) logic); + + logic.setupQuery(client, 
compositeConfig.getConfig(compositeCheckpoint.getDelegateQueryLogic()), checkpoint); } /** @@ -617,16 +700,28 @@ public SelectorExtractor getSelectorExtractor() { } /** - * Setting the principal is called after the logic is created. Pass this on to the children. + * Setting the current user is called after the logic is created. Pass this on to the children. + * + * @param user + */ + @Override + public void setCurrentUser(ProxiedUserDetails user) { + super.setCurrentUser(user); + for (QueryLogic logic : getQueryLogics().values()) { + logic.setCurrentUser(user); + } + } + + /** + * Setting the server user is called after the logic is created. Pass this on to the children. * - * @param principal - * the principal + * @param user */ @Override - public void setPrincipal(Principal principal) { - super.setPrincipal(principal); + public void setServerUser(ProxiedUserDetails user) { + super.setServerUser(user); for (QueryLogic logic : getQueryLogics().values()) { - logic.setPrincipal(principal); + logic.setServerUser(user); } } diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryLogicResults.java b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryLogicResults.java similarity index 95% rename from web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryLogicResults.java rename to core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryLogicResults.java index 8858b85e2e0..8ad475aa052 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryLogicResults.java +++ b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryLogicResults.java @@ -1,4 +1,4 @@ -package datawave.webservice.query.logic.composite; +package datawave.core.query.logic.composite; import java.util.ArrayList; import java.util.Iterator; @@ -7,7 +7,7 @@ import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.CountDownLatch; -import org.apache.commons.collections.keyvalue.UnmodifiableMapEntry; +import org.apache.commons.collections4.keyvalue.UnmodifiableMapEntry; public class CompositeQueryLogicResults implements Iterable, Thread.UncaughtExceptionHandler { diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryLogicResultsIterator.java b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryLogicResultsIterator.java similarity index 95% rename from web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryLogicResultsIterator.java rename to core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryLogicResultsIterator.java index 63c74579664..99728bcf46c 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryLogicResultsIterator.java +++ b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryLogicResultsIterator.java @@ -1,4 +1,4 @@ -package datawave.webservice.query.logic.composite; +package datawave.core.query.logic.composite; import java.util.Iterator; import java.util.concurrent.ArrayBlockingQueue; @@ -8,8 +8,8 @@ import com.google.common.base.Throwables; -import datawave.webservice.query.configuration.GenericQueryConfiguration; -import datawave.webservice.query.exception.EmptyObjectException; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.core.query.exception.EmptyObjectException; public class
diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryLogicResults.java b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryLogicResults.java similarity index 95% rename from web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryLogicResults.java rename to core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryLogicResults.java index 8858b85e2e0..8ad475aa052 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryLogicResults.java +++ b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryLogicResults.java @@ -1,4 +1,4 @@ -package datawave.webservice.query.logic.composite; +package datawave.core.query.logic.composite; import java.util.ArrayList; import java.util.Iterator; @@ -7,7 +7,7 @@ import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.CountDownLatch; -import org.apache.commons.collections.keyvalue.UnmodifiableMapEntry; +import org.apache.commons.collections4.keyvalue.UnmodifiableMapEntry; public class CompositeQueryLogicResults implements Iterable, Thread.UncaughtExceptionHandler { diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryLogicResultsIterator.java b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryLogicResultsIterator.java similarity index 95% rename from web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryLogicResultsIterator.java rename to core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryLogicResultsIterator.java index 63c74579664..99728bcf46c 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryLogicResultsIterator.java +++ b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryLogicResultsIterator.java @@ -1,4 +1,4 @@ -package datawave.webservice.query.logic.composite; +package datawave.core.query.logic.composite; import java.util.Iterator; import java.util.concurrent.ArrayBlockingQueue; @@ -8,8 +8,8 @@ import com.google.common.base.Throwables; -import datawave.webservice.query.configuration.GenericQueryConfiguration; -import datawave.webservice.query.exception.EmptyObjectException; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.core.query.exception.EmptyObjectException; public class CompositeQueryLogicResultsIterator implements Iterator, Thread.UncaughtExceptionHandler { diff --git a/core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryLogicTransformer.java b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryLogicTransformer.java new file mode 100644 index 00000000000..ecb1583f7da --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeQueryLogicTransformer.java @@ -0,0 +1,186 @@ +package datawave.core.query.logic.composite; + +import java.util.List; + +import org.apache.log4j.Logger; + +import com.google.common.base.Throwables; + +import datawave.core.query.cachedresults.CacheableLogic; + import datawave.core.query.logic.AbstractQueryLogicTransformer; +import datawave.core.query.logic.QueryLogicTransformer; +import datawave.core.query.logic.WritesQueryMetrics; +import datawave.microservice.querymetric.BaseQueryMetric; +import datawave.webservice.query.cachedresults.CacheableQueryRow; +import datawave.webservice.query.exception.QueryException; +import datawave.webservice.result.BaseQueryResponse; + +public class CompositeQueryLogicTransformer extends AbstractQueryLogicTransformer implements CacheableLogic, WritesQueryMetrics { + + protected static final Logger log = Logger.getLogger(CompositeQueryLogicTransformer.class); + + private List> delegates = null; + + public CompositeQueryLogicTransformer(List> delegates) { + this.delegates = delegates; + } + + @Override + public O transform(I input) { + // The objects put into the pageQueue have already been transformed, so no transformation required here. + return (O) input; + } + + @Override + public CacheableQueryRow writeToCache(Object o) throws QueryException { + CacheableQueryRow result = null; + for (QueryLogicTransformer t : delegates) { + if (t instanceof CacheableLogic) { + CacheableLogic c = (CacheableLogic) t; + try { + result = c.writeToCache(o); + } catch (Exception e) { + log.warn("Error calling writeToCache on delegate, continuing...", e); + } + } + } + return result; + } + + @Override + public Object readFromCache(CacheableQueryRow cacheableQueryRow) { + Object result = null; + for (QueryLogicTransformer t : delegates) { + if (t instanceof CacheableLogic) { + CacheableLogic c = (CacheableLogic) t; + try { + result = c.readFromCache(cacheableQueryRow); + } catch (Exception e) { + log.warn("Error calling readFromCache on delegate, continuing...", e); + } + } + } + return result; + } + + @Override + public BaseQueryResponse createResponse(List resultList) { + Exception lastFailure = null; + for (QueryLogicTransformer t : delegates) { + if (t instanceof AbstractQueryLogicTransformer) { + AbstractQueryLogicTransformer a = (AbstractQueryLogicTransformer) t; + try { + log.trace("createResponse List"); + return a.createResponse(resultList); + } catch (Exception e) { + log.warn("Error calling createResponse on delegate, trying the next one", e); + lastFailure = e; + } + } + } + if (lastFailure != null) { + Throwables.propagate(lastFailure); + } + return null; + } + + @Override + public boolean hasMetrics() { + for (QueryLogicTransformer d : delegates) { + if (d instanceof WritesQueryMetrics) { + if (((WritesQueryMetrics) d).hasMetrics()) { + return true; + } + } + } + return false; + } + + @Override + public void writeQueryMetrics(BaseQueryMetric metric) { + // if any timing details have been returned, add metrics + if (hasMetrics()) { + metric.setSourceCount(getSourceCount()); + metric.setNextCount(getNextCount()); +
metric.setSeekCount(getSeekCount()); + metric.setYieldCount(getYieldCount()); + metric.setDocRanges(getDocRanges()); + metric.setFiRanges(getFiRanges()); + } + } + + @Override + public void resetMetrics() { + for (QueryLogicTransformer d : delegates) { + if (d instanceof WritesQueryMetrics) { + ((WritesQueryMetrics) d).resetMetrics(); + } + } + } + + @Override + public long getFiRanges() { + long total = 0; + for (QueryLogicTransformer d : delegates) { + if (d instanceof WritesQueryMetrics) { + total += ((WritesQueryMetrics) d).getFiRanges(); + } + } + return total; + } + + @Override + public long getDocRanges() { + long total = 0; + for (QueryLogicTransformer d : delegates) { + if (d instanceof WritesQueryMetrics) { + total += ((WritesQueryMetrics) d).getDocRanges(); + } + } + return total; + } + + @Override + public long getSourceCount() { + long total = 0; + for (QueryLogicTransformer d : delegates) { + if (d instanceof WritesQueryMetrics) { + total += ((WritesQueryMetrics) d).getSourceCount(); + } + } + return total; + } + + @Override + public long getSeekCount() { + long total = 0; + for (QueryLogicTransformer d : delegates) { + if (d instanceof WritesQueryMetrics) { + total += ((WritesQueryMetrics) d).getSeekCount(); + } + } + return total; + } + + @Override + public long getNextCount() { + long total = 0; + for (QueryLogicTransformer d : delegates) { + if (d instanceof WritesQueryMetrics) { + total += ((WritesQueryMetrics) d).getNextCount(); + } + } + return total; + } + + @Override + public long getYieldCount() { + long total = 0; + for (QueryLogicTransformer d : delegates) { + if (d instanceof WritesQueryMetrics) { + total += ((WritesQueryMetrics) d).getYieldCount(); + } + } + return total; + } +} diff --git a/core/query/src/main/java/datawave/core/query/logic/composite/CompositeRaisedQueryException.java b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeRaisedQueryException.java new file mode 100644 index 00000000000..52bdac0455c --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeRaisedQueryException.java @@ -0,0 +1,16 @@ +package datawave.core.query.logic.composite; + +import datawave.webservice.query.exception.QueryException; + +/** + * This class exists to be used when a {@link CompositeLogicException} has a cause that is not a {@link QueryException}, but contains a {@link QueryException} + * in its stack trace. In order for the error code to be properly passed to query metrics, the error code must be present as part of the + * {@link CompositeLogicException}'s cause. This exception is intended to be a wrapper for the original cause, with the error code of the identified query + * exception. 
+ */ +public class CompositeRaisedQueryException extends QueryException { + + public CompositeRaisedQueryException(Throwable cause, String errorCode) { + super(cause, errorCode); + } +} diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeUserOperations.java b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeUserOperations.java similarity index 89% rename from web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeUserOperations.java rename to core/query/src/main/java/datawave/core/query/logic/composite/CompositeUserOperations.java index 21ec0077aa0..620da4a1de9 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeUserOperations.java +++ b/core/query/src/main/java/datawave/core/query/logic/composite/CompositeUserOperations.java @@ -1,4 +1,4 @@ -package datawave.webservice.query.logic.composite; +package datawave.core.query.logic.composite; import java.util.ArrayList; import java.util.Collections; @@ -10,12 +10,13 @@ import com.google.common.collect.Sets; +import datawave.microservice.authorization.util.AuthorizationsUtil; import datawave.security.authorization.AuthorizationException; import datawave.security.authorization.DatawavePrincipal; import datawave.security.authorization.DatawaveUser; +import datawave.security.authorization.ProxiedUserDetails; import datawave.security.authorization.SubjectIssuerDNPair; import datawave.security.authorization.UserOperations; -import datawave.security.util.WSAuthorizationsUtil; import datawave.user.AuthorizationsListBase; import datawave.webservice.query.exception.QueryException; import datawave.webservice.query.exception.QueryExceptionType; @@ -42,7 +43,7 @@ public CompositeUserOperations(List remoteOperations, boolean in } @Override - public AuthorizationsListBase listEffectiveAuthorizations(Object callerObject) throws AuthorizationException { + public AuthorizationsListBase listEffectiveAuthorizations(ProxiedUserDetails callerObject) throws AuthorizationException { AuthorizationsListBase auths = responseObjectFactory.getAuthorizationsList(); final DatawavePrincipal principal = getDatawavePrincipal(callerObject); Map> authMap = new HashMap<>(); @@ -87,7 +88,7 @@ public static AuthorizationsListBase.SubjectIssuerDNPair dn(SubjectIssuerDNPair } @Override - public GenericResponse flushCachedCredentials(Object callerObject) throws AuthorizationException { + public GenericResponse flushCachedCredentials(ProxiedUserDetails callerObject) throws AuthorizationException { GenericResponse response = new GenericResponse<>(); response.setResult(""); String separator = ""; @@ -114,14 +115,14 @@ public GenericResponse flushCachedCredentials(Object callerObject) throw } @Override - public DatawavePrincipal getRemoteUser(DatawavePrincipal principal) throws AuthorizationException { - List principals = new ArrayList<>(); + public ProxiedUserDetails getRemoteUser(ProxiedUserDetails currentUser) throws AuthorizationException { + List userDetails = new ArrayList<>(); if (includeLocal) { - principals.add(principal); + userDetails.add(currentUser); } for (UserOperations ops : userOperations) { try { - principals.add(ops.getRemoteUser(principal)); + userDetails.add(ops.getRemoteUser(currentUser)); } catch (Exception e) { // ignore the exception if shortCircuitExecution is specified as we may never even call that remote logic if (!shortCircuitExecution) { @@ -130,7 +131,7 @@ public DatawavePrincipal getRemoteUser(DatawavePrincipal 
principal) throws Autho } } - return WSAuthorizationsUtil.mergePrincipals(principals.toArray(new DatawavePrincipal[0])); + return AuthorizationsUtil.mergeProxiedUserDetails(userDetails.toArray(new ProxiedUserDetails[0])); } public static Exception getException(QueryExceptionType qet) { diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/filtered/FilteredQueryLogic.java b/core/query/src/main/java/datawave/core/query/logic/filtered/FilteredQueryLogic.java similarity index 90% rename from web-services/query/src/main/java/datawave/webservice/query/logic/filtered/FilteredQueryLogic.java rename to core/query/src/main/java/datawave/core/query/logic/filtered/FilteredQueryLogic.java index bd5f911bda3..5b81cbfef86 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/filtered/FilteredQueryLogic.java +++ b/core/query/src/main/java/datawave/core/query/logic/filtered/FilteredQueryLogic.java @@ -1,4 +1,4 @@ -package datawave.webservice.query.logic.filtered; +package datawave.core.query.logic.filtered; import java.util.Collections; import java.util.Iterator; @@ -9,12 +9,12 @@ import org.apache.commons.collections4.iterators.TransformIterator; import org.apache.log4j.Logger; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.core.query.iterator.DatawaveTransformIterator; +import datawave.core.query.logic.DelegatingQueryLogic; +import datawave.core.query.logic.QueryLogic; +import datawave.microservice.query.Query; import datawave.security.authorization.UserOperations; -import datawave.webservice.query.Query; -import datawave.webservice.query.configuration.GenericQueryConfiguration; -import datawave.webservice.query.iterator.DatawaveTransformIterator; -import datawave.webservice.query.logic.DelegatingQueryLogic; -import datawave.webservice.query.logic.QueryLogic; /** * A filtered query logic will only actually execute the delegate query logic if the filter passes. Otherwise this will do nothing and return no results. 
@@ -72,7 +72,7 @@ public boolean isFiltered() { log.debug("Passing through filter " + filter + " for query " + super.getLogicName()); } } - return filtered; + return filtered || (getDelegate() instanceof FilteredQueryLogic && ((FilteredQueryLogic) getDelegate()).isFiltered()); } @Override diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/filtered/QueryLogicFilterByAuth.java b/core/query/src/main/java/datawave/core/query/logic/filtered/QueryLogicFilterByAuth.java similarity index 91% rename from web-services/query/src/main/java/datawave/webservice/query/logic/filtered/QueryLogicFilterByAuth.java rename to core/query/src/main/java/datawave/core/query/logic/filtered/QueryLogicFilterByAuth.java index 25af6c71e19..955f93b60cf 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/filtered/QueryLogicFilterByAuth.java +++ b/core/query/src/main/java/datawave/core/query/logic/filtered/QueryLogicFilterByAuth.java @@ -1,12 +1,12 @@ -package datawave.webservice.query.logic.filtered; +package datawave.core.query.logic.filtered; import java.util.Set; import java.util.stream.Collectors; import org.apache.accumulo.core.security.Authorizations; -import datawave.webservice.query.Query; -import datawave.webservice.query.predicate.ProxiedAuthorizationsPredicate; +import datawave.core.query.predicate.ProxiedAuthorizationsPredicate; +import datawave.microservice.query.Query; /** * This is a filter for the FilteredQueryLogic that will run the delegate query logic if the auths requested match a specified visibility (as defined by diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/filtered/QueryLogicFilterByParameter.java b/core/query/src/main/java/datawave/core/query/logic/filtered/QueryLogicFilterByParameter.java similarity index 89% rename from web-services/query/src/main/java/datawave/webservice/query/logic/filtered/QueryLogicFilterByParameter.java rename to core/query/src/main/java/datawave/core/query/logic/filtered/QueryLogicFilterByParameter.java index cec60195708..85b0801c0e6 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/filtered/QueryLogicFilterByParameter.java +++ b/core/query/src/main/java/datawave/core/query/logic/filtered/QueryLogicFilterByParameter.java @@ -1,12 +1,11 @@ -package datawave.webservice.query.logic.filtered; +package datawave.core.query.logic.filtered; import java.util.Set; import org.apache.accumulo.core.security.Authorizations; -import datawave.webservice.query.Query; -import datawave.webservice.query.QueryImpl; -import datawave.webservice.query.predicate.QueryParameterPredicate; +import datawave.core.query.predicate.QueryParameterPredicate; +import datawave.microservice.query.Query; /** * This is a filter for the FilteredQueryLogic that will run the delegate query logic if a specified query parameter matches a specified value. 
If no value is diff --git a/core/query/src/main/java/datawave/core/query/logic/lookup/LookupQueryLogic.java b/core/query/src/main/java/datawave/core/query/logic/lookup/LookupQueryLogic.java new file mode 100644 index 00000000000..4216e668551 --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/logic/lookup/LookupQueryLogic.java @@ -0,0 +1,394 @@ +package datawave.core.query.logic.lookup; + +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.Spliterator; +import java.util.function.Consumer; + +import org.apache.accumulo.core.client.AccumuloClient; +import org.apache.accumulo.core.security.Authorizations; +import org.apache.commons.collections4.iterators.TransformIterator; +import org.springframework.beans.factory.annotation.Required; +import org.springframework.util.MultiValueMap; + +import datawave.audit.SelectorExtractor; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.core.query.logic.BaseQueryLogic; +import datawave.core.query.logic.CheckpointableQueryLogic; +import datawave.core.query.logic.QueryCheckpoint; +import datawave.core.query.logic.QueryKey; +import datawave.core.query.logic.QueryLogicTransformer; +import datawave.microservice.query.Query; +import datawave.security.authorization.ProxiedUserDetails; +import datawave.webservice.common.audit.Auditor; +import datawave.webservice.query.exception.QueryException; + +public abstract class LookupQueryLogic extends BaseQueryLogic implements CheckpointableQueryLogic { + public static final String LOOKUP_KEY_VALUE_DELIMITER = ":"; + + // The underlying query logic to use for the lookup + private final BaseQueryLogic delegateQueryLogic; + + public LookupQueryLogic(BaseQueryLogic delegateQueryLogic) { + this.delegateQueryLogic = delegateQueryLogic; + } + + @SuppressWarnings("unchecked") + public LookupQueryLogic(LookupQueryLogic other) throws CloneNotSupportedException { + this((BaseQueryLogic) other.delegateQueryLogic.clone()); + } + + public abstract boolean isEventLookupRequired(MultiValueMap lookupTerms); + + public abstract Set getContentLookupTerms(MultiValueMap lookupTerms); + + @Override + public GenericQueryConfiguration initialize(AccumuloClient client, Query settings, Set runtimeQueryAuthorizations) throws Exception { + return delegateQueryLogic.initialize(client, settings, runtimeQueryAuthorizations); + } + + @Override + public void setupQuery(GenericQueryConfiguration configuration) throws Exception { + delegateQueryLogic.setupQuery(configuration); + } + + @Override + public GenericQueryConfiguration getConfig() { + if (delegateQueryLogic != null) { + return delegateQueryLogic.getConfig(); + } else { + return super.getConfig(); + } + } + + @Override + public String getPlan(AccumuloClient client, Query settings, Set runtimeQueryAuthorizations, boolean expandFields, boolean expandValues) + throws Exception { + return delegateQueryLogic.getPlan(client, settings, runtimeQueryAuthorizations, expandFields, expandValues); + } + + @Override + public Set getRequiredRoles() { + return delegateQueryLogic.getRequiredRoles(); + } + + @Override + public void setRequiredRoles(Set requiredRoles) { + delegateQueryLogic.setRequiredRoles(requiredRoles); + } + + @Override + public String getTableName() { + return delegateQueryLogic.getTableName(); + } + + @Override + public long getMaxResults() { + return delegateQueryLogic.getMaxResults(); 
+ } + + @Override + public int getMaxConcurrentTasks() { + return delegateQueryLogic.getMaxConcurrentTasks(); + } + + @Override + @Deprecated + public long getMaxRowsToScan() { + return delegateQueryLogic.getMaxRowsToScan(); + } + + @Override + public long getMaxWork() { + return delegateQueryLogic.getMaxWork(); + } + + @Override + public void setTableName(String tableName) { + delegateQueryLogic.setTableName(tableName); + } + + @Override + public void setMaxResults(long maxResults) { + delegateQueryLogic.setMaxResults(maxResults); + } + + @Override + public void setMaxConcurrentTasks(int maxConcurrentTasks) { + delegateQueryLogic.setMaxConcurrentTasks(maxConcurrentTasks); + } + + @Override + @Deprecated + public void setMaxRowsToScan(long maxRowsToScan) { + delegateQueryLogic.setMaxRowsToScan(maxRowsToScan); + } + + @Override + public void setMaxWork(long maxWork) { + delegateQueryLogic.setMaxWork(maxWork); + } + + @Override + public int getMaxPageSize() { + return delegateQueryLogic.getMaxPageSize(); + } + + @Override + public void setMaxPageSize(int maxPageSize) { + delegateQueryLogic.setMaxPageSize(maxPageSize); + } + + @Override + public long getPageByteTrigger() { + return delegateQueryLogic.getPageByteTrigger(); + } + + @Override + public void setPageByteTrigger(long pageByteTrigger) { + delegateQueryLogic.setPageByteTrigger(pageByteTrigger); + } + + @Override + public int getBaseIteratorPriority() { + return delegateQueryLogic.getBaseIteratorPriority(); + } + + @Override + public void setBaseIteratorPriority(int baseIteratorPriority) { + delegateQueryLogic.setBaseIteratorPriority(baseIteratorPriority); + } + + @Override + public Iterator iterator() { + return delegateQueryLogic.iterator(); + } + + @Override + public TransformIterator getTransformIterator(Query settings) { + return delegateQueryLogic.getTransformIterator(settings); + } + + @Override + public boolean getBypassAccumulo() { + return delegateQueryLogic.getBypassAccumulo(); + } + + @Override + public void setBypassAccumulo(boolean bypassAccumulo) { + delegateQueryLogic.setBypassAccumulo(bypassAccumulo); + } + + @Override + public void close() { + delegateQueryLogic.close(); + } + + @Override + public Auditor.AuditType getAuditType(Query query) { + return delegateQueryLogic.getAuditType(query); + } + + @Override + public Auditor.AuditType getAuditType() { + return delegateQueryLogic.getAuditType(); + } + + @Override + @Required + public void setAuditType(Auditor.AuditType auditType) { + delegateQueryLogic.setAuditType(auditType); + } + + @Override + public boolean getCollectQueryMetrics() { + return delegateQueryLogic.getCollectQueryMetrics(); + } + + @Override + public void setCollectQueryMetrics(boolean collectQueryMetrics) { + delegateQueryLogic.setCollectQueryMetrics(collectQueryMetrics); + } + + @Override + public String getConnPoolName() { + return delegateQueryLogic.getConnPoolName(); + } + + @Override + public void setConnPoolName(String connPoolName) { + delegateQueryLogic.setConnPoolName(connPoolName); + } + + @Override + public boolean canRunQuery(Collection userRoles) { + return delegateQueryLogic.canRunQuery(userRoles); + } + + @Override + public List getSelectors(Query settings) throws IllegalArgumentException { + return delegateQueryLogic.getSelectors(settings); + } + + @Override + public void setSelectorExtractor(SelectorExtractor selectorExtractor) { + delegateQueryLogic.setSelectorExtractor(selectorExtractor); + } + + @Override + public SelectorExtractor getSelectorExtractor() { + return 
delegateQueryLogic.getSelectorExtractor(); + } + + @Override + public Set getAuthorizedDNs() { + return delegateQueryLogic.getAuthorizedDNs(); + } + + @Override + public void setAuthorizedDNs(Set authorizedDNs) { + delegateQueryLogic.setAuthorizedDNs(authorizedDNs); + } + + @Override + public void setDnResultLimits(Map dnResultLimits) { + delegateQueryLogic.setDnResultLimits(dnResultLimits); + } + + @Override + public Map getDnResultLimits() { + return delegateQueryLogic.getDnResultLimits(); + } + + @Override + public AccumuloConnectionFactory.Priority getConnectionPriority() { + return delegateQueryLogic.getConnectionPriority(); + } + + @Override + public QueryLogicTransformer getTransformer(Query settings) { + return delegateQueryLogic.getTransformer(settings); + } + + @Override + public String getResponseClass(Query query) throws QueryException { + return delegateQueryLogic.getResponseClass(query); + } + + @Override + public Set getOptionalQueryParameters() { + return delegateQueryLogic.getOptionalQueryParameters(); + } + + @Override + public Set getRequiredQueryParameters() { + return delegateQueryLogic.getRequiredQueryParameters(); + } + + @Override + public Set getExampleQueries() { + return delegateQueryLogic.getExampleQueries(); + } + + @Override + public boolean containsDNWithAccess(Collection dns) { + return delegateQueryLogic.containsDNWithAccess(dns); + } + + @Override + public long getResultLimit(Query settings) { + return delegateQueryLogic.getResultLimit(settings); + } + + @Override + public void forEach(Consumer action) { + delegateQueryLogic.forEach(action); + } + + @Override + public Spliterator spliterator() { + return delegateQueryLogic.spliterator(); + } + + @Override + public String getLogicName() { + return delegateQueryLogic.getLogicName(); + } + + @Override + public void setLogicName(String logicName) { + delegateQueryLogic.setLogicName(logicName); + } + + @Override + public void setLogicDescription(String logicDescription) { + delegateQueryLogic.setLogicDescription(logicDescription); + } + + @Override + public String getLogicDescription() { + return delegateQueryLogic.getLogicDescription(); + } + + @Override + public void setCurrentUser(ProxiedUserDetails currentUser) { + super.setCurrentUser(currentUser); + delegateQueryLogic.setCurrentUser(currentUser); + } + + @Override + public void setServerUser(ProxiedUserDetails serverUser) { + super.setServerUser(serverUser); + delegateQueryLogic.setServerUser(serverUser); + } + + public BaseQueryLogic getDelegateQueryLogic() { + return delegateQueryLogic; + } + + @Override + public boolean isCheckpointable() { + if (delegateQueryLogic instanceof CheckpointableQueryLogic) { + return ((CheckpointableQueryLogic) delegateQueryLogic).isCheckpointable(); + } + return false; + } + + @Override + public void setCheckpointable(boolean checkpointable) { + if (delegateQueryLogic instanceof CheckpointableQueryLogic) { + ((CheckpointableQueryLogic) delegateQueryLogic).setCheckpointable(checkpointable); + } + } + + @Override + public List checkpoint(QueryKey queryKey) { + if (!isCheckpointable()) { + throw new UnsupportedOperationException("Cannot create checkpoints because the query logic is not checkpointable."); + } + + return ((CheckpointableQueryLogic) delegateQueryLogic).checkpoint(queryKey); + } + + @Override + public QueryCheckpoint updateCheckpoint(QueryCheckpoint checkpoint) { + if (!isCheckpointable()) { + throw new UnsupportedOperationException("Cannot update the query checkpoint because the query logic is not
checkpointable."); + } + + return ((CheckpointableQueryLogic) delegateQueryLogic).updateCheckpoint(checkpoint); + } + + @Override + public void setupQuery(AccumuloClient client, GenericQueryConfiguration config, QueryCheckpoint checkpoint) throws Exception { + if (!isCheckpointable()) { + throw new UnsupportedOperationException("Cannot setup a query checkpoint because the query logic is not checkpointable."); + } + + ((CheckpointableQueryLogic) delegateQueryLogic).setupQuery(client, config, checkpoint); + } +} diff --git a/core/query/src/main/java/datawave/core/query/logic/lookup/uid/LookupUIDQueryLogic.java b/core/query/src/main/java/datawave/core/query/logic/lookup/uid/LookupUIDQueryLogic.java new file mode 100644 index 00000000000..c785ded98fd --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/logic/lookup/uid/LookupUIDQueryLogic.java @@ -0,0 +1,38 @@ +package datawave.core.query.logic.lookup.uid; + +import java.util.Collection; +import java.util.Set; +import java.util.stream.Collectors; + +import org.springframework.util.MultiValueMap; + +import datawave.core.query.logic.BaseQueryLogic; +import datawave.core.query.logic.lookup.LookupQueryLogic; + +public class LookupUIDQueryLogic extends LookupQueryLogic { + public static final String UID_TERM_SEPARATOR = " "; + private static final String EVENT_FIELD = "event"; + + public LookupUIDQueryLogic(BaseQueryLogic delegateQueryLogic) { + super(delegateQueryLogic); + } + + public LookupUIDQueryLogic(LookupQueryLogic other) throws CloneNotSupportedException { + super(other); + } + + @Override + public boolean isEventLookupRequired(MultiValueMap lookupTerms) { + return !(lookupTerms.keySet().size() == 1 && lookupTerms.containsKey(EVENT_FIELD)); + } + + @Override + public Set getContentLookupTerms(MultiValueMap lookupTerms) { + return lookupTerms.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()); + } + + @Override + public Object clone() throws CloneNotSupportedException { + return new LookupUIDQueryLogic<>(this); + } +} diff --git a/core/query/src/main/java/datawave/core/query/logic/lookup/uuid/LookupUUIDQueryLogic.java b/core/query/src/main/java/datawave/core/query/logic/lookup/uuid/LookupUUIDQueryLogic.java new file mode 100644 index 00000000000..d7d04a132eb --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/logic/lookup/uuid/LookupUUIDQueryLogic.java @@ -0,0 +1,38 @@ +package datawave.core.query.logic.lookup.uuid; + +import java.util.Set; +import java.util.stream.Collectors; + +import org.springframework.util.MultiValueMap; + +import datawave.core.query.logic.BaseQueryLogic; +import datawave.core.query.logic.lookup.LookupQueryLogic; + +public class LookupUUIDQueryLogic extends LookupQueryLogic { + private static final String UUID_TERM_SEPARATOR = " OR "; + + public LookupUUIDQueryLogic(BaseQueryLogic delegateQueryLogic) { + super(delegateQueryLogic); + } + + public LookupUUIDQueryLogic(LookupQueryLogic other) throws CloneNotSupportedException { + super(other); + } + + @Override + public boolean isEventLookupRequired(MultiValueMap lookupTerms) { + // always, regardless of the terms + return true; + } + + @Override + public Set getContentLookupTerms(MultiValueMap lookupTerms) throws UnsupportedOperationException { + throw new UnsupportedOperationException("Cannot convert lookup terms to event lookups for LookupUUIDQueryLogic"); + } + + @Override + public Object clone() throws CloneNotSupportedException { + return new LookupUUIDQueryLogic<>(this); + } + +} diff --git 
a/web-services/query/src/main/java/datawave/webservice/query/map/QueryGeometryHandler.java b/core/query/src/main/java/datawave/core/query/map/QueryGeometryHandler.java similarity index 70% rename from web-services/query/src/main/java/datawave/webservice/query/map/QueryGeometryHandler.java rename to core/query/src/main/java/datawave/core/query/map/QueryGeometryHandler.java index c35c1b015ae..d01179b429f 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/map/QueryGeometryHandler.java +++ b/core/query/src/main/java/datawave/core/query/map/QueryGeometryHandler.java @@ -1,8 +1,9 @@ -package datawave.webservice.query.map; +package datawave.core.query.map; import java.util.List; import datawave.microservice.querymetric.BaseQueryMetric; +import datawave.microservice.querymetric.QueryGeometryResponse; public interface QueryGeometryHandler { diff --git a/web-services/query/src/main/java/datawave/webservice/query/metric/QueryMetricHandler.java b/core/query/src/main/java/datawave/core/query/metric/QueryMetricHandler.java similarity index 96% rename from web-services/query/src/main/java/datawave/webservice/query/metric/QueryMetricHandler.java rename to core/query/src/main/java/datawave/core/query/metric/QueryMetricHandler.java index f245d5382d2..5132c244909 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/metric/QueryMetricHandler.java +++ b/core/query/src/main/java/datawave/core/query/metric/QueryMetricHandler.java @@ -1,4 +1,4 @@ -package datawave.webservice.query.metric; +package datawave.core.query.metric; import java.util.Date; import java.util.Map; diff --git a/web-services/query/src/main/java/datawave/webservice/query/predicate/ProxiedAuthorizationsPredicate.java b/core/query/src/main/java/datawave/core/query/predicate/ProxiedAuthorizationsPredicate.java similarity index 95% rename from web-services/query/src/main/java/datawave/webservice/query/predicate/ProxiedAuthorizationsPredicate.java rename to core/query/src/main/java/datawave/core/query/predicate/ProxiedAuthorizationsPredicate.java index f392a116fb6..6da9184d0f6 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/predicate/ProxiedAuthorizationsPredicate.java +++ b/core/query/src/main/java/datawave/core/query/predicate/ProxiedAuthorizationsPredicate.java @@ -1,4 +1,4 @@ -package datawave.webservice.query.predicate; +package datawave.core.query.predicate; import java.util.List; import java.util.function.Predicate; @@ -6,6 +6,8 @@ import org.apache.accumulo.core.security.Authorizations; import org.apache.accumulo.core.security.ColumnVisibility; +import datawave.security.authorization.predicate.AuthorizationsPredicate; + /** * This is a predicate that will test the auths against a specified visibility (as defined by accumulo's ColumnVisibility). In addition to the visibility, one * can specify that only the first of the authorizations is matched (presumably the user). 
diff --git a/web-services/query/src/main/java/datawave/webservice/query/predicate/QueryParameterPredicate.java b/core/query/src/main/java/datawave/core/query/predicate/QueryParameterPredicate.java similarity index 92% rename from web-services/query/src/main/java/datawave/webservice/query/predicate/QueryParameterPredicate.java rename to core/query/src/main/java/datawave/core/query/predicate/QueryParameterPredicate.java index f5825e1903b..76eef670607 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/predicate/QueryParameterPredicate.java +++ b/core/query/src/main/java/datawave/core/query/predicate/QueryParameterPredicate.java @@ -1,9 +1,9 @@ -package datawave.webservice.query.predicate; +package datawave.core.query.predicate; import java.util.function.Predicate; -import datawave.webservice.query.Query; -import datawave.webservice.query.QueryImpl; +import datawave.microservice.query.Query; +import datawave.microservice.query.QueryImpl; /** * This is a predicate that will test a specified query parameter matches a specified value. If no value is specified then the parameter is treated as a boolean diff --git a/core/query/src/main/java/datawave/core/query/predict/NoOpQueryPredictor.java b/core/query/src/main/java/datawave/core/query/predict/NoOpQueryPredictor.java new file mode 100644 index 00000000000..16cc497f369 --- /dev/null +++ b/core/query/src/main/java/datawave/core/query/predict/NoOpQueryPredictor.java @@ -0,0 +1,13 @@ +package datawave.core.query.predict; + +import java.util.Set; + +import datawave.microservice.querymetric.BaseQueryMetric; + +public class NoOpQueryPredictor implements QueryPredictor { + + @Override + public Set predict(BaseQueryMetric query) throws PredictionException { + return null; + } +} diff --git a/web-services/query/src/main/java/datawave/webservice/query/runner/QueryPredictor.java b/core/query/src/main/java/datawave/core/query/predict/QueryPredictor.java similarity index 94% rename from web-services/query/src/main/java/datawave/webservice/query/runner/QueryPredictor.java rename to core/query/src/main/java/datawave/core/query/predict/QueryPredictor.java index 5c0c59e59a3..6bdc525e300 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/runner/QueryPredictor.java +++ b/core/query/src/main/java/datawave/core/query/predict/QueryPredictor.java @@ -1,4 +1,4 @@ -package datawave.webservice.query.runner; +package datawave.core.query.predict; import java.io.Serializable; import java.util.Set; diff --git a/web-services/common/src/main/java/datawave/webservice/common/remote/RemoteQueryService.java b/core/query/src/main/java/datawave/core/query/remote/RemoteQueryService.java similarity index 59% rename from web-services/common/src/main/java/datawave/webservice/common/remote/RemoteQueryService.java rename to core/query/src/main/java/datawave/core/query/remote/RemoteQueryService.java index 681a07f4f03..f996f9c83c6 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/remote/RemoteQueryService.java +++ b/core/query/src/main/java/datawave/core/query/remote/RemoteQueryService.java @@ -1,9 +1,11 @@ -package datawave.webservice.common.remote; +package datawave.core.query.remote; import java.net.URI; import java.util.List; import java.util.Map; +import datawave.security.authorization.ProxiedUserDetails; +import datawave.webservice.query.exception.QueryException; import datawave.webservice.result.BaseQueryResponse; import datawave.webservice.result.GenericResponse; import datawave.webservice.result.VoidResponse; @@ -24,7 
+26,15 @@ public interface RemoteQueryService { * the caller * @return the generic response */ - public GenericResponse createQuery(String queryLogicName, Map> queryParameters, Object callerObject); + GenericResponse createQuery(String queryLogicName, Map> queryParameters, ProxiedUserDetails callerObject) throws QueryException; + + /** + * Set the class for the next response. The default is to use the event query response but to make this useful for other query services we need to be able + * to override. + * + * @param nextQueryResponseClass + */ + void setNextQueryResponseClass(Class nextQueryResponseClass); /** * Call next on a remote query service @@ -35,7 +45,7 @@ public interface RemoteQueryService { * the caller * @return the base query response */ - public BaseQueryResponse next(String id, Object callerObject); + BaseQueryResponse next(String id, ProxiedUserDetails callerObject) throws QueryException; /** * Call close on a remote query service @@ -46,7 +56,7 @@ public interface RemoteQueryService { * the caller * @return the void response */ - public VoidResponse close(String id, Object callerObject); + VoidResponse close(String id, ProxiedUserDetails callerObject) throws QueryException; /** * Plan a query using a remote query service @@ -59,7 +69,7 @@ public interface RemoteQueryService { * the caller * @return the generic response */ - public GenericResponse planQuery(String queryLogicName, Map> queryParameters, Object callerObject); + GenericResponse planQuery(String queryLogicName, Map> queryParameters, ProxiedUserDetails callerObject) throws QueryException; /** * Get the plan from a remote query service @@ -70,7 +80,7 @@ public interface RemoteQueryService { * the caller * @return a generic response */ - public GenericResponse planQuery(String id, Object callerObject); + GenericResponse planQuery(String id, ProxiedUserDetails callerObject) throws QueryException; /** * Get the URI for the query metrics @@ -79,5 +89,5 @@ public interface RemoteQueryService { * the id * @return the query metrics uri */ - public URI getQueryMetricsURI(String id); + URI getQueryMetricsURI(String id); } diff --git a/web-services/query/src/main/java/datawave/webservice/query/result/event/DefaultResponseObjectFactory.java b/core/query/src/main/java/datawave/core/query/result/event/DefaultResponseObjectFactory.java similarity index 79% rename from web-services/query/src/main/java/datawave/webservice/query/result/event/DefaultResponseObjectFactory.java rename to core/query/src/main/java/datawave/core/query/result/event/DefaultResponseObjectFactory.java index 46b90202793..8ea65cd6eed 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/result/event/DefaultResponseObjectFactory.java +++ b/core/query/src/main/java/datawave/core/query/result/event/DefaultResponseObjectFactory.java @@ -1,5 +1,8 @@ -package datawave.webservice.query.result.event; +package datawave.core.query.result.event; +import datawave.core.query.cachedresults.CacheableQueryRowImpl; +import datawave.microservice.query.Query; +import datawave.microservice.query.QueryImpl; import datawave.user.AuthorizationsListBase; import datawave.user.DefaultAuthorizationsList; import datawave.webservice.dictionary.data.DataDictionaryBase; @@ -10,13 +13,19 @@ import datawave.webservice.dictionary.data.FieldsBase; import datawave.webservice.metadata.DefaultMetadataField; import datawave.webservice.metadata.MetadataFieldBase; -import datawave.webservice.query.Query; -import datawave.webservice.query.QueryImpl; import 
datawave.webservice.query.cachedresults.CacheableQueryRow; -import datawave.webservice.query.cachedresults.CacheableQueryRowImpl; import datawave.webservice.query.result.EdgeQueryResponseBase; import datawave.webservice.query.result.edge.DefaultEdge; import datawave.webservice.query.result.edge.EdgeBase; +import datawave.webservice.query.result.event.DefaultEvent; +import datawave.webservice.query.result.event.DefaultFacets; +import datawave.webservice.query.result.event.DefaultField; +import datawave.webservice.query.result.event.DefaultFieldCardinality; +import datawave.webservice.query.result.event.EventBase; +import datawave.webservice.query.result.event.FacetsBase; +import datawave.webservice.query.result.event.FieldBase; +import datawave.webservice.query.result.event.FieldCardinalityBase; +import datawave.webservice.query.result.event.ResponseObjectFactory; import datawave.webservice.response.objects.DefaultKey; import datawave.webservice.response.objects.KeyBase; import datawave.webservice.result.DefaultEdgeQueryResponse; diff --git a/web-services/query/src/main/java/datawave/webservice/query/util/QueryUtil.java b/core/query/src/main/java/datawave/core/query/util/QueryUtil.java similarity index 96% rename from web-services/query/src/main/java/datawave/webservice/query/util/QueryUtil.java rename to core/query/src/main/java/datawave/core/query/util/QueryUtil.java index b0cfec5fbea..bca140d2097 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/util/QueryUtil.java +++ b/core/query/src/main/java/datawave/core/query/util/QueryUtil.java @@ -1,4 +1,4 @@ -package datawave.webservice.query.util; +package datawave.core.query.util; import java.util.HashSet; import java.util.Set; @@ -11,8 +11,8 @@ import com.google.protobuf.InvalidProtocolBufferException; -import datawave.webservice.query.Query; -import datawave.webservice.query.QueryImpl.Parameter; +import datawave.microservice.query.Query; +import datawave.microservice.query.QueryImpl.Parameter; import io.protostuff.LinkedBuffer; import io.protostuff.ProtobufIOUtil; import io.protostuff.Schema; diff --git a/core/query/src/main/resources/META-INF/beans.xml b/core/query/src/main/resources/META-INF/beans.xml new file mode 100644 index 00000000000..4ca201f8ff2 --- /dev/null +++ b/core/query/src/main/resources/META-INF/beans.xml @@ -0,0 +1,9 @@ + + + + \ No newline at end of file diff --git a/core/query/src/main/resources/META-INF/jboss-ejb3.xml b/core/query/src/main/resources/META-INF/jboss-ejb3.xml new file mode 100644 index 00000000000..8cf49db8c87 --- /dev/null +++ b/core/query/src/main/resources/META-INF/jboss-ejb3.xml @@ -0,0 +1,16 @@ + + + + + + + * + datawave + + + + \ No newline at end of file diff --git a/core/query/src/test/java/datawave/core/query/logic/composite/CompositeLogicExceptionTest.java b/core/query/src/test/java/datawave/core/query/logic/composite/CompositeLogicExceptionTest.java new file mode 100644 index 00000000000..2e5dd88af2f --- /dev/null +++ b/core/query/src/test/java/datawave/core/query/logic/composite/CompositeLogicExceptionTest.java @@ -0,0 +1,100 @@ +package datawave.core.query.logic.composite; + +import static org.junit.Assert.assertEquals; + +import java.util.LinkedHashMap; +import java.util.Map; + +import org.junit.Test; + +import datawave.webservice.query.exception.DatawaveErrorCode; +import datawave.webservice.query.exception.QueryException; + +public class CompositeLogicExceptionTest { + + @Test + public void testSingleNonQueryExceptionCause() { + IllegalArgumentException cause = new 
IllegalArgumentException("illegal argument"); + CompositeLogicException exception = new CompositeLogicException("composite error occurred", "LogicName", cause); + assertEquals("composite error occurred:\nLogicName: illegal argument", exception.getMessage()); + assertEquals(cause, exception.getCause()); + } + + @Test + public void testSingleQueryExceptionCause() { + QueryException cause = new QueryException(DatawaveErrorCode.MODEL_FETCH_ERROR, "connection failed"); + CompositeLogicException exception = new CompositeLogicException("composite error occurred", "LogicName", cause); + + assertEquals("composite error occurred:\nLogicName: Could not get model. connection failed", exception.getMessage()); + assertEquals(cause, exception.getCause()); + assertEquals(DatawaveErrorCode.MODEL_FETCH_ERROR.getErrorCode(), ((QueryException) exception.getCause()).getErrorCode()); + } + + @Test + public void testNestedSingleQueryExceptionCause() { + QueryException nestedCause = new QueryException(DatawaveErrorCode.MODEL_FETCH_ERROR, "connection failed"); + IllegalArgumentException cause = new IllegalArgumentException("illegal argument", nestedCause); + CompositeLogicException exception = new CompositeLogicException("composite error occurred", "LogicName", cause); + assertEquals("composite error occurred:\nLogicName: illegal argument", exception.getMessage()); + assertEquals(CompositeRaisedQueryException.class, exception.getCause().getClass()); + assertEquals(DatawaveErrorCode.MODEL_FETCH_ERROR.getErrorCode(), ((CompositeRaisedQueryException) exception.getCause()).getErrorCode()); + } + + @Test + public void testMultipleNonQueryExceptionCauses() { + IllegalArgumentException expectedCause = new IllegalArgumentException("illegal name"); + Map exceptions = new LinkedHashMap<>(); + exceptions.put("logic1", expectedCause); + exceptions.put("logic2", new NullPointerException("null value")); + exceptions.put("logic3", new IllegalStateException("bad state")); + + CompositeLogicException exception = new CompositeLogicException("failed to complete", exceptions); + assertEquals("failed to complete:\nlogic1: illegal name\nlogic2: null value\nlogic3: bad state", exception.getMessage()); + assertEquals(expectedCause, exception.getCause()); + } + + @Test + public void testMultipleExceptionWithOneTopLevelQueryException() { + QueryException expectedCause = new QueryException(DatawaveErrorCode.MODEL_FETCH_ERROR, "connection failed"); + Map exceptions = new LinkedHashMap<>(); + exceptions.put("logic1", new IllegalArgumentException("illegal name")); + exceptions.put("logic2", new NullPointerException("null value")); + exceptions.put("logic3", expectedCause); + exceptions.put("logic4", new IllegalStateException("bad state")); + + CompositeLogicException exception = new CompositeLogicException("failed to complete", exceptions); + assertEquals("failed to complete:\nlogic1: illegal name\nlogic2: null value\nlogic3: Could not get model. 
connection failed\nlogic4: bad state", + exception.getMessage()); + assertEquals(expectedCause, exception.getCause()); + } + + @Test + public void testMultipleExceptionWithOneNestedQueryException() { + QueryException nestedCause = new QueryException(DatawaveErrorCode.MODEL_FETCH_ERROR, "connection failed"); + IllegalStateException topCause = new IllegalStateException("bad state", nestedCause); + Map exceptions = new LinkedHashMap<>(); + exceptions.put("logic1", new IllegalArgumentException("illegal name")); + exceptions.put("logic2", topCause); + exceptions.put("logic3", new NullPointerException("null value")); + + CompositeLogicException exception = new CompositeLogicException("failed to complete", exceptions); + assertEquals("failed to complete:\nlogic1: illegal name\nlogic2: bad state\nlogic3: null value", exception.getMessage()); + assertEquals(CompositeRaisedQueryException.class, exception.getCause().getClass()); + assertEquals(DatawaveErrorCode.MODEL_FETCH_ERROR.getErrorCode(), ((CompositeRaisedQueryException) exception.getCause()).getErrorCode()); + } + + @Test + public void testMultipleExceptionWithNestedQueryExceptionSeenFirst() { + QueryException nestedCause = new QueryException(DatawaveErrorCode.MODEL_FETCH_ERROR, "connection failed"); + IllegalStateException topCause = new IllegalStateException("bad state", nestedCause); + Map exceptions = new LinkedHashMap<>(); + exceptions.put("logic1", topCause); + exceptions.put("logic2", new IllegalArgumentException("illegal name")); + exceptions.put("logic3", new NullPointerException("null value")); + + CompositeLogicException exception = new CompositeLogicException("failed to complete", exceptions); + assertEquals("failed to complete:\nlogic1: bad state\nlogic2: illegal name\nlogic3: null value", exception.getMessage()); + assertEquals(CompositeRaisedQueryException.class, exception.getCause().getClass()); + assertEquals(DatawaveErrorCode.MODEL_FETCH_ERROR.getErrorCode(), ((CompositeRaisedQueryException) exception.getCause()).getErrorCode()); + } +} diff --git a/core/utils/accumulo-utils b/core/utils/accumulo-utils index 7673278ecf3..638b3eda970 160000 --- a/core/utils/accumulo-utils +++ b/core/utils/accumulo-utils @@ -1 +1 @@ -Subproject commit 7673278ecf39ed140a2c0b90b06de87625533e6e +Subproject commit 638b3eda97016bb66a7d014112b215075aac212e diff --git a/core/utils/common-utils b/core/utils/common-utils index 9e73b23a69d..c96ed213426 160000 --- a/core/utils/common-utils +++ b/core/utils/common-utils @@ -1 +1 @@ -Subproject commit 9e73b23a69d806185d53b9936a1cd5e3fc388a67 +Subproject commit c96ed21342666db82b9e92ccf676d0987cb7ff8e diff --git a/core/utils/metadata-utils b/core/utils/metadata-utils index d6e89c86f0d..9b84bb8b5ac 160000 --- a/core/utils/metadata-utils +++ b/core/utils/metadata-utils @@ -1 +1 @@ -Subproject commit d6e89c86f0dbe2059abbf0fcf1ac1959e1d4f6a7 +Subproject commit 9b84bb8b5ac291b2293619f7e1bd294579990a5d diff --git a/core/utils/pom.xml b/core/utils/pom.xml index ddd2df405ba..9cfbcece2c4 100644 --- a/core/utils/pom.xml +++ b/core/utils/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.core datawave-core-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT gov.nsa.datawave.core datawave-utils-parent diff --git a/core/utils/type-utils b/core/utils/type-utils index 3e9628e86e0..55d92d5d99c 160000 --- a/core/utils/type-utils +++ b/core/utils/type-utils @@ -1 +1 @@ -Subproject commit 3e9628e86e0d02ebaa7374eb4eba57856523a221 +Subproject commit 55d92d5d99c6e232ba1d7ad12c210ded9ec240a9 diff --git a/docker/.gitignore b/docker/.gitignore new file 
mode 100644 index 00000000000..f73687e9947 --- /dev/null +++ b/docker/.gitignore @@ -0,0 +1,29 @@ +hadoop +**/scripts/query_* +**/scripts/errorQuery_* +**/scripts/edge_* +**/scripts/plan_* +**/scripts/prediction_* +**/scripts/lookup_* +**/scripts/batchLookup_* +**/scripts/lookupContent_* +**/scripts/batchLookupContent_* +**/scripts/streamingQuery_* +**/scripts/discovery_* +**/scripts/errorDiscovery_* +**/scripts/count_* +**/scripts/errorCount_* +**/scripts/fieldIndexCount_* +**/scripts/errorFieldIndexCount_* +**/scripts/poundit_* +**/scripts/executor_* +**/scripts/termFrequency_* +**/scripts/hitHighlights_* +**/scripts/edgeEvent_* +**/scripts/metrics_* +**/scripts/shutdown_* +**/scripts/health_* +**/scripts/modification_* +**/scripts/mapReduceQuery_* +**/scripts/oozieQuery_* +**/scripts/cachedResultsQuery_* diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 00000000000..14187956565 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,384 @@ +# DATAWAVE Docker Compose + +It is recommended to read through these instructions in their entirety before attempting to build or deploy Datawave. However, +if you just want to get started and use this document as a reference, here's the short version (although we recommend checking +out the [prereqs](#prereqs) at a minimum): + +## TLDR + +```shell +# from the base datawave project directory check out the microservice submodules +git submodule update --init --recursive + +# build docker images for datawave and all of the microservices +# optionally include '-Dquickstart-maven' to download accumulo/zookeeper/hadoop/maven tarballs from the maven repository +mvn -Pcompose -Dmicroservice-docker -Dquickstart-docker -Ddeploy -Dtar -Ddist -DskipTests clean install + +# bootstrap the services, and bring them up using docker compose +cd docker +./bootstrap.sh +docker compose up -d + +# run some queries to ensure everything is working +cd scripts +./testAll.sh +``` + +## Components + +### Quickstart + +Datawave Quickstart is a self-contained hadoop, zookeeper, and accumulo deployment prepopulated with data. + +### Consul + +Consul v1.15.4 is a prepacked docker image used for discovery between the various services. + +### RabbitMQ + +RabbitMQ v3.12.4 is a prepacked docker image used for messaging between the various services. + +### Configuration + +[Datawave Config Service](https://github.com/NationalSecurityAgency/datawave-config-service/tree/main) is Datawave's customized Spring Cloud config service. + +Sample configuration files can be found in the config folder. + +You will need to build the docker image for this service on your local machine following the instructions in the config service README. + +### Cache + +[Datawave Hazelcast Service](https://github.com/NationalSecurityAgency/datawave-hazelcast-service/tree/main) is Datawave's customized Hazelcast In-Memory Data Grid. + +You will need to build the docker image for this service on your local machine following the instructions in the hazelcast cache service README. + +### Authorization + +[Datawave Authorization Service](https://github.com/NationalSecurityAgency/datawave-authorization-service/tree/main) provides basic authorization for the Datawave microservices. + +You will need to build the docker image for this service on your local machine following the instructions in the authorization service README. 
+ +### Audit + +[Datawave Audit Service](https://github.com/NationalSecurityAgency/datawave-audit-service/tree/main) provides query audit capabilities for Datawave. + +You will need to build the docker image for this service on your local machine following the instructions in the audit service README. + +### Metrics + +[Datawave Query Metric Service](https://github.com/NationalSecurityAgency/datawave-query-metric-service/tree/main) provides metrics caching, storage, and retrieval capabilities for Datawave. + +You will need to build the docker image for this service on your local machine following the instructions in the query metrics service README. + +### Zookeeper + +Zookeeper is a prepacked docker image used for distributed synchronization. + +### Kafka + +Kafka is a prepacked docker image used for messaging between the various services. + +### Query + +Datawave Query Service v1.0-SNAPSHOT is a user-facing interface for Datawave queries. + +This microservice is in development, and can be found in this repo. + +You will need to build the docker image for this service on your local machine following the instructions in the config service README. + +### Executor Pool 1 + +Datawave Executor Service v1.0-SNAPSHOT is the back-end worker for Datawave queries. + +This microservice is in development, and can be found in this repo. + +You will need to build the docker image for this service on your local machine following the instructions in the config service README. + +### Executor Pool 2 + +Enabled via the 'pool2' or 'full' profile. + +Datawave Executor Service v1.0-SNAPSHOT is the back-end worker for Datawave queries. + +This microservice is in development, and can be found in this repo. + +You will need to build the docker image for this service on your local machine following the instructions in the config service README. + +### Query Storage + +Enabled via the 'storage' or 'full' profile. + +Datawave Query Storage Service v1.0-SNAPSHOT is a utility service used to inspect the storage cache. + +This microservice is in development, and can be found in this repo. + +You will need to build the docker image for this service on your local machine following the instructions in the config service README. + +## Optional Components + +### Kafdrop + +Enabled via the 'management' or 'full' profile. + +Kafdrop is a prepacked docker image used for Kafka cluster management. + +### Hazelcast Management Center + +Enabled via the 'management' or 'full' profile. + +Hazelcast Management Center v4.2021.06 is a prepacked docker image used for Hazelcast cluster management. + +### Dictionary + +Enabled via the 'dictionary' or 'full' profile. + +[Datawave Dictionary Service](https://github.com/NationalSecurityAgency/datawave-dictionary-service/tree/main) provides access to the data dictionary and edge dictionary for Datawave. + +You will need to build the docker image for this service on your local machine following the instructions in the dictionary service README. + +### File Provider + +Enabled via the 'file-provider' or 'full' profile. + +This microservice is in development, and can be found in this repo. + +[Datawave File Provider Service](https://github.com/NationalSecurityAgency/datawave-file-provider-service/tree/main) provides file management and access to Datawave and its services. + +You will need to build the docker image for this service on your local machine following the instructions in the file provider service README.
+ + +## Usage + +Please read through these instructions in their entirety before attempting to build or deploy Datawave. + +### Prereqs + +#### /etc/hosts + +In order for the following bootstrap step to work properly, you should ensure that your /etc/hosts file looks similar to the following: + +``` +<host-ip> <host-fqdn> <hostname> +127.0.0.1 localhost +``` + +#### Docker + +These services have been successfully deployed using the following versions of docker and docker compose. + +``` +$> docker --version +Docker version 24.0.6, build ed223bc +$> docker compose version +Docker Compose version v2.21.0 +``` + +#### Datawave Quickstart + +Prior to starting docker compose, you need to use the Datawave Quickstart to deploy working instances of Hadoop, Zookeeper, and Accumulo, along with some sample datasets for query. + +There are two methods for deploying the Datawave Quickstart. + + - **default**: Deploys the Datawave Quickstart as a docker container within docker compose. + + - **hybrid**: Deploys the Datawave Quickstart directly on your host system. + +#### Default Datawave Quickstart Setup + +Build the Datawave Quickstart docker image using the following build command: + +``` +# To build the quickstart docker image, and all of the microservice images, run this +mvn -Pcompose -Dmicroservice-docker -Dquickstart-docker -Ddeploy -Dtar -Ddist -DskipTests clean install -T1C + +# To build just the quickstart docker image, run this +mvn -Pcompose -DskipServices -Dquickstart-docker -Ddeploy -Dtar -Ddist -DskipTests clean install -T1C +``` +Note that the quickstart-docker property is set. This property is a shortcut which activates the `docker` and `quickstart` profiles without activating the `docker` profile for the microservices. + +For this command, the build profile is set to `compose`. This profile contains all of the properties needed to make the quickstart work as part +of the docker compose deployment. The use of any other build profile with docker compose is unsupported. + +This command also prevents the microservices from building via `-DskipServices`. This is an optional setting which will skip the microservice builds entirely, saving you some time if you only want to build/rebuild the Datawave Quickstart. + +If you ever need to rebuild the Datawave quickstart docker image, but don't want to ingest the sample data, you can add `-DskipIngest` to +your build command. This can save you some time, since the docker compose configuration stores ingested data in a persistent volume. + +If desired, you can start and test the wildfly deployment embedded in the Datawave Quickstart by running the following command: +``` +docker run -m 8g datawave/quickstart-compose datawave-bootstrap.sh --test +``` + +#### Hybrid Datawave Quickstart Setup + +Before running the quickstart setup, you need to edit your ~/.bashrc to include the following export: + +``` +export DW_BIND_HOST=0.0.0.0 +``` + +This will ensure that Hadoop binds to all interfaces, and that Accumulo binds to the hostname/IP address. This is required to connect to the host Accumulo instance from a docker container. + +What follows is a brief description of how to set up and run the Datawave Quickstart. For more detailed information see the [DataWave Quickstart Readme](../contrib/datawave-quickstart/README.md).
+What follows is a brief description of how to set up and run the Datawave Quickstart. For more detailed information, see the [DataWave Quickstart Readme](../contrib/datawave-quickstart/README.md).
+
+```
+# Add an activateDW function to your .bashrc
+# DW_SOURCE refers to your local path to the datawave source code, and may be set as an environment variable if desired
+# (note the -e, which echo needs in order to expand the \n escapes)
+echo -e "activateDW() {\n  source DW_SOURCE/contrib/datawave-quickstart/bin/env.sh\n}" >> ~/.bashrc
+
+# Source .bashrc to pick up the new function
+source ~/.bashrc
+
+# Activate DataWave; sourcing env.sh for the first time kicks off the quickstart build
+activateDW
+
+# Install Datawave and its dependencies
+allInstall
+
+# Start Accumulo and its dependencies
+accumuloStart
+
+# At this point, you are ready to deploy and test the query microservices via docker compose
+
+# If desired, start the WildFly webservice, and run some diagnostic tests
+datawaveWebStart && datawaveWebTest
+
+# Make sure to stop the WildFly webservice before starting the query microservices via docker compose
+datawaveWebStop
+```
+
+#### Datawave Microservices
+
+If you haven't done so already, you can build the Datawave Microservice docker images using the following build command:
+
+```
+mvn -Pcompose -Dmicroservice-docker -Ddist -DskipTests clean install -T1C
+```
+
+Note that the `microservice-docker` property is set. This property is a shortcut which activates the `docker` profile for just the microservices.
+
+This command can be combined with the default Datawave Quickstart build command to build everything at once.
+
+### Bootstrap
+
+The audit, dictionary, query executor, and query metric services all need to connect to Zookeeper, Hadoop, and/or Accumulo. In order to make that work, there are some environment variables which need to be configured.
+
+#### Default Bootstrap
+
+Bootstrap your deployment by running:
+
+```./bootstrap.sh```
+
+This will produce a `.env` file containing the following (host-specific values are left blank here):
+
+```
+# If set to quickstart, enables the quickstart container
+# Note: More than one profile may be set.
+COMPOSE_PROFILES="quickstart"
+
+# These environment variables are used to create extra hosts which
+# allow containers to route to the host quickstart deployment.
+# The extra hosts aren't used when deploying the docker quickstart,
+# but the variables still need to be set for the compose file to be valid.
+DW_HOSTNAME=""
+DW_HOST_FQDN=""
+DW_HOST_IP=""
+
+# These environment variables must be set when running the quickstart
+# from the host machine in hybrid mode.
+DW_ZOOKEEPER_HOST="quickstart"
+DW_HADOOP_HOST="quickstart"
+```
+
+#### Hybrid Bootstrap
+
+Bootstrap your deployment by running:
+
+```./bootstrap.sh hybrid```
+
+This will produce a `.env` file containing the following (host-specific values are left blank here):
+
+```
+# If set to quickstart, enables the quickstart container
+# Note: More than one profile may be set.
+COMPOSE_PROFILES=""
+
+# These environment variables are used to create extra hosts which
+# allow containers to route to the host quickstart deployment.
+# The extra hosts aren't used when deploying the docker quickstart,
+# but the variables still need to be set for the compose file to be valid.
+DW_HOSTNAME=""
+DW_HOST_FQDN=""
+DW_HOST_IP=""
+
+# These environment variables must be set when running the quickstart
+# from the host machine in hybrid mode.
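+# (In hybrid mode, bootstrap.sh fills both of these with this host's short hostname.)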
+DW_ZOOKEEPER_HOST=""
+DW_HADOOP_HOST=""
+```
+
+### Start services
+
+Start the default services (with Kafka, the default, as the backend):
+
+```docker compose up -d```
+
+Start the default services (with RabbitMQ as the backend):
+
+```BACKEND=rabbitmq docker compose up -d```
+
+Start the default services (with Hazelcast as the backend):
+
+```BACKEND=hazelcast docker compose up -d```
+
+Start the default services and the dictionary service:
+
+```docker compose --profile quickstart --profile dictionary up -d```
+
+Start the default services, the Kafka services, and the dictionary service:
+
+```docker compose --profile quickstart --profile dictionary --profile kafka up -d```
+
+Start the default services and the file provider service:
+
+```docker compose --profile quickstart --profile file-provider up -d```
+
+Start all services:
+
+```docker compose --profile quickstart --profile full up -d```
+
+### View logs
+
+For everything:
+
+```docker compose logs -f```
+
+For a specific service:
+
+```docker compose logs -f audit```
+
+### Stop services
+
+Stop the configured services:
+
+```docker compose down```
+
+Stop the configured services, and delete all volumes:
+
+```docker compose down -v```
+
+Stop all services, including ones that are no longer enabled:
+
+```docker compose down --remove-orphans```
+
+### Restart a service and pull an updated image
+
+```
+docker compose stop audit
+docker compose rm -f audit
+docker compose up -d
+```
+
+### Restart a service without pulling an updated image
+
+```docker compose restart audit```
diff --git a/docker/bootstrap.sh b/docker/bootstrap.sh
new file mode 100755
index 00000000000..50d0acebf5a
--- /dev/null
+++ b/docker/bootstrap.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+# Minimal stand-ins for the warn/fatal helpers used below, so the script can
+# run standalone (assumed; replace with your own logging helpers if desired)
+warn() { echo "WARN: $*" >&2; }
+fatal() { echo "FATAL: $*" >&2; exit 1; }
+
+echo "Creating .env file..."
+echo
+
+# Ensure that permissions are set correctly for the config files
+chmod -R 755 config pki rabbitmq-config
+
+DW_HOSTNAME=$(hostname)
+DW_HOSTNAME=${DW_HOSTNAME%%.*}
+DW_HOST_FQDN=$(hostname -f)
+
+# If the hostname matches the fqdn, mark the fqdn as unused
+if [[ "${DW_HOST_FQDN}" == "${DW_HOSTNAME}" ]]; then
+    DW_HOST_FQDN="unused"
+fi
+
+DW_HOST_IP=$(hostname -i)
+
+if [ "$1" == "hybrid" ] ; then
+    COMPOSE_PROFILES=""
+    DW_ZOOKEEPER_HOST=${DW_HOSTNAME}
+    DW_HADOOP_HOST=${DW_HOSTNAME}
+else
+    COMPOSE_PROFILES=quickstart
+    DW_ZOOKEEPER_HOST=quickstart
+    DW_HADOOP_HOST=quickstart
+fi
+
+ENV_CONF="\
+# If set to quickstart, enables the quickstart container
+# Note: More than one profile may be set.
+COMPOSE_PROFILES=\"${COMPOSE_PROFILES}\"
+
+# These environment variables are used to create extra hosts which
+# allow containers to route to the host quickstart deployment.
+# The extra hosts aren't used when deploying the docker quickstart,
+# but the variables still need to be set for the compose file to be valid.
+DW_HOSTNAME=\"${DW_HOSTNAME}\"
+DW_HOST_FQDN=\"${DW_HOST_FQDN}\"
+DW_HOST_IP=\"${DW_HOST_IP}\"
+
+# These environment variables must be set when running the quickstart
+# from the host machine in hybrid mode.
+DW_ZOOKEEPER_HOST=\"${DW_ZOOKEEPER_HOST}\"
+DW_HADOOP_HOST=\"${DW_HADOOP_HOST}\"
+"
+
+# Write .env file using our settings in ENV_CONF
+if [ ! -z "${ENV_CONF}" ] ; then
+    echo "${ENV_CONF}" > ./.env || fatal "Failed to write .env"
+else
+    warn "No .env content defined!
:(" +fi + +cat .env diff --git a/docker/cleanup.sh b/docker/cleanup.sh new file mode 100755 index 00000000000..a909b27201c --- /dev/null +++ b/docker/cleanup.sh @@ -0,0 +1,9 @@ +#!/bin/sh +if [[ "${@/keepdata}" == "$@" ]]; then + docker volume rm docker_quickstart_data +fi +docker image prune -f +docker system prune -f +if [[ "${@/keeplog}" == "$@" ]]; then + sudo find logs -type f -name '*log*' -delete +fi diff --git a/docker/config/accumulo.yml b/docker/config/accumulo.yml new file mode 100755 index 00000000000..9a3b836b924 --- /dev/null +++ b/docker/config/accumulo.yml @@ -0,0 +1,23 @@ +warehouse-cluster: + accumulo: + zookeepers: '${accumulo.zookeepers}' + instanceName: '${accumulo.instanceName}' + username: '${accumulo.username}' + password: '${accumulo.password}' + +accumulo: + lookup: + audit: + defaultAuditType: 'ACTIVE' + stats: + enabled: true + +audit-client: + discovery: + enabled: false + uri: '${AUDIT_SERVER_URL:http://localhost:11111/audit}' + +datawave: + swagger: + title: "Accumulo Service" + description: "REST API provided by the Accumulo Service" \ No newline at end of file diff --git a/docker/config/application-cachedresults.yml b/docker/config/application-cachedresults.yml new file mode 100755 index 00000000000..ce58aa98ad2 --- /dev/null +++ b/docker/config/application-cachedresults.yml @@ -0,0 +1,79 @@ +spring: + datasource: + cachedResults: + url: 'jdbc:mysql://${datawave.mysql.host}:3306/${datawave.mysql.dbname}?zeroDateTimeBehavior=convertToNull' + username: '${datawave.mysql.username}' + password: '${datawave.mysql.password}' + driver-class-name: 'com.mysql.cj.jdbc.Driver' + hikari: + # default: 30000 + connection-timeout: 5000 + # default: 600000 + idle-timeout: 900000 + # default: maximum-pool-size + minimum-idle: ${datawave.mysql.pool.min-size} + # default: 10 + maximum-pool-size: ${datawave.mysql.pool.max-size} + +datawave: + mysql: + host: 'mysql' + dbname: 'cachedresults' + pool: + min-size: '5' + max-size: '20' + username: 'datawave' + password: 'secret' + query: + cachedResults: + enabled: ${CACHED_RESULTS:false} + remoteQuery: + queryServiceUri: "https://query:8443/query/v1/query" + # unlimited + maxBytesToBuffer: -1 + numFields: 900 + statementTemplates: + createTableTemplate: | + CREATE TABLE IF NOT EXISTS template ( + _user_ VARCHAR(200) NOT NULL, + _queryId_ VARCHAR(200) NOT NULL, + _logicName_ VARCHAR(200) NOT NULL, + _datatype_ VARCHAR(35) NOT NULL, + _eventId_ VARCHAR(50) NOT NULL, + _row_ LONGTEXT NOT NULL, + _colf_ LONGTEXT NOT NULL, + _markings_ VARCHAR(400) NOT NULL, + _column_markings_ LONGTEXT NOT NULL, + _column_timestamps_ LONGTEXT NOT NULL, + %FIELD_DEFINITIONS% + ) ENGINE = MyISAM + createTable: "CREATE TABLE %TABLE% LIKE template" + dropTable: "DROP TABLE %TABLE%" + dropView: "DROP VIEW %TABLE%" + insert: | + INSERT INTO %TABLE% ( + _user_, + _queryId_, + _logicName_, + _datatype_, + _eventId_, + _row_, + _colf_, + _markings_, + _column_markings_, + _column_timestamps_, + %PREPARED_FIELDS% + ) VALUES ( + ?, + ?, + ?, + ?, + ?, + ?, + ?, + ?, + ?, + ?, + %PREPARED_VALUES% + ) + createView: "CREATE VIEW %VIEW%(%VIEW_COLS%) AS SELECT %TABLE_COLS% FROM %TABLE%" diff --git a/docker/config/application-compose.yml b/docker/config/application-compose.yml new file mode 100755 index 00000000000..7b3835bbc2e --- /dev/null +++ b/docker/config/application-compose.yml @@ -0,0 +1,42 @@ +# This template is for configuring your site-specific properties for all microservices. 
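+# NOTE: The key/trust stores and 'ChangeIt' passwords below reference the test
+# PKI material bundled with this repo; replace them for any non-demo deployment.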
+system.name: DOCKER-COMPOSE + +server: + ssl: + trust-store: '/etc/pki/testCA.p12' + trust-store-type: PKCS12 + trust-store-password: 'ChangeIt' + key-store: '/etc/pki/testServer.p12' + key-store-type: PKCS12 + key-store-password: 'ChangeIt' + +# Define the client certificates (in lower-case subjectDN form) that are allowed to call a service. Note that you only need to +# specify one value in the list below. +# Since enforce-allowed-callers is false, you can skip configuring this section if you want. +spring: + security: + datawave: + enforce-allowed-callers: false + allowed-callers: + - "cn=test a. user, ou=example developers, o=example corp, c=us" + kafka: + bootstrap-servers: kafka:9092 + consumer: + autoOffsetReset: earliest + enableAutoCommit: false + properties: + allow.auto.create.topics: false + +# This is the accumulo configuration we use in services. These don't map directly to a properties class, but +# they are here to be used as a reference for other properties. +accumulo: + zookeepers: '${ZOOKEEPER_HOST:localhost}:2181' + instanceName: 'my-instance-01' + username: 'root' + password: 'secret' + +# Configuration placeholders which 1) determines what backend will be used for transmitting query results +# and 2) determines the message size limit before claim checks are used with RabbitMQ messaging +messaging: + backend: ${BACKEND:kafka} + maxMessageSizeBytes: 536870912 diff --git a/docker/config/application-consul.yml b/docker/config/application-consul.yml new file mode 100755 index 00000000000..88bb8fc8220 --- /dev/null +++ b/docker/config/application-consul.yml @@ -0,0 +1,29 @@ +spring: + cloud: + # Enable spring cloud discovery + discovery: + enabled: true + # Set defaults for service discovery with Consul.. + # Note that for Consul-first config, each client will still need a bootstrap.yml that enables + # Consul-first discovery and sets up any Consul agent configuration that is non-default. + consul: + enabled: true + host: ${CONSUL_HOST:localhost} + # Give the consul agent a lot of time to come up + retry: + max-attempts: 60 + discovery: + # we can use this + scheme: https + # or alternatively this + # port: ${server.non-secure-port} + + health-check-path: ${server.servlet.context-path}${management.endpoints.web.base-path}/health + health-check-interval: 15s + # Generate a unique instance ID when registering with Consul so that we can scale to more than one copy of a service + instance-id: ${spring.application.name}:${vcap.application.instance_id:${spring.application.instance_id:${random.value}}} + # Use discovery to find the RabbitMQ server + rabbitmq: + discovery: + enabled: true + failFast: true diff --git a/docker/config/application-federation.yml b/docker/config/application-federation.yml new file mode 100755 index 00000000000..d88f8c5ec64 --- /dev/null +++ b/docker/config/application-federation.yml @@ -0,0 +1,16 @@ +# This serves as a set of sensible defaults for authorization and query federation. 
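+# The URIs below assume the docker compose service names (authorization, query,
+# querymetric) and the default HTTPS service port, 8443.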
+ +datawave: + authorization: + federation: + # Each entry in the following map will be registered as a FederatedAuthorizationService bean, named after the key + services: + FederatedAuthorizationService: + federatedAuthorizationUri: "https://authorization:8443/authorization/v2" + query: + federation: + # Each entry in the following map will be registered as a FederatedQueryService (RemoteQueryService) bean, named after the key + services: + FederatedQueryService: + queryServiceUri: 'https://query:8443/query/v1' + queryMetricServiceUri: 'https://querymetric:8443/querymetric/v1/id' diff --git a/docker/config/application-metricssource.yml b/docker/config/application-metricssource.yml new file mode 100755 index 00000000000..b1bdb1196c7 --- /dev/null +++ b/docker/config/application-metricssource.yml @@ -0,0 +1,23 @@ +# This profile should be added to your service if you depend on the +# query metric starter to send metrics to the query metric service. +spring: + cloud: + stream: + bindings: + queryMetricSource-out-0: + destination: queryMetricChannel + producer: + requiredGroups: queryMetricService + errorChannelEnabled: true + # NOTE: When defining your functions, be sure to include busConsumer, or else spring cloud bus will not work + function: + definition: queryMetricSource;busConsumer + +datawave: + query: + metric: + client: + confirmAckTimeoutMillis: 30000 +# To send metrics via REST, uncomment the following +# host: metrics +# transport: HTTPS diff --git a/docker/config/application-mrquery.yml b/docker/config/application-mrquery.yml new file mode 100755 index 00000000000..6b2c940e5fc --- /dev/null +++ b/docker/config/application-mrquery.yml @@ -0,0 +1,54 @@ +datawave: + query: + mapreduce: + fsConfigResources: + - ${HADOOP_CONF_DIR:/etc/hadoop/conf}/core-site.xml + - ${HADOOP_CONF_DIR:/etc/hadoop/conf}/hdfs-site.xml + - ${HADOOP_CONF_DIR:/etc/hadoop/conf}/mapred-site.xml + - ${HADOOP_CONF_DIR:/etc/hadoop/conf}/yarn-site.xml + callbackServletURL: "http://query:8080/query/v1/mapreduce/updateState" + mapReduceBaseDirectory: "/datawave/MapReduceService" + restrictInputFormats: true + validInputFormats: + - "org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat" + - "datawave.mr.bulk.BulkInputFormat" + jobs: + 'BulkResultsJob': + startingClass: datawave.microservice.query.mapreduce.MapReduce + jobJarName: "MapReduceQueryCoreJob.jar" + description: "MapReduce job that runs a query and either puts the results into a table or files in HDFS" + hdfsUri: "hdfs://${HADOOP_HOST}:9000/" + jobTracker: "${HADOOP_HOST}:8021" + requiredRuntimeParameters: + queryId: java.lang.String + format: datawave.microservice.mapreduce.bulkresults.map.SerializationFormat + optionalRuntimeParameters: + outputTableName: java.lang.String + outputFormat: java.lang.String + jobConfigurationProperties: + "mapreduce.map.speculative": "false" + "mapreduce.map.output.compress": "false" + "mapreduce.output.fileoutputformat.compress": "false" + "mapreduce.job.user.classpath.first": "true" + # NOTE: Disable spring components which should not be run in a map-reduce context + jobSystemProperties: + "datawave.table.cache.enabled": "false" + "spring.profiles.active": "query,mrquery" + "spring.cloud.bus.enabled": "false" + "spring.cloud.discovery.enabled": "false" + "spring.cloud.consul.enabled": "false" + "spring.rabbitmq.discovery.enabled": "false" + "datawave.query.messaging.backend": "none" + "datawave.query.messaging.claimCheck.enabled": "false" + "datawave.query.storage.cache.enabled": "false" + 
"hazelcast.client.enabled": "false" + "spring.cloud.config.enabled": "false" + "datawave.query.metric.client.enabled": "false" + accumulo: + zookeepers: '${accumulo.zookeepers}' + instanceName: '${accumulo.instanceName}' + username: '${accumulo.username}' + password: '${accumulo.password}' + 'OozieJob': + hdfsUri: "hdfs://${HADOOP_HOST}:9000/" + jobTracker: "${HADOOP_HOST}:8021" \ No newline at end of file diff --git a/docker/config/application-query.yml b/docker/config/application-query.yml new file mode 100755 index 00000000000..3b8fc5d024a --- /dev/null +++ b/docker/config/application-query.yml @@ -0,0 +1,574 @@ +# This profile should be included by any service which depends on the query starter. This +# file contains all of the configuration required to use the QueryLogicFactory. +warehouse: + accumulo: + zookeepers: '${accumulo.zookeepers}' + instanceName: '${accumulo.instanceName}' + username: '${accumulo.username}' + password: '${accumulo.password}' + statsd: + host: localhost + port: 8125 + tables: + shard: + name: 'datawave.shard' + index: + name: 'datawave.shardIndex' + reverseIndex: + name: 'datawave.shardReverseIndex' + dateIndex: + name: 'datawave.dateIndex' + metadata: + name: 'datawave.metadata' + model: + name: 'datawave.metadata' + edge: + name: 'datawave.edge' + errorTables: + shard: + name: "datawave.error_s" + index: + name: "datawave.error_i" + reverseIndex: + name: "datawave.error_r" + dateIndex: + name: "" + metadata: + name: "datawave.error_m" + model: + name: "datawave.error_m" + metricTables: + shard: + name: "datawave.queryMetrics_s" + index: + name: "datawave.queryMetrics_i" + reverseIndex: + name: "datawave.queryMetrics_r" + dateIndex: + name: "" + metadata: + name: "datawave.queryMetrics_m" + model: + name: "datawave.queryMetrics_m" + defaults: + checkpointable: true + queryThreads: 100 + indexLookupThreads: 100 + dateIndexThreads: 20 + fullTableScanEnabled: false + baseIteratorPriority: 100 + maxIndexScanTimeMillis: 31536000000 + eventPerDayThreshold: 40000 + shardsPerDayThreshold: 20 + initialMaxTermThreshold: 2000 + finalMaxTermThreshold: 2000 + maxDepthThreshold: 2000 + maxUnfieldedExpansionThreshold: 50 + maxValueExpansionThreshold: 50 + maxOrExpansionThreshold: 500 + maxOrRangeThreshold: 10 + maxRangesPerRangeIvarator: 5 + maxOrRangeIvarators: 10 + maxOrExpansionFstThreshold: 750 + maxFieldIndexRangeSplit: 16 + maxIvaratorSources: 20 + maxEvaluationPipelines: 16 + maxPipelineCachedResults: 16 + hdfsSiteConfigURLs: 'file://${HADOOP_CONF_DIR:/etc/hadoop/conf}/core-site.xml,file://${HADOOP_CONF_DIR:/etc/hadoop/conf}/hdfs-site.xml' + ivaratorFstHdfsBaseURIs: "hdfs://${HADOOP_HOST:localhost}:9000/IvaratorCache" + ivaratorCacheBufferSize: 10000 + ivaratorMaxOpenFiles: 100 + ivaratorCacheScanPersistThreshold: 100000 + ivaratorCacheScanTimeoutMinutes: 60 + modelName: 'DATAWAVE' + edgeModelName: 'DATAWAVE_EDGE' + +datawave: + connection: + factory: + defaultPool: 'WAREHOUSE' + metadata: + all-metadata-auths: + - PRIVATE,PUBLIC + type-substitutions: + "[datawave.data.type.DateType]": "datawave.data.type.RawDateType" + + query: + poolLimits: + 'pool1': &defaultPoolLimits + maxQueriesPerExecutor: + 'WAREHOUSE': 40 + 'UUID': 20 + livenessTimeout: 90 + livenessTimeoutUnit: SECONDS + 'pool2': *defaultPoolLimits + parser: + skipTokenizeUnfieldedFields: + - "DOMETA" + tokenizedFields: + - "CONTENT" + logic: + factory: + enabled: true + # Uncomment the following line to override the query logic beans to load + # xmlBeansPath: "classpath:MyTestQueryLogicFactory.xml" + + # 
If desired, you may populate this map to redefine the name for each query logic. + # This can also be used to limit the available query logics. + queryLogicsByName: + "EventQuery": "EventQuery" + "ErrorEventQuery": "ErrorEventQuery" + "DiscoveryQuery": "DiscoveryQuery" + "ErrorDiscoveryQuery": "ErrorDiscoveryQuery" + "LuceneUUIDEventQuery": "LuceneUUIDEventQuery" + "ContentQuery": "ContentQuery" + "EdgeQuery": "EdgeQuery" + "CountQuery": "CountQuery" + "ErrorCountQuery": "ErrorCountQuery" + "FieldIndexCountQuery": "FieldIndexCountQuery" + "ErrorFieldIndexCountQuery": "ErrorFieldIndexCountQuery" + "TermFrequencyQuery": "TermFrequencyQuery" + "IndexStatsQuery": "IndexStatsQuery" + "QueryMetricsQuery": "QueryMetricsQuery" + "InternalQueryMetricsQuery": "InternalQueryMetricsQuery" + "FacetedQuery": "FacetedQuery" + "HitHighlights": "HitHighlights" + "EdgeEventQuery": "EdgeEventQuery" + "FederatedEventQuery": "FederatedEventQuery" + + # The max page size that a user can request. 0 turns off this feature + maxPageSize: 10000 + + # The number of bytes at which a page will be returned, event if the pagesize has not been reached. 0 turns off this feature + pageByteTrigger: 0 + logics: + BaseEventQuery: + checkpointable: ${warehouse.defaults.checkpointable} + accumuloPassword: ${warehouse.accumulo.password} + tableName: ${warehouse.tables.shard.name} + dateIndexTableName: ${warehouse.tables.dateIndex.name} + defaultDateTypeName: "EVENT" + metadataTableName: ${warehouse.tables.metadata.name} + indexTableName: ${warehouse.tables.index.name} + reverseIndexTableName: ${warehouse.tables.reverseIndex.name} + maxResults: -1 + queryThreads: ${warehouse.defaults.queryThreads} + maxConcurrentTasks: ${warehouse.defaults.queryThreads} + indexLookupThreads: ${warehouse.defaults.indexLookupThreads} + dateIndexThreads: ${warehouse.defaults.dateIndexThreads} + fullTableScanEnabled: ${warehouse.defaults.fullTableScanEnabled} + includeDataTypeAsField: false + disableIndexOnlyDocuments: false + indexOnlyFilterFunctionsEnabled: false + includeHierarchyFields: false + hierarchyFieldOptions: + "FOO": "BAR" + baseIteratorPriority: ${warehouse.defaults.baseIteratorPriority} + maxIndexScanTimeMillis: ${warehouse.defaults.maxIndexScanTimeMillis} + collapseUids: false + collapseUidsThreshold: -1 + useEnrichers: true + contentFieldNames: + - 'CONTENT' + realmSuffixExclusionPatterns: + - '<.*>$' + minimumSelectivity: .2 + enricherClassNames: + - 'datawave.query.enrich.DatawaveTermFrequencyEnricher' + useFilters: false + filterClassNames: + - 'foo.bar' + filterOptions: + 'bar': "foo" + auditType: "ACTIVE" + logicDescription: "Retrieve sharded events/documents, leveraging the global index tables as needed" + eventPerDayThreshold: ${warehouse.defaults.eventPerDayThreshold} + shardsPerDayThreshold: ${warehouse.defaults.shardsPerDayThreshold} + initialMaxTermThreshold: ${warehouse.defaults.initialMaxTermThreshold} + finalMaxTermThreshold: ${warehouse.defaults.finalMaxTermThreshold} + maxDepthThreshold: ${warehouse.defaults.maxDepthThreshold} + maxUnfieldedExpansionThreshold: ${warehouse.defaults.maxUnfieldedExpansionThreshold} + maxValueExpansionThreshold: ${warehouse.defaults.maxValueExpansionThreshold} + maxOrExpansionThreshold: ${warehouse.defaults.maxOrExpansionThreshold} + maxOrRangeThreshold: ${warehouse.defaults.maxOrRangeThreshold} + maxOrExpansionFstThreshold: ${warehouse.defaults.maxOrExpansionFstThreshold} + maxFieldIndexRangeSplit: ${warehouse.defaults.maxFieldIndexRangeSplit} + maxIvaratorSources: 
${warehouse.defaults.maxIvaratorSources} + maxEvaluationPipelines: ${warehouse.defaults.maxEvaluationPipelines} + maxPipelineCachedResults: ${warehouse.defaults.maxPipelineCachedResults} + hdfsSiteConfigURLs: ${warehouse.defaults.hdfsSiteConfigURLs} + zookeeperConfig: ${warehouse.accumulo.zookeepers} + ivaratorCacheDirConfigs: + - basePathURI: "hdfs://${HADOOP_HOST:localhost}:9000/IvaratorCache" + ivaratorFstHdfsBaseURIs: ${warehouse.defaults.ivaratorFstHdfsBaseURIs} + ivaratorCacheBufferSize: ${warehouse.defaults.ivaratorCacheBufferSize} + ivaratorMaxOpenFiles: ${warehouse.defaults.ivaratorMaxOpenFiles} + ivaratorCacheScanPersistThreshold: ${warehouse.defaults.ivaratorCacheScanPersistThreshold} + ivaratorCacheScanTimeoutMinutes: ${warehouse.defaults.ivaratorCacheScanTimeoutMinutes} + eventQueryDataDecoratorTransformer: + requestedDecorators: + - "CSV" + - "WIKIPEDIA" + dataDecorators: + "CSV": + "EVENT_ID": "https://localhost:8443/DataWave/Query/lookupUUID/EVENT_ID?uuid=@field_value@&parameters=data.decorators:CSV" + "UUID": "https://localhost:8443/DataWave/Query/lookupUUID/UUID?uuid=@field_value@&parameters=data.decorators:CSV" + "PARENT_UUID": "https://localhost:8443/DataWave/Query/lookupUUID/PARENT_UUID?uuid=@field_value@&parameters=data.decorators:CSV" + "WIKIPEDIA": + "PAGE_ID": "https://localhost:8443/DataWave/Query/lookupUUID/PAGE_ID?uuid=@field_value@&parameters=data.decorators:WIKIPEDIA" + "PAGE_TITLE": "https://localhost:8443/DataWave/Query/lookupUUID/PAGE_TITLE?uuid=@field_value@&parameters=data.decorators:WIKIPEDIA" + modelTableName: ${warehouse.tables.model.name} + modelName: ${warehouse.defaults.modelName} + querySyntaxParsers: + JEXL: "" + LUCENE: "LuceneToJexlQueryParser" + LUCENE-UUID: "LuceneToJexlUUIDQueryParser" + TOKENIZED-LUCENE: "TokenizedLuceneToJexlQueryParser" + sendTimingToStatsd: false + collectQueryMetrics: true + logTimingDetails: true + statsdHost: ${warehouse.statsd.host} + statsdPort: ${warehouse.statsd.port} + evaluationOnlyFields: "" + requiredRoles: + - "AuthorizedUser" + + EventQuery: + checkpointable: ${warehouse.defaults.checkpointable} + logicDescription: "Query the sharded event/document schema, leveraging the global index tables as needed" + + ErrorEventQuery: + checkpointable: ${warehouse.defaults.checkpointable} + logicDescription: "Retrieve events/documents that encountered one or more errors during ingest" + tableName: ${warehouse.errorTables.shard.name} + metadataTableName: ${warehouse.errorTables.metadata.name} + dateIndexTableName: "" + indexTableName: ${warehouse.errorTables.index.name} + reverseIndexTableName: ${warehouse.errorTables.reverseIndex.name} + includeHierarchyFields: false + + DiscoveryQuery: + checkpointable: ${warehouse.defaults.checkpointable} + tableName: ${warehouse.tables.shard.name} + indexTableName: ${warehouse.tables.index.name} + reverseIndexTableName: ${warehouse.tables.reverseIndex.name} + metadataTableName: ${warehouse.tables.metadata.name} + modelTableName: ${warehouse.tables.model.name} + modelName: ${warehouse.defaults.modelName} + fullTableScanEnabled: ${warehouse.defaults.fullTableScanEnabled} + allowLeadingWildcard: true + auditType: "NONE" + maxResults: -1 + maxWork: -1 + logicDescription: "Discovery query that returns information from the index about the supplied term(s)" + + ErrorDiscoveryQuery: + checkpointable: ${warehouse.defaults.checkpointable} + tableName: ${warehouse.errorTables.shard.name} + indexTableName: ${warehouse.errorTables.index.name} + reverseIndexTableName: 
${warehouse.errorTables.reverseIndex.name} + maxResults: -1 + maxWork: -1 + metadataTableName: ${warehouse.errorTables.metadata.name} + modelTableName: ${warehouse.errorTables.model.name} + modelName: ${warehouse.defaults.modelName} + fullTableScanEnabled: ${warehouse.defaults.fullTableScanEnabled} + allowLeadingWildcard: true + auditType: "NONE" + logicDescription: "Discovery query that returns information from the ingest errors index about the supplied term(s)" + + LuceneUUIDEventQuery: + checkpointable: ${warehouse.defaults.checkpointable} + logicDescription: "Composite query logic that retrieves records from the event and error tables, based on known UUID fields, ie, those configured via UUIDTypeList in QueryLogicFactory.xml" + auditType: "NONE" + eventQuery: + auditType: "NONE" + logicDescription: "Lucene query for event/document UUIDs" + mandatoryQuerySyntax: + - "LUCENE-UUID" + connPoolName: "UUID" + errorEventQuery: + auditType: "NONE" + logicDescription: "Lucene query for event/document UUIDs for events that encountered errors at ingest time" + mandatoryQuerySyntax: + - "LUCENE-UUID" + connPoolName: "UUID" + tableName: ${warehouse.errorTables.shard.name} + dateIndexTableName: ${warehouse.errorTables.dateIndex.name} + metadataTableName: ${warehouse.errorTables.metadata.name} + indexTableName: ${warehouse.errorTables.index.name} + reverseIndexTableName: ${warehouse.errorTables.reverseIndex.name} + + ContentQuery: + checkpointable: ${warehouse.defaults.checkpointable} + tableName: ${warehouse.tables.shard.name} + maxResults: -1 + maxWork: -1 + auditType: "NONE" + logicDescription: "Query that returns a document given the document identifier" + + EdgeQuery: + checkpointable: ${warehouse.defaults.checkpointable} + tableName: ${warehouse.tables.edge.name} + metadataTableName: ${warehouse.tables.metadata.name} + modelTableName: ${warehouse.tables.model.name} + modelName: ${warehouse.defaults.edgeModelName} + maxResults: 25000 + maxWork: -1 + queryThreads: 16 + maxConcurrentTasks: 16 + auditType: "NONE" + logicDescription: "Retrieve graph edges matching the search term(s)" + querySyntaxParsers: + "JEXL": "" + "LIST": "" + "LIMITED_JEXL": "" + "LUCENE": "" + + CountQuery: + checkpointable: ${warehouse.defaults.checkpointable} + logicDescription: "Retrieve event/document counts based on your search criteria" + + ErrorCountQuery: + checkpointable: ${warehouse.defaults.checkpointable} + logicDescription: "Retrieve counts of errored events based on your search criteria" + tableName: ${warehouse.errorTables.shard.name} + metadataTableName: ${warehouse.errorTables.metadata.name} + indexTableName: ${warehouse.errorTables.index.name} + reverseIndexTableName: ${warehouse.errorTables.reverseIndex.name} + + FieldIndexCountQuery: + checkpointable: false + tableName: ${warehouse.tables.shard.name} + indexTableName: ${warehouse.tables.index.name} + reverseIndexTableName: ${warehouse.tables.reverseIndex.name} + metadataTableName: ${warehouse.tables.metadata.name} + maxResults: -1 + maxWork: -1 + queryThreads: ${warehouse.defaults.queryThreads} + maxConcurrentTasks: ${warehouse.defaults.queryThreads} + modelTableName: ${warehouse.tables.model.name} + modelName: "DATAWAVE" + maxUniqueValues: 20000 + auditType: "NONE" + logicDescription: "Indexed Fields Only: Given FIELDNAME returns counts for each unique value. Given FIELDNAME:FIELDVALUE returns count for only that value." 
+ + ErrorFieldIndexCountQuery: + checkpointable: false + tableName: ${warehouse.errorTables.shard.name} + indexTableName: ${warehouse.errorTables.index.name} + reverseIndexTableName: ${warehouse.errorTables.reverseIndex.name} + metadataTableName: ${warehouse.errorTables.metadata.name} + maxResults: -1 + maxWork: -1 + queryThreads: ${warehouse.defaults.queryThreads} + maxConcurrentTasks: ${warehouse.defaults.queryThreads} + modelTableName: ${warehouse.errorTables.model.name} + modelName: "DATAWAVE" + maxUniqueValues: 20000 + auditType: "NONE" + logicDescription: "FieldIndex count query (experimental)" + + TermFrequencyQuery: + tableName: ${warehouse.tables.shard.name} + maxResults: -1 + maxWork: -14 + auditType: "NONE" + logicDescription: "Query that returns data from the term frequency query table" + + IndexStatsQuery: + auditType: "NONE" + + QueryMetricsQuery: + checkpointable: ${warehouse.defaults.checkpointable} + logicDescription: "Retrieve query metrics based on the given search term(s)" + includeHierarchyFields: false + modelTableName: ${warehouse.metricTables.model.name} + modelName: "NONE" + tableName: ${warehouse.metricTables.shard.name} + dateIndexTableName: ${warehouse.metricTables.dateIndex.name} + metadataTableName: ${warehouse.metricTables.metadata.name} + indexTableName: ${warehouse.metricTables.index.name} + reverseIndexTableName: ${warehouse.metricTables.reverseIndex.name} + auditType: "NONE" + collectQueryMetrics: true + + InternalQueryMetricsQuery: + collectQueryMetrics: false + requiredRoles: + - "AuthorizedServer" + + FacetedQuery: + checkpointable: ${warehouse.defaults.checkpointable} + auditType: "NONE" + logicDescription: "Faceted search over indexed fields, returning aggregate counts for field values" + facetedSearchType: "FIELD_VALUE_FACETS" + facetTableName: "datawave.facets" + facetMetadataTableName: "datawave.facetMetadata" + facetHashTableName: "datawave.facetHashes" + maximumFacetGrouping: 200 + minimumFacet: 1 + streaming: true + querySyntaxParsers: + JEXL: "" + LUCENE: "LuceneToJexlQueryParser" + LUCENE-UUID: "LuceneToJexlUUIDQueryParser" + + HitHighlights: + checkpointable: ${warehouse.defaults.checkpointable} + accumuloPassword: ${warehouse.accumulo.password} + tableName: ${warehouse.tables.shard.name} + dateIndexTableName: ${warehouse.tables.dateIndex.name} + defaultDateTypeName: "EVENT" + metadataTableName: ${warehouse.tables.metadata.name} + indexTableName: ${warehouse.tables.index.name} + reverseIndexTableName: ${warehouse.tables.reverseIndex.name} + queryThreads: ${warehouse.defaults.indexLookupThreads} + maxConcurrentTasks: ${warehouse.defaults.indexLookupThreads} + fullTableScanEnabled: ${warehouse.defaults.fullTableScanEnabled} + minimumSelectivity: .2 + includeDataTypeAsField: false + includeGroupingContext: false + useEnrichers: false + auditType: "NONE" + logicDescription: "Fast boolean query over indexed fields, only returning fields queried on" + eventPerDayThreshold: 40000 + shardsPerDayThreshold: ${warehouse.defaults.shardsPerDayThreshold} + initialMaxTermThreshold: ${warehouse.defaults.initialMaxTermThreshold} + finalMaxTermThreshold: ${warehouse.defaults.finalMaxTermThreshold} + maxDepthThreshold: ${warehouse.defaults.maxDepthThreshold} + maxUnfieldedExpansionThreshold: ${warehouse.defaults.maxUnfieldedExpansionThreshold} + maxValueExpansionThreshold: ${warehouse.defaults.maxValueExpansionThreshold} + maxOrExpansionThreshold: ${warehouse.defaults.maxOrExpansionThreshold} + maxOrRangeThreshold: ${warehouse.defaults.maxOrRangeThreshold} + 
maxRangesPerRangeIvarator: ${warehouse.defaults.maxRangesPerRangeIvarator} + maxOrRangeIvarators: ${warehouse.defaults.maxOrRangeIvarators} + maxOrExpansionFstThreshold: ${warehouse.defaults.maxOrExpansionFstThreshold} + maxFieldIndexRangeSplit: ${warehouse.defaults.maxFieldIndexRangeSplit} + maxEvaluationPipelines: ${warehouse.defaults.maxEvaluationPipelines} + maxPipelineCachedResults: ${warehouse.defaults.maxPipelineCachedResults} + hdfsSiteConfigURLs: ${warehouse.defaults.hdfsSiteConfigURLs} + zookeeperConfig: ${warehouse.accumulo.zookeepers} + ivaratorCacheDirConfigs: + - basePathURI: "hdfs://${HADOOP_HOST:localhost}:9000/IvaratorCache" + ivaratorFstHdfsBaseURIs: ${warehouse.defaults.ivaratorFstHdfsBaseURIs} + ivaratorCacheBufferSize: 10000 + ivaratorMaxOpenFiles: ${warehouse.defaults.ivaratorMaxOpenFiles} + ivaratorCacheScanPersistThreshold: 100000 + ivaratorCacheScanTimeoutMinutes: ${warehouse.defaults.ivaratorCacheScanTimeoutMinutes} + querySyntaxParsers: + JEXL: "" + LUCENE: "LuceneToJexlQueryParser" + LUCENE-UUID: "LuceneToJexlUUIDQueryParser" + + EdgeEventQuery: + checkpointable: ${warehouse.defaults.checkpointable} + logicDescription: "Use results of an EdgeQuery to obtain events/documents that created the given edge" + edgeModelName: "DATAWAVE_EDGE" + modelTableName: ${warehouse.tables.model.name} + + uuidTypes: &defaultUuidTypes + 'EVENT_ID': + fieldName: 'EVENT_ID' + queryLogics: + 'default': 'LuceneUUIDEventQuery' + allowedWildcardAfter: 28 + 'UUID': + fieldName: 'UUID' + queryLogics: + 'default': 'LuceneUUIDEventQuery' + 'PARENT_UUID': + fieldName: 'PARENT_UUID' + queryLogics: + 'default': 'LuceneUUIDEventQuery' + 'PAGE_ID': + fieldName: 'PAGE_ID' + queryLogics: + 'default': 'LuceneUUIDEventQuery' + 'PAGE_TITLE': + fieldName: 'PAGE_TITLE' + queryLogics: + 'default': 'LuceneUUIDEventQuery' + + lookup: + columnVisibility: "" + beginDate: "20100101 000000.000" + types: *defaultUuidTypes + + translateid: + columnVisibility: "" + beginDate: "20100101 000000.000" + types: *defaultUuidTypes + + edge: + # Uncomment the following line to override the edge beans to load + # xmlBeansPath: "classpath:EdgeBeans.xml" + model: + baseFieldMap: + EDGE_SOURCE: 'SOURCE' + EDGE_SINK: 'SINK' + EDGE_TYPE: 'TYPE' + EDGE_RELATIONSHIP: 'RELATION' + EDGE_ATTRIBUTE1: 'ATTRIBUTE1' + EDGE_ATTRIBUTE2: 'ATTRIBUTE2' + EDGE_ATTRIBUTE3: 'ATTRIBUTE3' + DATE: 'DATE' + STATS_EDGE: 'STATS_TYPE' + keyUtilFieldMap: + ENRICHMENT_TYPE: 'ENRICHMENT_TYPE' + FACT_TYPE: 'FACT_TYPE' + GROUPED_FIELDS: 'GROUPED_FIELDS' + transformFieldMap: + COUNT: 'COUNT' + COUNTS: 'COUNTS' + LOAD_DATE: 'LOADDATE' + ACTIVITY_DATE: 'ACTIVITY_DATE' + fieldMappings: + - fieldName: "SOURCE" + modelFieldName: "VERTEXA" + direction: "REVERSE" + - fieldName: "SOURCE" + modelFieldName: "VERTEXA" + direction: "FORWARD" + - fieldName: "SINK" + modelFieldName: "VERTEXB" + direction: "REVERSE" + - fieldName: "SINK" + modelFieldName: "VERTEXB" + direction: "FORWARD" + - fieldName: "RELATION" + modelFieldName: "RELATION" + direction: "REVERSE" + - fieldName: "RELATION" + modelFieldName: "RELATION" + direction: "FORWARD" + - fieldName: "TYPE" + modelFieldName: "TYPE" + direction: "REVERSE" + - fieldName: "TYPE" + modelFieldName: "TYPE" + direction: "FORWARD" + - fieldName: "ATTRIBUTE1" + modelFieldName: "ATTR1" + direction: "REVERSE" + - fieldName: "ATTRIBUTE1" + modelFieldName: "ATTR1" + direction: "FORWARD" + - fieldName: "ATTRIBUTE2" + modelFieldName: "ATTR2" + direction: "REVERSE" + - fieldName: "ATTRIBUTE2" + modelFieldName: "ATTR2" + direction: 
"FORWARD" + - fieldName: "ATTRIBUTE3" + modelFieldName: "ATTR3" + direction: "REVERSE" + - fieldName: "ATTRIBUTE3" + modelFieldName: "ATTR3" + direction: "FORWARD" + +# Enable additional Hazelcast cluster for use by the query and executor services +hazelcast: + client: + clusterName: ${QUERY_CACHE:cache} + clusterName: ${QUERY_CACHE:cache} \ No newline at end of file diff --git a/docker/config/application-querymessaging.yml b/docker/config/application-querymessaging.yml new file mode 100755 index 00000000000..8845ce8340c --- /dev/null +++ b/docker/config/application-querymessaging.yml @@ -0,0 +1,27 @@ +# This profile should be included by any service which depends on the query starter and +# wants to read/write query stats from/to the query storage cache, or read/write query +# results from/to the query results messaging backend. +datawave: + query: + messaging: + backend: ${messaging.backend} + rabbitmq: + maxMessageSizeBytes: ${messaging.maxMessageSizeBytes} + # enable the following configuration if you want to use an independent, dedicated rabbitmq cluster for query (i.e. not the default spring one) + useDedicatedInstance: ${USE_DEDICATED_INSTANCE:false} + instanceSettings: + host: ${QUERY_RABBIT_HOST:query-rabbitmq} + port: ${QUERY_RABBIT_PORT:5672} + publisherConfirmType: SIMPLE + # Note - spring doesn't like it when you enable publisherConfirms for the SIMPLE confirm type... + publisherConfirms: false + publisherReturns: true + kafka: + partitions: 2 + # enable the following configuration if you want to use an independent, dedicated kafka cluster for query (i.e. not the default spring one) + useDedicatedInstance: ${USE_DEDICATED_INSTANCE:false} + instanceSettings: + bootstrapServers: ${QUERY_KAFKA_HOST:query-kafka}:${QUERY_KAFKA_PORT:9092} + autoOffsetReset: earliest + enableAutoCommit: false + allowAutoCreateTopics: false diff --git a/docker/config/application.yml b/docker/config/application.yml new file mode 100755 index 00000000000..187031a21c0 --- /dev/null +++ b/docker/config/application.yml @@ -0,0 +1,104 @@ +server: + servlet.context-path: /${spring.application.name} + cdn-uri: /${spring.application.name}/ + port: 8443 + non-secure-port: 8080 + ssl: + client-auth: WANT + # Default outbound ssl configuration to the server's ssl configuration. + # Individual services can override if they want. + outbound-ssl: + #key-alias: ${server.ssl.key-alias} + #key-password: ${server.ssl.key-password} + key-store: ${server.ssl.key-store} + key-store-password: ${server.ssl.key-store-password} + key-store-type: ${server.ssl.key-store-type} + #key-store-provider: ${server.ssl.key-store-provider} + trust-store: ${server.ssl.trust-store} + trust-store-password: ${server.ssl.trust-store-password} + trust-store-type: ${server.ssl.trust-store-type} + #trust-store-provider: ${server.ssl.trust-store-provider} + #protocol: ${server.ssl.protocol:TLS} + + undertow: + accesslog: + enabled: true + dir: '${logDir:./logs}' + prefix: '${spring.application.name}_access.' + pattern: '%h %l %u %t "%r" %s %b %D' + +management: + endpoints: + web: + # Actuator services show up under the /mgmt context path, and users with either Administrator or JBossAdministrator can access. + base-path: "/mgmt" + # Include all actuator endpoints in the web frontend. We'll secure most of them with a default security configuration. + exposure: + include: "*" + # Show details on the health endpoint only when the user is authorized. Otherwise just show simple up/down status. 
+ endpoint.health.show-details: when-authorized + +info: + build: + version: "@project.version@" + groupId: "@project.groupId@" + artifactId: "@project.artifactId@" + +spring: + security: + user.password: passwordNotUsed + datawave: + # JWTs live for 1 day by default + jwt.ttl: 86400 + # An issuer DN must always be supplied along with a subject DN + issuers-required: true + # Allow users with Administrator or JBossAdministrator roles to access the sensitive actuator endpoints. + manager-roles: Administrator,JBossAdministrator + # A single role that must not be present in the proxy chain, or else access will be denied for the user. + # deniedAccessRole: DeniedRole + # List of required roles, of which each entity in the proxy chain must have at least one of, or else these roles will be removed from the primary user. + requiredRoles: + - "AuthorizedUser" + - "AuthorizedServer" + - "AuthorizedQueryServer" + rabbitmq: + host: ${MESSAGING_SERVER_HOSTNAME:localhost} + publisher-confirm-type: simple + publisher-confirms: true + publisher-returns: true + cloud: + consul: + enabled: false + # Configure jackson mappers so that null properties are not included in the serialized response. + jackson: + default-property-include: non_null + +hazelcast: + client: + clusterName: cache + +# The default URI for remote authorization +datawave: + authorization: + uri: "https://authorization:8443/authorization/v1/authorize" + security: + util: + subjectDnPattern: "(?:^|,)\\s*OU\\s*=\\s*My Department\\s*(?:,|$)" + npeOuList: "EXAMPLE_SERVER_OU1,EXAMPLE_SERVER_OU2" + swagger: + title: "${spring.application.name} service" + description: "REST API provided by the ${spring.application.name} service" + +swagger: + doc: + packages: datawave.microservice + +springdoc: + api-docs: + path: /apidocs + show-actuator: true + writer-with-default-pretty-printer: true + swagger-ui: + docExpansion: 'none' + operationsSorter: 'method' + tagsSorter: 'alpha' diff --git a/docker/config/audit.yml b/docker/config/audit.yml new file mode 100755 index 00000000000..42c2c7012b9 --- /dev/null +++ b/docker/config/audit.yml @@ -0,0 +1,64 @@ +spring: + cloud: + stream: + rabbit: + bindings: + logAuditSink-in-0: + consumer: + autoBindDlq: true + accumuloAuditSink-in-0: + consumer: + autoBindDlq: true + dumpAuditSink-in-0: + consumer: + autoBindDlq: true + bindQueue: false + bindings: + auditSource-out-0: + destination: 'audit' + producer: + errorChannelEnabled: true + logAuditSink-in-0: + destination: 'audit' + group: 'log' + accumuloAuditSink-in-0: + destination: 'audit' + group: 'accumulo' + dumpAuditSink-in-0: + destination: 'audit' + group: 'dump' + consumer: + concurrency: 5 + # NOTE: When defining your functions, be sure to include busConsumer, or else spring cloud bus will not work + function: + definition: auditSource;logAuditSink;accumuloAuditSink;dumpAuditSink;busConsumer + +audit: + confirmAckEnabled: true + auditors: + log: + enabled: true + accumulo: + enabled: true + accumuloConfig: + zookeepers: "${accumulo.zookeepers}" + instanceName: '${accumulo.instanceName}' + username: '${accumulo.username}' + password: '${accumulo.password}' + file: + enabled: false + pathUri: 'file:///audit' + dump: + enabled: false + pathUri: 'file:///audit' + replay: + enabled: true + +datawave: + swagger: + title: "Audit Service" + description: "REST API provided by the Audit Service" + +logging: + level: + datawave.microservice.audit: DEBUG diff --git a/docker/config/authorization-mock.yml b/docker/config/authorization-mock.yml new file mode 100755 
index 00000000000..181955d1d40 --- /dev/null +++ b/docker/config/authorization-mock.yml @@ -0,0 +1,62 @@ + +mock: + users: + # Defines a regex such that, if the incoming subject DN matches the regex, the user is considered to be a server UserType + server-DN-regex: .*ou=servers.* + # Define roles (and the associated Accumulo auths) that are applied to all incoming users unless the name is found in the per-user map + global-roles-to-auths: + AuthorizedUser: DW_USER + JBossAdministrator: JBOSS_ADMIN + Administrator: DW_ADMIN + # Define per-user roles and auths. If the incoming DN (in lower-case subjectDN form) matches an entry here, the + # corresponding roles and authorizations are provided instead of the global ones. + mockUsers: + # + # "[subjectDN]": + # ROLE1: auth1 + # ROLE2: auth2 + # ROLE3: auth3 + # + # E.g., the users below match the testUser.p12 and testServer.p12 certs in spring-boot-starter-datawave/src/main/resources/ + # (password: ChangeIt) and will also work with the datawave quickstart and its example datasets + + "[cn=test a. user, ou=example developers, o=example corp, c=us]": + roles-to-auths: + AuthorizedUser: DW_USER + JBossAdministrator: JBOSS_ADMIN + Administrator: DW_ADMIN + FooRole: FOO + BarRole: BAR + DefRole: DEF + PublicRole: PUBLIC + PrivateRole: PRIVATE + RoleA: A + RoleB: B + RoleC: C + RoleD: D + RoleE: E + RoleF: F + RoleG: G + RoleH: H + RoleI: I + + "[cn=testserver.example.com, ou=servers, o=example corp, c=us]": + roles-to-auths: + AuthorizedServer: DW_SERV + MetricsAdministrator: METRICS_ADMIN + JBossAdministrator: JBOSS_ADMIN + Administrator: DW_ADMIN + FooRole: FOO + BarRole: BAR + DefRole: DEF + PublicRole: PUBLIC + PrivateRole: PRIVATE + RoleA: A + RoleB: B + RoleC: C + RoleD: D + RoleE: E + RoleF: F + RoleG: G + RoleH: H + RoleI: I diff --git a/docker/config/authorization.yml b/docker/config/authorization.yml new file mode 100755 index 00000000000..9546ef7bf88 --- /dev/null +++ b/docker/config/authorization.yml @@ -0,0 +1,27 @@ +# Authorization requires a client certificate +server: + ssl: + client-auth: NEED + +# Use trusted headers for the authorization service. This simply trusts values that appear in the +# X-SSL-clientcert-subject/issuer headers, which is necessary when someone is calling the service +# using HTTP (e.g., behind a load-balancer that does SSL termination and calls using HTTP rather +# than paying the penalty to set up a new SSL connection -- only do this if you are behind a +# firewall and trust those with access) +spring: + security: + datawave: + use-trusted-subject-headers: true + oauth: + authCodeTtl: 60 + idTokenTtl: 86400 + refreshTokenTtl: 604800 + +logging: + level: + datawave.microservice.authorization: DEBUG + +datawave: + swagger: + title: "Authorization Service" + description: "REST API provided by the Authorization Service" diff --git a/docker/config/cache.yml b/docker/config/cache.yml new file mode 100755 index 00000000000..53f9c2ca012 --- /dev/null +++ b/docker/config/cache.yml @@ -0,0 +1,35 @@ +spring: + security: + datawave: + jwt: + enabled: false + +datawave: + swagger: + title: "Cache Service" + description: "REST API provided by the Cache Service" + +hazelcast: + server: + # How long to wait (in seconds) before performing the first split-brain merge. When a number + # of cache servers start up at once, they will form individual clusters because the attempt + # to create a cluster happens before the server has registered in Consul. 
Therefore, the split + # brain merge will take care of joining the multiple clusters. + initialMergeDelaySeconds: 50 + xml-config: | + + + + + 2 + + 86400 + + + + + 2 + + diff --git a/docker/config/dictionary.yml b/docker/config/dictionary.yml new file mode 100755 index 00000000000..cdd4e020993 --- /dev/null +++ b/docker/config/dictionary.yml @@ -0,0 +1,42 @@ +# Authorization requires a client certificate +server: + ssl: + client-auth: NEED + +datawave: + swagger: + title: "Dictionary Service" + description: "REST API provided by the Dictionary Service" + metadata: + all-metadata-auths: + - PRIVATE,PUBLIC + type-substitutions: + "[datawave.data.type.DateType]": "datawave.data.type.RawDateType" + dictionary: + accumulo-properties: + zookeepers: "${accumulo.zookeepers}" + instance-name: '${accumulo.instanceName}' + username: '${accumulo.username}' + password: '${accumulo.password}' + edge: + metadata-table-name: ${metadata.table.name:datawave.metadata} + num-threads: 8 + data: + model-name: DATAWAVE + model-table-name: ${metadata.table.name:datawave.metadata} + metadata-table-name: ${metadata.table.name:datawave.metadata} + num-threads: 8 + normalizer-map: + "[datawave.data.type.GeoLatType]": "Latitude" + "[datawave.data.type.GeoLonType]": "Longitude" + "[datawave.data.type.GeometryType]": "Geometry" + "[datawave.data.type.GeoType]": "Combined latitude, longitude" + "[datawave.data.type.IpAddressType]": "IP address" + "[datawave.data.type.MacAddressType]": "MAC address" + "[datawave.data.type.LcNoDiacriticsType]": "Text" + "[datawave.data.type.LcType]": "Text" + "[datawave.data.type.NoOpType]": "Unnormalized Text" + "[datawave.data.type.NumberType]": "Number" + "[datawave.data.type.PointType]": "Point Geometry" + "[datawave.data.type.TrimLeadingZerosType]": "Text" + diff --git a/docker/config/executor-pool1.yml b/docker/config/executor-pool1.yml new file mode 100755 index 00000000000..d77972592bc --- /dev/null +++ b/docker/config/executor-pool1.yml @@ -0,0 +1,8 @@ +# This is where you set properties which are specific to pool1 +executor: + poolName: 'pool1' + +datawave: + swagger: + title: "Query Executor Service (Pool 1)" + description: "REST API provided by the Query Executor Service" diff --git a/docker/config/executor-pool2.yml b/docker/config/executor-pool2.yml new file mode 100755 index 00000000000..e643ce1f4dc --- /dev/null +++ b/docker/config/executor-pool2.yml @@ -0,0 +1,8 @@ +# This is where you set properties which are specific to pool2 +executor: + poolName: 'pool2' + +datawave: + swagger: + title: "Query Executor Service (Pool 2)" + description: "REST API provided by the Query Executor Service" diff --git a/docker/config/executor.yml b/docker/config/executor.yml new file mode 100755 index 00000000000..a82a46b1e55 --- /dev/null +++ b/docker/config/executor.yml @@ -0,0 +1,75 @@ +# This is where you set properties which are common to all executors +server: + # since the application names include the pools, lets override the context path to simply be executor + servlet.context-path: /executor + cdn-uri: /executor/ + +logging: + level: + root: INFO + datawave.query: DEBUG + datawave.microservice.query: DEBUG + datawave.microservice.query.executor: DEBUG + org.apache.kafka: ERROR + datawave.microservice.query.storage: WARN + +warehouse: + accumulo: + zookeepers: '${accumulo.zookeepers}' + instanceName: '${accumulo.instanceName}' + username: '${accumulo.username}' + password: '${accumulo.password}' + +datawave: + table: + cache: + zookeepers: '${accumulo.zookeepers}' + tableNames: + - 
'${warehouse.tables.metadata.name}' + poolName: '${datawave.connection.factory.defaultPool}' + reloadInterval: 360000 + evictionReaperIntervalInSeconds: 360 + numLocks: 3 + maxRetries: 3 + reload-crontab: '* * * * * ?' + connection: + factory: + pools: + "WAREHOUSE": + zookeepers: '${accumulo.zookeepers}' + instance: '${accumulo.instanceName}' + username: '${accumulo.username}' + password: '${accumulo.password}' + lowPriorityPoolSize: 40 + normalPriorityPoolSize: 40 + highPriorityPoolSize: 40 + adminPriorityPoolSize: 40 + "UUID": + zookeepers: '${accumulo.zookeepers}' + instance: '${accumulo.instanceName}' + username: '${accumulo.username}' + password: '${accumulo.password}' + lowPriorityPoolSize: 20 + normalPriorityPoolSize: 20 + highPriorityPoolSize: 20 + adminPriorityPoolSize: 20 + query: + executor: + pool: "${executor.poolName}" + availableResultsPageMultiplier: 2.5 + maxQueueSize: 400 + coreThreads: 10 + maxThreads: 40 + keepAliveMs: 600000 + queryStatusExpirationMs: 60000 + checkpointFlushMs: 1000 + checkpointFlushResults: 2 + orphanThresholdMs: 60000 + monitorTaskLease: 30 + monitorTaskLeaseUnit: SECONDS + monitor: + enabled: true + scheduler-crontab: '* * * * * ?' + logStatusPeriodMs: 600000 + logStatusWhenChangedMs: 300000 + queryMetricsUrlPrefix: https://localhost:8543/querymetric/v1/id/ diff --git a/docker/config/modification.yml b/docker/config/modification.yml new file mode 100755 index 00000000000..bfcd5b895f8 --- /dev/null +++ b/docker/config/modification.yml @@ -0,0 +1,70 @@ +# This file contains all of the configuration required to use the modification service +logging: + level: + root: INFO + datawave.modification: DEBUG + datawave.microservice.modification: DEBUG + datawave.microservice.modification.query: DEBUG + +datawave: + swagger: + title: "Modification (Mutable Metadata) Service" + description: "REST API provided by the Modification (Mutable Metadata) Service" + + modification: + query: + queryURI: https://query:8443/query/v1 + queryPool: pool1 + remoteQueryTimeout: 1 + remoteQueryTimeoutUnit: MINUTES + data: + tableName: ${warehouse.tables.metadata.name} + poolName: "default" + handlers: + authorizedRoles: + - "AuthorizedUser" + eventTableName: ${warehouse.tables.shard.name} + metadataTableName: ${warehouse.tables.metadata.name} + indexTableName: ${warehouse.tables.index.name} + reverseIndexTableName: ${warehouse.tables.reverseIndex.name} + securityMarkingExemptFields: + - "ExampleExemptField" + requiresAudit: false + indexOnlyMap: + "SomeEventField": "SomeIndexField1,SomeIndexField2" + indexOnlySuffixes: + - "ExampleSuffix" + contentFields: + - "ExampleContentField" + + table: + cache: + zookeepers: '${accumulo.zookeepers}' + tableNames: + - '${warehouse.tables.metadata.name}' + poolName: 'default' + reloadInterval: 360000 + evictionReaperIntervalInSeconds: 360 + numLocks: 3 + maxRetries: 3 + reload-crontab: '* * * * * ?' 
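+  # Dedicated 'default' Accumulo connection pool used by the modification service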
+ connection: + factory: + defaultPool: "default" + pools: + "default": + zookeepers: '${accumulo.zookeepers}' + instance: '${accumulo.instanceName}' + username: '${accumulo.username}' + password: '${accumulo.password}' + lowPriorityPoolSize: 40 + normalPriorityPoolSize: 40 + highPriorityPoolSize: 40 + adminPriorityPoolSize: 40 + + security: + util: + subjectDnPattern: "(?:^|,)\\s*OU\\s*=\\s*My Department\\s*(?:,|$)" + npeOuList: "EXAMPLE_SERVER_OU1,EXAMPLE_SERVER_OU2" + + diff --git a/docker/config/mrquery.yml b/docker/config/mrquery.yml new file mode 100755 index 00000000000..2e3a5736b29 --- /dev/null +++ b/docker/config/mrquery.yml @@ -0,0 +1,31 @@ +logging: + level: + root: INFO + org.springframework.cloud.bootstrap.config: DEBUG + datawave.microservice.query: DEBUG + +datawave: + table: + cache: + zookeepers: '${accumulo.zookeepers}' + tableNames: + - '${warehouse.tables.metadata.name}' + poolName: 'default' + reloadInterval: 360000 + evictionReaperIntervalInSeconds: 360 + numLocks: 3 + maxRetries: 3 + reload-crontab: '* * * * * ?' + connection: + factory: + defaultPool: "default" + pools: + "default": + zookeepers: '${accumulo.zookeepers}' + instance: '${accumulo.instanceName}' + username: '${accumulo.username}' + password: '${accumulo.password}' + lowPriorityPoolSize: 40 + normalPriorityPoolSize: 40 + highPriorityPoolSize: 40 + adminPriorityPoolSize: 40 \ No newline at end of file diff --git a/docker/config/query.yml b/docker/config/query.yml new file mode 100755 index 00000000000..f6490619910 --- /dev/null +++ b/docker/config/query.yml @@ -0,0 +1,74 @@ +spring: + lifecycle: + # Set the shutdown grace period + timeout-per-shutdown-phase: 5s + +management: + endpoint: + # Enable the shutdown endpoint for the query service + shutdown: + enabled: true + +# Query requires a client certificate +server: + ssl: + client-auth: NEED + # Enable graceful shutdown + shutdown: "graceful" + +logging: + level: + root: INFO + org.springframework.cloud.bootstrap.config: DEBUG + datawave.microservice.query: DEBUG + org.apache.kafka: ERROR + datawave.microservice.query.storage: WARN + +datawave: + swagger: + title: "Query Service" + description: "REST API provided by the Query Service" + query: + nextCall: + resultPollInterval: 500 + statusUpdateInterval: 500 + expiration: + callTimeout: 1 + callTimeoutUnit: HOURS + shortCircuitCheckTime: 60 + shortCircuitCheckTimeUnit: MINUTES + shortCircuitTimeout: 55 + shortCircuitTimeoutUnit: MINUTES + longRunningQueryTimeout: 24 + longRunningQueryTimeoutUnit: HOURS + monitor: + monitorInterval: 30 + monitorIntervalUnit: SECONDS + doc: + menu: + extras: '
  • Accumulo
  • ' + table: + cache: + enabled: false + web: + accumulo: + uri: 'https://localhost:9143/accumulo' + cache: + uri: 'https://localhost:8843/cache' + config: + uri: 'https://localhost:8888/configserver' + authorization: + uri: 'https://localhost:8343/authorization' + audit: + uri: 'https://localhost:9043/audit' + metrics: + uri: 'https://localhost:8543/querymetric' + dictionary: + uri: 'https://localhost:8643/dictionary' + executor: + uri: 'https://localhost:8743/executor' + +audit-client: + discovery: + enabled: false + uri: '${AUDIT_SERVER_URL:http://localhost:11111/audit}' diff --git a/docker/config/querymetric.yml b/docker/config/querymetric.yml new file mode 100755 index 00000000000..325380f6ecd --- /dev/null +++ b/docker/config/querymetric.yml @@ -0,0 +1,124 @@ +# Query Metrics requires a client certificate +server: + ssl: + client-auth: NEED + +spring: + cloud: + stream: + bindings: + queryMetricSource-out-0: + destination: queryMetricChannel + producer: + errorChannelEnabled: true + queryMetricSink-in-0: + destination: queryMetricChannel + group: queryMetricService + # NOTE: When defining your functions, be sure to include busConsumer, or else spring cloud bus will not work + function: + definition: queryMetricSource;queryMetricSink;busConsumer + +warehouse-cluster: + accumulo: + zookeepers: '${accumulo.zookeepers}' + instanceName: '${accumulo.instanceName}' + username: '${accumulo.username}' + password: '${accumulo.password}' + +logging: + level: + ROOT: DEBUG + datawave: + microservice.querymetric: DEBUG + iterators: error + query: error + ingest: error + security: error + +datawave: + swagger: + title: "Query Metric Service" + description: "REST API provided by the Query Metric Service" + query: + metric: + handler: + zookeepers: ${warehouse-cluster.accumulo.zookeepers} + instanceName: ${warehouse-cluster.accumulo.instanceName} + username: ${warehouse-cluster.accumulo.username} + password: ${warehouse-cluster.accumulo.password} + accumuloClientPoolSize: 16 + mapStoreWriteThreads: 1 + numShards: 10 + fieldLengthThreshold: 4049 + shardTableName: datawave.queryMetrics_s + indexTableName: datawave.queryMetrics_i + dateIndexTableName: datawave.queryMetrics_di + reverseIndexTableName: datawave.queryMetrics_r + metadataTableName: datawave.queryMetrics_m + metadataDefaultAuths: PUBLIC + recordWriterMaxMemory: 10 + recordWriterMaxLatency: 16 + recordWriterNumThreads: 4 + enableBloomFilter: false + queryVisibility: PUBLIC + defaultMetricVisibility: PUBLIC + defaultMetricMarkings: + columnVisibility: PUBLIC + baseMaps: "{}" + queryPool: "pool1" + timely: + enabled: false + confirmAckTimeoutMillis: 30000 + + # this should be consolidated into a metadata application profile + metadata: + all-metadata-auths: + - PRIVATE + - PUBLIC + type-substitutions: + "[datawave.data.type.DateType]": "datawave.data.type.RawDateType" + +hazelcast: + client.enabled: false + server: + enabled: true + xml-config: | + + + + 1 + + 600 + + 3600 + com.hazelcast.spi.merge.LatestUpdateMergePolicy + true + + + + datawave.microservice.querymetric.persistence.AccumuloMapStore$Factory + 1 + 1000 + + + + + 1 + + 600 + + 3600 + com.hazelcast.spi.merge.LatestUpdateMergePolicy + true + + + + datawave.microservice.querymetric.persistence.AccumuloMapLoader$Factory + + + + clusterName: '${spring.application.name}' diff --git a/docker/debug.yml.example b/docker/debug.yml.example new file mode 100644 index 00000000000..0ee12296de3 --- /dev/null +++ b/docker/debug.yml.example @@ -0,0 +1,5 @@ + +executor-pool1: + entrypoint: 
["java","-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005","-jar","app.jar"] + ports: + - "5005:5005" diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml new file mode 100644 index 00000000000..a44a40a1859 --- /dev/null +++ b/docker/docker-compose.yml @@ -0,0 +1,708 @@ +version: '2.2' +volumes: + quickstart_data: + hadoop_conf: + +services: + quickstart: + profiles: + - quickstart + # To run the wildfly webservice, change `--accumulo` to `--web` + command: ["datawave-bootstrap.sh", "--accumulo"] + image: datawave/quickstart-compose + environment: + - DW_CONTAINER_HOST=quickstart + - DW_DATAWAVE_WEB_JAVA_OPTS=-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:8787 -Duser.timezone=GMT -Dfile.encoding=UTF-8 -Djava.net.preferIPv4Stack=true + ports: + # resource manager web ui + - "8088:8088" + # resource manager + - "8032:8032" + # node manager web ui + - "8042:8042" + # namenode server + - "9000:9000" + # namenode web ui + - "9870:9870" + # datanode web ui + - "9864:9864" + # jobhistory web ui + - "8021:8021" + # accumulo monitor + - "9995:9995" + # web server + - "9443:8443" + # web server debug port + - "5011:8787" + extra_hosts: + - "${DW_HOSTNAME}:${DW_HOST_IP}" + - "${DW_HOST_FQDN}:${DW_HOST_IP}" + volumes: + - hadoop_conf:/opt/datawave/contrib/datawave-quickstart/hadoop/client/conf + - quickstart_data:/opt/datawave/contrib/datawave-quickstart/data + - ./logs:/logs + networks: + - demo + healthcheck: + test: ["CMD-SHELL", "! accumuloStatus | grep DW-WARN > /dev/null"] + + consul: + image: docker.io/hashicorp/consul:1.15.4 + hostname: localhost + environment: + - 'CONSUL_LOCAL_CONFIG={"log_level": "trace", "datacenter": "demo_dc", "disable_update_check": true, "enable_agent_tls_for_checks": true, "addresses": {"https": "0.0.0.0"}, "ports": {"https": 8501, "grpc_tls": 8503}, "tls": {"defaults": {"key_file": "/etc/pki/testServer.key", "cert_file": "/etc/pki/testServer.crt", "ca_file": "/etc/pki/testCA.pem", "verify_outgoing": true}, "internal_rpc": {"verify_server_hostname": false}}}' + - CONSUL_BIND_INTERFACE=eth0 + # defined as host:container + ports: + - "8400" + - "8500:8500" + - "8501:8501" + - "8503:8503" + - "53" + volumes: + - ${PKI_DIR:-./pki}:/etc/pki:ro + networks: + - demo + + rabbitmq: + image: docker.io/rabbitmq:3.12.4 + volumes: + - ${RABBITMQ_CONFIG_DIR:-./rabbitmq-config}:/etc/rabbitmq + - ./logs:/logs + environment: + - TCP_PORTS=15672, 5672 + - RABBITMQ_ERLANG_COOKIE="mycookie" + ports: + - "15672:15672" + networks: + - demo + depends_on: + consul: + condition: service_started + + # When auto.create.topics.enable is true, this causes deleted topics to be recreated at random. So, leave it disabled. 
+ kafka: + profiles: + - kafka + image: docker.io/bitnami/kafka:3.2 + ports: + - "9094:9094" + networks: + - demo + environment: + - KAFKA_CFG_NODE_ID=1 + - KAFKA_CFG_PROCESS_ROLES=controller,broker + - ALLOW_PLAINTEXT_LISTENER=yes + - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CLIENT:PLAINTEXT,CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT + - KAFKA_CFG_LISTENERS=CLIENT://:9092,CONTROLLER://:9093,EXTERNAL://:9094 + - KAFKA_CFG_ADVERTISED_LISTENERS=CLIENT://kafka:9092,EXTERNAL://${DW_HOSTNAME}:9094 + - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=1@kafka:9093 + - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER + - KAFKA_INTER_BROKER_LISTENER_NAME=CLIENT + - KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=false + - KAFKA_CFG_DELETE_TOPICS_ENABLE=true + + kafdrop: + profiles: + - kafka + image: docker.io/obsidiandynamics/kafdrop + ports: + - "8999:9000" + networks: + - demo + environment: + - "KAFKA_BROKERCONNECT=${DW_HOSTNAME}:9094" + # This mapping is required to enable kafdrop to communicate with + # the external, host-bound port for kafka + extra_hosts: + - "${DW_HOSTNAME}:${DW_HOST_IP}" + - "${DW_HOST_FQDN}:${DW_HOST_IP}" + depends_on: + kafka: + condition: service_started + + configuration: + entrypoint: [ "java","-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5009","-jar","app.jar" ] + image: datawave/config-service + command: + - --spring.output.ansi.enabled=ALWAYS + - --spring.profiles.active=consul,native,open_actuator + - --spring.cloud.consul.host=consul + - --spring.cloud.config.server.native.searchLocations=file:///microservice-config + environment: + - 'KEYSTORE_LOCATION=file:///etc/pki/testServer.p12' + - KEYSTORE_PASSWORD=ChangeIt + - KEY_ALIAS=certificate + ports: + - "8888:8888" + - "5009:5009" + volumes: + - ${CONFIG_DIR:-./config}:/microservice-config:ro + - ${PKI_DIR:-./pki}:/etc/pki:ro + - ./logs:/logs + networks: + - demo + depends_on: + rabbitmq: + condition: service_started + + cache: + image: datawave/hazelcast-service + scale: 1 + command: + - --spring.profiles.active=consul,compose,remoteauth + - --spring.output.ansi.enabled=ALWAYS + - --spring.cloud.consul.host=consul + - --spring.cloud.consul.discovery.instance-id=$${spring.application.name}:$${random.value} + ports: + - "5701-5703" + - "8080" + - "8843:8443" + volumes: + - ${PKI_DIR:-./pki}:/etc/pki:ro + - ./logs:/logs + networks: + - demo + healthcheck: + test: curl -f http://localhost:8080/cache/mgmt/health + interval: 10s + timeout: 1s + start_period: 45s + retries: 3 + depends_on: + configuration: + condition: service_started + + query-cache: + profiles: + - querycache + # To enable an additional Hazelcast cluster, enable the "querycache" profile + # Set an environment variable: QUERY_CACHE=query-cache. This will force the query and executor service to use a separate "query-cache" Hazelcast cluster. 
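+ # For example (profile and variable as described above):
+ #
+ #   QUERY_CACHE=query-cache docker compose --profile querycache up -d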
+ image: datawave/hazelcast-service
+ scale: 1
+ command:
+ - --spring.application.name=query-cache
+ - --hazelcast.client.cluster-members
+ - --spring.profiles.active=consul,compose,remoteauth
+ - --spring.output.ansi.enabled=ALWAYS
+ - --spring.cloud.consul.host=consul
+ - --spring.cloud.consul.discovery.instance-id=$${spring.application.name}:$${random.value}
+ ports:
+ - "5701-5703"
+ - "8080"
+ - "8844:8443"
+ volumes:
+ - ${PKI_DIR:-./pki}:/etc/pki:ro
+ - ./logs:/logs
+ networks:
+ - demo
+ healthcheck:
+ test: curl -f http://localhost:8080/query-cache/mgmt/health
+ interval: 10s
+ timeout: 1s
+ start_period: 45s
+ retries: 3
+ depends_on:
+ configuration:
+ condition: service_started
+
+ query-rabbitmq:
+ profiles:
+ - queryrabbit
+ # To enable an additional rabbit cluster, enable the "queryrabbit" profile
+ # Set an environment variable: USE_DEDICATED_INSTANCE=true. This will force the query and executor service to use a separate "query-rabbitmq" rabbit cluster.
+ image: docker.io/rabbitmq:3.12.4
+ volumes:
+ - ${RABBITMQ_CONFIG_DIR:-./rabbitmq-query-config}:/etc/rabbitmq
+ - ./logs:/logs
+ environment:
+ - TCP_PORTS=15672,5672
+ - RABBITMQ_ERLANG_COOKIE="someothercookie"
+ ports:
+ - "15673:15672"
+ networks:
+ - demo
+ depends_on:
+ consul:
+ condition: service_started
+
+ # When auto.create.topics.enable is true, this causes deleted topics to be recreated at random. So, leave it disabled.
+ query-kafka:
+ profiles:
+ - querykafka
+ # To enable an additional kafka cluster, enable the "querykafka" profile
+ # Set an environment variable: USE_DEDICATED_INSTANCE=true. This will force the query and executor service to use a separate "query-kafka" kafka cluster.
+ image: docker.io/bitnami/kafka:3.2
+ ports:
+ - "9095:9095"
+ networks:
+ - demo
+ environment:
+ - KAFKA_CFG_NODE_ID=1
+ - KAFKA_CFG_PROCESS_ROLES=controller,broker
+ - ALLOW_PLAINTEXT_LISTENER=yes
+ - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CLIENT:PLAINTEXT,CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT
+ - KAFKA_CFG_LISTENERS=CLIENT://:9092,CONTROLLER://:9093,EXTERNAL://:9095
+ - KAFKA_CFG_ADVERTISED_LISTENERS=CLIENT://query-kafka:9092,EXTERNAL://${DW_HOSTNAME}:9095
+ - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=1@query-kafka:9093
+ - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
+ - KAFKA_INTER_BROKER_LISTENER_NAME=CLIENT
+ - KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=false
+ - KAFKA_CFG_DELETE_TOPICS_ENABLE=true
+
+ authorization:
+ entrypoint: [ "java","-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5008","-jar","app.jar" ]
+ image: datawave/authorization-service
+ command:
+ - --spring.output.ansi.enabled=ALWAYS
+ - --spring.profiles.active=consul,mock,compose,federation
+ - --spring.cloud.consul.host=consul
+ - --spring.cloud.consul.discovery.instance-id=$${spring.application.name}:$${random.value}
+ ports:
+ - "8080"
+ - "8343:8443"
+ - "5008:5008"
+ volumes:
+ - ${PKI_DIR:-./pki}:/etc/pki:ro
+ - ./logs:/logs
+ networks:
+ - demo
+ healthcheck:
+ test: curl -f http://localhost:8080/authorization/mgmt/health
+ interval: 10s
+ timeout: 1s
+ start_period: 20s
+ retries: 3
+ depends_on:
+ cache:
+ condition: service_healthy
+
+ accumulo:
+ profiles:
+ - accumulo
+ - full
+ image: datawave/accumulo-service
+ command:
+ - --spring.output.ansi.enabled=ALWAYS
+ - --spring.profiles.active=consul,compose,remoteauth
+ - --spring.cloud.consul.host=consul
+ - --spring.cloud.consul.discovery.instance-id=$${spring.application.name}:$${random.value}
+ environment:
+ - AUDIT_SERVER_URL=http://audit:8080/audit
+ - 
ZOOKEEPER_HOST=${DW_ZOOKEEPER_HOST} + # This mapping is required to enable the metrics service to communicate + # with host-deployed services like hadoop, zookeeper, and accumulo. + # These values are set locally in .env via bootstrap.sh + extra_hosts: + - "${DW_HOSTNAME}:${DW_HOST_IP}" + - "${DW_HOST_FQDN}:${DW_HOST_IP}" + ports: + - "9143:8443" + volumes: + - ${PKI_DIR:-./pki}:/etc/pki:ro + - ./logs:/logs + networks: + - demo + healthcheck: + test: curl -f http://localhost:8080/accumulo/mgmt/health + interval: 10s + timeout: 1s + start_period: 45s + retries: 3 + depends_on: + authorization: + condition: service_healthy + + audit: + image: datawave/audit-service + command: + - --spring.output.ansi.enabled=ALWAYS + - --spring.profiles.active=consul,compose,remoteauth + - --spring.cloud.consul.host=consul + - --spring.cloud.consul.discovery.instance-id=$${spring.application.name}:$${random.value} + environment: + - ZOOKEEPER_HOST=${DW_ZOOKEEPER_HOST} + # This mapping is required to enable the audit service to communicate + # with host-deployed services like hadoop, zookeeper, and accumulo. + # These values are set locally in .env via bootstrap.sh + extra_hosts: + - "${DW_HOSTNAME}:${DW_HOST_IP}" + - "${DW_HOST_FQDN}:${DW_HOST_IP}" + ports: + - "8080" + - "9043:8443" + volumes: + - ${PKI_DIR:-./pki}:/etc/pki:ro + - ./logs:/logs + networks: + - demo + healthcheck: + test: curl -f http://localhost:8080/audit/mgmt/health + interval: 10s + timeout: 1s + start_period: 30s + retries: 3 + depends_on: + authorization: + condition: service_healthy + + metrics: + entrypoint: ["java","-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5007","-jar","app.jar"] + image: datawave/query-metric-service + command: + - --spring.output.ansi.enabled=ALWAYS + - --spring.profiles.active=consul,compose,remoteauth + - --spring.cloud.consul.host=consul + - --spring.cloud.consul.discovery.instance-id=$${spring.application.name}:$${random.value} + environment: + - ZOOKEEPER_HOST=${DW_ZOOKEEPER_HOST} + # This mapping is required to enable the metrics service to communicate + # with host-deployed services like hadoop, zookeeper, and accumulo. + # These values are set locally in .env via bootstrap.sh + extra_hosts: + - "${DW_HOSTNAME}:${DW_HOST_IP}" + - "${DW_HOST_FQDN}:${DW_HOST_IP}" + ports: + - "8180:8080" + - "8543:8443" + - "5007:5007" + volumes: + - ${PKI_DIR:-./pki}:/etc/pki:ro + - ./logs:/logs + networks: + - demo + healthcheck: + test: curl -f http://localhost:8080/querymetric/mgmt/health + interval: 10s + timeout: 1s + start_period: 45s + retries: 3 + depends_on: + authorization: + condition: service_healthy + + dictionary: + profiles: + - dictionary + - full + image: datawave/dictionary-service + command: + - --spring.output.ansi.enabled=ALWAYS + - --spring.profiles.active=consul,compose,remoteauth + - --spring.cloud.consul.host=consul + - --spring.cloud.consul.discovery.instance-id=$${spring.application.name}:$${random.value} + environment: + - ZOOKEEPER_HOST=${DW_ZOOKEEPER_HOST} + # This mapping is required to enable the metrics service to communicate + # with host-deployed services like hadoop, zookeeper, and accumulo. 
+ # These values are set locally in .env via bootstrap.sh + extra_hosts: + - "${DW_HOSTNAME}:${DW_HOST_IP}" + - "${DW_HOST_FQDN}:${DW_HOST_IP}" + ports: + - "8280:8080" + - "8643:8443" + volumes: + - ${PKI_DIR:-./pki}:/etc/pki:ro + - ./logs:/logs + networks: + - demo + healthcheck: + test: curl -f http://localhost:8080/dictionary/mgmt/health + depends_on: + authorization: + condition: service_healthy + + file-provider: + profiles: + - file-provider + - full + image: datawave/file-provider-service + command: + - --spring.output.ansi.enabled=ALWAYS + - --spring.profiles.active=consul,compose,remoteauth + - --spring.cloud.consul.host=consul + - --spring.cloud.consul.discovery.instance-id=$${spring.application.name}:$${random.value} + ports: + - "8580:8080" + - "8943:8443" + volumes: + - ${PKI_DIR:-./pki}:/etc/pki:ro + - ./logs:/logs + networks: + - demo + + # If you want to test cached results, enable the cachedresults profile + mysql: + profiles: + - cachedresults + image: docker.io/mysql:8.0.32 + environment: + - MYSQL_RANDOM_ROOT_PASSWORD=true + - MYSQL_DATABASE=cachedresults + - MYSQL_USER=datawave + - MYSQL_PASSWORD=secret + networks: + - demo + healthcheck: + test: ["CMD", "mysqladmin", "ping", "-h", "localhost"] + timeout: 20s + retries: 10 + + # If you want to test cached results, set the CACHED_RESULTS environment variable to 'true' + query: + entrypoint: ["java","-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005","-jar","app.jar"] + image: datawave/query-service + command: + - --spring.output.ansi.enabled=ALWAYS + - --spring.profiles.active=consul,compose,remoteauth,querymessaging,metricssource,query,mrquery,cachedresults,federation + - --spring.cloud.consul.host=consul + - --spring.cloud.consul.discovery.instance-id=$${spring.application.name}:$${random.value} + environment: + - AUDIT_SERVER_URL=http://audit:8080/audit + - HADOOP_HOST=${DW_HADOOP_HOST} + - HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-/etc/hadoop/conf} + - "BACKEND=${BACKEND:-rabbitmq}" + - CACHED_RESULTS=${CACHED_RESULTS:-false} + - QUERY_CACHE=${QUERY_CACHE:-cache} + - USE_DEDICATED_INSTANCE=${USE_DEDICATED_INSTANCE:-false} + ports: + - "8080:8080" + - "8443:8443" + - "5005:5005" + volumes: + - ${PKI_DIR:-./pki}:/etc/pki:ro + - ./logs:/logs + - ${HADOOP_CONF_DIR:-hadoop_conf}:${HADOOP_CONF_DIR:-/etc/hadoop/conf}:ro + networks: + - demo + healthcheck: + test: curl -f http://localhost:8080/query/mgmt/health + interval: 10s + timeout: 1s + start_period: 30s + retries: 3 + depends_on: + audit: + condition: service_healthy + authorization: + condition: service_healthy + metrics: + condition: service_healthy + executor-pool1: + condition: service_started + + mapreduce-query: + profiles: + - full + entrypoint: ["java","-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005","-jar","app.jar"] + image: datawave/mapreduce-query-service + command: + - --spring.output.ansi.enabled=ALWAYS + - --spring.profiles.active=consul,compose,remoteauth,query,mrquery,federation + - --spring.cloud.consul.host=consul + - --spring.cloud.consul.discovery.instance-id=$${spring.application.name}:$${random.value} + environment: + - ZOOKEEPER_HOST=${DW_ZOOKEEPER_HOST} + - HADOOP_HOST=${DW_HADOOP_HOST} + - HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-/etc/hadoop/conf} + ports: + - "50005:5005" + volumes: + - ${PKI_DIR:-./pki}:/etc/pki:ro + - ./logs:/logs + - ${HADOOP_CONF_DIR:-hadoop_conf}:${HADOOP_CONF_DIR:-/etc/hadoop/conf}:ro + networks: + - demo + healthcheck: + test: curl -f http://localhost:8080/mrquery/mgmt/health + interval: 
10s + timeout: 1s + start_period: 30s + retries: 3 + depends_on: + audit: + condition: service_healthy + authorization: + condition: service_healthy + metrics: + condition: service_healthy + executor-pool1: + condition: service_started + + executor-pool1: + entrypoint: ["java","-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5006","-jar","app.jar"] + image: datawave/query-executor-service + command: + - --spring.application.name=executor-pool1 + - --spring.cloud.config.name=executor + - --spring.output.ansi.enabled=ALWAYS + - --spring.profiles.active=consul,compose,remoteauth,querymessaging,metricssource,query,pool1,federation + - --spring.cloud.consul.host=consul + - --spring.cloud.consul.discovery.instance-id=$${spring.application.name}:$${random.value} + environment: + - ZOOKEEPER_HOST=${DW_ZOOKEEPER_HOST} + - HADOOP_HOST=${DW_HADOOP_HOST} + - BACKEND=${BACKEND:-rabbitmq} + - HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-/etc/hadoop/conf} + - QUERY_CACHE=${QUERY_CACHE:-cache} + - USE_DEDICATED_INSTANCE=${USE_DEDICATED_INSTANCE:-false} + # This mapping is required to enable the metrics service to communicate + # with host-deployed services like hadoop, zookeeper, and accumulo. + # These values are set locally in .env via bootstrap.sh + extra_hosts: + - "${DW_HOSTNAME}:${DW_HOST_IP}" + - "${DW_HOST_FQDN}:${DW_HOST_IP}" + ports: + - "8380:8080" + - "8743:8443" + - "5006:5006" + volumes: + - ${PKI_DIR:-./pki}:/etc/pki:ro + - ./logs/pool1:/logs + - ${HADOOP_CONF_DIR:-hadoop_conf}:${HADOOP_CONF_DIR:-/etc/hadoop/conf}:ro + networks: + - demo + healthcheck: + test: curl -f http://localhost:8080/executor/mgmt/health + interval: 10s + timeout: 1s + start_period: 30s + retries: 3 + depends_on: + rabbitmq: + condition: service_started + authorization: + condition: service_healthy + metrics: + condition: service_healthy + + executor-pool2: + profiles: + - pool2 + - full + image: datawave/query-executor-service + command: + - --spring.application.name=executor-pool2 + - --spring.cloud.config.name=executor + - --spring.output.ansi.enabled=ALWAYS + - --spring.profiles.active=consul,compose,remoteauth,querymessaging,metricssource,query,pool2,federation + - --spring.cloud.consul.host=consul + - --spring.cloud.consul.discovery.instance-id=$${spring.application.name}:$${random.value} + environment: + - ZOOKEEPER_HOST=${DW_ZOOKEEPER_HOST} + - HADOOP_HOST=${DW_HADOOP_HOST} + - BACKEND=${BACKEND:-rabbitmq} + - HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-/etc/hadoop/conf} + - QUERY_CACHE=${QUERY_CACHE:-cache} + - USE_DEDICATED_INSTANCE=${USE_DEDICATED_INSTANCE:-false} + # This mapping is required to enable the metrics service to communicate + # with host-deployed services like hadoop, zookeeper, and accumulo. 
+ # These values are set locally in .env via bootstrap.sh + extra_hosts: + - "${DW_HOSTNAME}:${DW_HOST_IP}" + - "${DW_HOST_FQDN}:${DW_HOST_IP}" + ports: + - "8480:8080" + - "8243:8443" + volumes: + - ${PKI_DIR:-./pki}:/etc/pki:ro + - ./logs/pool2:/logs + - ${HADOOP_CONF_DIR:-hadoop_conf}:${HADOOP_CONF_DIR:-/etc/hadoop/conf}:ro + networks: + - demo + healthcheck: + test: curl -f http://localhost:8080/executor/mgmt/health + interval: 10s + timeout: 1s + start_period: 30s + retries: 3 + depends_on: + rabbitmq: + condition: service_started + authorization: + condition: service_healthy + metrics: + condition: service_healthy + + modification: + entrypoint: ["java","-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5010","-jar","app.jar"] + profiles: + - modification + - full + image: datawave/modification-service + command: + - --spring.output.ansi.enabled=ALWAYS + - --spring.profiles.active=consul,compose,remoteauth,query + - --spring.cloud.consul.host=consul + - --spring.cloud.consul.discovery.instance-id=$${spring.application.name}:$${random.value} + environment: + - ZOOKEEPER_HOST=${DW_ZOOKEEPER_HOST} + # This mapping is required to enable the metrics service to communicate + # with host-deployed services like hadoop, zookeeper, and accumulo. + # These values are set locally in .env via bootstrap.sh + extra_hosts: + - "${DW_HOSTNAME}:${DW_HOST_IP}" + - "${DW_HOST_FQDN}:${DW_HOST_IP}" + ports: + - "8680:8080" + - "9343:8443" + - "5010:5010" + volumes: + - ${PKI_DIR:-./pki}:/etc/pki:ro + - ./logs:/logs + networks: + - demo + healthcheck: + test: curl -f http://localhost:8080/dictionary/mgmt/health + depends_on: + authorization: + condition: service_healthy + + # If you use the management center, you can connect to the hazelcast cache as follows: + # In your browser connect to https://localhost:9243/ + # Enable 'dev' mode + # Click 'Add Cluster Config' + # Enter the following for the cache service: + # - Cluster Name: cache + # - Cluster Config: Enabled + # - Member Addresses: cache + # Enter the following for the query metric service: + # - Cluster Name: metrics + # - Cluster Config: Enabled + # - Member Addresses: metrics + # Use the console to view the cache contents + # - Select the 'cache' cluster + # - Select 'Console' under the 'CLUSTER' navigation entry + # - Run the following commands to list all entries in the 'datawaveUsers' map: + # - ns datawaveUsers + # - m.entries + management-center: + profiles: + - management + - full + image: docker.io/hazelcast/management-center:5.1.2 + environment: + - |- + JAVA_OPTS= + -Dhazelcast.mc.healthCheck.enable=true + -Dhazelcast.mc.tls.enabled=true + -Dhazelcast.mc.tls.keyStore=/etc/pki/testServer.p12 + -Dhazelcast.mc.tls.keyStorePassword=ChangeIt + -Dhazelcast.mc.tls.trustStore=/etc/pki/testCA.p12 + -Dhazelcast.mc.tls.trustStorePassword=ChangeIt + ports: + - "8081" + - "9243:8443" + volumes: + - ${PKI_DIR:-./pki}:/etc/pki:ro + networks: + - demo + healthcheck: + test: wget -q http://localhost:8081/health -O /dev/null || exit 1 + depends_on: + cache: + condition: service_healthy + +networks: + demo: diff --git a/docker/pki/testCA.p12 b/docker/pki/testCA.p12 new file mode 100755 index 00000000000..27994edb4e9 Binary files /dev/null and b/docker/pki/testCA.p12 differ diff --git a/docker/pki/testCA.pem b/docker/pki/testCA.pem new file mode 100755 index 00000000000..ab005a90a09 --- /dev/null +++ b/docker/pki/testCA.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDTjCCAjagAwIBAgIURUj8l+ms/i9jeabIBBi19KxEhAgwDQYJKoZIhvcNAQEL 
+BQAwPjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDEV4YW1wbGUgQ29ycDEYMBYGA1UE +AxMPRVhBTVBMRSBDT1JQIENBMCAXDTIzMDIwMTIzMTUwMFoYDzIxMjMwMTA4MjMx +NTAwWjA+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRXhhbXBsZSBDb3JwMRgwFgYD +VQQDEw9FWEFNUExFIENPUlAgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQDVBr2XF20SKmGXfvp2BUY44BF1fm3Qw83GkC66qIHgpgQxzJ8u44P34vV1 +DYTj4XzrBpx23J0nCMEiAW8YW9MxZJPEokZafRNKFQAngVdgmAW9fHemHRQfaYwe +ySVz2UZ+z28DJ3C0QshvqXskrYstwLVs4Ss2uoX+XpeFbW9IVVg4aW+GX3LHPNTk +7Hji13Capp6BCiS61AGQAiMWNBW2aRgHVLOb7RDQug2Bka/dcJ44XxJDh26C6kV1 +pNEZm0ToqWAiVfvyzionPec5im6NKX9kStZc+C/iikfx5vdmrvGIF5zxy6phdhJ7 +AGd6cF6UBkkYm/SsIP3WTXpBek5LAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSPLNlCmmp9PH+crzCx9TNp5BoCajAN +BgkqhkiG9w0BAQsFAAOCAQEAkjLnZKqLMjbu5Hy8YOVoKaEKahP+t/tNYZW/NtrC +xSxKmGLz5kVCkjpLgCykkcpu4h2wZW4MGZJ4nc87+tN22mJOg1WnqEpBzRsEPajk +7duVjdU2FDCarERj53hxF41l/u8BxvMGbFnrAijv5SzspeLi+VCGF3wu53Bi6UHi +7WO1dIhZLMHepqgtwbDDudxwlKbG/JWjJ7OspCNqOpktpY8BFUAQ42ctYZcOHy2w +kdZDcK5IhRztkBH2aSWOhudYh/CF7BOu2nSUdtR5NwhXnsStmSbud7ahw3W5Jc9/ +IVAFXf2brkrfIzwrY5kmjnEPJ8mjUyLepwSlOlMTBrehzQ== +-----END CERTIFICATE----- diff --git a/docker/pki/testKeystore.jks b/docker/pki/testKeystore.jks new file mode 100755 index 00000000000..f8087af8bd4 Binary files /dev/null and b/docker/pki/testKeystore.jks differ diff --git a/docker/pki/testServer.crt b/docker/pki/testServer.crt new file mode 100755 index 00000000000..5bdf5533840 --- /dev/null +++ b/docker/pki/testServer.crt @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIIDwjCCAqqgAwIBAgIUe6yBgkY1m/oXuPXA9j/c/7+w16UwDQYJKoZIhvcNAQEL +BQAwPjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDEV4YW1wbGUgQ29ycDEYMBYGA1UE +AxMPRVhBTVBMRSBDT1JQIENBMCAXDTIzMDIwMTIzMTUwMFoYDzIxMjMwMTA4MjMx +NTAwWjBXMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRXhhbXBsZSBDb3JwMRAwDgYD +VQQLEwdTZXJ2ZXJzMR8wHQYDVQQDExZ0ZXN0c2VydmVyLmV4YW1wbGUuY29tMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmb1pEWg9a6ZChQULazK75ppc +XScU5Ap29L70/5zcaOcbaBwqKhpKhdZCoZ1TD+Sdkk9kp7IlabCQjM2vVjknb6NY +msl3PbQFNva0EkH6s5cvpX4WhQDsWK77/4eztHT+0fpaG1YoKQn+Gy3VuW0CDyR6 +GzGKifnI/aNLevgUBebbiiTPLEdGlo0jp3SXlZfoDZdbvnhBWI8Xc+aDekNlrXxW +X2TVnc02Fi914PECYJzkMpmxnBRPAcljU24WSqaUVgRoSbafkm4OXDw0xtvxnIrJ +s3iO0uZfRf7+OxAlqlvh4ZJ9/PufPMEKvmr6zea05KkFjBotKMip5Md43H3lQQID +AQABo4GcMIGZMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYI +KwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUDWwMrl9/uc1wplERWxas +bDROB4MwHwYDVR0jBBgwFoAUjyzZQppqfTx/nK8wsfUzaeQaAmowGgYDVR0RBBMw +EYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQBfG7StgQf3WNk/ +EnaYuurwvaOOpcOV5NDcZ6tmgaXUrISPVDpyIWfsDGetNEXc+4gpefLQtGF2wuiW +odiLc4H9JwQ/yEkVYwzrFViPdZKVvxNU2EjiwO/e/3sl+SxxkTDy6o838uTXX+X5 +Q8DabmL5L8ukcnqUHVOqW38oA1vRNBKiJX58JzOr8ezX8crZmqU9h8cNwG/V+ZXE +5U7+hSRzLm7gRwtixj8ZsTDUiGJxOdbyUKImPClt1Se9TizB2Q/xOPTdmYB1qiiB ++XVKkGwQPgIbHckLsZWhBor2u5cgdrBGW09Q+H88maagwq5sBHNMs33vkkgbUDpB +6WSEBJMi +-----END CERTIFICATE----- diff --git a/docker/pki/testServer.key b/docker/pki/testServer.key new file mode 100755 index 00000000000..88c4fbe86a0 --- /dev/null +++ b/docker/pki/testServer.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAmb1pEWg9a6ZChQULazK75ppcXScU5Ap29L70/5zcaOcbaBwq +KhpKhdZCoZ1TD+Sdkk9kp7IlabCQjM2vVjknb6NYmsl3PbQFNva0EkH6s5cvpX4W +hQDsWK77/4eztHT+0fpaG1YoKQn+Gy3VuW0CDyR6GzGKifnI/aNLevgUBebbiiTP +LEdGlo0jp3SXlZfoDZdbvnhBWI8Xc+aDekNlrXxWX2TVnc02Fi914PECYJzkMpmx +nBRPAcljU24WSqaUVgRoSbafkm4OXDw0xtvxnIrJs3iO0uZfRf7+OxAlqlvh4ZJ9 +/PufPMEKvmr6zea05KkFjBotKMip5Md43H3lQQIDAQABAoIBAGRBwuzZH4S+DY8V 
+dRbZJPQDIcyVmegEcwK0LK0srAe0/ohsJYsDTintSdJ++BXbLm6lW6yIXiGIRRut +XnRKHU+OJE/8pcgVR3lKdK+bW1QWebngeesOaRKEOk2vPx+lVC+2pWBCVTo+C/nJ +cCDV14vsa7ykGAZWYB4XiTeN4/DcUvZQq1XJufK8194tvUuxKwX+0ag2m4A+o9pu +63oZ0quT7NsMcFfy1zJeC8ZcGtI8/y1fLMzTGVYjzlgKjgsuzWW8AgmgnylVAMJj +RgZBN6eCVT3izS8wgWPRtKx1ZhSSysj5wgez7QmXhU7FPvDhx1qMTBbGjjvT6fBb +Qf6/D2ECgYEAyNDsxKN7f0Cr28AHc/RM3tC0L7Lc2SInU1m2DE8R9IOwyLft6ddX +etRirX3rmFEz5S6Hbn3b/wWj/BvbCDc8YrAd7h/+/OzPX8ItTszu8zDGWQlWu0Nb +2ZOrWtamNz3pHGDD6owSYcT9V90u8fPpq1ub06v5fGUVRXi7xJIem6UCgYEAw/zA +kSaKup83dg6E8k/ZuajviZc7Zrvfk76lToO6tDTu1O3c8jG1oDLzBaBAYnZ8pZVN +axI9mDqODqyNr/yZkj1FUvFV7AZJBlKiJ1eBf4NFqd4c65GRTKhy2ugMnboJkTZD +qYLq0SjosAC9D4L5PPmziGyjDOyQRLWemb4tIG0CgYA+Eua9OdOY2ELHYKptG10P +mu+3ttAxhi8lwptVXrOsI3Dn+oLeP6RY4YHHIx9KK/+gnita6iiwweuYqHuiJIDT ++KCn1NgbEpBfh0Q+EXbjUXZeW1al+AJSVSUopSUn+kkubnLg839THXM6T7KNM9pN +D7oxSf2KiCx1Fwbjkk9HyQKBgGQHV519x2aSprLpnC80VBKvR3q5Jkc78lOtBR/6 +mcsqDQ++S3uNpX6bInAnp1TZ1qqIdlgEmk1c7W5N4Y8F5Le/7+byaAEsAA9rE3ny +7pRPQrn9NFODG169xsk1kGLcNc/Ym1YkDIcuUvypk9Ub6uuVIm2pzNpmzwdSjx02 +9D9NAoGBAIInidWF2GBz4wAVie/0vd2f/HiWNJLlHtQ0RWijpX3o/EjghRSGvdCF +tpgJ7o4eeR+DdpcMOLPzfhSEUKEKQNR63IfbWEYl8qLkHOu3RZS8ZgjlSSH99WVL +eyOB3D4bH6+H7giV1DwbNY2g5sELJPdFVq12k8Eo5j/shzIj32gN +-----END RSA PRIVATE KEY----- diff --git a/docker/pki/testServer.p12 b/docker/pki/testServer.p12 new file mode 100755 index 00000000000..a241e3f712b Binary files /dev/null and b/docker/pki/testServer.p12 differ diff --git a/docker/pki/testUser.p12 b/docker/pki/testUser.p12 new file mode 100755 index 00000000000..a5ac4fe1423 Binary files /dev/null and b/docker/pki/testUser.p12 differ diff --git a/docker/rabbitmq-config/enabled_plugins b/docker/rabbitmq-config/enabled_plugins new file mode 100755 index 00000000000..f863eb4644c --- /dev/null +++ b/docker/rabbitmq-config/enabled_plugins @@ -0,0 +1 @@ +[rabbitmq_management,rabbitmq_peer_discovery_consul]. \ No newline at end of file diff --git a/docker/rabbitmq-config/rabbitmq.conf b/docker/rabbitmq-config/rabbitmq.conf new file mode 100755 index 00000000000..fb31f282405 --- /dev/null +++ b/docker/rabbitmq-config/rabbitmq.conf @@ -0,0 +1,16 @@ +cluster_formation.peer_discovery_backend = rabbit_peer_discovery_consul + +# Consul host (hostname or IP address). Default value is localhost +cluster_formation.consul.host = consul +# do compute service address +cluster_formation.consul.svc_addr_auto = true +# compute service address using node name +cluster_formation.consul.svc_addr_use_nodename = true +# health check interval (node TTL) in seconds +cluster_formation.consul.svc_ttl = 30 +# how soon should nodes that fail their health checks be unregistered by Consul? +# this value is in seconds and must not be lower than 60 (a Consul requirement) +cluster_formation.consul.deregister_after = 90 +cluster_partition_handling = autoheal +# Enable the guest user +loopback_users.guest = false \ No newline at end of file diff --git a/docker/rabbitmq-query-config/enabled_plugins b/docker/rabbitmq-query-config/enabled_plugins new file mode 100755 index 00000000000..90fdaa378e5 --- /dev/null +++ b/docker/rabbitmq-query-config/enabled_plugins @@ -0,0 +1 @@ +[rabbitmq_management]. 
\ No newline at end of file diff --git a/docker/rabbitmq-query-config/rabbitmq.conf b/docker/rabbitmq-query-config/rabbitmq.conf new file mode 100755 index 00000000000..27f4f5b8fa3 --- /dev/null +++ b/docker/rabbitmq-query-config/rabbitmq.conf @@ -0,0 +1,3 @@ +cluster_partition_handling = autoheal +# Enable the guest user +loopback_users.guest = false \ No newline at end of file diff --git a/docker/restart.sh b/docker/restart.sh new file mode 100755 index 00000000000..c50175aeb91 --- /dev/null +++ b/docker/restart.sh @@ -0,0 +1,7 @@ +#!/bin/sh +services=$@ +for service in $services; do + docker compose stop $service +done +docker compose rm -f +docker compose up -d diff --git a/docker/scripts/batchLookup.sh b/docker/scripts/batchLookup.sh new file mode 100755 index 00000000000..1e31e539eee --- /dev/null +++ b/docker/scripts/batchLookup.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +source ${SCRIPT_DIR}/common/batchLookup.sh + +# QUERY PARAMETERS +BEGIN='19660908 000000.000' +END='20161002 235959.999' +COLUMN_VISIBILITY='PUBLIC' +UUID_PAIRS="PAGE_TITLE:anarchism OR PAGE_TITLE:accessiblecomputing" +AUTHS='PUBLIC,PRIVATE,BAR,FOO' +QUERY_NAME='Developer Test Lookup UUID Query' + +runBatchLookup diff --git a/docker/scripts/batchLookupContent.sh b/docker/scripts/batchLookupContent.sh new file mode 100755 index 00000000000..5f4e6266c5a --- /dev/null +++ b/docker/scripts/batchLookupContent.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +source ${SCRIPT_DIR}/common/batchLookupContent.sh + +# QUERY PARAMETERS +BEGIN='19660908 000000.000' +END='20161002 235959.999' +COLUMN_VISIBILITY='PUBLIC' +UUID_PAIRS="PAGE_TITLE:anarchism OR PAGE_TITLE:accessiblecomputing" +AUTHS='PUBLIC,PRIVATE,BAR,FOO' +QUERY_NAME='Developer Test Lookup UUID Query' + +runBatchLookupContent diff --git a/docker/scripts/cachedResultsQuery.sh b/docker/scripts/cachedResultsQuery.sh new file mode 100755 index 00000000000..dc9daf538b2 --- /dev/null +++ b/docker/scripts/cachedResultsQuery.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +source ${SCRIPT_DIR}/common/cachedResultsQuery.sh + +# QUERY PARAMETERS +QUERY_LOGIC='EventQuery' +BEGIN='19660908 000000.000' +END='20161002 235959.999' +COLUMN_VISIBILITY='PUBLIC' +QUERY='GENRES:[Action to Western]' +QUERY_SYNTAX='LUCENE' +AUTHS='PUBLIC,PRIVATE,BAR,FOO' +QUERY_NAME='Developer Test Query' +PAGE_SIZE='10' + +runCachedResultsQuery \ No newline at end of file diff --git a/docker/scripts/cancel.sh b/docker/scripts/cancel.sh new file mode 100755 index 00000000000..0f917b53120 --- /dev/null +++ b/docker/scripts/cancel.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +source ${SCRIPT_DIR}/common/common.sh + +createTempPem + +echo "$(date): Canceling query" +curl -X POST -s -k -E ${TMP_PEM} ${DATAWAVE_ENDPOINT}/$1/cancel -w '%{http_code}\n' diff --git a/docker/scripts/cleanup.sh b/docker/scripts/cleanup.sh new file mode 100755 index 00000000000..618f7101348 --- /dev/null +++ b/docker/scripts/cleanup.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +PASSED_TESTS=(${1}) +for p in "${PASSED_TESTS[@]}" ; do + rm -rf "${p%.sh}"_* +done diff --git a/docker/scripts/close.sh b/docker/scripts/close.sh new file mode 100755 index 00000000000..be6ca3d2af7 --- /dev/null +++ b/docker/scripts/close.sh @@ -0,0 +1,10 @@ +#!/bin/bash + 
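+# Closes the query whose id is given as the first argument, e.g.:
+#
+#   ./close.sh 09aa3d4e-1111-2222-3333-444455556666
+#
+# (The id shown is illustrative; use the query id returned at create time.)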
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +source ${SCRIPT_DIR}/common/common.sh + +createTempPem + +echo "$(date): Closing query" +curl -X POST -s -k -E ${TMP_PEM} ${DATAWAVE_ENDPOINT}/$1/close -w '%{http_code}\n' diff --git a/docker/scripts/common/batchLookup.sh b/docker/scripts/common/batchLookup.sh new file mode 100755 index 00000000000..e07fa65b6d1 --- /dev/null +++ b/docker/scripts/common/batchLookup.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +source ${SCRIPT_DIR}/common/common.sh + +PAUSE='false' +POOL="${POOL:-pool1}" +MAX_PAGES=100 +QUERY_TYPE='batchLookup' + +# QUERY PARAMETERS +#BEGIN='19660908 000000.000' +#END='20161002 235959.999' +#COLUMN_VISIBILITY='PUBLIC' +#UUID_PAIRS="PAGE_TITLE:anarchism OR PAGE_TITLE:accessiblecomputing" +#AUTHS='PUBLIC,PRIVATE,BAR,FOO' +#QUERY_NAME='Developer Test Lookup UUID Query' + +# Override common get_query_id +get_query_id () { + while read_dom; do + if [[ $ENTITY =~ 'QueryId' ]]; then + echo $CONTENT + break + fi + done +} + +runBatchLookup() { + createTempPem + + FOLDER="${QUERY_TYPE}_$(date +%Y%m%d_%I%M%S.%N)" + + mkdir $FOLDER + cd $FOLDER + + SYSTEM_FROM=$(hostname) + + echo "$(date): Creating query" + echo "$(date): Creating query" > querySummary.txt + + curl -s -D headers_0.txt -k -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + --data-urlencode "begin=${BEGIN}" \ + --data-urlencode "end=${END}" \ + --data-urlencode "columnVisibility=${COLUMN_VISIBILITY}" \ + --data-urlencode "uuidPairs=${UUID_PAIRS}" \ + --data-urlencode "auths=${AUTHS}" \ + --data-urlencode "systemFrom=${SYSTEM_FROM}" \ + --data-urlencode "queryName=${QUERY_NAME}" \ + ${DATAWAVE_ENDPOINT}/lookupUUID -o lookupResponse.xml -w '%{http_code}\n' >> querySummary.txt + + QUERY_ID=$(get_query_id < lookupResponse.xml) + NUM_EVENTS=$(get_num_events < lookupResponse.xml) + echo "$(date): Returned $NUM_EVENTS events" + echo "$(date): Returned $NUM_EVENTS events" >> querySummary.txt + + cd ../ + + logMetrics +} diff --git a/docker/scripts/common/batchLookupContent.sh b/docker/scripts/common/batchLookupContent.sh new file mode 100755 index 00000000000..c8866fd9fb1 --- /dev/null +++ b/docker/scripts/common/batchLookupContent.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +source ${SCRIPT_DIR}/common/common.sh + +PAUSE='false' +POOL="${POOL:-pool1}" +MAX_PAGES=100 +QUERY_TYPE='batchLookupContent' + +# QUERY PARAMETERS +#BEGIN='19660908 000000.000' +#END='20161002 235959.999' +#COLUMN_VISIBILITY='PUBLIC' +#UUID_PAIRS='PAGE_TITLE:anarchism OR PAGE_TITLE:accessiblecomputing' +#AUTHS='PUBLIC,PRIVATE,BAR,FOO' +#QUERY_NAME='Developer Test Lookup Content UUID Query' + +# Override common get_query_id +get_query_id () { + while read_dom; do + if [[ $ENTITY =~ 'QueryId' ]]; then + echo $CONTENT + break + fi + done +} + +runBatchLookupContent() { + createTempPem + + FOLDER="${QUERY_TYPE}_$(date +%Y%m%d_%I%M%S.%N)" + + mkdir $FOLDER + cd $FOLDER + + SYSTEM_FROM=$(hostname) + + echo "$(date): Running LookupContentUUID query" + echo "$(date): Running LookupContentUUID query" > querySummary.txt + curl -s -D headers_0.txt -k -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + --data-urlencode "begin=${BEGIN}" \ + --data-urlencode "end=${END}" \ + --data-urlencode "columnVisibility=${COLUMN_VISIBILITY}" \ + --data-urlencode "uuidPairs=${UUID_PAIRS}" \ + --data-urlencode "auths=${AUTHS}" \ + --data-urlencode "systemFrom=${SYSTEM_FROM}" \ + --data-urlencode "queryName=${QUERY_NAME}" \ + ${DATAWAVE_ENDPOINT}/lookupContentUUID -o 
lookupResponse.xml -w '%{http_code}\n' >> querySummary.txt + + QUERY_ID=$(get_query_id < lookupResponse.xml) + NUM_EVENTS=$(get_num_events < lookupResponse.xml) + echo "$(date): Returned $NUM_EVENTS events" + echo "$(date): Returned $NUM_EVENTS events" >> querySummary.txt + + echo "$(date): Finished running $QUERY_ID" + echo "$(date): Finished running $QUERY_ID" >> querySummary.txt + + cd ../ + + logMetrics +} diff --git a/docker/scripts/common/cachedResultsQuery.sh b/docker/scripts/common/cachedResultsQuery.sh new file mode 100755 index 00000000000..cd540df8207 --- /dev/null +++ b/docker/scripts/common/cachedResultsQuery.sh @@ -0,0 +1,148 @@ +#!/bin/bash + +source ${SCRIPT_DIR}/common/common.sh + +PAUSE='false' +POOL="${POOL:-pool1}" +MAX_PAGES=100 +QUERY_TYPE='cachedResultsQuery' + +# QUERY PARAMETERS +#QUERY_LOGIC='EventQuery' +#BEGIN='19660908 000000.000' +#END='20161002 235959.999' +#COLUMN_VISIBILITY='PUBLIC' +#QUERY='GENRES:[Action to Western]' +#QUERY_SYNTAX='LUCENE' +#AUTHS='PUBLIC,PRIVATE,BAR,FOO' +#QUERY_NAME='Developer Test Query' +#PAGE_SIZE='10' + +# Override common get_query_id +get_query_id () { + while read_dom; do + if [[ $ENTITY = 'QueryId' ]]; then + echo $CONTENT + break + fi + done +} + +get_result () { + while read_dom; do + if [[ $ENTITY =~ 'Result' ]] && [[ ! $ENTITY =~ 'HasResults' ]]; then + echo $CONTENT + break + fi + done +} + +get_total_num_events () { + while read_dom; do + if [[ $ENTITY = 'TotalEvents' ]]; then + echo $CONTENT + break + fi + done +} + +runCachedResultsQuery() { + createTempPem + + FOLDER="${QUERY_TYPE}_$(date +%Y%m%d_%I%M%S.%N)" + + mkdir $FOLDER + cd $FOLDER + + SYSTEM_FROM=$(hostname) + + echo "$(date): Defining query" + echo "$(date): Defining query" > querySummary.txt + curl -s -D headers_0.txt -k -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + --data-urlencode "begin=${BEGIN}" \ + --data-urlencode "end=${END}" \ + --data-urlencode "columnVisibility=${COLUMN_VISIBILITY}" \ + --data-urlencode "query=${QUERY}" \ + --data-urlencode "query.syntax=${QUERY_SYNTAX}" \ + --data-urlencode "auths=${AUTHS}" \ + --data-urlencode "systemFrom=${SYSTEM_FROM}" \ + --data-urlencode "queryName=${QUERY_NAME}" \ + --data-urlencode "pagesize=${PAGE_SIZE}" \ + ${DATAWAVE_ENDPOINT}/${QUERY_LOGIC}/define -o defineResponse.xml -w '%{http_code}\n' >> querySummary.txt + + QUERY_ID=$(get_result < defineResponse.xml) + + echo "$(date): Loading cached results" + echo "$(date): Loading cached results" > querySummary.txt + curl -s -D headers_1.txt -k -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + ${CACHEDRESULTS_ENDPOINT}/$QUERY_ID/load?alias=alias-${QUERY_ID} -o loadResponse.xml -w '%{http_code}\n' >> querySummary.txt + + VIEW_NAME=$(get_result < loadResponse.xml) + + echo "$(date): Creating the SQL query" + echo "$(date): Creating the SQL query" > querySummary.txt + curl -s -D headers_2.txt -k -X POST -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + --data-urlencode "fields=" \ + --data-urlencode "conditions=" \ + --data-urlencode "grouping=" \ + --data-urlencode "order=" \ + --data-urlencode "fixedFields=" \ + --data-urlencode "pagesize=10" \ + ${CACHEDRESULTS_ENDPOINT}/$VIEW_NAME/create -o createResponse.xml -w '%{http_code}\n' >> querySummary.txt + + METRICS_QUERY_ID=$(get_query_id < createResponse.xml) + + i=1 + TOTAL_NUM_EVENTS=0 + TOTAL_EVENTS=0 + TOTAL_PAGES=0 + + while [ $i -gt 0 ] && [ $i -lt $MAX_PAGES ]; do + if [ "$PAUSE" == "true" ]; then + echo "press any key to continue" + read -n 
1 + fi + + echo "$(date): Requesting page $i for $VIEW_NAME" + echo "$(date): Requesting page $i for $VIEW_NAME" >> querySummary.txt + curl -s -D headers_$((i + 3)).txt -k -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + "${CACHEDRESULTS_ENDPOINT}/$VIEW_NAME/getRows?rowBegin=$((TOTAL_PAGES * PAGE_SIZE + 1))&rowEnd=$(((TOTAL_PAGES + 1) * PAGE_SIZE))" -o getRowsResponse_$i.xml -w '%{http_code}\n' >> querySummary.txt + + CONTINUE=`grep 'HTTP/2 200' headers_$((i + 3)).txt` + + if [ -z "$CONTINUE" ]; then + i=-1 + else + NUM_EVENTS=$(get_num_events < getRowsResponse_$i.xml) + TOTAL_NUM_EVENTS=$(get_total_num_events < getRowsResponse_$i.xml) + TOTAL_EVENTS=$((TOTAL_EVENTS + NUM_EVENTS)) + TOTAL_PAGES=$((TOTAL_PAGES + 1)) + echo "$(date): Page $i contained $NUM_EVENTS events" + echo "$(date): Page $i contained $NUM_EVENTS events" >> querySummary.txt + + if [ $TOTAL_EVENTS -ge $TOTAL_NUM_EVENTS ]; then + i=-1 + else + ((i++)) + fi + fi + done + + echo "$(date): Returned $TOTAL_PAGES pages" + echo "$(date): Returned $TOTAL_PAGES pages" >> querySummary.txt + + echo "$(date): Returned $TOTAL_EVENTS events" + echo "$(date): Returned $TOTAL_EVENTS events" >> querySummary.txt + + cd ../ + + logMetrics +} diff --git a/docker/scripts/common/common.sh b/docker/scripts/common/common.sh new file mode 100755 index 00000000000..9cb3903c87b --- /dev/null +++ b/docker/scripts/common/common.sh @@ -0,0 +1,119 @@ +#!/bin/bash + +WEBSERVICE="${WEBSERVICE:-false}" + +if [ "$WEBSERVICE" = true ]; then + DATAWAVE_ENDPOINT=https://localhost:9443/DataWave/Query + CACHEDRESULTS_ENDPOINT=https://localhost:9443/DataWave/CachedResults + MAPREDUCE_ENDPOINT=https://localhost:9443/DataWave/MapReduce +else + DATAWAVE_ENDPOINT=https://localhost:8443/query/v1/query + CACHEDRESULTS_ENDPOINT=https://localhost:8443/query/v1/cachedresults + MAPREDUCE_ENDPOINT=https://localhost:8443/query/v1/mapreduce +fi + +METRICS_ENDPOINT=https://localhost:8543/querymetric/v1 + +createTempPem() { + # use the test user pkcs12 cert + P12_KEYSTORE=${SCRIPT_DIR}/../pki/testUser.p12 + P12_KEYSTORE_PASS=ChangeIt + + TMP_DIR=/dev/shm + TMP_PEM="$TMP_DIR/testUser-$$-pem" + + sh -c "while kill -0 $$ 2>/dev/null; do sleep 1; done; rm -f '${TMP_P12}' '${TMP_PEM}'" & + + function needsPassphrase() { + [ -z "${P12_KEYSTORE_PASS}" ] + } + + function getFromCliPrompt() { + read -s -p "Passphrase for ${P12_KEYSTORE}: " P12_KEYSTORE_PASS && echo 1>&2 + } + + needsPassphrase && getFromCliPrompt + + params="" + opensslVersion3="$( openssl version | awk '{ print $2 }' | grep -E ^3\. )" + if [ ! -z "$opensslVersion3" ]; then + params="-provider legacy -provider default" + fi + + # Create one-time passphrase and certificate + OLD_UMASK=$(umask) + umask 0277 + export P12_KEYSTORE_PASS + openssl pkcs12 ${params} \ + -in ${P12_KEYSTORE} -passin env:P12_KEYSTORE_PASS \ + -out ${TMP_PEM} -nodes 2>/dev/null + opensslexit=$? + umask $OLD_UMASK + [ $opensslexit = 0 ] || echo "Error creating temporary certificate file" +} + +read_dom () { + local IFS=\> + read -d \< ENTITY CONTENT +} + +get_query_id () { + while read_dom; do + if [[ $ENTITY =~ 'Result' ]] && [[ ! $ENTITY =~ 'HasResults' ]]; then + echo $CONTENT + break + fi + done +} + +get_num_events () { + while read_dom; do + if [[ $ENTITY = 'ReturnedEvents' ]]; then + echo $CONTENT + break + fi + done +} + +logMetrics () { + if [ ! 
-z "$QUERY_ID" ]; then + mv ${FOLDER} ${QUERY_TYPE}_${QUERY_ID} + + echo "$(date): Getting metrics for ${QUERY_ID}" + echo "$(date): Getting metrics for ${QUERY_ID}" >> ${QUERY_TYPE}_${QUERY_ID}/querySummary.txt + + echo "$(date): Metrics available at: ${METRICS_ENDPOINT}/id/${QUERY_ID}" + echo "$(date): Metrics available at: ${METRICS_ENDPOINT}/id/${QUERY_ID}" >> ${QUERY_TYPE}_${QUERY_ID}/querySummary.txt + fi +} + +printLine() { + echo "$( printGreen "********************************************************************************************************" )" +} + +printRed() { + echo -ne "${DW_COLOR_RED}${1}${DW_COLOR_RESET}" +} + +printGreen() { + echo -ne "${DW_COLOR_GREEN}${1}${DW_COLOR_RESET}" +} + +setPrintColors() { + DW_COLOR_RED="\033[31m" + DW_COLOR_GREEN="\033[32m" + DW_COLOR_RESET="\033[m" +} + +setTestLabels() { + LABEL_PASS="$( printGreen PASSED )" + LABEL_FAIL="$( printRed FAILED )" +} + +printTestStatus() { + elapsed_time=$(echo "scale=3; ($2 - $1) / 1000000000" | bc) + echo + echo "Test Total Time: $elapsed_time seconds" + echo "Test Status: $3" + echo +} \ No newline at end of file diff --git a/docker/scripts/common/count.sh b/docker/scripts/common/count.sh new file mode 100755 index 00000000000..5e4548f1c4c --- /dev/null +++ b/docker/scripts/common/count.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +source ${SCRIPT_DIR}/common/query.sh + +PAUSE='false' +MAX_PAGES=100 +QUERY_TYPE='count' + +# QUERY PARAMETERS +#QUERY_LOGIC='CountQuery' +#BEGIN='19660908 000000.000' +#END='20161002 235959.999' +#COLUMN_VISIBILITY='PUBLIC' +#QUERY='GENRES:[Action to Western]' +#QUERY_SYNTAX='LUCENE' +#AUTHS='PUBLIC,PRIVATE,BAR,FOO' +#QUERY_NAME='Developer Test Query' +#PAGE_SIZE='10' + +# Override common get_num_events +get_num_events () { + tag_found=false + while read_dom; do + if [[ $ENTITY =~ Field.*(RECORD_COUNT|count).* ]]; then + tag_found=true + elif [[ $tag_found == true ]]; then + if [[ $ENTITY =~ 'Value' ]]; then + echo $CONTENT + break + elif [[ $ENTITY =~ '/Field' ]]; then + tag_found=false + fi + fi + done +} + +runCount() { + runQuery +} \ No newline at end of file diff --git a/docker/scripts/common/edge.sh b/docker/scripts/common/edge.sh new file mode 100755 index 00000000000..5500dfe675a --- /dev/null +++ b/docker/scripts/common/edge.sh @@ -0,0 +1,107 @@ +#!/bin/bash + +source ${SCRIPT_DIR}/common/common.sh + +PAUSE='false' +POOL="${POOL:-pool1}" +MAX_PAGES=100 +QUERY_TYPE='edge' + +# QUERY PARAMETERS +#QUERY_LOGIC='EdgeQuery' +#BEGIN='19660908 000000.000' +#END='20161002 235959.999' +#COLUMN_VISIBILITY='PUBLIC' +#QUERY='SOURCE == 'Jerry Seinfeld'' +#AUTHS='PUBLIC,PRIVATE,BAR,FOO' +#QUERY_NAME='Developer Test Edge Query' +#PAGE_SIZE='100' + +get_num_edges () { + declare -i count=0 + while read_dom; do + if [[ $ENTITY = '/Edge' ]]; then + count=$((count + 1)) + fi + done + echo $count +} + +runEdgeQuery() { + createTempPem + + FOLDER="${QUERY_TYPE}_$(date +%Y%m%d_%I%M%S.%N)" + + mkdir $FOLDER + cd $FOLDER + + SYSTEM_FROM=$(hostname) + + echo "$(date): Creating query" + echo "$(date): Creating query" > querySummary.txt + curl -s -D headers_0.txt -k -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + --data-urlencode "begin=${BEGIN}" \ + --data-urlencode "end=${END}" \ + --data-urlencode "columnVisibility=${COLUMN_VISIBILITY}" \ + --data-urlencode "query=${QUERY}" \ + --data-urlencode "auths=${AUTHS}" \ + --data-urlencode "systemFrom=${SYSTEM_FROM}" \ + --data-urlencode "queryName=${QUERY_NAME}" \ + --data-urlencode "pagesize=${PAGE_SIZE}" \ + 
${DATAWAVE_ENDPOINT}/${QUERY_LOGIC}/create -o createResponse.xml -w '%{http_code}\n' >> querySummary.txt + + i=1 + + QUERY_ID=$(get_query_id < createResponse.xml) + + TOTAL_EVENTS=0 + TOTAL_PAGES=0 + + while [ $i -gt 0 ] && [ $i -lt $MAX_PAGES ]; do + echo "$(date): Requesting page $i for $QUERY_ID" + echo "$(date): Requesting page $i for $QUERY_ID" >> querySummary.txt + curl -s -D headers_$i.txt -q -k -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + ${DATAWAVE_ENDPOINT}/$QUERY_ID/next -o nextResponse_$i.xml -w '%{http_code}\n' >> querySummary.txt + + CONTINUE=`grep 'HTTP/2 200' headers_$i.txt` + + if [ -z "$CONTINUE" ]; then + i=-1 + else + NUM_EVENTS=$(get_num_edges < nextResponse_$i.xml) + TOTAL_EVENTS=$((TOTAL_EVENTS + NUM_EVENTS)) + TOTAL_PAGES=$((TOTAL_PAGES + 1)) + echo "$(date): Page $i contained $NUM_EVENTS edges" + echo "$(date): Page $i contained $NUM_EVENTS edges" >> querySummary.txt + + ((i++)) + fi + + if [ "$PAUSE" == "true" ]; then + echo "press any key to continue" + read -n 1 + fi + done + + echo "$(date): Returned $TOTAL_PAGES pages" + echo "$(date): Returned $TOTAL_PAGES pages" >> querySummary.txt + + echo "$(date): Returned $TOTAL_EVENTS events" + echo "$(date): Returned $TOTAL_EVENTS events" >> querySummary.txt + + echo "$(date): Closing $QUERY_ID" + echo "$(date): Closing $QUERY_ID" >> querySummary.txt + # close the query + curl -s -q -k -X POST -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + ${DATAWAVE_ENDPOINT}/$QUERY_ID/close -o closeResponse.xml -w '%{http_code}\n' >> querySummary.txt + + cd ../ + + logMetrics +} diff --git a/docker/scripts/common/lookup.sh b/docker/scripts/common/lookup.sh new file mode 100755 index 00000000000..e23c92ff373 --- /dev/null +++ b/docker/scripts/common/lookup.sh @@ -0,0 +1,65 @@ +#!/bin/bash + +source ${SCRIPT_DIR}/common/common.sh + +PAUSE='false' +POOL="${POOL:-pool1}" +MAX_PAGES=100 +QUERY_TYPE='lookup' + +# QUERY PARAMETERS +#BEGIN='19660908 000000.000' +#END='20161002 235959.999' +#COLUMN_VISIBILITY='PUBLIC' +#UUID_TYPE='PAGE_TITLE' +#UUID='anarchism' +#QUERY='GENRES:[Action to Western]' +#QUERY_SYNTAX='LUCENE' +#AUTHS='PUBLIC,PRIVATE,BAR,FOO' +#QUERY_NAME='Developer Test Lookup UUID Query' + +# Override common get_query_id +get_query_id () { + while read_dom; do + if [[ $ENTITY =~ 'QueryId' ]]; then + echo $CONTENT + break + fi + done +} + +runLookup() { + createTempPem + + FOLDER="${QUERY_TYPE}_$(date +%Y%m%d_%I%M%S.%N)" + + mkdir $FOLDER + cd $FOLDER + + SYSTEM_FROM=$(hostname) + + echo "$(date): Running LookupUUID query" + echo "$(date): Running LookupUUID query" > querySummary.txt + curl -s -D headers_0.txt -X GET -k -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + --data-urlencode "begin=${BEGIN}" \ + --data-urlencode "end=${END}" \ + --data-urlencode "columnVisibility=${COLUMN_VISIBILITY}" \ + --data-urlencode "auths=${AUTHS}" \ + --data-urlencode "systemFrom=${SYSTEM_FROM}" \ + --data-urlencode "queryName=${QUERY_NAME}" \ + ${DATAWAVE_ENDPOINT}/lookupUUID/${UUID_TYPE}/${UUID} -o lookupResponse.xml -w '%{http_code}\n' >> querySummary.txt + + QUERY_ID=$(get_query_id < lookupResponse.xml) + NUM_EVENTS=$(get_num_events < lookupResponse.xml) + echo "$(date): Returned $NUM_EVENTS events" + echo "$(date): Returned $NUM_EVENTS events" >> querySummary.txt + + echo "$(date): Finished running $QUERY_ID" + echo "$(date): Finished running $QUERY_ID" >> querySummary.txt + + cd ../ + + logMetrics +} diff --git a/docker/scripts/common/lookupContent.sh 
b/docker/scripts/common/lookupContent.sh new file mode 100755 index 00000000000..2c8cefa55f1 --- /dev/null +++ b/docker/scripts/common/lookupContent.sh @@ -0,0 +1,65 @@ +#!/bin/bash + +source ${SCRIPT_DIR}/common/common.sh + +PAUSE='false' +POOL="${POOL:-pool1}" +MAX_PAGES=100 +QUERY_TYPE='lookupContent' + +# QUERY PARAMETERS +#BEGIN='19660908 000000.000' +#END='20161002 235959.999' +#COLUMN_VISIBILITY='PUBLIC' +#UUID_TYPE='PAGE_TITLE' +#UUID='anarchism' +#QUERY='GENRES:[Action to Western]' +#QUERY_SYNTAX='LUCENE' +#AUTHS='PUBLIC,PRIVATE,BAR,FOO' +#QUERY_NAME='Developer Test Lookup UUID Query' + +# Override common get_query_id +get_query_id () { + while read_dom; do + if [[ $ENTITY =~ 'QueryId' ]]; then + echo $CONTENT + break + fi + done +} + +runLookupContent() { + createTempPem + + FOLDER="${QUERY_TYPE}_$(date +%Y%m%d_%I%M%S.%N)" + + mkdir $FOLDER + cd $FOLDER + + SYSTEM_FROM=$(hostname) + + echo "$(date): Running LookupUUID query" + echo "$(date): Running LookupUUID query" > querySummary.txt + curl -s -D headers_0.txt -X GET -k -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + --data-urlencode "begin=${BEGIN}" \ + --data-urlencode "end=${END}" \ + --data-urlencode "columnVisibility=${COLUMN_VISIBILITY}" \ + --data-urlencode "auths=${AUTHS}" \ + --data-urlencode "systemFrom=${SYSTEM_FROM}" \ + --data-urlencode "queryName=${QUERY_NAME}" \ + ${DATAWAVE_ENDPOINT}/lookupContentUUID/${UUID_TYPE}/${UUID} -o lookupResponse.xml -w '%{http_code}\n' >> querySummary.txt + + QUERY_ID=$(get_query_id < lookupResponse.xml) + NUM_EVENTS=$(get_num_events < lookupResponse.xml) + echo "$(date): Returned $NUM_EVENTS events" + echo "$(date): Returned $NUM_EVENTS events" >> querySummary.txt + + echo "$(date): Finished running $QUERY_ID" + echo "$(date): Finished running $QUERY_ID" >> querySummary.txt + + cd ../ + + logMetrics +} diff --git a/docker/scripts/common/mapReduceQuery.sh b/docker/scripts/common/mapReduceQuery.sh new file mode 100755 index 00000000000..efae9cf3314 --- /dev/null +++ b/docker/scripts/common/mapReduceQuery.sh @@ -0,0 +1,184 @@ +#!/bin/bash + +source ${SCRIPT_DIR}/common/common.sh + +PAUSE='false' +POOL="${POOL:-pool1}" +MAX_PAGES=100 +QUERY_TYPE='mapReduceQuery' + +# QUERY PARAMETERS +#QUERY_LOGIC='EventQuery' +#JOB_NAME='BulkResultsJob' +#FORMAT=XML +#OUTPUT_FORMAT=TEXT +#BEGIN='19660908 000000.000' +#END='20161002 235959.999' +#COLUMN_VISIBILITY='PUBLIC' +#QUERY='GENRES:[Action to Western]' +#QUERY_SYNTAX='LUCENE' +#AUTHS='PUBLIC,PRIVATE,BAR,FOO' +#QUERY_NAME='Developer Test Query' +#PAGE_SIZE='10' + +get_job_status () { + while read_dom; do + if [[ $ENTITY =~ 'JobExecution' ]]; then + if [[ $ENTITY =~ 'state="DEFINED"' ]]; then + echo "DEFINED" + break + elif [[ $ENTITY =~ 'state="SUBMITTED"' ]]; then + echo "SUBMITTED" + break + elif [[ $ENTITY =~ 'state="RUNNING"' ]]; then + echo "RUNNING" + break + elif [[ $ENTITY =~ 'state="SUCCEEDED"' ]]; then + echo "SUCCEEDED" + break + elif [[ $ENTITY =~ 'state="CLOSED"' ]]; then + echo "CLOSED" + break + elif [[ $ENTITY =~ 'state="CANCELED"' ]]; then + echo "CANCELED" + break + elif [[ $ENTITY =~ 'state="FAILED"' ]]; then + echo "FAILED" + break + fi + fi + done +} + +# Override common get_num_events +get_num_events () { + local EVENTS=0 + while read_dom; do + if [[ $ENTITY = 'ReturnedEvents' ]] || [[ $ENTITY = 'returnedEvents' ]]; then + EVENTS=$((EVENTS + CONTENT)) + fi + done + echo $EVENTS +} + +# Override common logMetrics +logMetrics () { + if [ ! 
-z "$JOB_ID" ]; then + mv $FOLDER ${QUERY_TYPE}_${JOB_ID} + + echo "$(date): Job status available at: ${MAPREDUCE_ENDPOINT}/${JOB_ID}/list" + echo "$(date): Job status available at: ${MAPREDUCE_ENDPOINT}/${JOB_ID}/list" >> ${QUERY_TYPE}_${JOB_ID}/querySummary.txt + fi +} + +runMapReduceQuery() { + createTempPem + + FOLDER="${QUERY_TYPE}_$(date +%Y%m%d_%I%M%S.%N)" + + mkdir $FOLDER + cd $FOLDER + + SYSTEM_FROM=$(hostname) + + echo "$(date): Defining query" + echo "$(date): Defining query" > querySummary.txt + curl -s -D headers_0.txt -k -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + --data-urlencode "begin=${BEGIN}" \ + --data-urlencode "end=${END}" \ + --data-urlencode "columnVisibility=${COLUMN_VISIBILITY}" \ + --data-urlencode "query=${QUERY}" \ + --data-urlencode "query.syntax=${QUERY_SYNTAX}" \ + --data-urlencode "auths=${AUTHS}" \ + --data-urlencode "systemFrom=${SYSTEM_FROM}" \ + --data-urlencode "queryName=${QUERY_NAME}" \ + --data-urlencode "pagesize=${PAGE_SIZE}" \ + ${DATAWAVE_ENDPOINT}/${QUERY_LOGIC}/define -o defineResponse.xml -w '%{http_code}\n' >> querySummary.txt + + + QUERY_ID=$(get_query_id < defineResponse.xml) + + echo "$(date): Submitting map reduce query" + echo "$(date): Submitting map reduce query" >> querySummary.txt + + # To write the output to a table, add the following parameter + # --data-urlencode "outputTableName=ResultsTable" \ + + curl -s -D headers_1.txt -k -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + --data-urlencode "jobName=${JOB_NAME}" \ + --data-urlencode "queryId=${QUERY_ID}" \ + --data-urlencode "format=${FORMAT}" \ + --data-urlencode "outputFormat=${OUTPUT_FORMAT}" \ + ${MAPREDUCE_ENDPOINT}/submit -o submitResponse.xml -w '%{http_code}\n' >> querySummary.txt + + JOB_ID=$(get_query_id < submitResponse.xml) + + ATTEMPTS=6 + ATTEMPT=1 + TIMEOUT=20 + + JOB_STATUS="DEFINED" + + while [ $ATTEMPT -le $ATTEMPTS ]; do + echo "$(date): Checking map reduce query status (Attempt ${ATTEMPT}/${ATTEMPTS})" + echo "$(date): Checking map reduce query status (Attempt ${ATTEMPT}/${ATTEMPTS})" >> querySummary.txt + + curl -s -k -E ${TMP_PEM} \ + ${MAPREDUCE_ENDPOINT}/${JOB_ID}/list -o listResponse.xml -w '%{http_code}\n' >> querySummary.txt + + JOB_STATUS=$(get_job_status < listResponse.xml) + + echo "$(date): Job Status: $JOB_STATUS" + echo "$(date): Job Status: $JOB_STATUS" >> querySummary.txt + + if [ "$JOB_STATUS" != "DEFINED" ] && [ "$JOB_STATUS" != "SUBMITTED" ] && [ "$JOB_STATUS" != "RUNNING" ]; then + break; + fi + + if [ $ATTEMPT -le $ATTEMPTS ]; then + sleep ${TIMEOUT} + fi + + ((ATTEMPT++)) + done + + TOTAL_EVENTS=0 + TOTAL_FILES=0 + if [ "$JOB_STATUS" == "SUCCEEDED" ]; then + echo "$(date): Downloading results.tar" + echo "$(date): Downloading results.tar" >> querySummary.txt + + curl -s -k -E ${TMP_PEM} \ + ${MAPREDUCE_ENDPOINT}/${JOB_ID}/getAllFiles -o results.tar -w '%{http_code}\n' >> querySummary.txt + + tar -xf results.tar + + cd ${JOB_ID} + + for f in $(ls) + do + NUM_EVENTS=$(get_num_events < $f) + TOTAL_EVENTS=$((TOTAL_EVENTS + NUM_EVENTS)) + TOTAL_FILES=$((TOTAL_FILES + 1)) + + echo "$(date): $f contained $NUM_EVENTS events" + echo "$(date): $f contained $NUM_EVENTS events" >> querySummary.txt + done + + cd .. 
+ fi
+
+ echo "$(date): Returned $TOTAL_FILES files"
+ echo "$(date): Returned $TOTAL_FILES files" >> querySummary.txt
+
+ echo "$(date): Returned $TOTAL_EVENTS events"
+ echo "$(date): Returned $TOTAL_EVENTS events" >> querySummary.txt
+
+ cd ../
+
+ logMetrics
+} \ No newline at end of file diff --git a/docker/scripts/common/oozieQuery.sh b/docker/scripts/common/oozieQuery.sh new file mode 100755 index 00000000000..989d70e6942 --- /dev/null +++ b/docker/scripts/common/oozieQuery.sh @@ -0,0 +1,152 @@
+#!/bin/bash
+
+source ${SCRIPT_DIR}/common/common.sh
+
+PAUSE='false'
+POOL="${POOL:-pool1}"
+MAX_PAGES=100
+QUERY_TYPE='oozieQuery'
+
+# QUERY PARAMETERS
+#WORKFLOW='OozieJob'
+#COLUMN_VISIBILITY='PUBLIC'
+
+get_job_status () {
+ while read_dom; do
+ if [[ $ENTITY =~ 'JobExecution' ]]; then
+ if [[ $ENTITY =~ 'state="DEFINED"' ]]; then
+ echo "DEFINED"
+ break
+ elif [[ $ENTITY =~ 'state="SUBMITTED"' ]]; then
+ echo "SUBMITTED"
+ break
+ elif [[ $ENTITY =~ 'state="RUNNING"' ]]; then
+ echo "RUNNING"
+ break
+ elif [[ $ENTITY =~ 'state="SUCCEEDED"' ]]; then
+ echo "SUCCEEDED"
+ break
+ elif [[ $ENTITY =~ 'state="CLOSED"' ]]; then
+ echo "CLOSED"
+ break
+ elif [[ $ENTITY =~ 'state="CANCELED"' ]]; then
+ echo "CANCELED"
+ break
+ elif [[ $ENTITY =~ 'state="FAILED"' ]]; then
+ echo "FAILED"
+ break
+ fi
+ fi
+ done
+}
+
+# Override common get_num_events
+get_num_events () {
+ local EVENTS=0
+ while read_dom; do
+ if [[ $ENTITY = 'ReturnedEvents' ]] || [[ $ENTITY = 'returnedEvents' ]]; then
+ EVENTS=$((EVENTS + CONTENT))
+ fi
+ done
+ echo $EVENTS
+}
+
+# Override common logMetrics
+logMetrics () {
+ if [ ! -z "$JOB_ID" ]; then
+ mv $FOLDER ${QUERY_TYPE}_${JOB_ID}
+
+ echo "$(date): Job status available at: ${DATAWAVE_ENDPOINT}/mapreduce/${JOB_ID}/list"
+ echo "$(date): Job status available at: ${DATAWAVE_ENDPOINT}/mapreduce/${JOB_ID}/list" >> ${QUERY_TYPE}_${JOB_ID}/querySummary.txt
+ fi
+}
+
+runOozieQuery() {
+ createTempPem
+
+ FOLDER="${QUERY_TYPE}_$(date +%Y%m%d_%I%M%S.%N)"
+
+ mkdir $FOLDER
+ cd $FOLDER
+
+ SYSTEM_FROM=$(hostname)
+
+ echo "$(date): Submitting oozie query"
+ echo "$(date): Submitting oozie query" >> querySummary.txt
+
+ # To write the output to a table, add the following parameter
+ # --data-urlencode "outputTableName=ResultsTable" \
+
+ curl -s -D headers_1.txt -k -E ${TMP_PEM} \
+ -H "Accept: application/xml" \
+ -H "Pool: $POOL" \
+ --data-urlencode "workFlow=${WORKFLOW}" \
+ --data-urlencode "columnVisibility=${COLUMN_VISIBILITY}" \
+ ${MAPREDUCE_ENDPOINT}/oozieSubmit -o submitResponse.xml -w '%{http_code}\n' >> querySummary.txt
+
+ JOB_ID=$(get_query_id < submitResponse.xml)
+
+ ATTEMPTS=6
+ ATTEMPT=1
+ TIMEOUT=20
+
+ JOB_STATUS="DEFINED"
+
+ while [ $ATTEMPT -le $ATTEMPTS ]; do
+ echo "$(date): Checking oozie query status (Attempt ${ATTEMPT}/${ATTEMPTS})"
+ echo "$(date): Checking oozie query status (Attempt ${ATTEMPT}/${ATTEMPTS})" >> querySummary.txt
+
+ curl -s -k -E ${TMP_PEM} \
+ ${MAPREDUCE_ENDPOINT}/${JOB_ID}/list -o listResponse.xml -w '%{http_code}\n' >> querySummary.txt
+
+ JOB_STATUS=$(get_job_status < listResponse.xml)
+
+ echo "$(date): Job Status: $JOB_STATUS"
+ echo "$(date): Job Status: $JOB_STATUS" >> querySummary.txt
+
+ if [ "$JOB_STATUS" != "DEFINED" ] && [ "$JOB_STATUS" != "SUBMITTED" ] && [ "$JOB_STATUS" != "RUNNING" ]; then
+ break;
+ fi
+
+ if [ $ATTEMPT -lt $ATTEMPTS ]; then
+ sleep ${TIMEOUT}
+ fi
+
+ ((ATTEMPT++))
+ done
+
+ TOTAL_EVENTS=0
+ TOTAL_FILES=0
+ if [ "$JOB_STATUS" == "SUCCEEDED" ]; then
+ echo "$(date): Downloading results.tar"
+ echo "$(date): Downloading results.tar" >> querySummary.txt + + curl -s -k -E ${TMP_PEM} \ + ${MAPREDUCE_ENDPOINT}/${JOB_ID}/getAllFiles -o results.tar -w '%{http_code}\n' >> querySummary.txt + + tar -xf results.tar + + cd ${JOB_ID} + + for f in $(ls) + do + NUM_EVENTS=$(get_num_events < $f) + TOTAL_EVENTS=$((TOTAL_EVENTS + NUM_EVENTS)) + TOTAL_FILES=$((TOTAL_FILES + 1)) + + echo "$(date): $f contained $NUM_EVENTS events" + echo "$(date): $f contained $NUM_EVENTS events" >> querySummary.txt + done + + cd .. + fi + + echo "$(date): Returned $TOTAL_FILES files" + echo "$(date): Returned $TOTAL_FILES files" >> querySummary.txt + + echo "$(date): Returned $TOTAL_EVENTS events" + echo "$(date): Returned $TOTAL_EVENTS events" >> querySummary.txt + + cd ../ + + logMetrics +} \ No newline at end of file diff --git a/docker/scripts/common/plan.sh b/docker/scripts/common/plan.sh new file mode 100755 index 00000000000..f677877720c --- /dev/null +++ b/docker/scripts/common/plan.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +source ${SCRIPT_DIR}/common/common.sh + +POOL="${POOL:-pool1}" + +# QUERY PARAMETERS +#QUERY_LOGIC='EventQuery' +#BEGIN='19660908 000000.000' +#END='20161002 235959.999' +#COLUMN_VISIBILITY='PUBLIC' +#QUERY='GENRES:[Action to Western]' +#QUERY_SYNTAX='LUCENE' +#AUTHS='PUBLIC,PRIVATE,BAR,FOO' +#QUERY_NAME='Developer Test Query' +#EXPAND_VALUES='true' + +get_query_plan () { + while read_dom; do + if [[ $ENTITY =~ 'Result' ]] && [[ ! $ENTITY =~ 'HasResults' ]]; then + echo $CONTENT + break + fi + done +} + +runPlan() { + createTempPem + + FOLDER="plan_$(date +%Y%m%d_%I%M%S.%N)" + + mkdir $FOLDER + cd $FOLDER + + SYSTEM_FROM=$(hostname) + + echo "$(date): Planning query" + echo "$(date): Planning query" > querySummary.txt + curl -s -D headers_0.txt -k -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + --data-urlencode "begin=${BEGIN}" \ + --data-urlencode "end=${END}" \ + --data-urlencode "columnVisibility=${COLUMN_VISIBILITY}" \ + --data-urlencode "query=${QUERY}" \ + --data-urlencode "query.syntax=${QUERY_SYNTAX}" \ + --data-urlencode "auths=${AUTHS}" \ + --data-urlencode "systemFrom=${SYSTEM_FROM}" \ + --data-urlencode "queryName=${QUERY_NAME}" \ + --data-urlencode "expand.values=${EXPAND_VALUES}" \ + ${DATAWAVE_ENDPOINT}/${QUERY_LOGIC}/plan -o planResponse.txt -w '%{http_code}\n' >> querySummary.txt + + QUERY_PLAN=$(get_query_plan < planResponse.txt) + + echo "$(date): Received query plan" + echo "$(date): Received query plan" >> querySummary.txt + + echo "$QUERY_PLAN" + echo "$QUERY_PLAN" >> querySummary.txt + + cd ../ +} diff --git a/docker/scripts/common/predict.sh b/docker/scripts/common/predict.sh new file mode 100755 index 00000000000..0632b2dbc6f --- /dev/null +++ b/docker/scripts/common/predict.sh @@ -0,0 +1,64 @@ +#!/bin/bash + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +source ${SCRIPT_DIR}/common/common.sh + +POOL="${POOL:-pool1}" + +# QUERY PARAMETERS +#QUERY_LOGIC='EventQuery' +#BEGIN='19660908 000000.000' +#END='20161002 235959.999' +#COLUMN_VISIBILITY='PUBLIC' +#QUERY='GENRES:[Action to Western]' +#QUERY_SYNTAX='LUCENE' +#AUTHS='PUBLIC,PRIVATE,BAR,FOO' +#QUERY_NAME='Developer Test Query' +#EXPAND_VALUES='true' + +get_query_prediction () { + while read_dom; do + if [[ $ENTITY =~ 'Result' ]] && [[ ! 
$ENTITY =~ 'HasResults' ]]; then + echo $CONTENT + break + fi + done +} + +runPredict() { + createTempPem + + FOLDER="predict_$(date +%Y%m%d_%I%M%S.%N)" + + mkdir $FOLDER + cd $FOLDER + + SYSTEM_FROM=$(hostname) + + echo "$(date): Predicting query" + echo "$(date): Predicting query" > querySummary.txt + curl -s -D headers_0.txt -k -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + --data-urlencode "begin=${BEGIN}" \ + --data-urlencode "end=${END}" \ + --data-urlencode "columnVisibility=${COLUMN_VISIBILITY}" \ + --data-urlencode "query=${QUERY}" \ + --data-urlencode "query.syntax=${QUERY_SYNTAX}" \ + --data-urlencode "auths=${AUTHS}" \ + --data-urlencode "systemFrom=${SYSTEM_FROM}" \ + --data-urlencode "queryName=${QUERY_NAME}" \ + --data-urlencode "expand.values=${EXPAND_VALUES}" \ + ${DATAWAVE_ENDPOINT}/${QUERY_LOGIC}/predict -o predictResponse.txt -w '%{http_code}\n' >> querySummary.txt + + QUERY_PREDICTION=$(get_query_prediction < predictResponse.txt) + + echo "$(date): Received query prediction" + echo "$(date): Received query prediction" >> querySummary.txt + + echo "$QUERY_PREDICTION" + echo "$QUERY_PREDICTION" >> querySummary.txt + + cd ../ +} diff --git a/docker/scripts/common/query.sh b/docker/scripts/common/query.sh new file mode 100755 index 00000000000..f46cbb19268 --- /dev/null +++ b/docker/scripts/common/query.sh @@ -0,0 +1,100 @@ +#!/bin/bash + +source ${SCRIPT_DIR}/common/common.sh + +PAUSE='false' +POOL="${POOL:-pool1}" +MAX_PAGES=100 +QUERY_TYPE='query' + +# QUERY PARAMETERS +#QUERY_LOGIC='EventQuery' +#BEGIN='19660908 000000.000' +#END='20161002 235959.999' +#COLUMN_VISIBILITY='PUBLIC' +#QUERY='GENRES:[Action to Western]' +#QUERY_SYNTAX='LUCENE' +#AUTHS='PUBLIC,PRIVATE,BAR,FOO' +#QUERY_NAME='Developer Test Query' +#PAGE_SIZE='10' + +runQuery() { + createTempPem + + FOLDER="${QUERY_TYPE}_$(date +%Y%m%d_%I%M%S.%N)" + + mkdir $FOLDER + cd $FOLDER + + SYSTEM_FROM=$(hostname) + + echo "$(date): Creating query" + echo "$(date): Creating query" > querySummary.txt + + curl -s -D headers_0.txt -k -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + --data-urlencode "begin=${BEGIN}" \ + --data-urlencode "end=${END}" \ + --data-urlencode "columnVisibility=${COLUMN_VISIBILITY}" \ + --data-urlencode "query=${QUERY}" \ + --data-urlencode "query.syntax=${QUERY_SYNTAX}" \ + --data-urlencode "auths=${AUTHS}" \ + --data-urlencode "systemFrom=${SYSTEM_FROM}" \ + --data-urlencode "queryName=${QUERY_NAME}" \ + --data-urlencode "pagesize=${PAGE_SIZE}" \ + ${DATAWAVE_ENDPOINT}/${QUERY_LOGIC}/create -o createResponse.xml -w '%{http_code}\n' >> querySummary.txt + + i=1 + + QUERY_ID=$(get_query_id < createResponse.xml) + + TOTAL_EVENTS=0 + TOTAL_PAGES=0 + + while [ $i -gt 0 ] && [ $i -lt $MAX_PAGES ]; do + if [ "$PAUSE" == "true" ]; then + echo "press any key to continue" + read -n 1 + fi + + echo "$(date): Requesting page $i for $QUERY_ID" + echo "$(date): Requesting page $i for $QUERY_ID" >> querySummary.txt + curl -s -D headers_$i.txt -b headers_0.txt -q -k -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + ${DATAWAVE_ENDPOINT}/$QUERY_ID/next -o nextResponse_$i.xml -w '%{http_code}\n' >> querySummary.txt + + CONTINUE=`grep 'HTTP/.* 200' headers_$i.txt` + + if [ -z "$CONTINUE" ]; then + i=-1 + else + NUM_EVENTS=$(get_num_events < nextResponse_$i.xml) + TOTAL_EVENTS=$((TOTAL_EVENTS + NUM_EVENTS)) + TOTAL_PAGES=$((TOTAL_PAGES + 1)) + echo "$(date): Page $i contained $NUM_EVENTS events" + echo "$(date): Page $i contained $NUM_EVENTS 
events" >> querySummary.txt + + ((i++)) + fi + done + + echo "$(date): Returned $TOTAL_PAGES pages" + echo "$(date): Returned $TOTAL_PAGES pages" >> querySummary.txt + + echo "$(date): Returned $TOTAL_EVENTS events" + echo "$(date): Returned $TOTAL_EVENTS events" >> querySummary.txt + + echo "$(date): Closing $QUERY_ID" + echo "$(date): Closing $QUERY_ID" >> querySummary.txt + # close the query + curl -s -D close_headers.txt -q -k -X POST -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + ${DATAWAVE_ENDPOINT}/$QUERY_ID/close -o closeResponse.xml -w '%{http_code}\n' >> querySummary.txt + + cd ../ + + logMetrics +} diff --git a/docker/scripts/common/streamingQuery.sh b/docker/scripts/common/streamingQuery.sh new file mode 100755 index 00000000000..2a6dd8c1308 --- /dev/null +++ b/docker/scripts/common/streamingQuery.sh @@ -0,0 +1,77 @@ +#!/bin/bash + +source ${SCRIPT_DIR}/common/common.sh + +PAUSE='false' +POOL="${POOL:-pool1}" +MAX_PAGES=100 +QUERY_TYPE='streamingQuery' + +# QUERY PARAMETERS +#QUERY_LOGIC='EventQuery' +#BEGIN='19660908 000000.000' +#END='20161002 235959.999' +#COLUMN_VISIBILITY='PUBLIC' +#QUERY='GENRES:[Action to Western]' +#QUERY_SYNTAX='LUCENE' +#AUTHS='PUBLIC,PRIVATE,BAR,FOO' +#QUERY_NAME='Developer Test Streaming Query' +#PAGE_SIZE='10' + +# Override common get_query_id +get_query_id () { + while read_dom; do + if [[ $ENTITY =~ 'QueryId' ]]; then + echo $CONTENT + break + fi + done +} + +# Override common get_num_events +get_num_events () { + count=0 + while read_dom; do + if [[ $ENTITY = 'ReturnedEvents' ]]; then + count=$((count + CONTENT)) + fi + done + echo $count +} + +runStreamingQuery() { + createTempPem + + FOLDER="${QUERY_TYPE}_$(date +%Y%m%d_%I%M%S.%N)" + + mkdir $FOLDER + cd $FOLDER + + SYSTEM_FROM=$(hostname) + + echo "$(date): Running streaming query" + echo "$(date): Running streaming query" > querySummary.txt + curl -s -D headers_0.txt -k -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + --data-urlencode "begin=${BEGIN}" \ + --data-urlencode "end=${END}" \ + --data-urlencode "columnVisibility=${COLUMN_VISIBILITY}" \ + --data-urlencode "query=${QUERY}" \ + --data-urlencode "query.syntax=${QUERY_SYNTAX}" \ + --data-urlencode "auths=${AUTHS}" \ + --data-urlencode "systemFrom=${SYSTEM_FROM}" \ + --data-urlencode "queryName=${QUERY_NAME}" \ + --data-urlencode "pagesize=${PAGE_SIZE}" \ + ${DATAWAVE_ENDPOINT}/${QUERY_LOGIC}/createAndExecute -o streamingResponse.xml -w '%{http_code}\n' >> querySummary.txt + + QUERY_ID=$(get_query_id < streamingResponse.xml) + NUM_EVENTS=$(get_num_events < streamingResponse.xml) + + echo "$(date): Streaming results contained $NUM_EVENTS events" + echo "$(date): Streaming results contained $NUM_EVENTS events" >> querySummary.txt + + cd ../ + + logMetrics +} diff --git a/docker/scripts/connectionFactory.sh b/docker/scripts/connectionFactory.sh new file mode 100755 index 00000000000..45941a20369 --- /dev/null +++ b/docker/scripts/connectionFactory.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +source ${SCRIPT_DIR}/common/common.sh + +createTempPem + +EXECUTOR_ENDPOINT1=https://localhost:8743/executor/v1 +EXECUTOR_ENDPOINT2=https://localhost:8843/executor/v1 + +FOLDER="executor_$(date +%Y%m%d_%I%M%S.%N)" + +mkdir $FOLDER +cd $FOLDER + +echo "$(date): polling connection factory for pool1" +curl -s -D headers_1.txt -k -E ${TMP_PEM} \ + -H "Accept: application/xml" \ + -H "Pool: $POOL" \ + 
${EXECUTOR_ENDPOINT1}/Common/AccumuloConnectionFactory/stats -o connectionFactory1Response.txt -w '%{http_code}\n'
+echo "$(date): polling connection factory for pool2"
+curl -s -D headers_2.txt -k -E ${TMP_PEM} \
+ -H "Accept: application/xml" \
+ -H "Pool: $POOL" \
+ ${EXECUTOR_ENDPOINT2}/Common/AccumuloConnectionFactory/stats -o connectionFactory2Response.txt -w '%{http_code}\n'
+
+cd ../ diff --git a/docker/scripts/count.sh b/docker/scripts/count.sh new file mode 100755 index 00000000000..bd41af24928 --- /dev/null +++ b/docker/scripts/count.sh @@ -0,0 +1,18 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/count.sh
+
+# QUERY PARAMETERS
+QUERY_LOGIC='CountQuery'
+BEGIN='19660908 000000.000'
+END='20161002 235959.999'
+COLUMN_VISIBILITY='PUBLIC'
+QUERY='GENRES:[Action to Western]'
+QUERY_SYNTAX='LUCENE'
+AUTHS='PUBLIC,PRIVATE,BAR,FOO'
+QUERY_NAME='Developer Test Query'
+PAGE_SIZE='10'
+
+runCount \ No newline at end of file diff --git a/docker/scripts/discovery.sh b/docker/scripts/discovery.sh new file mode 100755 index 00000000000..61550a253c6 --- /dev/null +++ b/docker/scripts/discovery.sh @@ -0,0 +1,19 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/query.sh
+
+# QUERY PARAMETERS
+QUERY_TYPE='discovery'
+QUERY_LOGIC='DiscoveryQuery'
+BEGIN='19660908 000000.000'
+END='20161002 235959.999'
+COLUMN_VISIBILITY='PUBLIC'
+QUERY='Western'
+QUERY_SYNTAX='LUCENE'
+AUTHS='PUBLIC,PRIVATE,BAR,FOO'
+QUERY_NAME='Developer Test Query'
+PAGE_SIZE='100'
+
+runQuery diff --git a/docker/scripts/edge.sh b/docker/scripts/edge.sh new file mode 100755 index 00000000000..425b21d4d9b --- /dev/null +++ b/docker/scripts/edge.sh @@ -0,0 +1,17 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/edge.sh
+
+# QUERY PARAMETERS
+QUERY_LOGIC='EdgeQuery'
+BEGIN='19660908 000000.000'
+END='20161002 235959.999'
+COLUMN_VISIBILITY='PUBLIC'
+QUERY="SOURCE == 'Jerry Seinfeld'"
+AUTHS='PUBLIC,PRIVATE,BAR,FOO'
+QUERY_NAME='Developer Test Edge Query'
+PAGE_SIZE='100'
+
+runEdgeQuery diff --git a/docker/scripts/edgeEvent.sh b/docker/scripts/edgeEvent.sh new file mode 100755 index 00000000000..ff5891b1dee --- /dev/null +++ b/docker/scripts/edgeEvent.sh @@ -0,0 +1,19 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/query.sh
+
+# QUERY PARAMETERS
+QUERY_TYPE='edgeEvent'
+QUERY_LOGIC='EdgeEventQuery'
+BEGIN='19660908 000000.000'
+END='20161002 235959.999'
+COLUMN_VISIBILITY='PUBLIC'
+QUERY='SOURCE:Jerry\\ Seinfeld SINK:Seinfeld TYPE:TV_SHOW_CAST RELATION:PERSON-SHOW'
+QUERY_SYNTAX='LUCENE'
+AUTHS='PUBLIC,PRIVATE,BAR,FOO'
+QUERY_NAME='Developer Test Query'
+PAGE_SIZE='10'
+
+runQuery diff --git a/docker/scripts/errorCount.sh b/docker/scripts/errorCount.sh new file mode 100755 index 00000000000..b0adc420ff5 --- /dev/null +++ b/docker/scripts/errorCount.sh @@ -0,0 +1,19 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/count.sh
+
+# QUERY PARAMETERS
+QUERY_TYPE='errorCount'
+QUERY_LOGIC='ErrorCountQuery'
+BEGIN='19660908 000000.000'
+END='20301231 235959.999'
+COLUMN_VISIBILITY='PUBLIC'
+QUERY='FOO_FIELD:myFoo'
+QUERY_SYNTAX='LUCENE'
+AUTHS='PUBLIC,PRIVATE,BAR,FOO'
+QUERY_NAME='Developer Test Query'
+PAGE_SIZE='10'
+
+runCount diff --git a/docker/scripts/errorDiscovery.sh b/docker/scripts/errorDiscovery.sh new file mode 100755 index 00000000000..f7cbcc8586d --- /dev/null +++ b/docker/scripts/errorDiscovery.sh @@ -0,0 +1,19 @@
+#!/bin/bash
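+
+# The error* scripts exercise the Error* query logics; note the far-future
+# END date (20301231), so recently written error records still fall in the
+# queried date range.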
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/query.sh
+
+# QUERY PARAMETERS
+QUERY_TYPE='errorDiscovery'
+QUERY_LOGIC='ErrorDiscoveryQuery'
+BEGIN='19660908 000000.000'
+END='20301231 235959.999'
+COLUMN_VISIBILITY='PUBLIC'
+QUERY='english'
+QUERY_SYNTAX='LUCENE'
+AUTHS='PUBLIC,PRIVATE,BAR,FOO'
+QUERY_NAME='Developer Test Query'
+PAGE_SIZE='100'
+
+runQuery diff --git a/docker/scripts/errorFieldIndexCount.sh b/docker/scripts/errorFieldIndexCount.sh new file mode 100755 index 00000000000..99480ca749c --- /dev/null +++ b/docker/scripts/errorFieldIndexCount.sh @@ -0,0 +1,19 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/count.sh
+
+# QUERY PARAMETERS
+QUERY_TYPE='errorFieldIndexCount'
+QUERY_LOGIC='ErrorFieldIndexCountQuery'
+BEGIN='19660908 000000.000'
+END='20301231 235959.999'
+COLUMN_VISIBILITY='PUBLIC'
+QUERY='FOO_FIELD'
+QUERY_SYNTAX='LUCENE'
+AUTHS='PUBLIC,PRIVATE,BAR,FOO'
+QUERY_NAME='Developer Test Query'
+PAGE_SIZE='10'
+
+runCount diff --git a/docker/scripts/errorQuery.sh b/docker/scripts/errorQuery.sh new file mode 100755 index 00000000000..af39b413b6e --- /dev/null +++ b/docker/scripts/errorQuery.sh @@ -0,0 +1,19 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/query.sh
+
+# QUERY PARAMETERS
+QUERY_TYPE='errorQuery'
+QUERY_LOGIC='ErrorEventQuery'
+BEGIN='19660908 000000.000'
+END='20301231 235959.999'
+COLUMN_VISIBILITY='PUBLIC'
+QUERY='FOO_FIELD:myFoo'
+QUERY_SYNTAX='LUCENE'
+AUTHS='PUBLIC,PRIVATE,BAR,FOO'
+QUERY_NAME='Developer Test Query'
+PAGE_SIZE='10'
+
+runQuery diff --git a/docker/scripts/executorHealth.sh b/docker/scripts/executorHealth.sh new file mode 100755 index 00000000000..4fa56b10d41 --- /dev/null +++ b/docker/scripts/executorHealth.sh @@ -0,0 +1,23 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/common.sh
+
+createTempPem
+
+HEALTH_ENDPOINT=https://localhost:8743/executor/mgmt/health
+
+FOLDER="executorHealth_$(date +%Y%m%d_%I%M%S.%N)"
+
+mkdir $FOLDER
+cd $FOLDER
+
+echo "$(date): Getting query executor service health"
+echo "$(date): Getting query executor service health" > healthSummary.txt
+curl -s -D headers_0.txt -k -E ${TMP_PEM} \
+ -H "Accept: application/json" \
+ ${HEALTH_ENDPOINT} -o healthResponse.json -w '%{http_code}\n' >> healthSummary.txt
+
+echo "$(date): Query Executor service health retrieved"
+echo "$(date): Query Executor service health retrieved" >> healthSummary.txt diff --git a/docker/scripts/executorShutdown.sh b/docker/scripts/executorShutdown.sh new file mode 100755 index 00000000000..68e733bbeb3 --- /dev/null +++ b/docker/scripts/executorShutdown.sh @@ -0,0 +1,23 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/common.sh
+
+createTempPem
+
+SHUTDOWN_ENDPOINT=https://localhost:8743/executor/mgmt/shutdown
+
+FOLDER="executorShutdown_$(date +%Y%m%d_%I%M%S.%N)"
+
+mkdir $FOLDER
+cd $FOLDER
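+
+# The management shutdown endpoint only responds to POST, hence the explicit
+# -X POST on the curl call below.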
+
+echo "$(date): Shutting down query executor service"
+echo "$(date): Shutting down query executor service" > shutdownSummary.txt
+curl -s -D headers_0.txt -k -E ${TMP_PEM} -X POST \
+ -H "Accept: application/json" \
+ ${SHUTDOWN_ENDPOINT} -o shutdownResponse.json -w '%{http_code}\n' >> shutdownSummary.txt
+
+echo "$(date): Query Executor service shutdown"
+echo "$(date): Query Executor service shutdown" >> shutdownSummary.txt diff --git a/docker/scripts/fieldIndexCount.sh b/docker/scripts/fieldIndexCount.sh new file mode 100755 index 00000000000..54ad4df9824 --- /dev/null +++ b/docker/scripts/fieldIndexCount.sh @@ -0,0 +1,19 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/query.sh
+
+# QUERY PARAMETERS
+QUERY_TYPE='fieldIndexCount'
+QUERY_LOGIC='FieldIndexCountQuery'
+BEGIN='19660908 000000.000'
+END='20161002 235959.999'
+COLUMN_VISIBILITY='PUBLIC'
+QUERY='GENRES'
+QUERY_SYNTAX='LUCENE'
+AUTHS='PUBLIC,PRIVATE,BAR,FOO'
+QUERY_NAME='Developer Test Query'
+PAGE_SIZE='10'
+
+runQuery diff --git a/docker/scripts/hitHighlights.sh b/docker/scripts/hitHighlights.sh new file mode 100755 index 00000000000..be8b578022b --- /dev/null +++ b/docker/scripts/hitHighlights.sh @@ -0,0 +1,19 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/query.sh
+
+# QUERY PARAMETERS
+QUERY_TYPE='hitHighlights'
+QUERY_LOGIC='HitHighlights'
+BEGIN='19660908 000000.000'
+END='20161002 235959.999'
+COLUMN_VISIBILITY='PUBLIC'
+QUERY='GENRES:[Action to Western]'
+QUERY_SYNTAX='LUCENE'
+AUTHS='PUBLIC,PRIVATE,BAR,FOO'
+QUERY_NAME='Developer Test Query'
+PAGE_SIZE='10'
+
+runQuery diff --git a/docker/scripts/lookup.sh b/docker/scripts/lookup.sh new file mode 100755 index 00000000000..f513954b9b9 --- /dev/null +++ b/docker/scripts/lookup.sh @@ -0,0 +1,18 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/lookup.sh
+
+# QUERY PARAMETERS
+BEGIN='19660908 000000.000'
+END='20161002 235959.999'
+COLUMN_VISIBILITY='PUBLIC'
+UUID_TYPE='PAGE_TITLE'
+UUID='anarchism'
+QUERY='GENRES:[Action to Western]'
+QUERY_SYNTAX='LUCENE'
+AUTHS='PUBLIC,PRIVATE,BAR,FOO'
+QUERY_NAME='Developer Test Lookup UUID Query'
+
+runLookup diff --git a/docker/scripts/lookupContent.sh b/docker/scripts/lookupContent.sh new file mode 100755 index 00000000000..ece58797d4a --- /dev/null +++ b/docker/scripts/lookupContent.sh @@ -0,0 +1,18 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/lookupContent.sh
+
+# QUERY PARAMETERS
+BEGIN='19660908 000000.000'
+END='20161002 235959.999'
+COLUMN_VISIBILITY='PUBLIC'
+UUID_TYPE='PAGE_TITLE'
+UUID='anarchism'
+QUERY='GENRES:[Action to Western]'
+QUERY_SYNTAX='LUCENE'
+AUTHS='PUBLIC,PRIVATE,BAR,FOO'
+QUERY_NAME='Developer Test Lookup UUID Query'
+
+runLookupContent diff --git a/docker/scripts/mapReduceCancel.sh b/docker/scripts/mapReduceCancel.sh new file mode 100755 index 00000000000..cec8a55c474 --- /dev/null +++ b/docker/scripts/mapReduceCancel.sh @@ -0,0 +1,10 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/common.sh
+
+createTempPem
+
+echo "$(date): Canceling map reduce query"
+curl -X POST -s -k -E ${TMP_PEM} ${MAPREDUCE_ENDPOINT}/$1/cancel -w '%{http_code}\n' diff --git a/docker/scripts/mapReduceQuery.sh b/docker/scripts/mapReduceQuery.sh new file mode 100755 index 00000000000..dca3cc9da5e --- /dev/null +++ b/docker/scripts/mapReduceQuery.sh @@ -0,0 +1,21 @@
+#!/bin/bash
+
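+# Thin wrapper: sets the query parameters below and delegates to
+# common/mapReduceQuery.sh, which defines the query, submits it as a
+# map reduce job, polls the job status, and tallies the downloaded results.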
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/mapReduceQuery.sh
+
+# QUERY PARAMETERS
+QUERY_LOGIC='EventQuery'
+JOB_NAME='BulkResultsJob'
+FORMAT=XML
+OUTPUT_FORMAT=TEXT
+BEGIN='19660908 000000.000'
+END='20161002 235959.999'
+COLUMN_VISIBILITY='PUBLIC'
+QUERY='GENRES:[Action to Western]'
+QUERY_SYNTAX='LUCENE'
+AUTHS='PUBLIC,PRIVATE,BAR,FOO'
+QUERY_NAME='Developer Test Query'
+PAGE_SIZE='10'
+
+runMapReduceQuery diff --git a/docker/scripts/mapReduceRemove.sh b/docker/scripts/mapReduceRemove.sh new file mode 100755 index 00000000000..5c5d8562f9e --- /dev/null +++ b/docker/scripts/mapReduceRemove.sh @@ -0,0 +1,10 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/common.sh
+
+createTempPem
+
+echo "$(date): Removing map reduce query"
+curl -X DELETE -s -k -E ${TMP_PEM} ${DATAWAVE_ENDPOINT}/$1/remove -w '%{http_code}\n' diff --git a/docker/scripts/metrics.sh b/docker/scripts/metrics.sh new file mode 100755 index 00000000000..a6c4ce56b3b --- /dev/null +++ b/docker/scripts/metrics.sh @@ -0,0 +1,19 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/query.sh
+
+# QUERY PARAMETERS
+QUERY_TYPE='metrics'
+QUERY_LOGIC='QueryMetricsQuery'
+BEGIN='20000101 000000.000'
+END="$(date +%Y%m%d) 235959.999"
+COLUMN_VISIBILITY='PUBLIC'
+QUERY='QUERY_ID:[0 TO z]'
+QUERY_SYNTAX='LUCENE'
+AUTHS='PUBLIC,PRIVATE,BAR,FOO'
+QUERY_NAME='Developer Test Query'
+PAGE_SIZE='10'
+
+runQuery diff --git a/docker/scripts/modification.sh b/docker/scripts/modification.sh new file mode 100755 index 00000000000..26917515f69 --- /dev/null +++ b/docker/scripts/modification.sh @@ -0,0 +1,31 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/common.sh
+
+createTempPem
+
+MODIFICATION_ENDPOINT=https://localhost:9343/modification/v1
+
+FOLDER="modification_$(date +%Y%m%d_%I%M%S.%N)"
+
+mkdir $FOLDER
+cd $FOLDER
+
+echo "$(date): polling modification service for configurations"
+curl -s -D headers_1.txt -k -E ${TMP_PEM} \
+ -H "Accept: application/json" \
+ ${MODIFICATION_ENDPOINT}/listConfigurations -o modificationConfigurationResponse.txt -w '%{http_code}\n'
+
+echo "$(date): reloading modification service cache"
+curl -s -D headers_2.txt -k -E ${TMP_PEM} \
+ -H "Accept: application/json" \
+ ${MODIFICATION_ENDPOINT}/reloadCache -o modificationReloadCacheResponse.txt -w '%{http_code}\n'
+
+echo "$(date): polling modification service fields"
+curl -s -D headers_3.txt -k -E ${TMP_PEM} \
+ -H "Accept: application/json" \
+ ${MODIFICATION_ENDPOINT}/getMutableFieldList -o modificationFieldListResponse.txt -w '%{http_code}\n'
+
+cd ../ diff --git a/docker/scripts/oozieQuery.sh b/docker/scripts/oozieQuery.sh new file mode 100755 index 00000000000..8251471bf0b --- /dev/null +++ b/docker/scripts/oozieQuery.sh @@ -0,0 +1,11 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/oozieQuery.sh
+
+# QUERY PARAMETERS
+WORKFLOW='OozieJob'
+COLUMN_VISIBILITY='PUBLIC'
+
+runOozieQuery diff --git a/docker/scripts/plan.sh b/docker/scripts/plan.sh new file mode 100755 index 00000000000..4b01d4e3b1c --- /dev/null +++ b/docker/scripts/plan.sh @@ -0,0 +1,18 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
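+
+# plan asks the query service for the plan it would execute (typically the
+# expanded form of the query) without actually running it; see common/plan.sh
+# for the ${DATAWAVE_ENDPOINT}/${QUERY_LOGIC}/plan call.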
+
+source ${SCRIPT_DIR}/common/plan.sh
+
+# QUERY PARAMETERS
+QUERY_LOGIC='EventQuery'
+BEGIN='19660908 000000.000'
+END='20161002 235959.999'
+COLUMN_VISIBILITY='PUBLIC'
+QUERY='GENRES:[Action to Western]'
+QUERY_SYNTAX='LUCENE'
+AUTHS='PUBLIC,PRIVATE,BAR,FOO'
+QUERY_NAME='Developer Test Query'
+EXPAND_VALUES='true'
+
+runPlan diff --git a/docker/scripts/poundit.sh b/docker/scripts/poundit.sh new file mode 100755 index 00000000000..bc3ce8bae47 --- /dev/null +++ b/docker/scripts/poundit.sh @@ -0,0 +1,19 @@
+#!/bin/bash
+
+RUNS=${1:-10}
+SCRIPTS=${@:2}
+FOLDER="poundit_${RUNS}_$(date +%Y%m%d_%I%M%S.%N)"
+
+if [[ "$SCRIPTS" == "" || "$SCRIPTS" == "all" ]]; then
+ SCRIPTS="batchLookupContent.sh batchLookup.sh connectionFactory.sh count.sh discovery.sh edgeEvent.sh edge.sh errorCount.sh errorDiscovery.sh errorFieldIndexCount.sh errorQuery.sh fieldIndexCount.sh hitHighlights.sh lookupContent.sh lookup.sh metrics.sh plan.sh predict.sh query.sh streamingQuery.sh termFrequency.sh"
+fi
+
+mkdir $FOLDER
+cd $FOLDER
+
+for ((i=0; i < ${RUNS}; i++)); do
+ for script in $SCRIPTS; do
+ echo "Executing ../${script} >> ${script%%.sh}.log &"
+ ../${script} >> ${script%%.sh}.log &
+ done
+done diff --git a/docker/scripts/predict.sh b/docker/scripts/predict.sh new file mode 100755 index 00000000000..4eeb34695c7 --- /dev/null +++ b/docker/scripts/predict.sh @@ -0,0 +1,18 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/predict.sh
+
+# QUERY PARAMETERS
+QUERY_LOGIC='EventQuery'
+BEGIN='19660908 000000.000'
+END='20161002 235959.999'
+COLUMN_VISIBILITY='PUBLIC'
+QUERY='GENRES:[Action to Western]'
+QUERY_SYNTAX='LUCENE'
+AUTHS='PUBLIC,PRIVATE,BAR,FOO'
+QUERY_NAME='Developer Test Query'
+EXPAND_VALUES='true'
+
+runPredict diff --git a/docker/scripts/query.sh b/docker/scripts/query.sh new file mode 100755 index 00000000000..703890747d0 --- /dev/null +++ b/docker/scripts/query.sh @@ -0,0 +1,18 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/query.sh
+
+# QUERY PARAMETERS
+QUERY_LOGIC='EventQuery'
+BEGIN='19660908 000000.000'
+END='20161002 235959.999'
+COLUMN_VISIBILITY='PUBLIC'
+QUERY='GENRES:[Action to Western]'
+QUERY_SYNTAX='LUCENE'
+AUTHS='PUBLIC,PRIVATE,BAR,FOO'
+QUERY_NAME='Developer Test Query'
+PAGE_SIZE='10'
+
+runQuery diff --git a/docker/scripts/queryHealth.sh b/docker/scripts/queryHealth.sh new file mode 100755 index 00000000000..25a639bd8ec --- /dev/null +++ b/docker/scripts/queryHealth.sh @@ -0,0 +1,23 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/common.sh
+
+createTempPem
+
+HEALTH_ENDPOINT=https://localhost:8443/query/mgmt/health
+
+FOLDER="queryHealth_$(date +%Y%m%d_%I%M%S.%N)"
+
+mkdir $FOLDER
+cd $FOLDER
+
+echo "$(date): Getting query service health"
+echo "$(date): Getting query service health" > healthSummary.txt
+curl -s -D headers_0.txt -k -E ${TMP_PEM} \
+ -H "Accept: application/json" \
+ ${HEALTH_ENDPOINT} -o healthResponse.json -w '%{http_code}\n' >> healthSummary.txt
+
+echo "$(date): Query service health retrieved"
+echo "$(date): Query service health retrieved" >> healthSummary.txt diff --git a/docker/scripts/queryShutdown.sh b/docker/scripts/queryShutdown.sh new file mode 100755 index 00000000000..c720eed8bd1 --- /dev/null +++ b/docker/scripts/queryShutdown.sh @@ -0,0 +1,23 @@
+#!/bin/bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source ${SCRIPT_DIR}/common/common.sh
+
+createTempPem
+
+SHUTDOWN_ENDPOINT=https://localhost:8443/query/mgmt/shutdown
+
+FOLDER="queryShutdown_$(date +%Y%m%d_%I%M%S.%N)"
+
+mkdir $FOLDER
+cd $FOLDER
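+
+# Counterpart to executorShutdown.sh, aimed at the query service on port 8443;
+# like that script, it POSTs to the management shutdown endpoint.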
&> /dev/null && pwd ) + +source ${SCRIPT_DIR}/common/common.sh + +createTempPem + +SHUTDOWN_ENDPOINT=https://localhost:8443/query/mgmt/shutdown + +FOLDER="queryShutdown_$(date +%Y%m%d_%I%M%S.%N)" + +mkdir $FOLDER +cd $FOLDER + +echo "$(date): Shutting down query service" +echo "$(date): Shutting down query service" > shutdownSummary.txt +curl -s -D headers_0.txt -k -E ${TMP_PEM} -X POST \ + -H "Accept: application/json" \ + ${SHUTDOWN_ENDPOINT} -o shutdownResponse.json -w '%{http_code}\n' >> shutdownSummary.txt + +echo "$(date): Query service shutdown" +echo "$(date): Query service shutdown" > shutdownSummary.txt diff --git a/docker/scripts/streamingQuery.sh b/docker/scripts/streamingQuery.sh new file mode 100755 index 00000000000..8eb48db3e1d --- /dev/null +++ b/docker/scripts/streamingQuery.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +source ${SCRIPT_DIR}/common/streamingQuery.sh + +# QUERY PARAMETERS +QUERY_LOGIC='EventQuery' +BEGIN='19660908 000000.000' +END='20161002 235959.999' +COLUMN_VISIBILITY='PUBLIC' +QUERY='GENRES:[Action to Western]' +QUERY_SYNTAX='LUCENE' +AUTHS='PUBLIC,PRIVATE,BAR,FOO' +QUERY_NAME='Developer Test Streaming Query' +PAGE_SIZE='10' + +runStreamingQuery diff --git a/docker/scripts/termFrequency.sh b/docker/scripts/termFrequency.sh new file mode 100755 index 00000000000..2accd64ca48 --- /dev/null +++ b/docker/scripts/termFrequency.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +source ${SCRIPT_DIR}/common/query.sh + +# QUERY PARAMETERS +QUERY_TYPE='termFrequency' +QUERY_LOGIC='TermFrequencyQuery' +BEGIN='19500101 000000.000' +END='20161002 235959.999' +COLUMN_VISIBILITY='PUBLIC' +QUERY='jackie:19520920_0/tvmaze/-bb3qxp.e771of.e3f2gs' +QUERY_SYNTAX='LUCENE' +AUTHS='PUBLIC,PRIVATE,BAR,FOO' +QUERY_NAME='Developer Test Query' +PAGE_SIZE='10' + +runQuery diff --git a/docker/scripts/testAll.sh b/docker/scripts/testAll.sh new file mode 100755 index 00000000000..9430fb7be7f --- /dev/null +++ b/docker/scripts/testAll.sh @@ -0,0 +1,201 @@ +#!/usr/bin/env bash + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +source ${SCRIPT_DIR}/common/common.sh + +WEBSERVICE="${WEBSERVICE:-false}" +MAX_ATTEMPTS=30 +QUERY_TIMEOUT=2m +# Amount of time to wait after a failed test attempt +TIMEOUT=10 +# Amount of time to wait for services to be ready +SERVICES_INTERVAL=4 +TEST_COUNTER=0 + +# First argument is the script to run +# Second argument is the expected number of events +# Third argument is the expected number of pages/files +runTest () { + ATTEMPTS=3 + ATTEMPT=1 + TEST_COUNTER=$((TEST_COUNTER + 1)) + test_start_time=$(date +%s%N) + + while [ $ATTEMPT -le $ATTEMPTS ]; do + echo + echo -n "Running test (Attempt ${ATTEMPT}/${ATTEMPTS}): $1 - " + echo + + attempt_start_time=$(date +%s%N) + QUERY_RESPONSE="$(timeout ${QUERY_TIMEOUT} ${SCRIPT_DIR}/$1)" + attempt_end_time=$(date +%s%N) + EXIT_CODE=$? + + if [[ "$QUERY_RESPONSE" == *"Returned $2 events"* ]] ; then + if [ ! 
-z "$3" ] ; then + if [[ "$QUERY_RESPONSE" == *"Returned $3 pages"* ]] ; then + TEST_STATUS="${LABEL_PASS} -> Returned $2 events and $3 pages" && TESTS_PASSED="${TESTS_PASSED} $1" + printTestStatus "$attempt_start_time" "$attempt_end_time" "$TEST_STATUS" + printLine + return 0 + elif [[ "$QUERY_RESPONSE" == *"Returned $3 files"* ]] ; then + TEST_STATUS="${LABEL_PASS} -> Returned $2 events and $3 files" && TESTS_PASSED="${TESTS_PASSED} $1" + printTestStatus "$attempt_start_time" "$attempt_end_time" "$TEST_STATUS" + printLine + return 0 + else + TEST_STATUS="${LABEL_FAIL} -> Unexpected number of pages/files returned: Expected $2 events and $3 files." + echo "Query Response:" + echo "$QUERY_RESPONSE" + echo "----------------" + + if [ $ATTEMPT == $ATTEMPTS ] ; then + TEST_STATUS="${LABEL_FAIL} -> Failed to succeed after ${ATTEMPT} attempts" + TEST_FAILURES="${TEST_FAILURES},${1}: ${TEST_STATUS}" + printTestStatus "$test_start_time" "$(date +%s%N)" "$TEST_STATUS" + printLine + return 1 + else + sleep ${TIMEOUT} + fi + fi + else + TEST_STATUS="${LABEL_PASS} -> Returned $2 events" && TESTS_PASSED="${TESTS_PASSED} $1" + printTestStatus "$attempt_start_time" "$attempt_end_time" "$TEST_STATUS" + printLine + return 0 + fi + else + if [ $EXIT_CODE == 124 ] ; then + TEST_STATUS="${LABEL_FAIL} -> Query timed out after ${QUERY_TIMEOUT}" + printTestStatus "$attempt_start_time" "$attempt_end_time" "$TEST_STATUS" + else + TEST_STATUS="${LABEL_FAIL} -> Unexpected number of events returned: Expected $2 events." + printTestStatus "$attempt_start_time" "$attempt_end_time" "$TEST_STATUS" + echo "Query Response:" + echo "$QUERY_RESPONSE" + echo "----------------" + fi + + if [ $ATTEMPT == $ATTEMPTS ] ; then + TEST_STATUS="${LABEL_FAIL} -> Failed to succeed after ${ATTEMPT} attempts" + TEST_FAILURES="${TEST_FAILURES},${1}: ${TEST_STATUS}" + printTestStatus "$test_start_time" "$(date +%s%N)" "$TEST_STATUS" + printLine + return 1 + else + sleep ${TIMEOUT} + fi + fi + ((ATTEMPT++)) + done +} + +printTestSummary() { + echo " Overall Summary" + printLine + echo + echo " Test Count: ${TEST_COUNTER}" + echo + if [ -z "${TESTS_PASSED}" ] ; then + echo " Tests Passed: 0" + else + local passed=(${TESTS_PASSED}) + echo "$( printGreen " Tests Passed: ${#passed[@]}" )" + for p in "${passed[@]}" ; do + echo " ${p}" + done + fi + echo + if [ -z "${TEST_FAILURES}" ] ; then + echo " Failed Tests: 0" + else + ( + IFS="," + local failed=(${TEST_FAILURES}) + echo "$( printRed " Tests Failed: $(( ${#failed[@]} - 1 ))" )" + for f in "${failed[@]}" ; do + echo " ${f}" + done + ) + fi + echo + printLine +} + +setPrintColors +setTestLabels + +if [ "$WEBSERVICE" = true ]; then + echo "Waiting for webservice to be ready..." +else + echo "Waiting for services to be ready..." 
+fi + +attempt=0 +while [ $attempt -lt $MAX_ATTEMPTS ]; do + if [ "$WEBSERVICE" = true ]; then + echo "Checking webservice status (${attempt}/${MAX_ATTEMPTS})" + + WEBSERVICE_STATUS=$(curl -s -m 5 -k https://localhost:9443/DataWave/Common/Health/health | grep Status) + if [[ "${WEBSERVICE_STATUS}" =~ \"Status\":\"ready\" ]] ; then + echo "Webservice ready" + break + fi + else + echo "Checking query and executor status (${attempt}/${MAX_ATTEMPTS})" + + QUERY_STATUS=$(curl -s -m 5 http://localhost:8080/query/mgmt/health | grep UP) + EXEC_STATUS=$(curl -s -m 5 http://localhost:8380/executor/mgmt/health | grep UP) + if [ "${QUERY_STATUS}" == "{\"status\":\"UP\"}" ] && [ "${EXEC_STATUS}" == "{\"status\":\"UP\"}" ] ; then + echo "Query and Executor Services ready" + break + fi + fi + + sleep ${SERVICES_INTERVAL} + + ((attempt++)) +done + +if [ $attempt == $MAX_ATTEMPTS ]; then + if [ "$WEBSERVICE" = true ]; then + echo "$( printRed "FAILURE" ) - Webservice never became ready" + else + echo "$( printRed "FAILURE" ) - Query and/or Executor Services never became ready" + fi + exit 1 +fi + +echo "Running tests..." +echo + +runTest batchLookup.sh 2 +runTest batchLookupContent.sh 4 +runTest count.sh 12 1 +runTest discovery.sh 2 1 +# runTest edge.sh 0 0 +# runTest edgeEvent.sh 1 1 +runTest errorCount.sh 1 1 +runTest errorDiscovery.sh 1 1 +runTest errorFieldIndexCount.sh 1 1 +runTest errorQuery.sh 1 1 +runTest fieldIndexCount.sh 12 2 +runTest hitHighlights.sh 12 2 +runTest lookup.sh 1 +runTest lookupContent.sh 2 +# runTest metrics.sh 0 0 +runTest query.sh 12 2 +#runTest mapReduceQuery.sh 12 2 +#runTest oozieQuery.sh 0 0 + +# Gives option to skip the cleanup stage +if [ "${1}" == "-noCleanup" ] ; then + printTestSummary + exit 0 +fi + +printTestSummary +# The cleanup script will only delete the logs for tests that passed. Failed test logs will remain. +"$SCRIPT_DIR"/cleanup.sh "${TESTS_PASSED}" \ No newline at end of file diff --git a/docker/scripts/webQuery.sh b/docker/scripts/webQuery.sh new file mode 100755 index 00000000000..1f63ebb34d1 --- /dev/null +++ b/docker/scripts/webQuery.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# For this to work, the webserver must be running in the quickstart docker image. +# To do that, change --accumulo to --web or --webdebug in the docker-compose.yml. 
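+# Setting WEBSERVICE=true (done at the bottom of this script) is assumed to
+# make the common scripts point DATAWAVE_ENDPOINT at the webservice rather
+# than the query microservice.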
+ +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +source ${SCRIPT_DIR}/common/query.sh + +# QUERY PARAMETERS +QUERY_TYPE='webQuery' +QUERY_LOGIC='RemoteEventQuery' +BEGIN='19660908 000000.000' +END='20161002 235959.999' +COLUMN_VISIBILITY='PUBLIC' +QUERY='GENRES:[Action to Western]' +QUERY_SYNTAX='LUCENE' +AUTHS='PUBLIC,PRIVATE,BAR,FOO' +QUERY_NAME='Developer Test Query' +PAGE_SIZE='10' + +# run query against the webservice +WEBSERVICE=true + +runQuery diff --git a/docs/enunciate.xml b/docs/enunciate.xml index bba65097ae1..8a3857682f1 100644 --- a/docs/enunciate.xml +++ b/docs/enunciate.xml @@ -3,7 +3,8 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="http://enunciate.webcohesion.com/schemas/enunciate-2.9.1.xsd"> - + + diff --git a/docs/pom.xml b/docs/pom.xml index 595f0092c17..3bf8e7133e3 100644 --- a/docs/pom.xml +++ b/docs/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave datawave-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-docs @@ -166,6 +166,11 @@ jackson-jaxrs 1.8.8 + + org.springframework.security + spring-security-core + ${version.springframework} + org.wildfly wildfly-undertow @@ -237,6 +242,12 @@ 2.28.2 provided + + org.mockito + mockito-core + 2.28.2 + provided + org.picketbox picketbox @@ -279,6 +290,7 @@ docs + true ${project.build.outputDirectory}/enunciate @@ -320,7 +332,7 @@ ${project.build.directory}/dependency ${project.build.directory}/apidocs - generated-sources.* + generated-sources.*:datawave.microservice.querymetric diff --git a/githooks/README.md b/githooks/README.md new file mode 100644 index 00000000000..60640f86454 --- /dev/null +++ b/githooks/README.md @@ -0,0 +1,6 @@ +githooks enable a user to have things happen automatically based on the git action taken. 
DataWave has the following hooks: + +pre-push +Before pushing the code up to github, this script will +RUN: + ln -fns githooks/pre-push .git/hooks/pre-push diff --git a/githooks/pre-push b/githooks/pre-push new file mode 100755 index 00000000000..03831136deb --- /dev/null +++ b/githooks/pre-push @@ -0,0 +1,4 @@ +#!/bin/sh + +# Format files +mvn -V -B -e -ntp "-Dstyle.color=always" clean formatter:format sortpom:sort impsort:sort -Pautoformat diff --git a/import-control-accumulo.xml b/import-control-accumulo.xml new file mode 100644 index 00000000000..3227801483a --- /dev/null +++ b/import-control-accumulo.xml @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/microservices/configcheck/pom.xml b/microservices/configcheck/pom.xml new file mode 100644 index 00000000000..769ba2328b5 --- /dev/null +++ b/microservices/configcheck/pom.xml @@ -0,0 +1,261 @@ + + + 4.0.0 + gov.nsa.datawave.microservice + datawave-microservice-configcheck + 7.0.1-SNAPSHOT + ${project.artifactId} + + + github-datawave + GitHub Datawave Apache Maven Packages + https://maven.pkg.github.com/NationalSecurityAgency/datawave + + + + 11 + ${java.version} + ${java.version} + UTF-8 + UTF-8 + + 1C + 2.13.2 + 5.9.1 + 2.17.2 + 2.7.1 + + + + + org.springframework.boot + spring-boot-starter + ${version.spring.boot} + + + com.fasterxml.jackson + jackson-bom + ${version.jackson} + pom + import + + + org.apache.logging.log4j + log4j-bom + ${version.log4j.bom} + pom + import + + + org.junit + junit-bom + ${version.junit.bom} + pom + import + + + org.springframework.boot + spring-boot-dependencies + ${version.spring.boot} + pom + import + + + + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.dataformat + jackson-dataformat-yaml + + + org.springframework.boot + spring-boot-starter + + + commons-logging + commons-logging + + + spring-boot-starter-logging + org.springframework.boot + + + + + org.springframework.boot + spring-boot-starter-log4j2 + + + org.junit.jupiter + junit-jupiter-engine + test + + + org.springframework.boot + spring-boot-starter-test + test + + + + + + true + + + false + + github-datawave + https://maven.pkg.github.com/NationalSecurityAgency/datawave + + + + + + true + + + false + + github-datawave + https://maven.pkg.github.com/NationalSecurityAgency/datawave + + + + + + com.github.ekryd.sortpom + sortpom-maven-plugin + 2.10.0 + + + sort-pom + process-sources + + sort + + + + + false + \n + false + 4 + scope,groupId,artifactId + groupId,artifactId + true + + + + com.github.spotbugs + spotbugs-maven-plugin + 3.1.12.2 + + + + check + + + + + Max + High + true + ${spotbugs.excludes.file} + + + + net.revelc.code + impsort-maven-plugin + 1.9.0 + + + sort-imports + process-sources + + sort + + + + + java.,javax.,org.,com. 
+ java,* + + **/thrift/*.java + + + + + net.revelc.code.formatter + formatter-maven-plugin + 2.16.0 + + + + format + + + + + + gov.nsa.datawave + datawave-code-style + 1.0 + + + + eclipse/Eclipse-Datawave-Codestyle.xml + LF + ${maven.compiler.source} + ${maven.compiler.source} + ${maven.compiler.target} + + + + org.apache.maven.plugins + maven-surefire-plugin + 3.0.0-M6 + + + org.junit.jupiter + junit-jupiter-engine + ${version.junit.bom} + + + + false + ${surefire.forkCount} + 1 + 1 + 0 + 0 + -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -Dfile.encoding=UTF8 -Duser.timezone=GMT -Xmx1024m -Dapple.awt.UIElement=true -Djava.security.krb5.realm= -Djava.security.krb5.kdc= + true + random + + + + org.springframework.boot + spring-boot-maven-plugin + ${version.spring.boot} + + + + repackage + + + true + datawave.microservice.configcheck.ConfigCheckApplication + + ${basedir}/src/main/resources/launch.conf + + + + + + + + diff --git a/microservices/configcheck/src/main/java/datawave/microservice/configcheck/Analysis.java b/microservices/configcheck/src/main/java/datawave/microservice/configcheck/Analysis.java new file mode 100644 index 00000000000..96dc19d50bf --- /dev/null +++ b/microservices/configcheck/src/main/java/datawave/microservice/configcheck/Analysis.java @@ -0,0 +1,103 @@ +package datawave.microservice.configcheck; + +import static datawave.microservice.configcheck.XmlPropertyAnalyzer.PLACEHOLDERS_HEADER; +import static datawave.microservice.configcheck.XmlPropertyAnalyzer.PROPERTIES_HEADER; +import static datawave.microservice.configcheck.XmlPropertyAnalyzer.REFS_HEADER; +import static datawave.microservice.configcheck.XmlPropertyAnalyzer.VALUES_HEADER; +import static datawave.microservice.configcheck.XmlPropertyAnalyzer.YML_HEADER; + +/** + * Analysis is used to represent an xml analysis, splitting the various sections of the report (full or partial) into separate values. + */ +public class Analysis { + private String file; + private String analysis; + private String placeholders; + private String values; + private String refs; + private String properties; + private String yml; + + public Analysis(String file, String analysis) { + this.file = file; + this.analysis = analysis; + decomposeAnalysis(); + } + + private void decomposeAnalysis() { + if (analysis.contains(PLACEHOLDERS_HEADER) && analysis.contains(VALUES_HEADER) && analysis.contains(REFS_HEADER) && analysis.contains(PROPERTIES_HEADER) + && analysis.contains(YML_HEADER)) { + placeholders = extractSection(PLACEHOLDERS_HEADER, VALUES_HEADER); + values = extractSection(VALUES_HEADER, REFS_HEADER); + refs = extractSection(REFS_HEADER, PROPERTIES_HEADER); + properties = extractSection(PROPERTIES_HEADER, YML_HEADER); + yml = extractSection(YML_HEADER); + } else if (analysis.contains(VALUES_HEADER)) { + values = extractSection(VALUES_HEADER); + } + } + + private String extractSection(String header) { + return extractSection(header, null); + } + + private String extractSection(String header, String nextHeader) { + return analysis.substring(analysis.indexOf(header) + header.length(), (nextHeader != null) ? 
analysis.indexOf(nextHeader) : analysis.length()); + } + + public String getFile() { + return file; + } + + public void setFile(String file) { + this.file = file; + } + + public String getAnalysis() { + return analysis; + } + + public void setAnalysis(String analysis) { + this.analysis = analysis; + } + + public String getPlaceholders() { + return placeholders; + } + + public void setPlaceholders(String placeholders) { + this.placeholders = placeholders; + } + + public String getValues() { + return values; + } + + public void setValues(String values) { + this.values = values; + } + + public String getRefs() { + return refs; + } + + public void setRefs(String refs) { + this.refs = refs; + } + + public String getProperties() { + return properties; + } + + public void setProperties(String properties) { + this.properties = properties; + } + + public String getYml() { + return yml; + } + + public void setYml(String yml) { + this.yml = yml; + } +} diff --git a/microservices/configcheck/src/main/java/datawave/microservice/configcheck/AnalysisComparator.java b/microservices/configcheck/src/main/java/datawave/microservice/configcheck/AnalysisComparator.java new file mode 100644 index 00000000000..fb65714c6c3 --- /dev/null +++ b/microservices/configcheck/src/main/java/datawave/microservice/configcheck/AnalysisComparator.java @@ -0,0 +1,141 @@ +package datawave.microservice.configcheck; + +import static datawave.microservice.configcheck.XmlPropertyAnalyzer.PLACEHOLDERS_HEADER; +import static datawave.microservice.configcheck.XmlPropertyAnalyzer.REFS_HEADER; +import static datawave.microservice.configcheck.XmlPropertyAnalyzer.VALUES_HEADER; +import static datawave.microservice.configcheck.util.XmlRenderUtils.valueToObject; + +import java.util.Arrays; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.TreeSet; +import java.util.stream.Collectors; + +/** + * AnalysisComparator is used to produce a 'git merge conflict'-like comparison of two xml analyses. The resulting report can be viewed with a git merge + * conflict tool to analyze the differences. 
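+ * <p>
+ * Keys whose values match are emitted once; keys whose values differ, or that are
+ * present in only one analysis, are wrapped in FIRST/SECOND conflict-marker blocks
+ * mirroring the markers git itself writes during a merge.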
+ */
+public class AnalysisComparator {
+ private static final String FIRST_HEADER = "# FIRST: ";
+ private static final String SECOND_HEADER = "# SECOND: ";
+ private static final String FIRST = "<<<<<<< FIRST\n";
+ private static final String SEPARATOR = "=======\n";
+ private static final String SECOND = ">>>>>>> SECOND\n";
+
+ private Analysis firstAnalysis;
+ private Analysis secondAnalysis;
+
+ public AnalysisComparator(Analysis firstAnalysis, Analysis secondAnalysis) {
+ this.firstAnalysis = firstAnalysis;
+ this.secondAnalysis = secondAnalysis;
+ }
+
+ public String compareAnalyses() {
+ String output = "";
+ if (firstAnalysis != null && secondAnalysis != null) {
+ StringBuilder sb = new StringBuilder();
+ sb.append(FIRST_HEADER).append(firstAnalysis.getFile()).append("\n");
+ sb.append(SECOND_HEADER).append(secondAnalysis.getFile()).append("\n");
+
+ if (firstAnalysis.getPlaceholders() != null && !firstAnalysis.getPlaceholders().isEmpty() && secondAnalysis.getPlaceholders() != null
+ && !secondAnalysis.getPlaceholders().isEmpty()) {
+ sb.append(PLACEHOLDERS_HEADER);
+ sb.append(diffProperties(firstAnalysis.getPlaceholders(), secondAnalysis.getPlaceholders()));
+ sb.append("\n");
+ }
+
+ if (firstAnalysis.getValues() != null && !firstAnalysis.getValues().isEmpty() && secondAnalysis.getValues() != null
+ && !secondAnalysis.getValues().isEmpty()) {
+ sb.append(VALUES_HEADER);
+ sb.append(diffProperties(firstAnalysis.getValues(), secondAnalysis.getValues()));
+ sb.append("\n");
+ }
+
+ if (firstAnalysis.getRefs() != null && !firstAnalysis.getRefs().isEmpty() && secondAnalysis.getRefs() != null
+ && !secondAnalysis.getRefs().isEmpty()) {
+ sb.append(REFS_HEADER);
+ sb.append(diffProperties(firstAnalysis.getRefs(), secondAnalysis.getRefs()));
+ sb.append("\n");
+ }
+
+ output = sb.toString().trim();
+ }
+ return output;
+ }
+
+ private String diffProperties(String first, String second) {
+ StringBuilder sb = new StringBuilder();
+ Map<String, Object> firstMap = Arrays.stream(first.split("\ndoc.")).map(x -> x.split(": ", 2))
+ .collect(Collectors.toMap(x -> addPrefix(x[0], "doc."), x -> valueToObject(x[1])));
+ Map<String, Object> secondMap = Arrays.stream(second.split("\ndoc.")).map(x -> x.split(": ", 2))
+ .collect(Collectors.toMap(x -> addPrefix(x[0], "doc."), x -> valueToObject(x[1])));
+
+ Set<String> sortedKeys = new TreeSet<>();
+ sortedKeys.addAll(firstMap.keySet());
+ sortedKeys.addAll(secondMap.keySet());
+
+ boolean buildingDiff = false;
+ boolean buildingMissing = false;
+ StringBuilder firstBuilder = new StringBuilder();
+ StringBuilder secondBuilder = new StringBuilder();
+ for (String key : sortedKeys) {
+ boolean firstContains = firstMap.containsKey(key);
+ boolean secondContains = secondMap.containsKey(key);
+ boolean valuesMatch = firstContains && secondContains && Objects.equals(firstMap.get(key), secondMap.get(key));
+
+ if (valuesMatch) {
+ if (buildingDiff) {
+ writeDiff(sb, firstBuilder, secondBuilder);
+ buildingDiff = false;
+ } else if (buildingMissing) {
+ writeDiff(sb, firstBuilder, secondBuilder);
+ buildingMissing = false;
+ }
+ sb.append(key).append(": ").append(firstMap.get(key)).append("\n");
+ } else {
+ if (firstContains && secondContains) {
+ if (buildingMissing) {
+ writeDiff(sb, firstBuilder, secondBuilder);
+ buildingMissing = false;
+ }
+ buildingDiff = true;
+ } else {
+ if (buildingDiff) {
+ writeDiff(sb, firstBuilder, secondBuilder);
+ buildingDiff = false;
+ }
+ buildingMissing = true;
+ }
+
+ if (firstContains) {
+ firstBuilder.append(key).append(": 
").append(firstMap.get(key)).append("\n"); + } + if (secondContains) { + secondBuilder.append(key).append(": ").append(secondMap.get(key)).append("\n"); + } + } + } + if (buildingDiff || buildingMissing) { + writeDiff(sb, firstBuilder, secondBuilder); + } + return sb.toString(); + } + + private String addPrefix(String key, String prefix) { + if (!key.startsWith(prefix)) { + key = prefix + key; + } + return key; + } + + private void writeDiff(StringBuilder sb, StringBuilder first, StringBuilder second) { + sb.append(FIRST); + sb.append(first); + sb.append(SEPARATOR); + sb.append(second); + sb.append(SECOND); + first.setLength(0); + second.setLength(0); + } +} diff --git a/microservices/configcheck/src/main/java/datawave/microservice/configcheck/CommandRunner.java b/microservices/configcheck/src/main/java/datawave/microservice/configcheck/CommandRunner.java new file mode 100644 index 00000000000..7aed91849c4 --- /dev/null +++ b/microservices/configcheck/src/main/java/datawave/microservice/configcheck/CommandRunner.java @@ -0,0 +1,272 @@ +package datawave.microservice.configcheck; + +import static datawave.microservice.configcheck.util.ArgumentUtils.OUTPUT; +import static datawave.microservice.configcheck.util.ArgumentUtils.getFile; +import static datawave.microservice.configcheck.util.ArgumentUtils.getFileList; +import static datawave.microservice.configcheck.util.ArgumentUtils.getFiles; +import static datawave.microservice.configcheck.util.ArgumentUtils.getOutputPath; +import static datawave.microservice.configcheck.util.FileUtils.getFilePath; +import static datawave.microservice.configcheck.util.XmlRenderUtils.loadContent; +import static datawave.microservice.configcheck.util.XmlRenderUtils.loadProperties; +import static datawave.microservice.configcheck.util.XmlRenderUtils.loadYamlAsProperties; +import static datawave.microservice.configcheck.util.XmlRenderUtils.renderContent; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Properties; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.boot.ApplicationArguments; + +/** + * CommandRunner is used to parse the application arguments and figure out which command needs to be run. 
+ */
+public class CommandRunner {
+    private static Logger log = LoggerFactory.getLogger(CommandRunner.class);
+
+    public static final String RENDER = "render";
+    public static final String ANALYZE = "analyze";
+    public static final String COMPARE = "compare";
+
+    public static final String HELP = "help";
+    public static final String CONFIGDIR = "configdir";
+    public static final String PROPERTIES = "properties";
+    public static final String YAML = "yaml";
+    public static final String FULL_REPORT = "fullreport";
+
+    private ApplicationArguments args;
+
+    public CommandRunner(ApplicationArguments args) {
+        this.args = args;
+    }
+
+    public Output run() {
+        Output output = new Output();
+        boolean help = args.containsOption(HELP) || args.getNonOptionArgs().size() == 1;
+        if (!args.getNonOptionArgs().isEmpty()) {
+            switch (args.getNonOptionArgs().get(0)) {
+                case RENDER:
+                    try {
+                        if (!help) {
+                            runRenderCommand(args, output);
+                        }
+                    } catch (Exception e) {
+                        log.error("Encountered exception during render", e);
+                        output.setErrorMessage("Encountered ERROR running render command");
+                    } finally {
+                        if (output.getMessage() == null) {
+                            output.setMessage("USAGE \n" + "configcheck " + RENDER + " [FILE] [--" + CONFIGDIR + "=[PATH]] --" + PROPERTIES + "=[FILE...] [--"
+                                            + OUTPUT + "=[FILE]]\n" + "configcheck " + RENDER + " [FILE] [--" + CONFIGDIR + "=[PATH]] --" + YAML
+                                            + "=[FILE...] [--" + OUTPUT + "=[FILE]]\n\n" + "NOTE\n"
+                                            + "    Any file containing placeholders of the form '${}' can be rendered, but it is expected\n"
+                                            + "    that .properties, .yaml, and .xml files will be rendered.\n\n" + "OPTIONS\n" + "    --" + CONFIGDIR
+                                            + "=[PATH]\n" + "        The directory containing your configuration files\n\n" + "    --" + PROPERTIES
+                                            + "=[FILE...]\n" + "        A comma-separated list of properties files to load. If " + CONFIGDIR + " is specified, \n"
+                                            + "        the files will be loaded relative to that path. 
Configuration files are loaded in order, so \n" + + " properties in subsequent files will override prior files.\n\n" + " --" + YAML + "=[FILE...]\n" + + " A comma-separated list of yaml files to load. If " + CONFIGDIR + " is specified, \n" + + " the files will be loaded relative to that path. Configuration files are loaded in order, so \n" + + " properties in subsequent files will override prior files.\n\n" + " --" + OUTPUT + "=[FILE]\n" + + " The file where the output should be written.\n"); + } + } + break; + case COMPARE: + try { + if (!help) { + runCompareCommand(args, output); + } + } catch (Exception e) { + log.error("Encountered exception during compare", e); + output.setErrorMessage("Encountered ERROR running compare command"); + } finally { + if (output.getMessage() == null) { + output.setMessage("USAGE \n" + "configcheck " + COMPARE + " [FILE] [FILE] [--" + OUTPUT + "=[FILE]]\n\n" + "NOTE\n" + + " Compares the key/values generated by the " + ANALYZE + " command.\n\n" + "OPTIONS\n" + " --" + OUTPUT + + "=[FILE]\n" + " The file where the output should be written.\n"); + } + } + break; + default: + if (!help) { + output.setErrorMessage("configcheck: '" + args.getNonOptionArgs().get(0) + "' is not a configcheck command. See 'configcheck --help'"); + } + break; + } + } + + if (output.getMessage() == null) { + output.setMessage("configcheck can be used to render, analyze and compare files which are configured with \n" + + " property placeholders, such as QueryLogicFactory.xml. \n\n" + "Available Commands:\n" + " " + RENDER + + " Produces a rendered version of the given file, substituting placeholders using \n" + + " the given configuration properties.\n\n" + " " + ANALYZE + + " Produces a normalized key/value mapping of xml bean properties to their associated \n" + + " configuration property values.\n\n" + " " + COMPARE + + " Compares two analyses, and produces a git-merge-like diff showing value differences \n" + + " between the two files.\n\n"); + } + + return output; + } + + private void runRenderCommand(ApplicationArguments args, Output output) { + if (args.getNonOptionArgs().size() != 2) { + output.setErrorMessage("Invalid arguments for render command. See 'configcheck render --help'"); + return; + } + + // load the content + String file = getFile(args); + String content = loadContent(file); + if (content == null || content.isEmpty()) { + output.setErrorMessage("No content loaded for '" + file + "'"); + return; + } + + // load the properties (properties, or yaml) + Properties properties = getProperties(args); + if (properties == null) { + output.setErrorMessage("No properties/yaml loaded"); + return; + } + + output.setMessage(handleOutput(renderContent(content, properties))); + } + + private void runAnalyzeCommand(ApplicationArguments args, Output output) { + if (args.getNonOptionArgs().size() != 2) { + output.setErrorMessage("Invalid arguments for analyze command. 
See 'configcheck analyze --help'"); + return; + } + + // load the xml + String file = getFile(args); + String xmlContent = loadContent(file); + if (xmlContent == null || xmlContent.isEmpty()) { + output.setErrorMessage("No content loaded for '" + file + "'"); + return; + } + + // load the properties (properties, or yaml) + Properties properties = getProperties(args); + if (properties == null) { + output.setErrorMessage("No properties/yaml loaded"); + return; + } + + XmlPropertyAnalyzer analyzer = new XmlPropertyAnalyzer(xmlContent, properties); + String report; + if (args.containsOption(FULL_REPORT)) { + report = analyzer.getFullReport(); + } else { + report = analyzer.getSimpleReport(); + } + + output.setMessage(handleOutput(report)); + } + + private void runCompareCommand(ApplicationArguments args, Output output) { + if (args.getNonOptionArgs().size() != 3) { + output.setErrorMessage("Invalid arguments for compare command. See 'configcheck compare --help'"); + return; + } + + String[] files = getFiles(args); + if (files != null) { + // load the content + String first = loadContent(files[0]); + if (first == null || first.isEmpty()) { + output.setErrorMessage("No content loaded for '" + files[0] + "'"); + return; + } + + String second = loadContent(files[1]); + if (second == null || second.isEmpty()) { + output.setErrorMessage("No content loaded for '" + files[1] + "'"); + return; + } + + AnalysisComparator comparator = new AnalysisComparator(new Analysis(files[0], first), new Analysis(files[1], second)); + output.setMessage(comparator.compareAnalyses()); + } else { + output.setErrorMessage("No files to compare"); + } + } + + private String getConfigdir(ApplicationArguments args) { + String configdir = null; + if (args.getOptionNames().contains(CONFIGDIR)) { + configdir = args.getOptionValues(CONFIGDIR).get(0); + } + return configdir; + } + + private Properties getProperties(ApplicationArguments args) { + String configdir = getConfigdir(args); + + // load the properties (or yaml) + Properties mergedProperties = null; + if (args.getOptionNames().contains(PROPERTIES)) { + mergedProperties = loadProperties(configdir, getFileList(args, PROPERTIES)); + } else if (args.getOptionNames().contains(YAML)) { + mergedProperties = loadYamlAsProperties(configdir, getFileList(args, YAML)); + } else { + log.info("No properties or yaml to render"); + } + + return mergedProperties; + } + + private String handleOutput(String output) { + String outputPath = getOutputPath(args); + if (outputPath != null) { + writeOutput(getFilePath(outputPath), output); + output = ""; + } + return output; + } + + private void writeOutput(Path outputPath, String output) { + if (output != null) { + if (outputPath != null) { + try { + Files.write(outputPath, output.getBytes(StandardCharsets.UTF_8)); + } catch (IOException e) { + log.error("Unable to write output", e); + } + } + } else { + log.error("No output"); + } + } +} diff --git a/microservices/configcheck/src/main/java/datawave/microservice/configcheck/ConfigCheckApplication.java b/microservices/configcheck/src/main/java/datawave/microservice/configcheck/ConfigCheckApplication.java new file mode 100644 index 00000000000..3bf892903d1 --- /dev/null +++ b/microservices/configcheck/src/main/java/datawave/microservice/configcheck/ConfigCheckApplication.java @@ -0,0 +1,57 @@ +package datawave.microservice.configcheck; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.boot.ApplicationArguments; +import 
org.springframework.boot.ApplicationRunner; +import org.springframework.boot.Banner; +import org.springframework.boot.ExitCodeGenerator; +import org.springframework.boot.SpringApplication; + +/** + * ConfigCheckApplication is the main class which will be run when the executable jar is run. + */ +public class ConfigCheckApplication implements ApplicationRunner, ExitCodeGenerator { + private static Logger log = LoggerFactory.getLogger(ConfigCheckApplication.class); + + private int exitCode; + + public static void main(String[] args) { + SpringApplication app = new SpringApplication(ConfigCheckApplication.class); + app.setBannerMode(Banner.Mode.OFF); + System.exit(SpringApplication.exit(app.run(args))); + } + + @Override + public void run(ApplicationArguments args) throws Exception { + log.info("Executing: application runner"); + + log.info("Raw args:"); + for (int i = 0; i < args.getSourceArgs().length; i++) { + log.info("args[{}]: {}", i, args.getSourceArgs()[i]); + } + + log.info("Non-option args:"); + for (String arg : args.getNonOptionArgs()) { + log.info(" " + arg); + } + + log.info("Option args:"); + for (String name : args.getOptionNames()) { + log.info(" " + name + "=" + String.join(",", args.getOptionValues(name))); + } + + Output output = new CommandRunner(args).run(); + if (!output.isError()) { + System.out.println(output.getMessage()); + } else { + System.err.println(output.getMessage()); + this.exitCode = 1; + } + } + + @Override + public int getExitCode() { + return this.exitCode; + } +} diff --git a/microservices/configcheck/src/main/java/datawave/microservice/configcheck/Output.java b/microservices/configcheck/src/main/java/datawave/microservice/configcheck/Output.java new file mode 100644 index 00000000000..cb83a7675f2 --- /dev/null +++ b/microservices/configcheck/src/main/java/datawave/microservice/configcheck/Output.java @@ -0,0 +1,27 @@ +package datawave.microservice.configcheck; + +/** + * Output is used to handle both the output message and output type (stdout or stderr). 
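+ * <p>
+ * A minimal usage sketch, mirroring how ConfigCheckApplication consumes it:
+ *
+ * <pre>{@code
+ * Output output = new CommandRunner(args).run();
+ * (output.isError() ? System.err : System.out).println(output.getMessage());
+ * }</pre>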
+ */ +public class Output { + private String message; + private boolean error; + + public String getMessage() { + return message; + } + + public void setMessage(String message) { + this.message = message; + this.error = false; + } + + public void setErrorMessage(String message) { + this.message = message; + this.error = true; + } + + public boolean isError() { + return error; + } +} diff --git a/microservices/configcheck/src/main/java/datawave/microservice/configcheck/XmlPropertyAnalyzer.java b/microservices/configcheck/src/main/java/datawave/microservice/configcheck/XmlPropertyAnalyzer.java new file mode 100644 index 00000000000..b3ebbe35b39 --- /dev/null +++ b/microservices/configcheck/src/main/java/datawave/microservice/configcheck/XmlPropertyAnalyzer.java @@ -0,0 +1,396 @@ +package datawave.microservice.configcheck; + +import static com.fasterxml.jackson.dataformat.yaml.YAMLGenerator.Feature.SPLIT_LINES; +import static com.fasterxml.jackson.dataformat.yaml.YAMLGenerator.Feature.WRITE_DOC_START_MARKER; +import static datawave.microservice.configcheck.util.XmlRenderUtils.valueToObject; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.stream.Collectors; + +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.w3c.dom.Document; +import org.w3c.dom.NamedNodeMap; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; + +/** + * XmlPropertyAnalyzer is used to read an xml file and associated properties/yaml in order to produce a report of all the placeholders, their associated + * properties, and the values of those properties that are found in the xml file. 
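+ * <p>
+ * For example (an illustrative bean definition, not taken from the test resources), given
+ *
+ * <pre>{@code
+ * <bean id="myQueryLogic" class="...">
+ *     <property name="tableName" value="${table.name}"/>
+ * </bean>
+ * }</pre>
+ *
+ * and a property {@code table.name=datawave.shard}, the analyzer records the placeholder
+ * {@code doc.beans.myQueryLogic.tableName: ${table.name}} and the value
+ * {@code doc.beans.myQueryLogic.tableName: "datawave.shard"}.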
+ */
+public class XmlPropertyAnalyzer {
+    private static Logger log = LoggerFactory.getLogger(XmlPropertyAnalyzer.class);
+    private static final String IGNORED_KEY = "IGNORED_KEY";
+    private static final String DOC = "doc";
+    private static final String BEANS = "beans";
+    private static final String BEAN = "bean";
+    private static final String PROPERTY = "property";
+    private static final String VALUE = "value";
+    private static final String REF = "ref";
+    private static final String CONSTRUCTOR_ARG = "constructor-arg";
+    private static final String UTIL_MAP = "util:map";
+    private static final String MAP = "map";
+    private static final String ENTRY = "entry";
+    private static final String UTIL_LIST = "util:list";
+    private static final String LIST = "list";
+    private static final String UTIL_SET = "util:set";
+    private static final String SET = "set";
+    private static final String LOOKUP_METHOD = "lookup-method";
+    private static final String NULL = "null";
+    private static final String ID = "id";
+    private static final String KEY = "key";
+    private static final String NAME = "name";
+    private static final String TEXT = "#text";
+    private static final String COMMENT = "#comment";
+    private static final String CONTEXT_PROPERTY_PLACEHOLDER = "context:property-placeholder";
+
+    private static final String PLACEHOLDER_PREFIX = "${";
+    private static final String PLACEHOLDER_SUFFIX = "}";
+
+    private static final String KEY_COMPONENT_SEPARATOR = ".";
+
+    public static final String PLACEHOLDERS_HEADER = "Placeholders (key: ${placeholder})\n----------------------------------------\n";
+    public static final String VALUES_HEADER = "Values (key: value)\n----------------------------------------\n";
+    public static final String REFS_HEADER = "Refs (key: ref)\n----------------------------------------\n";
+    public static final String PROPERTIES_HEADER = "Effective Properties (name=value)\n----------------------------------------\n";
+    public static final String YML_HEADER = "Effective Yml\n----------------------------------------\n";
+
+    private String xmlContent;
+    private Properties properties;
+    private Map<String, String> propertyPlaceholderByKey = new LinkedHashMap<>();
+    private Map<String, Object> propertyValueByKey = new LinkedHashMap<>();
+    private Map<String, String> propertyRefByKey = new LinkedHashMap<>();
+
+    public XmlPropertyAnalyzer(String xmlContent, Properties properties) {
+        this.xmlContent = xmlContent;
+        this.properties = properties;
+        analyzeProperties();
+    }
+
+    private void analyzeProperties() {
+        try {
+            // find all of the placeholders and values in the original xml, and figure out what their key is
+            DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
+            Document doc = builder.parse(new ByteArrayInputStream(xmlContent.getBytes(StandardCharsets.UTF_8)));
+            doc.normalize();
+
+            LinkedList<Node> nodeStack = new LinkedList<>();
+            LinkedList<Node> parentStack = new LinkedList<>();
+            LinkedList<String> keyComponents = new LinkedList<>();
+
+            // the document node is the root
+            parentStack.push(doc);
+            keyComponents.add(DOC);
+
+            // add the initial nodes to the stack
+            NodeList nodeList = doc.getChildNodes();
+            for (int i = nodeList.getLength() - 1; i >= 0; i--) {
+                nodeStack.push(nodeList.item(i));
+            }
+
+            // work through the xml document until all nodes have been processed
+            while (!nodeStack.isEmpty()) {
+                Node node = nodeStack.pop();
+                String nodeName = node.getNodeName();
+
+                // perform upkeep on the parent stack and key components
+                while (node.getParentNode() != parentStack.peek()) {
+                    parentStack.pop();
+                    keyComponents.removeLast();
+                }
+
+                if (nodeName.equals(BEANS)) {
+                    addChildren(node, BEANS, nodeStack, parentStack, keyComponents);
+                } else if (nodeName.equals(BEAN)) {
+                    addChildren(node, getBeanId(node), nodeStack, parentStack, keyComponents);
+                } else if (nodeName.equals(PROPERTY)) {
+                    String propertyName = getPropertyName(node);
+                    if (node.hasChildNodes()) {
+                        addChildren(node, propertyName, nodeStack, parentStack, keyComponents);
+                    } else if (node.hasAttributes()) {
+                        String key = createKey(keyComponents, propertyName);
+                        if (node.getAttributes().getNamedItem(VALUE) != null) {
+                            String value = getPropertyValue(node);
+                            if (value.startsWith(PLACEHOLDER_PREFIX)) {
+                                propertyPlaceholderByKey.put(key, value);
+                                propertyValueByKey.put(key, properties.get(value.substring(2, value.length() - 1)));
+                            } else {
+                                propertyValueByKey.put(key, valueToObject(value));
+                            }
+                        } else if (node.getAttributes().getNamedItem(REF) != null) {
+                            propertyRefByKey.put(key, getPropertyRef(node));
+                        }
+                    }
+                } else if (nodeName.equals(CONSTRUCTOR_ARG)) {
+                    addChildren(node, CONSTRUCTOR_ARG, nodeStack, parentStack, keyComponents);
+                } else if (nodeName.equals(UTIL_MAP) || nodeName.equals(MAP)) {
+                    addChildren(node, IGNORED_KEY, nodeStack, parentStack, keyComponents);
+                } else if (nodeName.equals(ENTRY)) {
+                    addChildren(node, getEntryKey(node), nodeStack, parentStack, keyComponents);
+                } else if (nodeName.equals(VALUE)) {
+                    String key = createKey(keyComponents);
+                    Object value = node.getTextContent();
+                    String placeholder = null;
+
+                    if (((String) value).startsWith(PLACEHOLDER_PREFIX)) {
+                        placeholder = (String) value;
+                        value = properties.getProperty(placeholder.substring(2, placeholder.length() - 1));
+                    }
+
+                    value = valueToObject(value);
+
+                    String indexedKey = null;
+                    if (keyComponents.getLast().equals(CONSTRUCTOR_ARG)) {
+                        int index = 0;
+                        do {
+                            indexedKey = key + "[" + index + "]";
+                            index++;
+                        } while (propertyPlaceholderByKey.containsKey(indexedKey) || propertyValueByKey.containsKey(indexedKey));
+                    }
+
+                    key = indexedKey != null ? indexedKey : key;
+                    if (placeholder != null) {
+                        propertyPlaceholderByKey.put(key, placeholder);
+                    }
+                    propertyValueByKey.put(key, value);
+                } else if (nodeName.equals(UTIL_LIST) || nodeName.equals(LIST) || nodeName.equals(UTIL_SET) || nodeName.equals(SET)) {
+                    addChildren(node, getBeanId(node), nodeStack, parentStack, keyComponents);
+                } else if (nodeName.equals(NULL)) {
+                    propertyValueByKey.put(createKey(keyComponents), null);
+                } else if (nodeName.equals(LOOKUP_METHOD) || nodeName.equals(TEXT) || nodeName.equals(COMMENT)
+                                || nodeName.equals(CONTEXT_PROPERTY_PLACEHOLDER)) {
+                    // do nothing
+                } else {
+                    log.warn("Ignoring unknown node name: {}", nodeName);
+                }
+            }
+        } catch (Exception e) {
+            log.error("Encountered exception while analyzing xml", e);
+        }
+    }
+
+    private void addChildren(Node node, String keyComponent, LinkedList<Node> nodeStack, LinkedList<Node> parentStack, LinkedList<String> keyComponents) {
+        if (node.hasChildNodes()) {
+            // add the children to the stack
+            NodeList children = node.getChildNodes();
+            for (int i = children.getLength() - 1; i >= 0; i--) {
+                nodeStack.push(children.item(i));
+            }
+
+            // add the parent node info
+            parentStack.push(node);
+            keyComponents.add(keyComponent);
+        }
+    }
+
+    private String getBeanId(Node node) {
+        return getAttributeByName(node, ID);
+    }
+
+    private String getEntryKey(Node node) {
+        return getAttributeByName(node, KEY);
+    }
+
+    private String getPropertyName(Node node) {
+        return getAttributeByName(node, NAME);
+    }
+
+    private String getPropertyValue(Node node) {
+        return getAttributeByName(node, VALUE);
+    }
+
+    private String getPropertyRef(Node node) {
+        return getAttributeByName(node, REF);
+    }
+
+    private String getAttributeByName(Node node, String name) {
+        String beanId = IGNORED_KEY;
+        if (node.hasAttributes()) {
+            NamedNodeMap attributes = node.getAttributes();
+            Node idAttribute = attributes.getNamedItem(name);
+            if (idAttribute != null) {
+                beanId = idAttribute.getNodeValue();
+            }
+        }
+        return beanId;
+    }
+
+    private String createKey(List<String> keyComponents) {
+        return createKey(keyComponents, null);
+    }
+
+    private String createKey(List<String> keyComponents, String suffix) {
+        StringBuilder key = new StringBuilder();
+        for (String keyComponent : keyComponents) {
+            if (!keyComponent.equals(IGNORED_KEY) && !keyComponent.equals(CONSTRUCTOR_ARG)) {
+                if (key.length() > 0) {
+                    key.append(KEY_COMPONENT_SEPARATOR);
+                }
+                key.append(keyComponent);
+            }
+        }
+        if (suffix != null) {
+            key.append(KEY_COMPONENT_SEPARATOR).append(suffix);
+        }
+        return key.toString();
+    }
+
+    public String getKeyedValues() {
+        StringBuilder sb = new StringBuilder();
+        // @formatter:off
+        propertyValueByKey.keySet().stream()
+                .sorted()
+                .forEach(key -> {
+                    Object value = valueToObject(propertyValueByKey.get(key));
+                    if (value instanceof String) {
+                        value = "\"" + value + "\"";
+                    }
+                    sb.append(key).append(": ").append(value).append("\n");
+                });
+        // @formatter:on
+        return sb.toString();
+    }
+
+    public String getSimpleReport() {
+        StringBuilder sb = new StringBuilder();
+
+        sb.append(VALUES_HEADER);
+        sb.append(getKeyedValues());
+        sb.append("\n");
+
+        return sb.toString().trim();
+    }
+
+    public String getFullReport() {
+        StringBuilder sb = new StringBuilder();
+
+        sb.append(PLACEHOLDERS_HEADER);
+        // @formatter:off
+        propertyPlaceholderByKey.keySet().stream()
+                .sorted()
+                .forEach(key -> sb.append(key).append(": ").append(propertyPlaceholderByKey.get(key)).append("\n"));
+        // @formatter:on
+        sb.append("\n");
+
+        sb.append(VALUES_HEADER);
+        sb.append(getKeyedValues());
+        sb.append("\n");
+
+        sb.append(REFS_HEADER);
+        // @formatter:off
+        propertyRefByKey.keySet().stream()
+                .sorted()
+                .forEach(key -> sb.append(key).append(": ").append(propertyRefByKey.get(key)).append("\n"));
+        // @formatter:on
+        sb.append("\n");
+
+        // Note: We could just add all of the properties to a single properties object,
+        // but if we do that, they will be printed in a random order, so we add one at a time
+        sb.append(PROPERTIES_HEADER);
+        sb.append(createEffectiveProperties());
+        sb.append("\n");
+
+        sb.append(YML_HEADER);
+        sb.append(createEffectiveYaml());
+        sb.append("\n");
+
+        return sb.toString().trim();
+    }
+
+    private String createEffectiveProperties() {
+        ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        Properties properties = new Properties();
+        for (String key : propertyValueByKey.keySet()) {
+            Object value = propertyValueByKey.get(key);
+            key = propertyPlaceholderByKey.get(key);
+            if (key != null) {
+                properties.clear();
+                key = key.substring(2, key.length() - 1);
+                properties.setProperty(key, String.valueOf(value));
+                try {
+                    properties.store(baos, null);
+                } catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            }
+        }
+        return baos.toString(StandardCharsets.UTF_8).replaceAll("#.*\n", "");
+    }
+
+    private String createEffectiveYaml() {
+        Map<String, Object> ymlMap = new LinkedHashMap<>();
+        Set<String> propKeys = propertyPlaceholderByKey.values().stream().map(x -> x.substring(2, x.length() - 1)).collect(Collectors.toSet());
+        for (String key : propertyValueByKey.keySet()) {
+            Object value = propertyValueByKey.get(key);
+            key = propertyPlaceholderByKey.get(key);
+            if (key != null) {
+                key = key.substring(2, key.length() - 1);
+                Map<String, Object> curMap = ymlMap;
+                String[] keyParts = key.split("\\.");
+                for (int i = 0; i < keyParts.length; i++) {
+                    final String partialKey = createPartialKey(keyParts, 0, i + 1);
+                    if (i == keyParts.length - 1) {
+                        curMap.put(keyParts[i], value);
+                    }
+                    // if this partial key is set to a value in the properties, then we stop here and set the value using the remaining terms as the key
+                    else if (propKeys.stream().anyMatch(x -> x.equals(partialKey))) {
+                        String finalKey = "[" + createPartialKey(keyParts, i, keyParts.length) + "]";
+                        curMap.put(finalKey, value);
+                    } else {
+                        curMap = (Map<String, Object>) curMap.computeIfAbsent(keyParts[i], k -> new LinkedHashMap<String, Object>());
+                    }
+                }
+            }
+        }
+
+        String yml = null;
+        try {
+            YAMLFactory yamlFactory = new YAMLFactory();
+            yamlFactory.configure(WRITE_DOC_START_MARKER, false);
+            yamlFactory.configure(SPLIT_LINES, false);
+            yml = new ObjectMapper(yamlFactory).writeValueAsString(ymlMap);
+        } catch (JsonProcessingException e) {
+            throw new RuntimeException(e);
+        }
+        return yml;
+    }
+
+    private String createPartialKey(String[] keyComponents, int start, int stop) {
+        StringBuilder sb = new StringBuilder();
+        for (int i = start; i < stop; i++) {
+            if (i > start) {
+                sb.append(".");
+            }
+            sb.append(keyComponents[i]);
+        }
+        return sb.toString();
+    }
+
+    public Map<String, String> getPropertyPlaceholderByKey() {
+        return propertyPlaceholderByKey;
+    }
+
+    public Map<String, Object> getPropertyValueByKey() {
+        return propertyValueByKey;
+    }
+
+    public Map<String, String> getPropertyRefByKey() {
+        return propertyRefByKey;
+    }
+}
diff --git a/microservices/configcheck/src/main/java/datawave/microservice/configcheck/util/ArgumentUtils.java b/microservices/configcheck/src/main/java/datawave/microservice/configcheck/util/ArgumentUtils.java
new file mode 100644
index 00000000000..3d1858ec2d0
--- /dev/null
+++ b/microservices/configcheck/src/main/java/datawave/microservice/configcheck/util/ArgumentUtils.java
@@ -0,0 +1,34 @@
+package datawave.microservice.configcheck.util;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import org.springframework.boot.ApplicationArguments;
+
+/**
+ * ArgumentUtils is used to parse application arguments into various different objects.
+ */
+public class ArgumentUtils {
+    public static final String OUTPUT = "output";
+
+    public static String getFile(ApplicationArguments args) {
+        return (args.getNonOptionArgs().size() == 2) ? args.getNonOptionArgs().get(1) : null;
+    }
+
+    public static String[] getFiles(ApplicationArguments args) {
+        return (args.getNonOptionArgs().size() == 3) ? new String[] {args.getNonOptionArgs().get(1), args.getNonOptionArgs().get(2)} : null;
+    }
+
+    public static List<String> getFileList(ApplicationArguments args, String option) {
+        return args.getOptionValues(option).stream().flatMap(x -> Arrays.stream(x.split(","))).collect(Collectors.toList());
+    }
+
+    public static String getOutputPath(ApplicationArguments args) {
+        String output = null;
+        if (args.getOptionNames().contains(OUTPUT) && args.getOptionValues(OUTPUT).size() == 1) {
+            output = args.getOptionValues(OUTPUT).get(0);
+        }
+        return output;
+    }
+}
diff --git a/microservices/configcheck/src/main/java/datawave/microservice/configcheck/util/FileUtils.java b/microservices/configcheck/src/main/java/datawave/microservice/configcheck/util/FileUtils.java
new file mode 100644
index 00000000000..ec933d0084d
--- /dev/null
+++ b/microservices/configcheck/src/main/java/datawave/microservice/configcheck/util/FileUtils.java
@@ -0,0 +1,38 @@
+package datawave.microservice.configcheck.util;
+
+import java.io.File;
+import java.nio.file.Path;
+
+/**
+ * FileUtils is used to load files, keeping in mind that the relative path of a file is based on the working directory.
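+ * <p>
+ * A sketch of the resolution rules (the paths here are illustrative):
+ *
+ * <pre>{@code
+ * // with -Dworking.dir=/opt/configcheck
+ * FileUtils.getFilePath("output/analysis.txt"); // -> /opt/configcheck/output/analysis.txt
+ * FileUtils.getFilePath("/tmp/analysis.txt");   // -> /tmp/analysis.txt (absolute paths are used as-is)
+ * }</pre>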
+ */ +public class FileUtils { + public static final String WORKING_DIR = "working.dir"; + public static final String workingDir = System.getProperty(WORKING_DIR); + + public static Path getFilePath(String file) { + Path path = null; + if (file != null) { + if (new File(file).isAbsolute()) { + path = Path.of(file); + } else if (workingDir != null) { + path = Path.of(workingDir, file); + } else { + path = Path.of(file); + } + } + return path; + } + + public static Path getFilePath(String parent, String file) { + Path path = null; + Path parentPath = getFilePath(parent); + if (parentPath != null) { + path = parentPath.resolve(file); + } + if (path == null || !path.toFile().exists()) { + path = getFilePath(file); + } + return path; + } +} diff --git a/microservices/configcheck/src/main/java/datawave/microservice/configcheck/util/XmlRenderUtils.java b/microservices/configcheck/src/main/java/datawave/microservice/configcheck/util/XmlRenderUtils.java new file mode 100644 index 00000000000..71b29f40ee9 --- /dev/null +++ b/microservices/configcheck/src/main/java/datawave/microservice/configcheck/util/XmlRenderUtils.java @@ -0,0 +1,84 @@ +package datawave.microservice.configcheck.util; + +import static datawave.microservice.configcheck.util.FileUtils.getFilePath; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.util.List; +import java.util.Properties; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.config.YamlPropertiesFactoryBean; +import org.springframework.core.CollectionFactory; +import org.springframework.core.io.PathResource; +import org.springframework.util.PropertyPlaceholderHelper; + +/** + * XmlRenderUtils is used to load xml content as a string from a given file and subsequently render the property placeholders in the xml file using either yaml + * or java properties. 
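+ * <p>
+ * A minimal sketch of the rendering step (the property name and value are illustrative):
+ *
+ * <pre>{@code
+ * Properties props = new Properties();
+ * props.setProperty("table.name", "datawave.shard");
+ * String rendered = renderContent("<property name=\"tableName\" value=\"${table.name}\"/>", props);
+ * // rendered: <property name="tableName" value="datawave.shard"/>
+ * }</pre>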
+ */
+public class XmlRenderUtils {
+    private static Logger log = LoggerFactory.getLogger(XmlRenderUtils.class);
+
+    private static final String TRUE = "true";
+    private static final String FALSE = "false";
+
+    public static String loadContent(String filePath) {
+        String xmlContent = null;
+        try {
+            xmlContent = Files.readString(getFilePath(filePath), StandardCharsets.UTF_8);
+        } catch (Exception e) {
+            log.error("Exception reading xml file", e);
+        }
+        return xmlContent;
+    }
+
+    public static Properties loadProperties(String configdir, List<String> propertiesFiles) {
+        Properties mergedProperties = CollectionFactory.createStringAdaptingProperties();
+        try {
+            for (String propertiesFile : propertiesFiles) {
+                Properties props = CollectionFactory.createStringAdaptingProperties();
+                props.load(Files.newBufferedReader(getFilePath(configdir, propertiesFile)));
+                mergedProperties.putAll(props);
+            }
+        } catch (Exception e) {
+            log.error("Exception reading properties file", e);
+        }
+        return mergedProperties;
+    }
+
+    public static Properties loadYamlAsProperties(String configdir, List<String> yamlFiles) {
+        YamlPropertiesFactoryBean yamlPropFactory = new YamlPropertiesFactoryBean();
+        yamlPropFactory.setResources(yamlFiles.stream().map(yamlFile -> new PathResource(getFilePath(configdir, yamlFile))).toArray(PathResource[]::new));
+        return yamlPropFactory.getObject();
+    }
+
+    public static String renderContent(String content, Properties properties) {
+        String renderedXmlContent = null;
+        if (content != null) {
+            renderedXmlContent = new PropertyPlaceholderHelper("${", "}").replacePlaceholders(content, properties);
+        }
+        return renderedXmlContent;
+    }
+
+    public static Object valueToObject(Object value) {
+        if (value instanceof String) {
+            value = ((String) value).trim();
+            try {
+                value = Integer.parseInt((String) value);
+            } catch (Exception e1) {
+                try {
+                    value = Double.parseDouble((String) value);
+                } catch (Exception e2) {
+                    // not a number; fall back to boolean detection, otherwise leave the value as a string
+                    if (value.equals(TRUE) || value.equals(FALSE)) {
+                        value = Boolean.parseBoolean((String) value);
+                    }
+                }
+            }
+        }
+        return value;
+    }
+}
diff --git a/microservices/configcheck/src/main/resources/config/application.yml b/microservices/configcheck/src/main/resources/config/application.yml
new file mode 100644
index 00000000000..dfb160a72f6
--- /dev/null
+++ b/microservices/configcheck/src/main/resources/config/application.yml
@@ -0,0 +1,7 @@
+spring:
+  main:
+    web-application-type: NONE
+
+logging:
+  level:
+    root: FATAL
\ No newline at end of file
diff --git a/microservices/configcheck/src/main/resources/launch.conf b/microservices/configcheck/src/main/resources/launch.conf
new file mode 100644
index 00000000000..be4f7763af0
--- /dev/null
+++ b/microservices/configcheck/src/main/resources/launch.conf
@@ -0,0 +1 @@
+JAVA_OPTS="$JAVA_OPTS -Dworking.dir=$WORKING_DIR"
\ No newline at end of file
diff --git a/microservices/configcheck/src/main/resources/log4j2.yml b/microservices/configcheck/src/main/resources/log4j2.yml
new file mode 100644
index 00000000000..0187931b311
--- /dev/null
+++ b/microservices/configcheck/src/main/resources/log4j2.yml
@@ -0,0 +1,46 @@
+Configuration:
+  status: warn
+  monitorInterval: 60
+
+  Properties:
+    Property:
+      - name: logDir
+        value: "logs/"
+      - name: PID
+        value: "????"
+ - name: LOG_PATTERN + value: "%clr{%d{yyyy-MM-dd HH:mm:ss.SSS}}{faint} %clr{%5p} %clr{${sys:PID}}{magenta} %clr{---}{faint} %clr{[%15.15t]}{faint} %clr{%-40.40c{1.}}{cyan} %clr{:}{faint} %m%n%wEx" + + Appenders: + Console: + name: Console + target: SYSTEM_OUT + follow: true + PatternLayout: + pattern: "${LOG_PATTERN}" + + RollingFile: + - name: File + fileName: "${sys:logDir}/config-checker.log" + filePattern: "${sys:logDir}/config-checker.log.%d{yyyy-MM-dd}-%i.gz" + append: true + bufferedIO: true + bufferSize: 8192 + Policies: + TimeBasedTriggeringPolicy: + interval: 1 + SizeBasedTriggeringPolicy: + size: 250MB + DefaultRolloverStrategy: + max: 10 + PatternLayout: + pattern: "${LOG_PATTERN}" + + Loggers: + Root: + level: info + AppenderRef: + - ref: Console + level: info + - ref: File + level: trace diff --git a/microservices/configcheck/src/test/java/datawave/microservice/configcheck/ConfigCheckApplicationTest.java b/microservices/configcheck/src/test/java/datawave/microservice/configcheck/ConfigCheckApplicationTest.java new file mode 100644 index 00000000000..b274700d6ec --- /dev/null +++ b/microservices/configcheck/src/test/java/datawave/microservice/configcheck/ConfigCheckApplicationTest.java @@ -0,0 +1,196 @@ +package datawave.microservice.configcheck; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; + +import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; + +import javax.xml.parsers.ParserConfigurationException; + +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.springframework.boot.ApplicationArguments; +import org.springframework.boot.DefaultApplicationArguments; +import org.xml.sax.SAXException; + +public class ConfigCheckApplicationTest { + + public static String resourcesAbsolutePath; + + @BeforeAll + public static void beforeAll() { + resourcesAbsolutePath = new File("src/test/resources").getAbsolutePath(); + } + + @Test + public void testRenderXmlFromYaml() throws IOException, SAXException, ParserConfigurationException { + // @formatter:off + String[] stringArgs = new String[]{ + "render", + Path.of(resourcesAbsolutePath, "input/microservice/QueryLogicFactory.xml").toFile().getAbsolutePath(), + "--configdir=" + Path.of(resourcesAbsolutePath, "input/microservice/yaml/"), + "--yaml=" + "application-query.yml" + }; + // @formatter:on + + ApplicationArguments args = new DefaultApplicationArguments(stringArgs); + + Output output = new CommandRunner(args).run(); + + String expectedOutput = Files.readString(Path.of(resourcesAbsolutePath, "rendered/microservice/QueryLogicFactory.xml"), StandardCharsets.UTF_8); + + assertEquals(expectedOutput, output.getMessage()); + assertFalse(output.isError()); + } + + @Test + public void testRenderXmlFromProperties() throws IOException, SAXException, ParserConfigurationException { + // @formatter:off + String[] stringArgs = new String[]{ + "render", + Path.of(resourcesAbsolutePath, "input/webservice/QueryLogicFactory.xml").toFile().getAbsolutePath(), + "--configdir=" + Path.of(resourcesAbsolutePath, "input/webservice/properties/"), + "--properties=" + "default.properties" + }; + // @formatter:on + + ApplicationArguments args = new DefaultApplicationArguments(stringArgs); + + Output output = new CommandRunner(args).run(); + + String expectedOutput = Files.readString(Path.of(resourcesAbsolutePath, 
"rendered/webservice/QueryLogicFactory.xml"), StandardCharsets.UTF_8); + + assertEquals(expectedOutput, output.getMessage()); + assertFalse(output.isError()); + } + + @Test + public void testXmlPropertyAnalyzerWithYaml() throws IOException { + // @formatter:off + String[] stringArgs = new String[]{ + "analyze", + Path.of(resourcesAbsolutePath, "input/microservice/QueryLogicFactory.xml").toFile().getAbsolutePath(), + "--configdir=" + Path.of(resourcesAbsolutePath, "input/microservice/yaml/"), + "--yaml=" + "application-query.yml" + }; + // @formatter:on + + ApplicationArguments args = new DefaultApplicationArguments(stringArgs); + + Output output = new CommandRunner(args).run(); + + String expectedOutput = Files.readString(Path.of(resourcesAbsolutePath, "rendered/microservice/analysis.txt"), StandardCharsets.UTF_8); + + assertEquals(expectedOutput, output.getMessage()); + assertFalse(output.isError()); + } + + @Test + public void testXmlPropertyAnalyzerWithProperties() throws IOException { + // @formatter:off + String[] stringArgs = new String[]{ + "analyze", + Path.of(resourcesAbsolutePath, "input/webservice/QueryLogicFactory.xml").toFile().getAbsolutePath(), + "--configdir=" + Path.of(resourcesAbsolutePath, "input/webservice/properties/"), + "--properties=" + "default.properties,database.properties" + }; + // @formatter:on + + ApplicationArguments args = new DefaultApplicationArguments(stringArgs); + + Output output = new CommandRunner(args).run(); + + String expectedOutput = Files.readString(Path.of(resourcesAbsolutePath, "rendered/webservice/analysis.txt"), StandardCharsets.UTF_8); + + assertEquals(expectedOutput, output.getMessage()); + assertFalse(output.isError()); + } + + @Test + public void testXmlPropertyAnalyzerFullReportWithYaml() throws IOException { + // @formatter:off + String[] stringArgs = new String[]{ + "analyze", + Path.of(resourcesAbsolutePath, "input/microservice/QueryLogicFactory.xml").toFile().getAbsolutePath(), + "--configdir=" + Path.of(resourcesAbsolutePath, "input/microservice/yaml/"), + "--yaml=" + "application-query.yml", + "--fullreport" + }; + // @formatter:on + + ApplicationArguments args = new DefaultApplicationArguments(stringArgs); + + Output output = new CommandRunner(args).run(); + + String expectedOutput = Files.readString(Path.of(resourcesAbsolutePath, "rendered/microservice/fullReport.txt"), StandardCharsets.UTF_8); + + assertEquals(expectedOutput, output.getMessage()); + assertFalse(output.isError()); + } + + @Test + public void testXmlPropertyAnalyzerFullReportWithProperties() throws IOException { + // @formatter:off + String[] stringArgs = new String[]{ + "analyze", + Path.of(resourcesAbsolutePath, "input/webservice/QueryLogicFactory.xml").toFile().getAbsolutePath(), + "--configdir=" + Path.of(resourcesAbsolutePath, "input/webservice/properties/"), + "--properties=" + "default.properties,database.properties", + "--fullreport" + }; + // @formatter:on + + ApplicationArguments args = new DefaultApplicationArguments(stringArgs); + + Output output = new CommandRunner(args).run(); + + String expectedOutput = Files.readString(Path.of(resourcesAbsolutePath, "rendered/webservice/fullReport.txt"), StandardCharsets.UTF_8); + + assertEquals(expectedOutput, output.getMessage()); + assertFalse(output.isError()); + } + + @Test + public void testKeyValueComparator() throws IOException { + // @formatter:off + String[] stringArgs = new String[]{ + "compare", + Path.of(resourcesAbsolutePath, "rendered/microservice/analysis.txt").toFile().getAbsolutePath(), + 
Path.of(resourcesAbsolutePath, "rendered/webservice/analysis.txt").toFile().getAbsolutePath()
+        };
+        // @formatter:on
+
+        ApplicationArguments args = new DefaultApplicationArguments(stringArgs);
+
+        Output output = new CommandRunner(args).run();
+
+        String expectedOutput = Files.readString(Path.of(resourcesAbsolutePath, "rendered/comparison.diff"), StandardCharsets.UTF_8);
+
+        assertEquals(expectedOutput, output.getMessage().substring(output.getMessage().indexOf("\n", output.getMessage().indexOf("\n") + 1) + 1));
+        assertFalse(output.isError());
+    }
+
+    @Test
+    public void testFullComparator() throws IOException {
+        // @formatter:off
+        String[] stringArgs = new String[]{
+                "compare",
+                Path.of(resourcesAbsolutePath, "rendered/microservice/fullReport.txt").toFile().getAbsolutePath(),
+                Path.of(resourcesAbsolutePath, "rendered/webservice/fullReport.txt").toFile().getAbsolutePath()
+        };
+        // @formatter:on
+
+        ApplicationArguments args = new DefaultApplicationArguments(stringArgs);
+
+        Output output = new CommandRunner(args).run();
+
+        String expectedOutput = Files.readString(Path.of(resourcesAbsolutePath, "rendered/fullComparison.diff"), StandardCharsets.UTF_8);
+
+        assertEquals(expectedOutput, output.getMessage().substring(output.getMessage().indexOf("\n", output.getMessage().indexOf("\n") + 1) + 1));
+        assertFalse(output.isError());
+    }
+}
diff --git a/microservices/configcheck/src/test/resources/input/microservice/QueryLogicFactory.xml b/microservices/configcheck/src/test/resources/input/microservice/QueryLogicFactory.xml
new file mode 100644
index 00000000000..c11b1c258c3
--- /dev/null
+++ b/microservices/configcheck/src/test/resources/input/microservice/QueryLogicFactory.xml
@@ -0,0 +1,552 @@
[552 added lines of Spring XML bean definitions elided: the markup was stripped from this capture, leaving only stray literal values such as "2611" and "true"]
diff --git a/microservices/configcheck/src/test/resources/input/microservice/yaml/application-query.yml b/microservices/configcheck/src/test/resources/input/microservice/yaml/application-query.yml
new file mode 100755
index 00000000000..cff76e4dc64
--- /dev/null
+++ b/microservices/configcheck/src/test/resources/input/microservice/yaml/application-query.yml
@@ -0,0 +1,534 @@
+# This profile should be included by any service which depends on the query starter. This
+# file contains all of the configuration required to use the QueryLogicFactory.
+warehouse: + accumulo: + zookeepers: '${accumulo.zookeepers}' + instanceName: '${accumulo.instanceName}' + username: '${accumulo.username}' + password: '${accumulo.password}' + statsd: + host: localhost + port: 8125 + tables: + shard: + name: 'datawave.shard' + index: + name: 'datawave.shardIndex' + reverseIndex: + name: 'datawave.shardReverseIndex' + dateIndex: + name: 'datawave.dateIndex' + metadata: + name: 'datawave.metadata' + model: + name: 'datawave.metadata' + edge: + name: 'datawave.edge' + errorTables: + shard: + name: "datawave.error_s" + index: + name: "datawave.error_i" + reverseIndex: + name: "datawave.error_r" + dateIndex: + name: "" + metadata: + name: "datawave.error_m" + model: + name: "datawave.error_m" + metricTables: + shard: + name: "datawave.queryMetrics_s" + index: + name: "datawave.queryMetrics_i" + reverseIndex: + name: "datawave.queryMetrics_r" + dateIndex: + name: "" + metadata: + name: "datawave.queryMetrics_m" + model: + name: "datawave.queryMetrics_m" + defaults: + checkpointable: true + queryThreads: 100 + indexLookupThreads: 100 + dateIndexThreads: 20 + fullTableScanEnabled: false + baseIteratorPriority: 100 + maxIndexScanTimeMillis: 31536000000 + eventPerDayThreshold: 40000 + shardsPerDayThreshold: 20 + initialMaxTermThreshold: 2000 + finalMaxTermThreshold: 2000 + maxDepthThreshold: 2000 + maxUnfieldedExpansionThreshold: 50 + maxValueExpansionThreshold: 50 + maxOrExpansionThreshold: 500 + maxOrRangeThreshold: 10 + maxRangesPerRangeIvarator: 5 + maxOrRangeIvarators: 10 + maxOrExpansionFstThreshold: 750 + maxFieldIndexRangeSplit: 16 + maxIvaratorSources: 20 + maxEvaluationPipelines: 16 + maxPipelineCachedResults: 16 + hdfsSiteConfigURLs: 'file://${HADOOP_CONF_DIR:/etc/hadoop/conf}/core-site.xml,file://${HADOOP_CONF_DIR:/etc/hadoop/conf}/hdfs-site.xml' + ivaratorFstHdfsBaseURIs: "hdfs://${HADOOP_HOST:localhost}:9000/IvaratorCache" + ivaratorCacheBufferSize: 10000 + ivaratorMaxOpenFiles: 100 + ivaratorCacheScanPersistThreshold: 100000 + ivaratorCacheScanTimeoutMinutes: 60 + modelName: 'DATAWAVE' + edgeModelName: 'DATAWAVE_EDGE' + +datawave: + metadata: + all-metadata-auths: + - PRIVATE,PUBLIC + type-substitutions: + "[datawave.data.type.DateType]": "datawave.data.type.RawDateType" + + query: + parser: + skipTokenizeUnfieldedFields: + - "DOMETA" + tokenizedFields: + - "CONTENT" + logic: + factory: + enabled: true + # Uncomment the following line to override the query logic beans to load + # xmlBeansPath: "classpath:MyTestQueryLogicFactory.xml" + + # The max page size that a user can request. 0 turns off this feature + maxPageSize: 10000 + + # The number of bytes at which a page will be returned, event if the pagesize has not been reached. 
0 turns off this feature + pageByteTrigger: 0 + logics: + BaseEventQuery: + checkpointable: ${warehouse.defaults.checkpointable} + accumuloPassword: ${warehouse.accumulo.password} + tableName: ${warehouse.tables.shard.name} + dateIndexTableName: ${warehouse.tables.dateIndex.name} + defaultDateTypeName: "EVENT" + metadataTableName: ${warehouse.tables.metadata.name} + indexTableName: ${warehouse.tables.index.name} + reverseIndexTableName: ${warehouse.tables.reverseIndex.name} + maxResults: -1 + queryThreads: ${warehouse.defaults.queryThreads} + indexLookupThreads: ${warehouse.defaults.indexLookupThreads} + dateIndexThreads: ${warehouse.defaults.dateIndexThreads} + fullTableScanEnabled: ${warehouse.defaults.fullTableScanEnabled} + includeDataTypeAsField: false + disableIndexOnlyDocuments: false + indexOnlyFilterFunctionsEnabled: false + includeHierarchyFields: false + hierarchyFieldOptions: + "FOO": "BAR" + baseIteratorPriority: ${warehouse.defaults.baseIteratorPriority} + maxIndexScanTimeMillis: ${warehouse.defaults.maxIndexScanTimeMillis} + collapseUids: false + collapseUidsThreshold: -1 + useEnrichers: true + contentFieldNames: + - 'CONTENT' + realmSuffixExclusionPatterns: + - '<.*>$' + minimumSelectivity: .2 + enricherClassNames: + - 'datawave.query.enrich.DatawaveTermFrequencyEnricher' + useFilters: false + filterClassNames: + - 'foo.bar' + filterOptions: + 'bar': "foo" + auditType: "ACTIVE" + logicDescription: "Retrieve sharded events/documents, leveraging the global index tables as needed" + eventPerDayThreshold: ${warehouse.defaults.eventPerDayThreshold} + shardsPerDayThreshold: ${warehouse.defaults.shardsPerDayThreshold} + initialMaxTermThreshold: ${warehouse.defaults.initialMaxTermThreshold} + finalMaxTermThreshold: ${warehouse.defaults.finalMaxTermThreshold} + maxDepthThreshold: ${warehouse.defaults.maxDepthThreshold} + maxUnfieldedExpansionThreshold: ${warehouse.defaults.maxUnfieldedExpansionThreshold} + maxValueExpansionThreshold: ${warehouse.defaults.maxValueExpansionThreshold} + maxOrExpansionThreshold: ${warehouse.defaults.maxOrExpansionThreshold} + maxOrRangeThreshold: ${warehouse.defaults.maxOrRangeThreshold} + maxOrExpansionFstThreshold: ${warehouse.defaults.maxOrExpansionFstThreshold} + maxFieldIndexRangeSplit: ${warehouse.defaults.maxFieldIndexRangeSplit} + maxIvaratorSources: ${warehouse.defaults.maxIvaratorSources} + maxEvaluationPipelines: ${warehouse.defaults.maxEvaluationPipelines} + maxPipelineCachedResults: ${warehouse.defaults.maxPipelineCachedResults} + hdfsSiteConfigURLs: ${warehouse.defaults.hdfsSiteConfigURLs} + zookeeperConfig: ${warehouse.accumulo.zookeepers} + ivaratorCacheDirConfigs: + - basePathURI: "hdfs://${HADOOP_HOST:localhost}:9000/IvaratorCache" + ivaratorFstHdfsBaseURIs: ${warehouse.defaults.ivaratorFstHdfsBaseURIs} + ivaratorCacheBufferSize: ${warehouse.defaults.ivaratorCacheBufferSize} + ivaratorMaxOpenFiles: ${warehouse.defaults.ivaratorMaxOpenFiles} + ivaratorCacheScanPersistThreshold: ${warehouse.defaults.ivaratorCacheScanPersistThreshold} + ivaratorCacheScanTimeoutMinutes: ${warehouse.defaults.ivaratorCacheScanTimeoutMinutes} + eventQueryDataDecoratorTransformer: + requestedDecorators: + - "CSV" + - "WIKIPEDIA" + dataDecorators: + "CSV": + "EVENT_ID": "https://localhost:8443/DataWave/Query/lookupUUID/EVENT_ID?uuid=@field_value@&parameters=data.decorators:CSV" + "UUID": "https://localhost:8443/DataWave/Query/lookupUUID/UUID?uuid=@field_value@&parameters=data.decorators:CSV" + "PARENT_UUID": 
"https://localhost:8443/DataWave/Query/lookupUUID/PARENT_UUID?uuid=@field_value@&parameters=data.decorators:CSV" + "WIKIPEDIA": + "PAGE_ID": "https://localhost:8443/DataWave/Query/lookupUUID/PAGE_ID?uuid=@field_value@&parameters=data.decorators:WIKIPEDIA" + "PAGE_TITLE": "https://localhost:8443/DataWave/Query/lookupUUID/PAGE_TITLE?uuid=@field_value@&parameters=data.decorators:WIKIPEDIA" + modelTableName: ${warehouse.tables.model.name} + modelName: ${warehouse.defaults.modelName} + querySyntaxParsers: + JEXL: "" + LUCENE: "LuceneToJexlQueryParser" + LUCENE-UUID: "LuceneToJexlUUIDQueryParser" + TOKENIZED-LUCENE: "TokenizedLuceneToJexlQueryParser" + sendTimingToStatsd: false + collectQueryMetrics: true + logTimingDetails: true + statsdHost: ${warehouse.statsd.host} + statsdPort: ${warehouse.statsd.port} + evaluationOnlyFields: "" + maxConcurrentTasks: 10 + requiredRoles: + - "AuthorizedUser" + + EventQuery: + checkpointable: ${warehouse.defaults.checkpointable} + logicDescription: "Query the sharded event/document schema, leveraging the global index tables as needed" + + ErrorEventQuery: + checkpointable: ${warehouse.defaults.checkpointable} + logicDescription: "Retrieve events/documents that encountered one or more errors during ingest" + tableName: ${warehouse.errorTables.shard.name} + metadataTableName: ${warehouse.errorTables.metadata.name} + dateIndexTableName: "" + indexTableName: ${warehouse.errorTables.index.name} + reverseIndexTableName: ${warehouse.errorTables.reverseIndex.name} + includeHierarchyFields: false + + DiscoveryQuery: + checkpointable: ${warehouse.defaults.checkpointable} + tableName: ${warehouse.tables.shard.name} + indexTableName: ${warehouse.tables.index.name} + reverseIndexTableName: ${warehouse.tables.reverseIndex.name} + metadataTableName: ${warehouse.tables.metadata.name} + modelTableName: ${warehouse.tables.model.name} + modelName: ${warehouse.defaults.modelName} + fullTableScanEnabled: ${warehouse.defaults.fullTableScanEnabled} + allowLeadingWildcard: true + auditType: "NONE" + maxResults: -1 + maxWork: -1 + logicDescription: "Discovery query that returns information from the index about the supplied term(s)" + + ErrorDiscoveryQuery: + checkpointable: ${warehouse.defaults.checkpointable} + tableName: ${warehouse.errorTables.shard.name} + indexTableName: ${warehouse.errorTables.index.name} + reverseIndexTableName: ${warehouse.errorTables.reverseIndex.name} + maxResults: -1 + maxWork: -1 + metadataTableName: ${warehouse.errorTables.metadata.name} + modelTableName: ${warehouse.errorTables.model.name} + modelName: ${warehouse.defaults.modelName} + fullTableScanEnabled: ${warehouse.defaults.fullTableScanEnabled} + allowLeadingWildcard: true + auditType: "NONE" + logicDescription: "Discovery query that returns information from the ingest errors index about the supplied term(s)" + + LuceneUUIDEventQuery: + checkpointable: ${warehouse.defaults.checkpointable} + logicDescription: "Composite query logic that retrieves records from the event and error tables, based on known UUID fields, ie, those configured via UUIDTypeList in QueryLogicFactory.xml" + auditType: "NONE" + eventQuery: + auditType: "NONE" + logicDescription: "Lucene query for event/document UUIDs" + mandatoryQuerySyntax: + - "LUCENE-UUID" + connPoolName: "UUID" + errorEventQuery: + auditType: "NONE" + logicDescription: "Lucene query for event/document UUIDs for events that encountered errors at ingest time" + mandatoryQuerySyntax: + - "LUCENE-UUID" + connPoolName: "UUID" + tableName: 
${warehouse.errorTables.shard.name} + dateIndexTableName: ${warehouse.errorTables.dateIndex.name} + metadataTableName: ${warehouse.errorTables.metadata.name} + indexTableName: ${warehouse.errorTables.index.name} + reverseIndexTableName: ${warehouse.errorTables.reverseIndex.name} + + ContentQuery: + checkpointable: ${warehouse.defaults.checkpointable} + tableName: ${warehouse.tables.shard.name} + maxResults: -1 + maxWork: -1 + auditType: "NONE" + logicDescription: "Query that returns a document given the document identifier" + + EdgeQuery: + checkpointable: ${warehouse.defaults.checkpointable} + tableName: ${warehouse.tables.edge.name} + metadataTableName: ${warehouse.tables.metadata.name} + modelTableName: ${warehouse.tables.model.name} + modelName: ${warehouse.defaults.edgeModelName} + queryThreads: 16 + auditType: "NONE" + maxResults: 25000 + maxWork: -1 + + CountQuery: + checkpointable: ${warehouse.defaults.checkpointable} + logicDescription: "Retrieve event/document counts based on your search criteria" + + ErrorCountQuery: + checkpointable: ${warehouse.defaults.checkpointable} + logicDescription: "Retrieve counts of errored events based on your search criteria" + tableName: ${warehouse.errorTables.shard.name} + metadataTableName: ${warehouse.errorTables.metadata.name} + indexTableName: ${warehouse.errorTables.index.name} + reverseIndexTableName: ${warehouse.errorTables.reverseIndex.name} + + FieldIndexCountQuery: + checkpointable: false + tableName: ${warehouse.tables.shard.name} + indexTableName: ${warehouse.tables.index.name} + reverseIndexTableName: ${warehouse.tables.reverseIndex.name} + metadataTableName: ${warehouse.tables.metadata.name} + maxResults: -1 + maxWork: -1 + queryThreads: ${warehouse.defaults.queryThreads} + modelTableName: ${warehouse.tables.model.name} + modelName: "DATAWAVE" + maxUniqueValues: 20000 + auditType: "NONE" + logicDescription: "Indexed Fields Only: Given FIELDNAME returns counts for each unique value. Given FIELDNAME:FIELDVALUE returns count for only that value." 
+ + ErrorFieldIndexCountQuery: + checkpointable: false + tableName: ${warehouse.errorTables.shard.name} + indexTableName: ${warehouse.errorTables.index.name} + reverseIndexTableName: ${warehouse.errorTables.reverseIndex.name} + metadataTableName: ${warehouse.errorTables.metadata.name} + maxResults: -1 + maxWork: -1 + queryThreads: ${warehouse.defaults.queryThreads} + modelTableName: ${warehouse.errorTables.model.name} + modelName: "DATAWAVE" + maxUniqueValues: 20000 + auditType: "NONE" + logicDescription: "FieldIndex count query (experimental)" + + TermFrequencyQuery: + tableName: ${warehouse.tables.shard.name} + maxResults: -1 + maxWork: -14 + auditType: "NONE" + logicDescription: "Query that returns data from the term frequency query table" + + IndexStatsQuery: + auditType: "NONE" + + QueryMetricsQuery: + checkpointable: ${warehouse.defaults.checkpointable} + logicDescription: "Retrieve query metrics based on the given search term(s)" + includeHierarchyFields: false + modelTableName: ${warehouse.metricTables.model.name} + modelName: "NONE" + tableName: ${warehouse.metricTables.shard.name} + dateIndexTableName: ${warehouse.metricTables.dateIndex.name} + metadataTableName: ${warehouse.metricTables.metadata.name} + indexTableName: ${warehouse.metricTables.index.name} + reverseIndexTableName: ${warehouse.metricTables.reverseIndex.name} + auditType: "NONE" + collectQueryMetrics: true + + InternalQueryMetricsQuery: + collectQueryMetrics: false + requiredRoles: + - "AuthorizedServer" + + FacetedQuery: + checkpointable: ${warehouse.defaults.checkpointable} + auditType: "NONE" + logicDescription: "Faceted search over indexed fields, returning aggregate counts for field values" + facetedSearchType: "FIELD_VALUE_FACETS" + facetTableName: "datawave.facets" + facetMetadataTableName: "datawave.facetMetadata" + facetHashTableName: "datawave.facetHashes" + maximumFacetGrouping: 200 + minimumFacet: 1 + streaming: true + querySyntaxParsers: + JEXL: "" + LUCENE: "LuceneToJexlQueryParser" + LUCENE-UUID: "LuceneToJexlUUIDQueryParser" + + HitHighlights: + checkpointable: ${warehouse.defaults.checkpointable} + accumuloPassword: ${warehouse.accumulo.password} + tableName: ${warehouse.tables.shard.name} + dateIndexTableName: ${warehouse.tables.dateIndex.name} + defaultDateTypeName: "EVENT" + metadataTableName: ${warehouse.tables.metadata.name} + indexTableName: ${warehouse.tables.index.name} + reverseIndexTableName: ${warehouse.tables.reverseIndex.name} + queryThreads: ${warehouse.defaults.indexLookupThreads} + fullTableScanEnabled: ${warehouse.defaults.fullTableScanEnabled} + minimumSelectivity: .2 + includeDataTypeAsField: false + includeGroupingContext: false + useEnrichers: false + auditType: "NONE" + logicDescription: "Fast boolean query over indexed fields, only returning fields queried on" + eventPerDayThreshold: 40000 + shardsPerDayThreshold: ${warehouse.defaults.shardsPerDayThreshold} + initialMaxTermThreshold: ${warehouse.defaults.initialMaxTermThreshold} + finalMaxTermThreshold: ${warehouse.defaults.finalMaxTermThreshold} + maxDepthThreshold: ${warehouse.defaults.maxDepthThreshold} + maxUnfieldedExpansionThreshold: ${warehouse.defaults.maxUnfieldedExpansionThreshold} + maxValueExpansionThreshold: ${warehouse.defaults.maxValueExpansionThreshold} + maxOrExpansionThreshold: ${warehouse.defaults.maxOrExpansionThreshold} + maxOrRangeThreshold: ${warehouse.defaults.maxOrRangeThreshold} + maxRangesPerRangeIvarator: ${warehouse.defaults.maxRangesPerRangeIvarator} + maxOrRangeIvarators: 
${warehouse.defaults.maxOrRangeIvarators}
+      maxOrExpansionFstThreshold: ${warehouse.defaults.maxOrExpansionFstThreshold}
+      maxFieldIndexRangeSplit: ${warehouse.defaults.maxFieldIndexRangeSplit}
+      maxEvaluationPipelines: ${warehouse.defaults.maxEvaluationPipelines}
+      maxPipelineCachedResults: ${warehouse.defaults.maxPipelineCachedResults}
+      hdfsSiteConfigURLs: ${warehouse.defaults.hdfsSiteConfigURLs}
+      zookeeperConfig: ${warehouse.accumulo.zookeepers}
+      ivaratorCacheDirConfigs:
+        - basePathURI: "hdfs://${HADOOP_HOST:localhost}:9000/IvaratorCache"
+      ivaratorFstHdfsBaseURIs: ${warehouse.defaults.ivaratorFstHdfsBaseURIs}
+      ivaratorCacheBufferSize: 10000
+      ivaratorMaxOpenFiles: ${warehouse.defaults.ivaratorMaxOpenFiles}
+      ivaratorCacheScanPersistThreshold: 100000
+      ivaratorCacheScanTimeoutMinutes: ${warehouse.defaults.ivaratorCacheScanTimeoutMinutes}
+      querySyntaxParsers:
+        JEXL: ""
+        LUCENE: "LuceneToJexlQueryParser"
+        LUCENE-UUID: "LuceneToJexlUUIDQueryParser"
+
+    EdgeEventQuery:
+      checkpointable: ${warehouse.defaults.checkpointable}
+      logicDescription: "Use results of an EdgeQuery to obtain events/documents that created the given edge"
+      edgeModelName: "DATAWAVE_EDGE"
+      modelTableName: ${warehouse.tables.model.name}
+
+  uuidTypes: &defaultUuidTypes
+    'EVENT_ID':
+      fieldName: 'EVENT_ID'
+      queryLogic: 'LuceneUUIDEventQuery'
+      allowedWildcardAfter: 28
+    'UUID':
+      fieldName: 'UUID'
+      queryLogic: 'LuceneUUIDEventQuery'
+    'PARENT_UUID':
+      fieldName: 'PARENT_UUID'
+      queryLogic: 'LuceneUUIDEventQuery'
+    'PAGE_ID':
+      fieldName: 'PAGE_ID'
+      queryLogic: 'LuceneUUIDEventQuery'
+    'PAGE_TITLE':
+      fieldName: 'PAGE_TITLE'
+      queryLogic: 'LuceneUUIDEventQuery'
+
+  lookup:
+    columnVisibility: ""
+    beginDate: "20100101 000000.000"
+    types: *defaultUuidTypes
+
+  translateid:
+    columnVisibility: ""
+    beginDate: "20100101 000000.000"
+    types: *defaultUuidTypes
+
+  edge:
+    # Uncomment the following line to override the edge beans to load
+    # xmlBeansPath: "classpath:EdgeBeans.xml"
+    model:
+      baseFieldMap:
+        EDGE_SOURCE: 'SOURCE'
+        EDGE_SINK: 'SINK'
+        EDGE_TYPE: 'TYPE'
+        EDGE_RELATIONSHIP: 'RELATION'
+        EDGE_ATTRIBUTE1: 'ATTRIBUTE1'
+        EDGE_ATTRIBUTE2: 'ATTRIBUTE2'
+        EDGE_ATTRIBUTE3: 'ATTRIBUTE3'
+        DATE: 'DATE'
+        STATS_EDGE: 'STATS_TYPE'
+      keyUtilFieldMap:
+        ENRICHMENT_TYPE: 'ENRICHMENT_TYPE'
+        FACT_TYPE: 'FACT_TYPE'
+        GROUPED_FIELDS: 'GROUPED_FIELDS'
+      transformFieldMap:
+        COUNT: 'COUNT'
+        COUNTS: 'COUNTS'
+        LOAD_DATE: 'LOADDATE'
+        ACTIVITY_DATE: 'ACTIVITY_DATE'
+      fieldMappings:
+        - fieldName: "SOURCE"
+          modelFieldName: "VERTEXA"
+          direction: "REVERSE"
+          indexOnly: false
+        - fieldName: "SOURCE"
+          modelFieldName: "VERTEXA"
+          direction: "FORWARD"
+          indexOnly: false
+        - fieldName: "SINK"
+          modelFieldName: "VERTEXB"
+          direction: "REVERSE"
+          indexOnly: false
+        - fieldName: "SINK"
+          modelFieldName: "VERTEXB"
+          direction: "FORWARD"
+          indexOnly: false
+        - fieldName: "RELATION"
+          modelFieldName: "RELATION"
+          direction: "REVERSE"
+          indexOnly: false
+        - fieldName: "RELATION"
+          modelFieldName: "RELATION"
+          direction: "FORWARD"
+          indexOnly: false
+        - fieldName: "TYPE"
+          modelFieldName: "TYPE"
+          direction: "REVERSE"
+          indexOnly: false
+        - fieldName: "TYPE"
+          modelFieldName: "TYPE"
+          direction: "FORWARD"
+          indexOnly: false
+        - fieldName: "ATTRIBUTE1"
+          modelFieldName: "ATTR1"
+          direction: "REVERSE"
+          indexOnly: false
+        - fieldName: "ATTRIBUTE1"
+          modelFieldName: "ATTR1"
+          direction: "FORWARD"
+          indexOnly: false
+        - fieldName: "ATTRIBUTE2"
+          modelFieldName: "ATTR2"
+          direction: "REVERSE"
+          indexOnly: false
+        - fieldName: "ATTRIBUTE2"
+          modelFieldName: "ATTR2"
+          direction: "FORWARD"
+          indexOnly: false
+        - fieldName: "ATTRIBUTE3"
+          modelFieldName: "ATTR3"
+          direction: "REVERSE"
+          indexOnly: false
+        - fieldName: "ATTRIBUTE3"
+          modelFieldName: "ATTR3"
+          direction: "FORWARD"
+          indexOnly: false
+
\ No newline at end of file
diff --git a/microservices/configcheck/src/test/resources/input/webservice/QueryLogicFactory.xml b/microservices/configcheck/src/test/resources/input/webservice/QueryLogicFactory.xml
new file mode 100644
index 00000000000..306770277de
--- /dev/null
+++ b/microservices/configcheck/src/test/resources/input/webservice/QueryLogicFactory.xml
@@ -0,0 +1,688 @@
+<!-- 688 lines of Spring bean XML; the markup did not survive this capture. Surviving text values, in order: DOMETA, CONTENT, 2611, true, ${lookup.uuid.mappings}, CONTENT, <.*>$, datawave.query.enrich.DatawaveTermFrequencyEnricher, ${event.query.filters.classnames}, ${ivarator.cache.dir.config}, ${event.query.filters.options}, ${hierarchy.field.options}, ${event.query.data.decorators}, ${lookup.uuid.uuidTypes}, datawave.query.function.NormalizedVersionPredicate, LUCENE-UUID, LUCENE-UUID -->
diff --git a/microservices/configcheck/src/test/resources/input/webservice/properties/database.properties b/microservices/configcheck/src/test/resources/input/webservice/properties/database.properties
new file mode 100644
index 00000000000..eb5cd5b51e2
--- /dev/null
+++ b/microservices/configcheck/src/test/resources/input/webservice/properties/database.properties
@@ -0,0 +1,20 @@
+metadata.table.name=${table.name.metadata}
+date.index.table.name=${table.name.dateIndex}
+index.table.name=${table.name.shardIndex}
+rindex.table.name=${table.name.shardReverseIndex}
+shard.table.name=${table.name.shard}
+edge.table.name=${table.name.edge}
+error.metadata.table.name=${table.name.errors.metadata}
+error.index.table.name=${table.name.errors.shardIndex}
+error.rindex.table.name=${table.name.errors.shardReverseIndex}
+error.shard.table.name=${table.name.errors.shard}
+querymetrics.metadata.table.name=${table.name.queryMetrics.metadata}
+querymetrics.index.table.name=${table.name.queryMetrics.shardIndex}
+querymetrics.rindex.table.name=${table.name.queryMetrics.shardReverseIndex}
+querymetrics.shard.table.name=${table.name.queryMetrics.shard}
+querymetrics.dateindex.table.name=${table.name.queryMetrics.dateIndex} +loaddates.table.name=${table.name.loadDates} + +accumulo.instance.name=${accumulo.instance.name} +accumulo.user.name=${accumulo.user.name} +accumulo.user.password=${accumulo.user.password} diff --git a/microservices/configcheck/src/test/resources/input/webservice/properties/default.properties b/microservices/configcheck/src/test/resources/input/webservice/properties/default.properties new file mode 100644 index 00000000000..2adeacbcc0e --- /dev/null +++ b/microservices/configcheck/src/test/resources/input/webservice/properties/default.properties @@ -0,0 +1,653 @@ +# Passwords should be set in private maven settings +# They are here for reference but commented out so that the +# assert-properties plugin will warn the user if they are not set + +############################ +# +# Usernames +# +############################ +accumulo.user.name=root +mysql.user.name=sorted +# Admin Console and JMX Console username +jboss.jmx.username=jmxadmin +# Credentials for HORNETQ servers to join the cluster +hornetq.cluster.username=datawave-cluster +# Your system's default username for JMS. This will be used by MDBs and other clients to connect to JMS Server. You can also create +# additional accounts for external systems by adding entries in hornetq-users.properties and hornetq-roles.properties +hornetq.system.username=DATAWAVE + +############################ +# +# Passwords +# +############################ +#server.keystore.password=SET_ME_IN_PRIVATE_MAVEN_SETTINGS +#accumulo.user.password=SET_ME_IN_PRIVATE_MAVEN_SETTINGS +#mysql.user.password=SET_ME_IN_PRIVATE_MAVEN_SETTINGS +#jboss.jmx.password=SET_ME_IN_PRIVATE_MAVEN_SETTINGS +#hornetq.cluster.password=SET_ME_IN_PRIVATE_MAVEN_SETTINGS +#hornetq.system.password=SET_ME_IN_PRIVATE_MAVEN_SETTINGS +server.truststore.password= + +############################ +# +# Server Identity +# +############################ +server.cert.basename= +# Subject DN of the server cert +server.dn= +# Issuer DN of the server cert +issuer.dn= +server.trust.store= + +############################ +# +# Security Settings +# +############################ +# Whether or not to use the remote authorization service +security.use.remoteauthservice=false +# Whether or not to use the test authorization service that loads canned users +security.use.testauthservice=false +# Spring context entry defining the location of test authorization service entries +security.testauthservice.context.entry= +# JSON-encoded DatawaveUser objects to use in the test authorization service +security.testauthservice.users= +# Trusted entities that can be removed from proxied entity chains +trusted.proxied.entities= +# Configuration for the remote DatawaveUser service +# +# Find the host and port of the service using a SRV DNS lookup +security.remoteuserservice.srv.lookup.enabled=false +# The DNS servers to use for the SRV lookup +security.remoteuserservice.srv.lookup.servers=127.0.0.1 +# The port on which the DNS server that serves SRV records is listening +security.remoteuserservice.srv.lookup.port=8600 +# The scheme to use when connecting to the remote user service +security.remoteuserservice.scheme=https +# The host to connect to (or do a SRV lookup on) for the remote user service +security.remoteuserservice.host=localhost +# The port to connect to (unless a SRV lookup was performed) for the remote user service +security.remoteuserservice.port=8643 + +############################ +# +# Configuration for the remote Accumulo service +# 
+############################
+# Find the host and port of the service using a SRV DNS lookup
+accumulo.remoteservice.srv.lookup.enabled=false
+# The DNS servers to use for the SRV lookup
+accumulo.remoteservice.srv.lookup.servers=127.0.0.1
+# The port on which the DNS server that serves SRV records is listening
+accumulo.remoteservice.srv.lookup.port=8600
+# The scheme to use when connecting to the remote Accumulo service
+accumulo.remoteservice.scheme=https
+# The host to connect to (or do a SRV lookup on) for the remote Accumulo service
+accumulo.remoteservice.host=localhost
+# The port to connect to (unless a SRV lookup was performed) for the remote Accumulo service
+accumulo.remoteservice.port=8943
+
+############################
+#
+# Configuration for the remote Query Metric service
+#
+############################
+# Find the host and port of the service using a SRV DNS lookup
+querymetric.remoteservice.srv.lookup.enabled=false
+# The DNS servers to use for the SRV lookup
+querymetric.remoteservice.srv.lookup.servers=127.0.0.1
+# The port on which the DNS server that serves SRV records is listening
+querymetric.remoteservice.srv.lookup.port=8600
+# The scheme to use when connecting to the remote query metric service
+querymetric.remoteservice.scheme=https
+# The host to connect to (or do a SRV lookup on) for the remote query metric service
+querymetric.remoteservice.host=localhost
+# The port to connect to (unless a SRV lookup was performed) for the remote query metric service
+querymetric.remoteservice.port=9043
+# Is the remote query metric service enabled
+querymetric.remoteservice.enabled=false
+
+############################
+#
+# Server Settings
+#
+############################
+jboss.console.redirect=
+port.definition.set=ports-default
+# JBoss Heap size, used in bin/run.conf
+jboss.jvm.heap.size=4096m
+# JBoss CMSInitiatingOccupancyFraction: start a garbage collection if the tenured generation exceeds this fraction (92% by default)
+jboss.cms.initiating.occupancy.fraction=75
+# Additional args for the JBoss JVM used in bin/run.conf
+jboss.java.opts=
+# Add no additional JAVA_OPTS to run.conf
+jboss.extra.java.opts=
+# Extra stuff to append to the end of run.conf
+jboss.run.conf.extras=
+# Location of the JBoss log dir
+jboss.log.hdfs.uri=hdfs://localhost:8020/
+jboss.log.hdfs.dir=/datawave/WebServiceLogs
+# Application Server Cluster partition name
+jboss.partition.name=${env.USER}
+# User the wildfly init.d script should use to run wildfly
+jboss.runas.user=jboss
+
+# Defines the size parameters of the worker's task thread pool.
+# Suggest setting values here based on accumulo connection pool sizes, available cores, and expected access patterns.
+# From WildFly manual: Workers for I/O channel notification. The maximum number of threads for the worker task thread pool,
+# default cpuCount * 16. Once this is filled, tasks that cannot be queued will be rejected.
+wildfly.io.worker.default.task-max-threads=16
+# How many I/O (selector) threads should be maintained. Generally this number should be a small constant multiple of the number of available cores.
+# From WildFly manual: Specify the number of I/O threads to create for the worker. Default cpuCount * 2.
+wildfly.io.worker.default.io-threads=2
+
+############################
+#
+# RestEasy Settings
+#
+############################
+# Number of job result sets held in memory at once, defaults to 100
+resteasy.async.job.service.max.job.results=200
+# Maximum wait time on a job when a client is querying for it, in ms; defaults to 300000 (5 minutes)
+resteasy.async.job.service.max.wait=300000
+# Thread pool size of background threads that run the job, defaults to 100
+resteasy.async.job.service.thread.pool.size=200
+# The base path for job URIs
+resteasy.async.job.service.base.path=/asynch/jobs
+
+############################
+#
+# Table & Query Settings
+#
+############################
+
+table.name.metadata=datawave.metadata
+table.name.shard=datawave.shard
+table.name.shardStats=datawave.shardStats
+table.name.shardIndex=datawave.shardIndex
+table.name.shardReverseIndex=datawave.shardReverseIndex
+table.name.dateIndex=datawave.dateIndex
+table.name.edge=datawave.edge
+table.name.errors.metadata=datawave.error_m
+table.name.errors.shardIndex=datawave.error_i
+table.name.errors.shardReverseIndex=datawave.error_r
+table.name.errors.shard=datawave.error_s
+table.name.queryMetrics.metadata=datawave.queryMetrics_m
+table.name.queryMetrics.shardIndex=datawave.queryMetrics_i
+table.name.queryMetrics.shardReverseIndex=datawave.queryMetrics_r
+table.name.queryMetrics.shard=datawave.queryMetrics_s
+table.name.queryMetrics.dateIndex=datawave.queryMetrics_di
+table.name.loadDates=datawave.loadDates
+table.name.atom.categories=datawave.atom
+table.name.facet=datawave.facets
+table.name.facet.metadata=datawave.facetMetadata
+table.name.facet.hashes=datawave.facetHashes
+
+table.shard.numShardsPerDay=10
+table.dateIndex.numShardsPerDay=10
+table.loadDates.enabled=true
+
+metadata.table.names= \
+datawave.metadata \
+\n datawave.queryMetrics_m \
+\n datawave.error_m
+
+tables.to.cache=datawave.metadata,datawave.queryMetrics_m,datawave.error_m
+cache.reloadInterval=86400000
+
+indexTables.keepCountOnlyEntries=false
+
+default.date.type.name=EVENT
+
+# Number of minutes that a query can be idle before the connection is closed
+query.expiration.minutes=15
+# Number of minutes that a query next or create call can take before it is canceled
+query.max.call.time.minutes=60
+# Number of minutes after which the page will be returned iff it contains results. This prevents a query from being cancelled per query.max.call.time.minutes if there are results.
+query.page.shortcircuit.minutes=55
+# Number of minutes after which the page size velocity will be checked (percent page full vs percent call time complete) to potentially short circuit the next call
+query.page.size.shortcircuit.minutes=30
+# The default page size used when the user does not request one
+query.default.page.size=10
+# The max page size that a user can request. 0 turns off this feature
+query.max.page.size=10000
+# The number of bytes at which a page will be returned, even if the page size has not been reached. 0 turns off this feature
+query.page.byte.trigger=0
+# Determine whether or not we collapse UIDs into a sharded range when doing the rangestream lookup
+query.collapse.uids=false
+# If we have more UIDs than this threshold, collapse into a single rangestream lookup
+query.collapse.uids.threshold=-1
+# Determine when we give up on a global index scan and push down to the field index. Default is virtually unlimited (1 year).
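+# (For reference: 365 days * 24 hours * 3600 seconds * 1000 = 31536000000 ms, i.e. one year.)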
+query.max.index.scan.ms=31536000000
+# Suppresses documents which would otherwise contain only index-only fields
+disable.index.only.documents=false
+# Indicates whether index-only filter functions should be enabled, such as filter:includeRegex()
+enable.index.only.filter.functions=false
+query.tld.collapse.uids=false
+# Fields generated internally at query evaluation time
+evaluation.only.fields=
+############################
+#
+# Accumulo Connection Pools
+#
+############################
+accumulo.instance.name=accumulo
+zookeeper.hosts=localhost:2181
+
+# Number of connections in the connection pools to the accumulo instance. If there are not enough connections, then operations will block
+# until a connection becomes available. Be careful here: when used in a batch scanner, a connection will use N threads and network
+# connections when querying Accumulo. These are the defaults; if your pool sizes need to differ, override them in
+# your profile
+accumulo.low.defaultpool.size=25
+accumulo.normal.defaultpool.size=50
+accumulo.high.defaultpool.size=100
+accumulo.admin.defaultpool.size=200
+accumulo.low.uuidpool.size=1
+accumulo.normal.uuidpool.size=2
+accumulo.high.uuidpool.size=3
+accumulo.admin.uuidpool.size=5
+accumulo.low.fipool.size=1
+accumulo.normal.fipool.size=2
+accumulo.high.fipool.size=3
+accumulo.admin.fipool.size=5
+
+############################
+#
+# EJB Settings
+#
+############################
+# Number of MDBs in the pool for the modification cache, max jboss.mdb.pool.max.size. This determines the number of concurrent calls to the mutable field cache
+modification.cache.mdb.pool.size=50
+# Number of threads available for EJB3 asynchronous methods
+jboss.ejb3.async.threads=10
+# Number of seconds before transactions will time out (NOTE: This should be > query.max.call.time.minutes)
+jboss.transaction.time.out=3900
+# Number of ms before the remote ejb connections will time out (NOTE: This should be > query.max.call.time.minutes)
+jboss.ejb3.connector.time.out=3900000
+# Number of threads for accepting HTTP requests, defaults to 200
+jboss.web.max.threads=200
+# Number of requests to queue up for an available thread. When the queue is full, connection-refused errors will be returned to the caller
+jboss.web.accept.count=200
+# Maximum number of Stateless Session Bean instances in each pool
+jboss.slsb.pool.max.size=200
+# Timeout (ms) before throwing an exception when waiting to get a Stateless Session Bean instance from the pool
+jboss.slsb.pool.timeout=30000
+# Maximum number of Message Driven Bean instances in each pool
+jboss.mdb.pool.max.size=200
+# Timeout (ms) before throwing an exception when waiting to get a Message Driven Bean instance from the pool
+jboss.mdb.pool.timeout=30000
+# Number of threads to be used by the managed executor service (increase this if seeing RejectedExecutionExceptions)
+jboss.managed.executor.service.default.max.threads=32
+
+############################
+#
+# HornetQ Settings
+#
+############################
+# HornetQ JMS DataSource max pool size
+hornetq.datasource.max.pool.size=200
+hornetq.host=
+hornetq.port=
+
+############################
+#
+# DATAWAVE Settings
+#
+############################
+# Transport guarantee for web apps
+webapp.transport.guarantee=CONFIDENTIAL
+# Tell the login module to expect a client cert, and not a DN stuffed in a header
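+# (When true, the DN is instead taken from trusted HTTP headers supplied by a fronting proxy.)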
+trusted.header.login=false
+# Web service response namespaces
+datawave.webservice.namespace=http://webservice.datawave.nsa/v1
+# Name of the cluster
+cluster.name=DEV
+
+############################
+#
+# Timely metrics reporting
+#
+############################
+metrics.reporter.host=localhost
+metrics.reporter.port=54321
+metrics.reporter.class=datawave.metrics.TimelyMetricsReporterFactory
+
+############################
+#
+# EventQuery
+#
+############################
+# Default set of filter properties (which are disabled)
+event.query.filters.enabled=false
+event.query.filters.classnames=
+event.query.filters.options=
+event.query.filters.index.classnames=
+
+# Default set of decorators
+event.query.data.decorators=
+
+# Configure max results for Event Query only; -1 means unlimited
+event.query.max.results=-1
+
+############################
+#
+# Cached Results
+#
+############################
+cached.results.hdfs.uri=hdfs://localhost:8020/
+cached.results.export.dir=/CachedResults
+# Number of rows per batch update in CachedResults.load
+cached_results.rows.per.batch=10
+# Number of days that the cached results tables should remain in the cached results store
+cached_results.daysToLive=1
+
+############################
+#
+# LookupUUID
+#
+############################
+# Default uuid lookup mappings
+lookup.uuid.mappings=
+# Default uuidTypes
+lookup.uuid.uuidTypes=
+# Default lookup.uuid.beginDate
+lookup.uuid.beginDate=20100101
+
+############################
+#
+# MapReduce Service
+#
+############################
+# Restrict input formats by default
+mapReduce.inputFormat.restrict=true
+mapReduce.job.tracker=localhost:8021
+# mapReduce.http.port identifies the Wildfly address and port
+mapReduce.http.port=http://localhost:8443
+mapReduce.hdfs.uri=hdfs://localhost:8020/
+mapReduce.hdfs.base.dir=/datawave/MapReduceService
+
+bulkResults.job.tracker=localhost:8021
+bulkResults.http.port=http://localhost:8080
+bulkResults.hdfs.uri=hdfs://localhost:8020/
+bulkResults.hdfs.base.dir=/datawave/BulkResults
+mapreduce.securitydomain.useJobCache=true
+
+# Query configuration parameter, true by default, but may cause an issue with malformed UIDs
+include.hierarchy.fields=false
+hierarchy.field.options=
+
+# BaseEventQuery (beq) thresholds
+beq.baseIteratorPriority=100
+beq.eventPerDayThreshold=40000
+beq.shardsPerDayThreshold=20
+# Max number of terms BEFORE all expansions (calculated based on how much the initial parser can handle before hitting a stack overflow: between 3500 and 3750)
+beq.initialMaxTermThreshold=2000
+# Max number of terms AFTER all expansions (calculated based on how much the initial parser can handle before hitting a stack overflow: between 3500 and 3750)
+beq.finalMaxTermThreshold=2000
+# Max depth of query (calculated based on how much the initial parser can handle before hitting a stack overflow: between 3500 and 3750)
+beq.maxDepthThreshold=2000
+# Only used in the refactored query logic: max value (regex/range) expansion and max unfielded (_ANYFIELD) expansion
+beq.valueExpansionThreshold=50
+beq.unfieldedExpansionThreshold=50
+# Only used in the refactored query logic: max or'ed values for a single field, after which an ivarator is used. FSTs are used instead if the count exceeds both this and beq.orExpansionFstThreshold.
+beq.orExpansionThreshold=500
+# The maximum number of allowed ranges against a single field within an or node before combining ranges into the desired number of ivarators
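+# (Illustrative: with the values below, 25 or'ed ranges against one field exceed the threshold of 10 and
+# would be combined into 25 / 5 = 5 merged-range ivarators, within the cap of 10 ivarators.)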
+beq.orRangeThreshold=10
+# The maximum number of ranges to combine for merged range ivarators against a single field within an or node
+beq.maxRangesPerRangeIvarator=5
+# The maximum number of range ivarators allowed for a single field under an or node
+beq.maxOrRangeIvarators=10
+beq.orExpansionFstThreshold=750
+# Only used in the legacy query logic: max ranges and max terms post expansion
+beq.rangeExpansionThreshold=2000
+beq.maxTermExpansionThreshold=2000
+# The max number of splits to divide a range into for the ivarators. They are run in a pool of threads controlled by the tserver.datawave.ivarator.threads accumulo configuration property, which defaults to 100 (IteratorThreadPoolManager).
+beq.fieldIndexRangeSplit=16
+# The max number of sources that can be created across ivarators for one scan
+beq.maxIvaratorSources=20
+# The max number of files that one ivarator can open at one time
+beq.maxIvaratorOpenFiles=100
+# The max number of evaluation pipelines. They are run in a pool of threads controlled by the tserver.datawave.evaluation.threads accumulo configuration property, which defaults to 100 (IteratorThreadPoolManager).
+beq.evaluationPipelines=16
+# The max number of non-null evaluated results to cache in a queue on each tserver, beyond those in the evaluation pipelines
+beq.pipelineCachedResults=16
+# Are full table scans enabled for the base event query?
+beq.fullTableScanEnabled=false
+
+# Threads used for various query logics
+shard.query.threads=100
+index.query.threads=100
+date.index.threads=20
+edge.query.threads=16
+
+# MySQL connection settings
+mysql.host=localhost
+mysql.dbname=sort
+mysql.pool.min.size=5
+mysql.pool.max.size=20
+
+extra.connection.factory.entries=
+
+# Web service connection pool for the atom service
+atom.connection.pool.name=WAREHOUSE
+
+# HDFS-backed sorted set settings
+hdfs.site.config.urls=file:///etc/hadoop/conf/core-site.xml,file:///etc/hadoop/conf/hdfs-site.xml
+
+# The paths to use for the ivarators. Paths will be used in the order they are listed.
+## Specify a list of beans using the 1, 2 or 3-argument constructor with the following params:
+##   1) basePathURI - (Required) A string URI representing the filesystem and directory to use for the ivarators.
+##      The path should be fully qualified and start with either 'file:/' or 'hdfs:/'.
+##   2) priority - (Optional, Default: Integer.MAX_VALUE) An integer >= 0 which can be used to give certain ivarator
+##      paths preference over others. For paths which share the same priority, the order will be determined at
+##      random. Otherwise, paths are sorted in ascending order by priority.
+##   3) The third argument can be specified either as an exact number of MB (long), or as a percent (double).
+##      - minAvailableStorageMB - (Optional, Default: 0) A long, greater than or equal to 0, which specifies the minimum
+##        amount of available storage space required to persist to this ivarator path. If less than this amount is
+##        available, we will not use this ivarator path.
+##      - minAvailableStoragePercent - (Optional, Default: 0.0) A double, between 0.0 and 1.0, which specifies the
+##        minimum percent of available storage space required to persist to this ivarator path. If less than this
+##        percent is available, we will not use this ivarator path.
+ivarator.cache.dir.config= \
+ \
+\n \
+\n
+
+# By comparison, here is an example configuration which specifies two ivarator cache dirs.
+# - The first cache directory is on the local disk, has a priority of 0, and has minAvailableStorageMB set to 4096.
+# - The second cache directory is in hdfs, has a priority of 1, and has minAvailableStoragePercent set to 0.33 (i.e. 33%)
+#ivarator.cache.dir.config= \
+# \
+#\n \
+#\n \
+#\n \
+#\n \
+#\n \
+#\n \
+#\n \
+#\n \
+#\n
+
+ivarator.fst.hdfs.base.uris=hdfs:///IvaratorCache
+ivarator.zookeeper.hosts=
+
+id.translation.return.fields=
+
+jboss.log.dir=
+jboss.data.dir=
+
+############################
+#
+# Query Metrics Settings
+#
+############################
+query.metrics.ingest.policy.enforcer.class=datawave.policy.IngestPolicyEnforcer$NoOpIngestPolicyEnforcer
+query.metrics.marking=
+query.metrics.visibility=
+
+############################
+#
+# Mutable Metadata Settings
+#
+############################
+mutable.metadata.index.only.mapping=
+mutable.metadata.index.only.suffixes=
+mutable.metadata.content.fields=
+
+metrics.warehouse.namenode=
+metrics.warehouse.hadoop.path=
+
+cache.accumulo.username=
+cache.accumulo.password=
+cache.accumulo.zookeepers=
+cache.accumulo.instance=
+
+DATAWAVE_INGEST_HOME=/opt/datawave-ingest
+
+EDGE_EVALUATE_PRECONDITIONS=false
+EDGE_DEFINITION_FILE=config/edge-definitions.xml
+
+COMPOSITE_INGEST_DATA_TYPES=
+DEPRECATED_INGEST_DATA_TYPES=
+PASSWORD_INGEST_ENV=/opt/datawave-ingest/ingest-passwd.sh
+
+INCLUDE_UID_TIME_COMPONENT=false
+SHARD_INDEX_CREATE_UIDS=true
+
+LIVE_FLAG_TIMEOUT_MS=10000
+BULK_FLAG_TIMEOUT_MS=480000
+
+LIVE_FLAG_COLLECT_METRICS=false
+BULK_FLAG_COLLECT_METRICS=false
+
+FLAG_EXTRA_ARGS=
+MAP_FILE_LOADER_EXTRA_ARGS=-ingestMetricsDisabled
+
+JOB_OBSERVERS=
+JOB_OBSERVER_EXTRA_OPTS=
+
+# These should be set only if deploying on the CDH distro of Accumulo;
+# otherwise leave them blank
+WAREHOUSE_ACCUMULO_LIB=
+WAREHOUSE_ACCUMULO_BIN=
+
+mutableMetadata.securityMarkingExemptFields=
+
+####################################
+# Internal Edge Model Defaults
+#
+# Allows the edge query model to be dictated by the needs of the deployment environment.
+# Additionally, the default field names defined here can be overridden by the use of custom
+# query models, as with the event-based query logics
+#
+####################################
+edge.model.base.map= \
+\n \
+\n \
+\n \
+\n \
+\n \
+\n \
+\n \
+\n \
+\n \
+\n \
+\n
+edge.model.keyutil.map= \
+\n \
+\n \
+\n \
+\n \
+\n
+edge.model.transform.map= \
+\n \
+\n \
+\n \
+\n \
+\n \
+\n
+
+# Comma-separated list of auths needed for internal queries against DW's metadata table
+metadatahelper.default.auths=
+
+# Comma-separated list of valid OU values that denote an "NPE" (server) DN.
+# This should be overridden as needed in the deployment environment to reflect
+# PKI validation requirements there.
+security.npe.ou.entries=OVERRIDE_ME_IN_ENVIRONMENT_PROFILE_PROPERTIES
+
+# Regex pattern denoting a valid subject DN. This should be overridden as needed in the
+# deployment environment to reflect PKI validation requirements there.
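+# Example (hypothetical organization): a pattern such as "cn=[^,]+, ?ou=[^,]+, ?o=example corp, ?c=us"
+# would accept only subject DNs issued under that organization.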
+security.subject.dn.pattern=OVERRIDE_ME_IN_ENVIRONMENT_PROFILE_PROPERTIES + +############################ +# +# TypeMetadata +# +############################ +type.metadata.hdfs.uri=hdfs://localhost:8020/ +type.metadata.dir=/datawave/TypeMetadata +type.metadata.fileName=typeMetadata + +########################## +# +# UID Caching +# +########################## +SNOWFLAKE_ZOOKEEPER_ENABLED=false +SNOWFLAKE_ZOOKEEPERS= + +########################## +# +# Timely Defaults +# +########################## + +timely.host=localhost +timely.tcp.port=4242 +timely.udp.port=4245 +query.metrics.timelyMetricTags= \ +\n \ +\n USER \ +\n HOST \ +\n QUERY_ID \ +\n QUERY_LOGIC \ +\n + +########################## +# +# Extra DataWave Docs Menu Items +# +########################## + +datawave.docs.menu.extras= + +########################## +# +# Basemap Configuration for Query Geometry Map +# +# Add key-value pairings of layer name to leaflet tile layers. The first +# key-value pairing will be used as the default basemap. +# +# In order to use the example provided below, you will need to supply your +# own mapbox access token. +# +########################## +basemaps= {\ + 'Mapbox Streets': L.tileLayer( \ + 'https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}.png?access_token={accessToken}', \ + { \ + maxZoom: 18, \ + id: 'mapbox.streets', \ + accessToken: 'your.mapbox.access.token' \ + }), \ + 'Mapbox Satellite': L.tileLayer( \ + 'https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}.png?access_token={accessToken}', \ + { \ + maxZoom: 18, \ + id: 'mapbox.satellite', \ + accessToken: 'your.mapbox.access.token' \ + }) \ + } diff --git a/microservices/configcheck/src/test/resources/rendered/comparison.diff b/microservices/configcheck/src/test/resources/rendered/comparison.diff new file mode 100644 index 00000000000..9d177b29874 --- /dev/null +++ b/microservices/configcheck/src/test/resources/rendered/comparison.diff @@ -0,0 +1,617 @@ +Values (key: value) +---------------------------------------- +<<<<<<< FIRST +======= +doc.beans.AstValidator.validateFlatten: true +doc.beans.AstValidator.validateJunctions: true +doc.beans.AstValidator.validateLineage: true +doc.beans.AstValidator.validateQueryPropertyMarkers: true +doc.beans.AstValidator.validateReferenceExpressions: true +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.BaseEventQuery.accumuloPassword: "${warehouse.accumulo.password}" +doc.beans.BaseEventQuery.auditType: "ACTIVE" +doc.beans.BaseEventQuery.baseIteratorPriority: "${warehouse.defaults.baseIteratorPriority}" +======= +doc.beans.BaseEventQuery.accumuloPassword: "${accumulo.user.password}" +doc.beans.BaseEventQuery.auditType: "NONE" +doc.beans.BaseEventQuery.baseIteratorPriority: 100 +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.BaseEventQuery.checkpointable: "${warehouse.defaults.checkpointable}" +======= +>>>>>>> SECOND +doc.beans.BaseEventQuery.collapseUids: false +doc.beans.BaseEventQuery.collapseUidsThreshold: -1 +doc.beans.BaseEventQuery.collectQueryMetrics: true +<<<<<<< FIRST +======= +doc.beans.BaseEventQuery.contentFieldNames: "CONTENT" +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.BaseEventQuery.dateIndexTableName: "${warehouse.tables.dateIndex.name}" +doc.beans.BaseEventQuery.dateIndexThreads: "${warehouse.defaults.dateIndexThreads}" +======= +doc.beans.BaseEventQuery.dateIndexTableName: "${table.name.dateIndex}" +doc.beans.BaseEventQuery.dateIndexThreads: 20 +>>>>>>> SECOND +doc.beans.BaseEventQuery.defaultDateTypeName: "EVENT" +doc.beans.BaseEventQuery.disableIndexOnlyDocuments: false +<<<<<<< FIRST +======= 
+doc.beans.BaseEventQuery.docAggregationThresholdMs: -1 +doc.beans.BaseEventQuery.enricherClassNames: "datawave.query.enrich.DatawaveTermFrequencyEnricher" +>>>>>>> SECOND +doc.beans.BaseEventQuery.evaluationOnlyFields: "" +<<<<<<< FIRST +doc.beans.BaseEventQuery.eventPerDayThreshold: "${warehouse.defaults.eventPerDayThreshold}" +doc.beans.BaseEventQuery.finalMaxTermThreshold: "${warehouse.defaults.finalMaxTermThreshold}" +doc.beans.BaseEventQuery.fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}" +doc.beans.BaseEventQuery.hdfsSiteConfigURLs: "${warehouse.defaults.hdfsSiteConfigURLs}" +======= +doc.beans.BaseEventQuery.eventPerDayThreshold: 40000 +doc.beans.BaseEventQuery.finalMaxTermThreshold: 2000 +doc.beans.BaseEventQuery.fullTableScanEnabled: false +doc.beans.BaseEventQuery.hdfsSiteConfigURLs: "file:///etc/hadoop/conf/core-site.xml,file:///etc/hadoop/conf/hdfs-site.xml" +>>>>>>> SECOND +doc.beans.BaseEventQuery.includeDataTypeAsField: false +doc.beans.BaseEventQuery.includeHierarchyFields: false +<<<<<<< FIRST +doc.beans.BaseEventQuery.indexLookupThreads: "${warehouse.defaults.indexLookupThreads}" +======= +doc.beans.BaseEventQuery.indexLookupThreads: 100 +>>>>>>> SECOND +doc.beans.BaseEventQuery.indexOnlyFilterFunctionsEnabled: false +<<<<<<< FIRST +doc.beans.BaseEventQuery.indexTableName: "${warehouse.tables.index.name}" +doc.beans.BaseEventQuery.initialMaxTermThreshold: "${warehouse.defaults.initialMaxTermThreshold}" +doc.beans.BaseEventQuery.ivaratorCacheBufferSize: "${warehouse.defaults.ivaratorCacheBufferSize}" +doc.beans.BaseEventQuery.ivaratorCacheScanPersistThreshold: "${warehouse.defaults.ivaratorCacheScanPersistThreshold}" +doc.beans.BaseEventQuery.ivaratorCacheScanTimeoutMinutes: "${warehouse.defaults.ivaratorCacheScanTimeoutMinutes}" +doc.beans.BaseEventQuery.ivaratorFstHdfsBaseURIs: "${warehouse.defaults.ivaratorFstHdfsBaseURIs}" +doc.beans.BaseEventQuery.ivaratorMaxOpenFiles: "${warehouse.defaults.ivaratorMaxOpenFiles}" +======= +doc.beans.BaseEventQuery.indexTableName: "${table.name.shardIndex}" +doc.beans.BaseEventQuery.initialMaxTermThreshold: 2000 +doc.beans.BaseEventQuery.ivaratorCacheBufferSize: 10000 +doc.beans.BaseEventQuery.ivaratorCacheScanPersistThreshold: 100000 +doc.beans.BaseEventQuery.ivaratorCacheScanTimeoutMinutes: 60 +doc.beans.BaseEventQuery.ivaratorFstHdfsBaseURIs: "hdfs:///IvaratorCache" +doc.beans.BaseEventQuery.ivaratorMaxOpenFiles: 100 +>>>>>>> SECOND +<<<<<<< FIRST +======= +doc.beans.BaseEventQuery.lazySetMechanismEnabled: false +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.BaseEventQuery.logTimingDetails: true +======= +doc.beans.BaseEventQuery.logTimingDetails: false +>>>>>>> SECOND +doc.beans.BaseEventQuery.logicDescription: "Retrieve sharded events/documents, leveraging the global index tables as needed" +<<<<<<< FIRST +doc.beans.BaseEventQuery.maxConcurrentTasks: 10 +======= +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.BaseEventQuery.maxDepthThreshold: "${warehouse.defaults.maxDepthThreshold}" +doc.beans.BaseEventQuery.maxEvaluationPipelines: "${warehouse.defaults.maxEvaluationPipelines}" +doc.beans.BaseEventQuery.maxFieldIndexRangeSplit: "${warehouse.defaults.maxFieldIndexRangeSplit}" +doc.beans.BaseEventQuery.maxIndexScanTimeMillis: "${warehouse.defaults.maxIndexScanTimeMillis}" +doc.beans.BaseEventQuery.maxIvaratorSources: "${warehouse.defaults.maxIvaratorSources}" +doc.beans.BaseEventQuery.maxOrExpansionFstThreshold: "${warehouse.defaults.maxOrExpansionFstThreshold}" +doc.beans.BaseEventQuery.maxOrExpansionThreshold: 
"${warehouse.defaults.maxOrExpansionThreshold}" +doc.beans.BaseEventQuery.maxOrRangeThreshold: "${warehouse.defaults.maxOrRangeThreshold}" +doc.beans.BaseEventQuery.maxPipelineCachedResults: "${warehouse.defaults.maxPipelineCachedResults}" +======= +doc.beans.BaseEventQuery.maxDepthThreshold: 2000 +doc.beans.BaseEventQuery.maxEvaluationPipelines: 16 +doc.beans.BaseEventQuery.maxFieldIndexRangeSplit: 16 +doc.beans.BaseEventQuery.maxIndexScanTimeMillis: 3.1536E10 +doc.beans.BaseEventQuery.maxIvaratorSources: 20 +doc.beans.BaseEventQuery.maxOrExpansionFstThreshold: 750 +doc.beans.BaseEventQuery.maxOrExpansionThreshold: 500 +doc.beans.BaseEventQuery.maxOrRangeThreshold: 10 +doc.beans.BaseEventQuery.maxPipelineCachedResults: 16 +>>>>>>> SECOND +doc.beans.BaseEventQuery.maxResults: -1 +<<<<<<< FIRST +doc.beans.BaseEventQuery.maxUnfieldedExpansionThreshold: "${warehouse.defaults.maxUnfieldedExpansionThreshold}" +doc.beans.BaseEventQuery.maxValueExpansionThreshold: "${warehouse.defaults.maxValueExpansionThreshold}" +doc.beans.BaseEventQuery.metadataTableName: "${warehouse.tables.metadata.name}" +======= +doc.beans.BaseEventQuery.maxUnfieldedExpansionThreshold: 50 +doc.beans.BaseEventQuery.maxValueExpansionThreshold: 50 +doc.beans.BaseEventQuery.metadataTableName: "${table.name.metadata}" +>>>>>>> SECOND +doc.beans.BaseEventQuery.minimumSelectivity: 0.2 +<<<<<<< FIRST +doc.beans.BaseEventQuery.modelName: "${warehouse.defaults.modelName}" +doc.beans.BaseEventQuery.modelTableName: "${warehouse.tables.model.name}" +======= +doc.beans.BaseEventQuery.modelName: "DATAWAVE" +doc.beans.BaseEventQuery.modelTableName: "${table.name.metadata}" +>>>>>>> SECOND +<<<<<<< FIRST +======= +doc.beans.BaseEventQuery.querySyntaxParsers.JEXL: null +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.BaseEventQuery.queryThreads: "${warehouse.defaults.queryThreads}" +======= +doc.beans.BaseEventQuery.queryThreads: 100 +>>>>>>> SECOND +<<<<<<< FIRST +======= +doc.beans.BaseEventQuery.realmSuffixExclusionPatterns: "<.*>$" +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.BaseEventQuery.reverseIndexTableName: "${warehouse.tables.reverseIndex.name}" +======= +doc.beans.BaseEventQuery.reverseIndexTableName: "${table.name.shardReverseIndex}" +>>>>>>> SECOND +doc.beans.BaseEventQuery.sendTimingToStatsd: false +<<<<<<< FIRST +doc.beans.BaseEventQuery.shardsPerDayThreshold: "${warehouse.defaults.shardsPerDayThreshold}" +doc.beans.BaseEventQuery.statsdHost: "${warehouse.statsd.host}" +doc.beans.BaseEventQuery.statsdPort: "${warehouse.statsd.port}" +doc.beans.BaseEventQuery.tableName: "${warehouse.tables.shard.name}" +======= +doc.beans.BaseEventQuery.shardsPerDayThreshold: 20 +doc.beans.BaseEventQuery.statsdHost: "localhost" +doc.beans.BaseEventQuery.statsdPort: 8125 +doc.beans.BaseEventQuery.tableName: "${table.name.shard}" +>>>>>>> SECOND +<<<<<<< FIRST +======= +doc.beans.BaseEventQuery.tfAggregationThresholdMs: -1 +>>>>>>> SECOND +doc.beans.BaseEventQuery.useEnrichers: true +doc.beans.BaseEventQuery.useFilters: false +<<<<<<< FIRST +doc.beans.BaseEventQuery.zookeeperConfig: "${warehouse.accumulo.zookeepers}" +======= +doc.beans.BaseEventQuery.zookeeperConfig: "" +>>>>>>> SECOND +<<<<<<< FIRST +======= +doc.beans.BaseModelEventQuery.modelName: "DATAWAVE" +doc.beans.BaseModelEventQuery.modelTableName: "${table.name.metadata}" +>>>>>>> SECOND +doc.beans.ContentQuery.auditType: "NONE" +<<<<<<< FIRST +doc.beans.ContentQuery.checkpointable: "${warehouse.defaults.checkpointable}" +======= +>>>>>>> SECOND +doc.beans.ContentQuery.logicDescription: "Query 
that returns a document given the document identifier" +doc.beans.ContentQuery.maxResults: -1 +doc.beans.ContentQuery.maxWork: -1 +<<<<<<< FIRST +doc.beans.ContentQuery.tableName: "${warehouse.tables.shard.name}" +======= +doc.beans.ContentQuery.tableName: "${table.name.shard}" +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.CountQuery.checkpointable: "${warehouse.defaults.checkpointable}" +======= +>>>>>>> SECOND +doc.beans.CountQuery.logicDescription: "Retrieve event/document counts based on your search criteria" +doc.beans.DefaultQueryPlanner.compressOptionMappings: true +doc.beans.DefaultQueryPlanner[0]: 2611 +doc.beans.DefaultQueryPlanner[1]: true +doc.beans.DiscoveryQuery.allowLeadingWildcard: true +doc.beans.DiscoveryQuery.auditType: "NONE" +<<<<<<< FIRST +doc.beans.DiscoveryQuery.checkpointable: "${warehouse.defaults.checkpointable}" +======= +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.DiscoveryQuery.fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}" +doc.beans.DiscoveryQuery.indexTableName: "${warehouse.tables.index.name}" +======= +doc.beans.DiscoveryQuery.fullTableScanEnabled: false +doc.beans.DiscoveryQuery.indexTableName: "${table.name.shardIndex}" +>>>>>>> SECOND +doc.beans.DiscoveryQuery.logicDescription: "Discovery query that returns information from the index about the supplied term(s)" +doc.beans.DiscoveryQuery.maxResults: -1 +doc.beans.DiscoveryQuery.maxWork: -1 +<<<<<<< FIRST +doc.beans.DiscoveryQuery.metadataTableName: "${warehouse.tables.metadata.name}" +doc.beans.DiscoveryQuery.modelName: "${warehouse.defaults.modelName}" +doc.beans.DiscoveryQuery.modelTableName: "${warehouse.tables.model.name}" +doc.beans.DiscoveryQuery.reverseIndexTableName: "${warehouse.tables.reverseIndex.name}" +doc.beans.DiscoveryQuery.tableName: "${warehouse.tables.shard.name}" +======= +doc.beans.DiscoveryQuery.metadataTableName: "${table.name.metadata}" +doc.beans.DiscoveryQuery.modelName: "DATAWAVE" +doc.beans.DiscoveryQuery.modelTableName: "${table.name.metadata}" +doc.beans.DiscoveryQuery.reverseIndexTableName: "${table.name.shardReverseIndex}" +doc.beans.DiscoveryQuery.tableName: "${table.name.shardIndex}" +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.EdgeEventQuery.checkpointable: "${warehouse.defaults.checkpointable}" +======= +>>>>>>> SECOND +doc.beans.EdgeEventQuery.edgeModelName: "DATAWAVE_EDGE" +doc.beans.EdgeEventQuery.logicDescription: "Use results of an EdgeQuery to obtain events/documents that created the given edge" +<<<<<<< FIRST +doc.beans.EdgeEventQuery.modelTableName: "${warehouse.tables.model.name}" +======= +doc.beans.EdgeEventQuery.modelTableName: "${table.name.metadata}" +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.ErrorCountQuery.checkpointable: "${warehouse.defaults.checkpointable}" +======= +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.ErrorCountQuery.indexTableName: "${warehouse.errorTables.index.name}" +======= +doc.beans.ErrorCountQuery.indexTableName: "${table.name.errors.shardIndex}" +>>>>>>> SECOND +doc.beans.ErrorCountQuery.logicDescription: "Retrieve counts of errored events based on your search criteria" +<<<<<<< FIRST +doc.beans.ErrorCountQuery.metadataTableName: "${warehouse.errorTables.metadata.name}" +doc.beans.ErrorCountQuery.reverseIndexTableName: "${warehouse.errorTables.index.name}" +doc.beans.ErrorCountQuery.tableName: "${warehouse.errorTables.shard.name}" +======= +doc.beans.ErrorCountQuery.metadataTableName: "${table.name.errors.metadata}" +doc.beans.ErrorCountQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}" 
+doc.beans.ErrorCountQuery.tableName: "${table.name.errors.shard}" +>>>>>>> SECOND +doc.beans.ErrorDiscoveryQuery.allowLeadingWildcard: true +doc.beans.ErrorDiscoveryQuery.auditType: "NONE" +<<<<<<< FIRST +doc.beans.ErrorDiscoveryQuery.checkpointable: "${warehouse.defaults.checkpointable}" +======= +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.ErrorDiscoveryQuery.fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}" +doc.beans.ErrorDiscoveryQuery.indexTableName: "${warehouse.errorTables.index.name}" +======= +doc.beans.ErrorDiscoveryQuery.fullTableScanEnabled: false +doc.beans.ErrorDiscoveryQuery.indexTableName: "${table.name.errors.shardIndex}" +>>>>>>> SECOND +doc.beans.ErrorDiscoveryQuery.logicDescription: "Discovery query that returns information from the ingest errors index about the supplied term(s)" +doc.beans.ErrorDiscoveryQuery.maxResults: -1 +doc.beans.ErrorDiscoveryQuery.maxWork: -1 +<<<<<<< FIRST +doc.beans.ErrorDiscoveryQuery.metadataTableName: "${warehouse.errorTables.metadata.name}" +doc.beans.ErrorDiscoveryQuery.modelName: "${warehouse.defaults.modelName}" +doc.beans.ErrorDiscoveryQuery.modelTableName: "${warehouse.errorTables.model.name}" +doc.beans.ErrorDiscoveryQuery.reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}" +doc.beans.ErrorDiscoveryQuery.tableName: "${warehouse.errorTables.shard.name}" +======= +doc.beans.ErrorDiscoveryQuery.metadataTableName: "${table.name.errors.metadata}" +doc.beans.ErrorDiscoveryQuery.modelName: "DATAWAVE" +doc.beans.ErrorDiscoveryQuery.modelTableName: "${table.name.metadata}" +doc.beans.ErrorDiscoveryQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}" +doc.beans.ErrorDiscoveryQuery.tableName: "${table.name.errors.shardIndex}" +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.ErrorEventQuery.checkpointable: "${warehouse.defaults.checkpointable}" +======= +>>>>>>> SECOND +doc.beans.ErrorEventQuery.dateIndexTableName: "" +doc.beans.ErrorEventQuery.includeHierarchyFields: false +<<<<<<< FIRST +doc.beans.ErrorEventQuery.indexTableName: "${warehouse.errorTables.index.name}" +======= +doc.beans.ErrorEventQuery.indexTableName: "${table.name.errors.shardIndex}" +>>>>>>> SECOND +doc.beans.ErrorEventQuery.logicDescription: "Retrieve events/documents that encountered one or more errors during ingest" +<<<<<<< FIRST +doc.beans.ErrorEventQuery.metadataTableName: "${warehouse.errorTables.metadata.name}" +doc.beans.ErrorEventQuery.reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}" +doc.beans.ErrorEventQuery.tableName: "${warehouse.errorTables.shard.name}" +======= +doc.beans.ErrorEventQuery.metadataTableName: "${table.name.errors.metadata}" +doc.beans.ErrorEventQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}" +doc.beans.ErrorEventQuery.tableName: "${table.name.errors.shard}" +>>>>>>> SECOND +doc.beans.ErrorFieldIndexCountQuery.auditType: "NONE" +<<<<<<< FIRST +doc.beans.ErrorFieldIndexCountQuery.checkpointable: false +======= +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.ErrorFieldIndexCountQuery.indexTableName: "${warehouse.errorTables.index.name}" +======= +doc.beans.ErrorFieldIndexCountQuery.indexTableName: "${table.name.errors.shardIndex}" +>>>>>>> SECOND +doc.beans.ErrorFieldIndexCountQuery.logicDescription: "FieldIndex count query (experimental)" +doc.beans.ErrorFieldIndexCountQuery.maxResults: -1 +doc.beans.ErrorFieldIndexCountQuery.maxUniqueValues: 20000 +doc.beans.ErrorFieldIndexCountQuery.maxWork: -1 +<<<<<<< FIRST +doc.beans.ErrorFieldIndexCountQuery.metadataTableName: 
"${warehouse.errorTables.metadata.name}" +======= +doc.beans.ErrorFieldIndexCountQuery.metadataTableName: "${table.name.errors.metadata}" +>>>>>>> SECOND +doc.beans.ErrorFieldIndexCountQuery.modelName: "DATAWAVE" +<<<<<<< FIRST +doc.beans.ErrorFieldIndexCountQuery.modelTableName: "${warehouse.errorTables.model.name}" +doc.beans.ErrorFieldIndexCountQuery.queryThreads: "${warehouse.defaults.queryThreads}" +doc.beans.ErrorFieldIndexCountQuery.reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}" +doc.beans.ErrorFieldIndexCountQuery.tableName: "${warehouse.errorTables.shard.name}" +======= +doc.beans.ErrorFieldIndexCountQuery.modelTableName: "${table.name.metadata}" +doc.beans.ErrorFieldIndexCountQuery.queryThreads: 100 +doc.beans.ErrorFieldIndexCountQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}" +doc.beans.ErrorFieldIndexCountQuery.tableName: "${table.name.errors.shard}" +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.EventQuery.checkpointable: "${warehouse.defaults.checkpointable}" +======= +>>>>>>> SECOND +doc.beans.EventQuery.logicDescription: "Query the sharded event/document schema, leveraging the global index tables as needed" +doc.beans.FacetedQuery.auditType: "NONE" +<<<<<<< FIRST +doc.beans.FacetedQuery.checkpointable: "${warehouse.defaults.checkpointable}" +======= +>>>>>>> SECOND +doc.beans.FacetedQuery.facetHashTableName: "datawave.facetHashes" +doc.beans.FacetedQuery.facetMetadataTableName: "datawave.facetMetadata" +doc.beans.FacetedQuery.facetTableName: "datawave.facets" +doc.beans.FacetedQuery.facetedSearchType: "FIELD_VALUE_FACETS" +<<<<<<< FIRST +======= +doc.beans.FacetedQuery.fullTableScanEnabled: false +>>>>>>> SECOND +doc.beans.FacetedQuery.logicDescription: "Faceted search over indexed fields, returning aggregate counts for field values" +doc.beans.FacetedQuery.maximumFacetGrouping: 200 +doc.beans.FacetedQuery.minimumFacet: 1 +<<<<<<< FIRST +======= +doc.beans.FacetedQuery.querySyntaxParsers.JEXL: null +>>>>>>> SECOND +doc.beans.FacetedQuery.streaming: true +doc.beans.FieldIndexCountQuery.auditType: "NONE" +<<<<<<< FIRST +doc.beans.FieldIndexCountQuery.checkpointable: false +======= +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.FieldIndexCountQuery.indexTableName: "${warehouse.tables.index.name}" +======= +doc.beans.FieldIndexCountQuery.indexTableName: "${table.name.shardIndex}" +>>>>>>> SECOND +doc.beans.FieldIndexCountQuery.logicDescription: "Indexed Fields Only: Given FIELDNAME returns counts for each unique value. Given FIELDNAME:FIELDVALUE returns count for only that value." 
+doc.beans.FieldIndexCountQuery.maxResults: -1
+doc.beans.FieldIndexCountQuery.maxUniqueValues: 20000
+doc.beans.FieldIndexCountQuery.maxWork: -1
+<<<<<<< FIRST
+doc.beans.FieldIndexCountQuery.metadataTableName: "${warehouse.tables.metadata.name}"
+=======
+doc.beans.FieldIndexCountQuery.metadataTableName: "${table.name.metadata}"
+>>>>>>> SECOND
+doc.beans.FieldIndexCountQuery.modelName: "DATAWAVE"
+<<<<<<< FIRST
+doc.beans.FieldIndexCountQuery.modelTableName: "${warehouse.tables.model.name}"
+doc.beans.FieldIndexCountQuery.queryThreads: "${warehouse.defaults.queryThreads}"
+doc.beans.FieldIndexCountQuery.reverseIndexTableName: "${warehouse.tables.reverseIndex.name}"
+doc.beans.FieldIndexCountQuery.tableName: "${warehouse.tables.shard.name}"
+=======
+doc.beans.FieldIndexCountQuery.modelTableName: "${table.name.metadata}"
+doc.beans.FieldIndexCountQuery.queryThreads: 100
+doc.beans.FieldIndexCountQuery.reverseIndexTableName: "${table.name.shardReverseIndex}"
+doc.beans.FieldIndexCountQuery.tableName: "${table.name.shard}"
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.HitHighlights.accumuloPassword: "${warehouse.accumulo.password}"
+=======
+>>>>>>> SECOND
+doc.beans.HitHighlights.auditType: "NONE"
+<<<<<<< FIRST
+doc.beans.HitHighlights.checkpointable: "${warehouse.defaults.checkpointable}"
+doc.beans.HitHighlights.dateIndexTableName: "${warehouse.tables.dateIndex.name}"
+doc.beans.HitHighlights.defaultDateTypeName: "EVENT"
+=======
+>>>>>>> SECOND
+doc.beans.HitHighlights.eventPerDayThreshold: 40000
+<<<<<<< FIRST
+doc.beans.HitHighlights.finalMaxTermThreshold: "${warehouse.defaults.finalMaxTermThreshold}"
+doc.beans.HitHighlights.fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}"
+doc.beans.HitHighlights.hdfsSiteConfigURLs: "${warehouse.defaults.hdfsSiteConfigURLs}"
+=======
+doc.beans.HitHighlights.finalMaxTermThreshold: 2000
+doc.beans.HitHighlights.fullTableScanEnabled: false
+doc.beans.HitHighlights.hdfsSiteConfigURLs: "file:///etc/hadoop/conf/core-site.xml,file:///etc/hadoop/conf/hdfs-site.xml"
+>>>>>>> SECOND
+doc.beans.HitHighlights.includeDataTypeAsField: false
+doc.beans.HitHighlights.includeGroupingContext: false
+<<<<<<< FIRST
+doc.beans.HitHighlights.indexTableName: "${warehouse.tables.index.name}"
+doc.beans.HitHighlights.initialMaxTermThreshold: "${warehouse.defaults.initialMaxTermThreshold}"
+=======
+doc.beans.HitHighlights.indexTableName: "${table.name.shardIndex}"
+doc.beans.HitHighlights.initialMaxTermThreshold: 2000
+>>>>>>> SECOND
+doc.beans.HitHighlights.ivaratorCacheBufferSize: 10000
+doc.beans.HitHighlights.ivaratorCacheScanPersistThreshold: 100000
+<<<<<<< FIRST
+doc.beans.HitHighlights.ivaratorCacheScanTimeoutMinutes: "${warehouse.defaults.ivaratorCacheScanTimeoutMinutes}"
+doc.beans.HitHighlights.ivaratorFstHdfsBaseURIs: "${warehouse.defaults.ivaratorFstHdfsBaseURIs}"
+doc.beans.HitHighlights.ivaratorMaxOpenFiles: "${warehouse.defaults.ivaratorMaxOpenFiles}"
+=======
+doc.beans.HitHighlights.ivaratorCacheScanTimeoutMinutes: 60
+doc.beans.HitHighlights.ivaratorFstHdfsBaseURIs: "hdfs:///IvaratorCache"
+doc.beans.HitHighlights.ivaratorMaxOpenFiles: 100
+>>>>>>> SECOND
+doc.beans.HitHighlights.logicDescription: "Fast boolean query over indexed fields, only returning fields queried on"
+<<<<<<< FIRST
+doc.beans.HitHighlights.maxDepthThreshold: "${warehouse.defaults.maxDepthThreshold}"
+doc.beans.HitHighlights.maxEvaluationPipelines: "${warehouse.defaults.maxEvaluationPipelines}"
+doc.beans.HitHighlights.maxFieldIndexRangeSplit: "${warehouse.defaults.maxFieldIndexRangeSplit}"
+doc.beans.HitHighlights.maxOrExpansionFstThreshold: "${warehouse.defaults.maxOrExpansionFstThreshold}"
+doc.beans.HitHighlights.maxOrExpansionThreshold: "${warehouse.defaults.maxOrExpansionThreshold}"
+doc.beans.HitHighlights.maxOrRangeIvarators: "${warehouse.defaults.maxOrRangeIvarators}"
+doc.beans.HitHighlights.maxOrRangeThreshold: "${warehouse.defaults.maxOrRangeThreshold}"
+doc.beans.HitHighlights.maxPipelineCachedResults: "${warehouse.defaults.maxPipelineCachedResults}"
+doc.beans.HitHighlights.maxRangesPerRangeIvarator: "${warehouse.defaults.maxRangesPerRangeIvarator}"
+doc.beans.HitHighlights.maxUnfieldedExpansionThreshold: "${warehouse.defaults.maxUnfieldedExpansionThreshold}"
+doc.beans.HitHighlights.maxValueExpansionThreshold: "${warehouse.defaults.maxValueExpansionThreshold}"
+doc.beans.HitHighlights.metadataTableName: "${warehouse.tables.metadata.name}"
+=======
+doc.beans.HitHighlights.maxDepthThreshold: 2000
+doc.beans.HitHighlights.maxEvaluationPipelines: 16
+doc.beans.HitHighlights.maxFieldIndexRangeSplit: 16
+doc.beans.HitHighlights.maxOrExpansionFstThreshold: 750
+doc.beans.HitHighlights.maxOrExpansionThreshold: 500
+doc.beans.HitHighlights.maxOrRangeIvarators: 10
+doc.beans.HitHighlights.maxOrRangeThreshold: 10
+doc.beans.HitHighlights.maxPipelineCachedResults: 16
+doc.beans.HitHighlights.maxRangesPerRangeIvarator: 5
+doc.beans.HitHighlights.maxUnfieldedExpansionThreshold: 50
+doc.beans.HitHighlights.maxValueExpansionThreshold: 50
+doc.beans.HitHighlights.metadataTableName: "${table.name.metadata}"
+>>>>>>> SECOND
+doc.beans.HitHighlights.minimumSelectivity: 0.2
+<<<<<<< FIRST
+=======
+doc.beans.HitHighlights.querySyntaxParsers.JEXL: null
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.HitHighlights.queryThreads: "${warehouse.defaults.indexLookupThreads}"
+doc.beans.HitHighlights.reverseIndexTableName: "${warehouse.tables.reverseIndex.name}"
+doc.beans.HitHighlights.shardsPerDayThreshold: "${warehouse.defaults.shardsPerDayThreshold}"
+doc.beans.HitHighlights.tableName: "${warehouse.tables.shard.name}"
+=======
+doc.beans.HitHighlights.queryThreads: 100
+doc.beans.HitHighlights.reverseIndexTableName: "${table.name.shardReverseIndex}"
+doc.beans.HitHighlights.shardsPerDayThreshold: 20
+doc.beans.HitHighlights.tableName: "${table.name.shard}"
+>>>>>>> SECOND
+doc.beans.HitHighlights.useEnrichers: false
+<<<<<<< FIRST
+doc.beans.HitHighlights.zookeeperConfig: "${warehouse.accumulo.zookeepers}"
+=======
+doc.beans.HitHighlights.zookeeperConfig: ""
+>>>>>>> SECOND
+<<<<<<< FIRST
+=======
+doc.beans.IdTranslatorConfiguration.beginDate: 20100101
+doc.beans.IdTranslatorConfiguration.columnVisibility: ""
+>>>>>>> SECOND
+doc.beans.IndexStatsQuery.auditType: "NONE"
+doc.beans.IndexStatsQuery.selectorExtractor: null
+<<<<<<< FIRST
+doc.beans.InternalQueryMetricsQuery.collectQueryMetrics: false
+=======
+doc.beans.LookupUUIDConfiguration.beginDate: 20100101
+doc.beans.LookupUUIDConfiguration.columnVisibility: ""
+>>>>>>> SECOND
+doc.beans.LuceneUUIDEventQuery.auditType: "NONE"
+<<<<<<< FIRST
+doc.beans.LuceneUUIDEventQuery.checkpointable: "${warehouse.defaults.checkpointable}"
+=======
+>>>>>>> SECOND
+doc.beans.LuceneUUIDEventQuery.logicDescription: "Composite query logic that retrieves records from the event and error tables, based on known UUID fields, ie, those configured via UUIDTypeList in QueryLogicFactory.xml"
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.auditType: "NONE"
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.connPoolName: "UUID"
+<<<<<<< FIRST
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.dateIndexTableName: "${warehouse.errorTables.dateIndex.name}"
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.indexTableName: "${warehouse.errorTables.index.name}"
+=======
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.dateIndexTableName: ""
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.indexTableName: "${table.name.errors.shardIndex}"
+>>>>>>> SECOND
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.logicDescription: "Lucene query for event/document UUIDs for events that encountered errors at ingest time"
+<<<<<<< FIRST
+=======
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.mandatoryQuerySyntax: "LUCENE-UUID"
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.metadataTableName: "${warehouse.errorTables.metadata.name}"
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}"
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.tableName: "${warehouse.errorTables.shard.name}"
+=======
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.metadataTableName: "${table.name.errors.metadata}"
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}"
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.tableName: "${table.name.errors.shard}"
+>>>>>>> SECOND
+doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.auditType: "NONE"
+doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.connPoolName: "UUID"
+doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.logicDescription: "Lucene query for event/document UUIDs"
+<<<<<<< FIRST
+=======
+doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.mandatoryQuerySyntax: "LUCENE-UUID"
+>>>>>>> SECOND
+doc.beans.LuceneUUIDEventQuery.selectorExtractor: null
+<<<<<<< FIRST
+=======
+doc.beans.MyRemoteUserOps.queryServiceHost: "localhost"
+doc.beans.MyRemoteUserOps.queryServicePort: 8443
+doc.beans.MyRemoteUserOps.queryServiceScheme: "https"
+doc.beans.MyRemoteUserOps.queryServiceURI: "/DataWave/Security/User/"
+doc.beans.QueryLogicFactoryConfiguration.maxPageSize: 10000
+doc.beans.QueryLogicFactoryConfiguration.pageByteTrigger: 0
+>>>>>>> SECOND
+doc.beans.QueryMetricsQuery.auditType: "NONE"
+<<<<<<< FIRST
+doc.beans.QueryMetricsQuery.checkpointable: "${warehouse.defaults.checkpointable}"
+doc.beans.QueryMetricsQuery.collectQueryMetrics: true
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.QueryMetricsQuery.dateIndexTableName: "${warehouse.metricTables.dateIndex.name}"
+=======
+doc.beans.QueryMetricsQuery.dateIndexTableName: "${table.name.queryMetrics.dateIndex}"
+>>>>>>> SECOND
+doc.beans.QueryMetricsQuery.includeHierarchyFields: false
+<<<<<<< FIRST
+doc.beans.QueryMetricsQuery.indexTableName: "${warehouse.metricTables.index.name}"
+=======
+doc.beans.QueryMetricsQuery.indexTableName: "${table.name.queryMetrics.shardIndex}"
+>>>>>>> SECOND
+doc.beans.QueryMetricsQuery.logicDescription: "Retrieve query metrics based on the given search term(s)"
+<<<<<<< FIRST
+doc.beans.QueryMetricsQuery.metadataTableName: "${warehouse.metricTables.metadata.name}"
+=======
+doc.beans.QueryMetricsQuery.metadataTableName: "${table.name.queryMetrics.metadata}"
+>>>>>>> SECOND
+doc.beans.QueryMetricsQuery.modelName: "NONE"
+<<<<<<< FIRST
+doc.beans.QueryMetricsQuery.modelTableName: "${warehouse.metricTables.model.name}"
+doc.beans.QueryMetricsQuery.reverseIndexTableName: "${warehouse.metricTables.reverseIndex.name}"
+doc.beans.QueryMetricsQuery.tableName: "${warehouse.metricTables.shard.name}"
+=======
+doc.beans.QueryMetricsQuery.modelTableName: "${table.name.queryMetrics.metadata}"
+doc.beans.QueryMetricsQuery.reverseIndexTableName: "${table.name.queryMetrics.shardReverseIndex}"
+doc.beans.QueryMetricsQuery.tableName: "${table.name.queryMetrics.shard}"
+>>>>>>> SECOND
+<<<<<<< FIRST
+=======
+doc.beans.RemoteEventQuery.auditType: "NONE"
+doc.beans.RemoteEventQuery.checkpointable: false
+doc.beans.RemoteEventQuery.logicDescription: "Retrieve sharded events/documents, leveraging the global index tables as needed"
+doc.beans.RemoteEventQuery.maxResults: -1
+doc.beans.RemoteEventQuery.remoteQueryLogic: "EventQuery"
+doc.beans.RemoteEventQuery.tableName: "${table.name.shard}"
+doc.beans.RemoteQueryService.queryServiceHost: "query"
+doc.beans.RemoteQueryService.queryServicePort: 8443
+doc.beans.RemoteQueryService.queryServiceScheme: "https"
+doc.beans.RemoteQueryService.queryServiceURI: "/query/v1/"
+doc.beans.TLDEventQuery.collapseUids: false
+doc.beans.TLDEventQuery.indexFilteringClassNames: "datawave.query.function.NormalizedVersionPredicate"
+>>>>>>> SECOND
+doc.beans.TermFrequencyQuery.auditType: "NONE"
+doc.beans.TermFrequencyQuery.logicDescription: "Query that returns data from the term frequency query table"
+doc.beans.TermFrequencyQuery.maxResults: -1
+<<<<<<< FIRST
+doc.beans.TermFrequencyQuery.maxWork: -14
+doc.beans.TermFrequencyQuery.tableName: "${warehouse.tables.shard.name}"
+=======
+doc.beans.TermFrequencyQuery.maxWork: -1
+doc.beans.TermFrequencyQuery.tableName: "${table.name.shard}"
+>>>>>>> SECOND
+<<<<<<< FIRST
+=======
+doc.beans.TimedVisitorManager.debugEnabled: false
+doc.beans.TimedVisitorManager.validateAst: false
+>>>>>>> SECOND
+doc.beans.TokenizedLuceneToJexlQueryParser.tokenizeUnfieldedQueries: true
+<<<<<<< FIRST
+=======
+doc.beans.skipTokenizeFields: "DOMETA"
+doc.beans.tokenizeFields: "CONTENT"
+>>>>>>> SECOND
\ No newline at end of file
diff --git a/microservices/configcheck/src/test/resources/rendered/fullComparison.diff b/microservices/configcheck/src/test/resources/rendered/fullComparison.diff
new file mode 100644
index 00000000000..abc92c102d2
--- /dev/null
+++ b/microservices/configcheck/src/test/resources/rendered/fullComparison.diff
@@ -0,0 +1,1358 @@
+Placeholders (key: ${placeholder})
+----------------------------------------
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.accumuloPassword: ${datawave.query.logic.logics.BaseEventQuery.accumuloPassword}
+=======
+doc.beans.BaseEventQuery.accumuloPassword: ${accumulo.user.password}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.auditType: ${datawave.query.logic.logics.BaseEventQuery.auditType}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.baseIteratorPriority: ${datawave.query.logic.logics.BaseEventQuery.baseIteratorPriority}
+=======
+doc.beans.BaseEventQuery.baseIteratorPriority: ${beq.baseIteratorPriority}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.checkpointable: ${datawave.query.logic.logics.BaseEventQuery.checkpointable}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.collapseUids: ${datawave.query.logic.logics.BaseEventQuery.collapseUids}
+doc.beans.BaseEventQuery.collapseUidsThreshold: ${datawave.query.logic.logics.BaseEventQuery.collapseUidsThreshold}
+=======
+doc.beans.BaseEventQuery.collapseUids: ${query.collapse.uids}
+doc.beans.BaseEventQuery.collapseUidsThreshold: ${query.collapse.uids.threshold}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.collectQueryMetrics: ${datawave.query.logic.logics.BaseEventQuery.collectQueryMetrics}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.dateIndexTableName: ${datawave.query.logic.logics.BaseEventQuery.dateIndexTableName}
+doc.beans.BaseEventQuery.dateIndexThreads: ${datawave.query.logic.logics.BaseEventQuery.dateIndexThreads}
+doc.beans.BaseEventQuery.defaultDateTypeName: ${datawave.query.logic.logics.BaseEventQuery.defaultDateTypeName}
+doc.beans.BaseEventQuery.disableIndexOnlyDocuments: ${datawave.query.logic.logics.BaseEventQuery.disableIndexOnlyDocuments}
+doc.beans.BaseEventQuery.evaluationOnlyFields: ${datawave.query.logic.logics.BaseEventQuery.evaluationOnlyFields}
+=======
+doc.beans.BaseEventQuery.dateIndexTableName: ${date.index.table.name}
+doc.beans.BaseEventQuery.dateIndexThreads: ${date.index.threads}
+doc.beans.BaseEventQuery.defaultDateTypeName: ${default.date.type.name}
+doc.beans.BaseEventQuery.disableIndexOnlyDocuments: ${disable.index.only.documents}
+doc.beans.BaseEventQuery.evaluationOnlyFields: ${evaluation.only.fields}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.eventPerDayThreshold: ${datawave.query.logic.logics.BaseEventQuery.eventPerDayThreshold}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.finalMaxTermThreshold: ${datawave.query.logic.logics.BaseEventQuery.finalMaxTermThreshold}
+doc.beans.BaseEventQuery.fullTableScanEnabled: ${datawave.query.logic.logics.BaseEventQuery.fullTableScanEnabled}
+doc.beans.BaseEventQuery.hdfsSiteConfigURLs: ${datawave.query.logic.logics.BaseEventQuery.hdfsSiteConfigURLs}
+=======
+doc.beans.BaseEventQuery.finalMaxTermThreshold: ${beq.finalMaxTermThreshold}
+doc.beans.BaseEventQuery.fullTableScanEnabled: ${beq.fullTableScanEnabled}
+doc.beans.BaseEventQuery.hdfsSiteConfigURLs: ${hdfs.site.config.urls}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.includeDataTypeAsField: ${datawave.query.logic.logics.BaseEventQuery.includeDataTypeAsField}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.includeHierarchyFields: ${datawave.query.logic.logics.BaseEventQuery.includeHierarchyFields}
+doc.beans.BaseEventQuery.indexLookupThreads: ${datawave.query.logic.logics.BaseEventQuery.indexLookupThreads}
+doc.beans.BaseEventQuery.indexOnlyFilterFunctionsEnabled: ${datawave.query.logic.logics.BaseEventQuery.indexOnlyFilterFunctionsEnabled}
+doc.beans.BaseEventQuery.indexTableName: ${datawave.query.logic.logics.BaseEventQuery.indexTableName}
+doc.beans.BaseEventQuery.initialMaxTermThreshold: ${datawave.query.logic.logics.BaseEventQuery.initialMaxTermThreshold}
+=======
+doc.beans.BaseEventQuery.includeHierarchyFields: ${include.hierarchy.fields}
+doc.beans.BaseEventQuery.indexLookupThreads: ${index.query.threads}
+doc.beans.BaseEventQuery.indexOnlyFilterFunctionsEnabled: ${enable.index.only.filter.functions}
+doc.beans.BaseEventQuery.indexTableName: ${index.table.name}
+doc.beans.BaseEventQuery.initialMaxTermThreshold: ${beq.initialMaxTermThreshold}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.ivaratorCacheBufferSize: ${datawave.query.logic.logics.BaseEventQuery.ivaratorCacheBufferSize}
+doc.beans.BaseEventQuery.ivaratorCacheScanPersistThreshold: ${datawave.query.logic.logics.BaseEventQuery.ivaratorCacheScanPersistThreshold}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.ivaratorCacheScanTimeoutMinutes: ${datawave.query.logic.logics.BaseEventQuery.ivaratorCacheScanTimeoutMinutes}
+doc.beans.BaseEventQuery.ivaratorFstHdfsBaseURIs: ${datawave.query.logic.logics.BaseEventQuery.ivaratorFstHdfsBaseURIs}
+doc.beans.BaseEventQuery.ivaratorMaxOpenFiles: ${datawave.query.logic.logics.BaseEventQuery.ivaratorMaxOpenFiles}
+=======
+doc.beans.BaseEventQuery.ivaratorCacheScanTimeoutMinutes: ${query.max.call.time.minutes}
+doc.beans.BaseEventQuery.ivaratorFstHdfsBaseURIs: ${ivarator.fst.hdfs.base.uris}
+doc.beans.BaseEventQuery.ivaratorMaxOpenFiles: ${beq.maxIvaratorOpenFiles}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.logTimingDetails: ${datawave.query.logic.logics.BaseEventQuery.logTimingDetails}
+doc.beans.BaseEventQuery.logicDescription: ${datawave.query.logic.logics.BaseEventQuery.logicDescription}
+doc.beans.BaseEventQuery.maxConcurrentTasks: ${datawave.query.logic.logics.BaseEventQuery.maxConcurrentTasks}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.maxDepthThreshold: ${datawave.query.logic.logics.BaseEventQuery.maxDepthThreshold}
+doc.beans.BaseEventQuery.maxEvaluationPipelines: ${datawave.query.logic.logics.BaseEventQuery.maxEvaluationPipelines}
+doc.beans.BaseEventQuery.maxFieldIndexRangeSplit: ${datawave.query.logic.logics.BaseEventQuery.maxFieldIndexRangeSplit}
+doc.beans.BaseEventQuery.maxIndexScanTimeMillis: ${datawave.query.logic.logics.BaseEventQuery.maxIndexScanTimeMillis}
+doc.beans.BaseEventQuery.maxIvaratorSources: ${datawave.query.logic.logics.BaseEventQuery.maxIvaratorSources}
+doc.beans.BaseEventQuery.maxOrExpansionFstThreshold: ${datawave.query.logic.logics.BaseEventQuery.maxOrExpansionFstThreshold}
+doc.beans.BaseEventQuery.maxOrExpansionThreshold: ${datawave.query.logic.logics.BaseEventQuery.maxOrExpansionThreshold}
+doc.beans.BaseEventQuery.maxOrRangeThreshold: ${datawave.query.logic.logics.BaseEventQuery.maxOrRangeThreshold}
+doc.beans.BaseEventQuery.maxPipelineCachedResults: ${datawave.query.logic.logics.BaseEventQuery.maxPipelineCachedResults}
+doc.beans.BaseEventQuery.maxResults: ${datawave.query.logic.logics.BaseEventQuery.maxResults}
+doc.beans.BaseEventQuery.maxUnfieldedExpansionThreshold: ${datawave.query.logic.logics.BaseEventQuery.maxUnfieldedExpansionThreshold}
+doc.beans.BaseEventQuery.maxValueExpansionThreshold: ${datawave.query.logic.logics.BaseEventQuery.maxValueExpansionThreshold}
+doc.beans.BaseEventQuery.metadataTableName: ${datawave.query.logic.logics.BaseEventQuery.metadataTableName}
+=======
+doc.beans.BaseEventQuery.maxDepthThreshold: ${beq.maxDepthThreshold}
+doc.beans.BaseEventQuery.maxEvaluationPipelines: ${beq.evaluationPipelines}
+doc.beans.BaseEventQuery.maxFieldIndexRangeSplit: ${beq.fieldIndexRangeSplit}
+doc.beans.BaseEventQuery.maxIndexScanTimeMillis: ${query.max.index.scan.ms}
+doc.beans.BaseEventQuery.maxIvaratorSources: ${beq.maxIvaratorSources}
+doc.beans.BaseEventQuery.maxOrExpansionFstThreshold: ${beq.orExpansionFstThreshold}
+doc.beans.BaseEventQuery.maxOrExpansionThreshold: ${beq.orExpansionThreshold}
+doc.beans.BaseEventQuery.maxOrRangeThreshold: ${beq.orRangeThreshold}
+doc.beans.BaseEventQuery.maxPipelineCachedResults: ${beq.pipelineCachedResults}
+doc.beans.BaseEventQuery.maxResults: ${event.query.max.results}
+doc.beans.BaseEventQuery.maxUnfieldedExpansionThreshold: ${beq.unfieldedExpansionThreshold}
+doc.beans.BaseEventQuery.maxValueExpansionThreshold: ${beq.valueExpansionThreshold}
+doc.beans.BaseEventQuery.metadataTableName: ${metadata.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.minimumSelectivity: ${datawave.query.logic.logics.BaseEventQuery.minimumSelectivity}
+doc.beans.BaseEventQuery.modelName: ${datawave.query.logic.logics.BaseEventQuery.modelName}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.modelTableName: ${datawave.query.logic.logics.BaseEventQuery.modelTableName}
+doc.beans.BaseEventQuery.queryThreads: ${datawave.query.logic.logics.BaseEventQuery.queryThreads}
+doc.beans.BaseEventQuery.reverseIndexTableName: ${datawave.query.logic.logics.BaseEventQuery.reverseIndexTableName}
+=======
+doc.beans.BaseEventQuery.modelTableName: ${metadata.table.name}
+doc.beans.BaseEventQuery.queryThreads: ${shard.query.threads}
+doc.beans.BaseEventQuery.reverseIndexTableName: ${rindex.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.sendTimingToStatsd: ${datawave.query.logic.logics.BaseEventQuery.sendTimingToStatsd}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.shardsPerDayThreshold: ${datawave.query.logic.logics.BaseEventQuery.shardsPerDayThreshold}
+=======
+doc.beans.BaseEventQuery.shardsPerDayThreshold: ${beq.shardsPerDayThreshold}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.statsdHost: ${datawave.query.logic.logics.BaseEventQuery.statsdHost}
+doc.beans.BaseEventQuery.statsdPort: ${datawave.query.logic.logics.BaseEventQuery.statsdPort}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.tableName: ${datawave.query.logic.logics.BaseEventQuery.tableName}
+=======
+doc.beans.BaseEventQuery.tableName: ${shard.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.useEnrichers: ${datawave.query.logic.logics.BaseEventQuery.useEnrichers}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.useFilters: ${datawave.query.logic.logics.BaseEventQuery.useFilters}
+doc.beans.BaseEventQuery.zookeeperConfig: ${datawave.query.logic.logics.BaseEventQuery.zookeeperConfig}
+=======
+doc.beans.BaseEventQuery.useFilters: ${event.query.filters.enabled}
+doc.beans.BaseEventQuery.zookeeperConfig: ${ivarator.zookeeper.hosts}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ContentQuery.auditType: ${datawave.query.logic.logics.ContentQuery.auditType}
+doc.beans.ContentQuery.checkpointable: ${datawave.query.logic.logics.ContentQuery.checkpointable}
+doc.beans.ContentQuery.logicDescription: ${datawave.query.logic.logics.ContentQuery.logicDescription}
+doc.beans.ContentQuery.maxResults: ${datawave.query.logic.logics.ContentQuery.maxResults}
+doc.beans.ContentQuery.maxWork: ${datawave.query.logic.logics.ContentQuery.maxWork}
+=======
+doc.beans.BaseModelEventQuery.modelTableName: ${metadata.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ContentQuery.tableName: ${datawave.query.logic.logics.ContentQuery.tableName}
+=======
+doc.beans.ContentQuery.tableName: ${shard.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.CountQuery.checkpointable: ${datawave.query.logic.logics.CountQuery.checkpointable}
+doc.beans.CountQuery.logicDescription: ${datawave.query.logic.logics.CountQuery.logicDescription}
+doc.beans.DiscoveryQuery.allowLeadingWildcard: ${datawave.query.logic.logics.DiscoveryQuery.allowLeadingWildcard}
+doc.beans.DiscoveryQuery.auditType: ${datawave.query.logic.logics.DiscoveryQuery.auditType}
+doc.beans.DiscoveryQuery.checkpointable: ${datawave.query.logic.logics.DiscoveryQuery.checkpointable}
+doc.beans.DiscoveryQuery.fullTableScanEnabled: ${datawave.query.logic.logics.DiscoveryQuery.fullTableScanEnabled}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.DiscoveryQuery.indexTableName: ${datawave.query.logic.logics.DiscoveryQuery.indexTableName}
+=======
+doc.beans.DiscoveryQuery.indexTableName: ${index.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.DiscoveryQuery.logicDescription: ${datawave.query.logic.logics.DiscoveryQuery.logicDescription}
+doc.beans.DiscoveryQuery.maxResults: ${datawave.query.logic.logics.DiscoveryQuery.maxResults}
+doc.beans.DiscoveryQuery.maxWork: ${datawave.query.logic.logics.DiscoveryQuery.maxWork}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.DiscoveryQuery.metadataTableName: ${datawave.query.logic.logics.DiscoveryQuery.metadataTableName}
+=======
+doc.beans.DiscoveryQuery.metadataTableName: ${metadata.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.DiscoveryQuery.modelName: ${datawave.query.logic.logics.DiscoveryQuery.modelName}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.DiscoveryQuery.modelTableName: ${datawave.query.logic.logics.DiscoveryQuery.modelTableName}
+doc.beans.DiscoveryQuery.reverseIndexTableName: ${datawave.query.logic.logics.DiscoveryQuery.reverseIndexTableName}
+doc.beans.DiscoveryQuery.tableName: ${datawave.query.logic.logics.DiscoveryQuery.tableName}
+=======
+doc.beans.DiscoveryQuery.modelTableName: ${metadata.table.name}
+doc.beans.DiscoveryQuery.reverseIndexTableName: ${rindex.table.name}
+doc.beans.DiscoveryQuery.tableName: ${index.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.EdgeEventQuery.checkpointable: ${datawave.query.logic.logics.EdgeEventQuery.checkpointable}
+doc.beans.EdgeEventQuery.edgeModelName: ${datawave.query.logic.logics.EdgeEventQuery.edgeModelName}
+doc.beans.EdgeEventQuery.logicDescription: ${datawave.query.logic.logics.EdgeEventQuery.logicDescription}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.EdgeEventQuery.modelTableName: ${datawave.query.logic.logics.EdgeEventQuery.modelTableName}
+=======
+doc.beans.EdgeEventQuery.modelTableName: ${metadata.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorCountQuery.checkpointable: ${datawave.query.logic.logics.ErrorCountQuery.checkpointable}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorCountQuery.indexTableName: ${datawave.query.logic.logics.ErrorCountQuery.indexTableName}
+=======
+doc.beans.ErrorCountQuery.indexTableName: ${error.index.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorCountQuery.logicDescription: ${datawave.query.logic.logics.ErrorCountQuery.logicDescription}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorCountQuery.metadataTableName: ${datawave.query.logic.logics.ErrorCountQuery.metadataTableName}
+doc.beans.ErrorCountQuery.reverseIndexTableName: ${datawave.query.logic.logics.ErrorCountQuery.indexTableName}
+doc.beans.ErrorCountQuery.tableName: ${datawave.query.logic.logics.ErrorCountQuery.tableName}
+=======
+doc.beans.ErrorCountQuery.metadataTableName: ${error.metadata.table.name}
+doc.beans.ErrorCountQuery.reverseIndexTableName: ${error.rindex.table.name}
+doc.beans.ErrorCountQuery.tableName: ${error.shard.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorDiscoveryQuery.allowLeadingWildcard: ${datawave.query.logic.logics.ErrorDiscoveryQuery.allowLeadingWildcard}
+doc.beans.ErrorDiscoveryQuery.auditType: ${datawave.query.logic.logics.ErrorDiscoveryQuery.auditType}
+doc.beans.ErrorDiscoveryQuery.checkpointable: ${datawave.query.logic.logics.ErrorDiscoveryQuery.checkpointable}
+doc.beans.ErrorDiscoveryQuery.fullTableScanEnabled: ${datawave.query.logic.logics.ErrorDiscoveryQuery.fullTableScanEnabled}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorDiscoveryQuery.indexTableName: ${datawave.query.logic.logics.ErrorDiscoveryQuery.indexTableName}
+=======
+doc.beans.ErrorDiscoveryQuery.indexTableName: ${error.index.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorDiscoveryQuery.logicDescription: ${datawave.query.logic.logics.ErrorDiscoveryQuery.logicDescription}
+doc.beans.ErrorDiscoveryQuery.maxResults: ${datawave.query.logic.logics.ErrorDiscoveryQuery.maxResults}
+doc.beans.ErrorDiscoveryQuery.maxWork: ${datawave.query.logic.logics.ErrorDiscoveryQuery.maxWork}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorDiscoveryQuery.metadataTableName: ${datawave.query.logic.logics.ErrorDiscoveryQuery.metadataTableName}
+=======
+doc.beans.ErrorDiscoveryQuery.metadataTableName: ${error.metadata.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorDiscoveryQuery.modelName: ${datawave.query.logic.logics.ErrorDiscoveryQuery.modelName}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorDiscoveryQuery.modelTableName: ${datawave.query.logic.logics.ErrorDiscoveryQuery.modelTableName}
+doc.beans.ErrorDiscoveryQuery.reverseIndexTableName: ${datawave.query.logic.logics.ErrorDiscoveryQuery.reverseIndexTableName}
+doc.beans.ErrorDiscoveryQuery.tableName: ${datawave.query.logic.logics.ErrorDiscoveryQuery.tableName}
+=======
+doc.beans.ErrorDiscoveryQuery.modelTableName: ${metadata.table.name}
+doc.beans.ErrorDiscoveryQuery.reverseIndexTableName: ${error.rindex.table.name}
+doc.beans.ErrorDiscoveryQuery.tableName: ${error.index.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorEventQuery.checkpointable: ${datawave.query.logic.logics.ErrorEventQuery.checkpointable}
+doc.beans.ErrorEventQuery.dateIndexTableName: ${datawave.query.logic.logics.ErrorEventQuery.dateIndexTableName}
+doc.beans.ErrorEventQuery.includeHierarchyFields: ${datawave.query.logic.logics.ErrorEventQuery.includeHierarchyFields}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorEventQuery.indexTableName: ${datawave.query.logic.logics.ErrorEventQuery.indexTableName}
+=======
+doc.beans.ErrorEventQuery.indexTableName: ${error.index.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorEventQuery.logicDescription: ${datawave.query.logic.logics.ErrorEventQuery.logicDescription}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorEventQuery.metadataTableName: ${datawave.query.logic.logics.ErrorEventQuery.metadataTableName}
+doc.beans.ErrorEventQuery.reverseIndexTableName: ${datawave.query.logic.logics.ErrorEventQuery.reverseIndexTableName}
+doc.beans.ErrorEventQuery.tableName: ${datawave.query.logic.logics.ErrorEventQuery.tableName}
+=======
+doc.beans.ErrorEventQuery.metadataTableName: ${error.metadata.table.name}
+doc.beans.ErrorEventQuery.reverseIndexTableName: ${error.rindex.table.name}
+doc.beans.ErrorEventQuery.tableName: ${error.shard.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorFieldIndexCountQuery.auditType: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.auditType}
+doc.beans.ErrorFieldIndexCountQuery.checkpointable: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.checkpointable}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorFieldIndexCountQuery.indexTableName: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.indexTableName}
+=======
+doc.beans.ErrorFieldIndexCountQuery.indexTableName: ${error.index.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorFieldIndexCountQuery.logicDescription: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.logicDescription}
+doc.beans.ErrorFieldIndexCountQuery.maxResults: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.maxResults}
+doc.beans.ErrorFieldIndexCountQuery.maxUniqueValues: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.maxUniqueValues}
+doc.beans.ErrorFieldIndexCountQuery.maxWork: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.maxWork}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorFieldIndexCountQuery.metadataTableName: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.metadataTableName}
+=======
+doc.beans.ErrorFieldIndexCountQuery.metadataTableName: ${error.metadata.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorFieldIndexCountQuery.modelName: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.modelName}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorFieldIndexCountQuery.modelTableName: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.modelTableName}
+doc.beans.ErrorFieldIndexCountQuery.queryThreads: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.queryThreads}
+doc.beans.ErrorFieldIndexCountQuery.reverseIndexTableName: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.reverseIndexTableName}
+doc.beans.ErrorFieldIndexCountQuery.tableName: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.tableName}
+=======
+doc.beans.ErrorFieldIndexCountQuery.modelTableName: ${metadata.table.name}
+doc.beans.ErrorFieldIndexCountQuery.queryThreads: ${shard.query.threads}
+doc.beans.ErrorFieldIndexCountQuery.reverseIndexTableName: ${error.rindex.table.name}
+doc.beans.ErrorFieldIndexCountQuery.tableName: ${error.shard.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.EventQuery.checkpointable: ${datawave.query.logic.logics.EventQuery.checkpointable}
+doc.beans.EventQuery.logicDescription: ${datawave.query.logic.logics.EventQuery.logicDescription}
+doc.beans.FacetedQuery.auditType: ${datawave.query.logic.logics.FacetedQuery.auditType}
+doc.beans.FacetedQuery.checkpointable: ${datawave.query.logic.logics.FacetedQuery.checkpointable}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.FacetedQuery.facetHashTableName: ${datawave.query.logic.logics.FacetedQuery.facetHashTableName}
+doc.beans.FacetedQuery.facetMetadataTableName: ${datawave.query.logic.logics.FacetedQuery.facetMetadataTableName}
+doc.beans.FacetedQuery.facetTableName: ${datawave.query.logic.logics.FacetedQuery.facetTableName}
+=======
+doc.beans.FacetedQuery.facetHashTableName: ${table.name.facet.hashes}
+doc.beans.FacetedQuery.facetMetadataTableName: ${table.name.facet.metadata}
+doc.beans.FacetedQuery.facetTableName: ${table.name.facet}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.FacetedQuery.facetedSearchType: ${datawave.query.logic.logics.FacetedQuery.facetedSearchType}
+doc.beans.FacetedQuery.logicDescription: ${datawave.query.logic.logics.FacetedQuery.logicDescription}
+doc.beans.FacetedQuery.maximumFacetGrouping: ${datawave.query.logic.logics.FacetedQuery.maximumFacetGrouping}
+doc.beans.FacetedQuery.minimumFacet: ${datawave.query.logic.logics.FacetedQuery.minimumFacet}
+doc.beans.FacetedQuery.streaming: ${datawave.query.logic.logics.FacetedQuery.streaming}
+doc.beans.FieldIndexCountQuery.auditType: ${datawave.query.logic.logics.FieldIndexCountQuery.auditType}
+doc.beans.FieldIndexCountQuery.checkpointable: ${datawave.query.logic.logics.FieldIndexCountQuery.checkpointable}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.FieldIndexCountQuery.indexTableName: ${datawave.query.logic.logics.FieldIndexCountQuery.indexTableName}
+=======
+doc.beans.FieldIndexCountQuery.indexTableName: ${index.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.FieldIndexCountQuery.logicDescription: ${datawave.query.logic.logics.FieldIndexCountQuery.logicDescription}
+doc.beans.FieldIndexCountQuery.maxResults: ${datawave.query.logic.logics.FieldIndexCountQuery.maxResults}
+doc.beans.FieldIndexCountQuery.maxUniqueValues: ${datawave.query.logic.logics.FieldIndexCountQuery.maxUniqueValues}
+doc.beans.FieldIndexCountQuery.maxWork: ${datawave.query.logic.logics.FieldIndexCountQuery.maxWork}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.FieldIndexCountQuery.metadataTableName: ${datawave.query.logic.logics.FieldIndexCountQuery.metadataTableName}
+=======
+doc.beans.FieldIndexCountQuery.metadataTableName: ${metadata.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.FieldIndexCountQuery.modelName: ${datawave.query.logic.logics.FieldIndexCountQuery.modelName}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.FieldIndexCountQuery.modelTableName: ${datawave.query.logic.logics.FieldIndexCountQuery.modelTableName}
+doc.beans.FieldIndexCountQuery.queryThreads: ${datawave.query.logic.logics.FieldIndexCountQuery.queryThreads}
+doc.beans.FieldIndexCountQuery.reverseIndexTableName: ${datawave.query.logic.logics.FieldIndexCountQuery.reverseIndexTableName}
+doc.beans.FieldIndexCountQuery.tableName: ${datawave.query.logic.logics.FieldIndexCountQuery.tableName}
+=======
+doc.beans.FieldIndexCountQuery.modelTableName: ${metadata.table.name}
+doc.beans.FieldIndexCountQuery.queryThreads: ${shard.query.threads}
+doc.beans.FieldIndexCountQuery.reverseIndexTableName: ${rindex.table.name}
+doc.beans.FieldIndexCountQuery.tableName: ${shard.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.HitHighlights.accumuloPassword: ${datawave.query.logic.logics.HitHighlights.accumuloPassword}
+doc.beans.HitHighlights.auditType: ${datawave.query.logic.logics.HitHighlights.auditType}
+doc.beans.HitHighlights.checkpointable: ${datawave.query.logic.logics.HitHighlights.checkpointable}
+doc.beans.HitHighlights.dateIndexTableName: ${datawave.query.logic.logics.HitHighlights.dateIndexTableName}
+doc.beans.HitHighlights.defaultDateTypeName: ${datawave.query.logic.logics.HitHighlights.defaultDateTypeName}
+doc.beans.HitHighlights.eventPerDayThreshold: ${datawave.query.logic.logics.HitHighlights.eventPerDayThreshold}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.HitHighlights.finalMaxTermThreshold: ${datawave.query.logic.logics.BaseEventQuery.finalMaxTermThreshold}
+=======
+doc.beans.HitHighlights.finalMaxTermThreshold: ${beq.finalMaxTermThreshold}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.HitHighlights.fullTableScanEnabled: ${datawave.query.logic.logics.HitHighlights.fullTableScanEnabled}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.HitHighlights.hdfsSiteConfigURLs: ${datawave.query.logic.logics.HitHighlights.hdfsSiteConfigURLs}
+=======
+doc.beans.HitHighlights.hdfsSiteConfigURLs: ${hdfs.site.config.urls}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.HitHighlights.includeDataTypeAsField: ${datawave.query.logic.logics.HitHighlights.includeDataTypeAsField}
+doc.beans.HitHighlights.includeGroupingContext: ${datawave.query.logic.logics.HitHighlights.includeGroupingContext}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.HitHighlights.indexTableName: ${datawave.query.logic.logics.HitHighlights.indexTableName}
+doc.beans.HitHighlights.initialMaxTermThreshold: ${datawave.query.logic.logics.BaseEventQuery.initialMaxTermThreshold}
+=======
+doc.beans.HitHighlights.indexTableName: ${index.table.name}
+doc.beans.HitHighlights.initialMaxTermThreshold: ${beq.initialMaxTermThreshold}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.HitHighlights.ivaratorCacheBufferSize: ${datawave.query.logic.logics.HitHighlights.ivaratorCacheBufferSize}
+doc.beans.HitHighlights.ivaratorCacheScanPersistThreshold: ${datawave.query.logic.logics.HitHighlights.ivaratorCacheScanPersistThreshold}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.HitHighlights.ivaratorCacheScanTimeoutMinutes: ${datawave.query.logic.logics.HitHighlights.ivaratorCacheScanTimeoutMinutes}
+doc.beans.HitHighlights.ivaratorFstHdfsBaseURIs: ${datawave.query.logic.logics.HitHighlights.ivaratorFstHdfsBaseURIs}
+doc.beans.HitHighlights.ivaratorMaxOpenFiles: ${datawave.query.logic.logics.HitHighlights.ivaratorMaxOpenFiles}
+=======
+doc.beans.HitHighlights.ivaratorCacheScanTimeoutMinutes: ${query.max.call.time.minutes}
+doc.beans.HitHighlights.ivaratorFstHdfsBaseURIs: ${ivarator.fst.hdfs.base.uris}
+doc.beans.HitHighlights.ivaratorMaxOpenFiles: ${beq.maxIvaratorOpenFiles}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.HitHighlights.logicDescription: ${datawave.query.logic.logics.HitHighlights.logicDescription}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.HitHighlights.maxDepthThreshold: ${datawave.query.logic.logics.HitHighlights.maxDepthThreshold}
+doc.beans.HitHighlights.maxEvaluationPipelines: ${datawave.query.logic.logics.HitHighlights.maxEvaluationPipelines}
+doc.beans.HitHighlights.maxFieldIndexRangeSplit: ${datawave.query.logic.logics.HitHighlights.maxFieldIndexRangeSplit}
+doc.beans.HitHighlights.maxOrExpansionFstThreshold: ${datawave.query.logic.logics.HitHighlights.maxOrExpansionFstThreshold}
+doc.beans.HitHighlights.maxOrExpansionThreshold: ${datawave.query.logic.logics.HitHighlights.maxOrExpansionThreshold}
+doc.beans.HitHighlights.maxOrRangeIvarators: ${datawave.query.logic.logics.HitHighlights.maxOrRangeIvarators}
+doc.beans.HitHighlights.maxOrRangeThreshold: ${datawave.query.logic.logics.HitHighlights.maxOrRangeThreshold}
+doc.beans.HitHighlights.maxPipelineCachedResults: ${datawave.query.logic.logics.HitHighlights.maxPipelineCachedResults}
+doc.beans.HitHighlights.maxRangesPerRangeIvarator: ${datawave.query.logic.logics.HitHighlights.maxRangesPerRangeIvarator}
+doc.beans.HitHighlights.maxUnfieldedExpansionThreshold: ${datawave.query.logic.logics.HitHighlights.maxUnfieldedExpansionThreshold}
+doc.beans.HitHighlights.maxValueExpansionThreshold: ${datawave.query.logic.logics.HitHighlights.maxValueExpansionThreshold}
+doc.beans.HitHighlights.metadataTableName: ${datawave.query.logic.logics.HitHighlights.metadataTableName}
+=======
+doc.beans.HitHighlights.maxDepthThreshold: ${beq.maxDepthThreshold}
+doc.beans.HitHighlights.maxEvaluationPipelines: ${beq.evaluationPipelines}
+doc.beans.HitHighlights.maxFieldIndexRangeSplit: ${beq.fieldIndexRangeSplit}
+doc.beans.HitHighlights.maxOrExpansionFstThreshold: ${beq.orExpansionFstThreshold}
+doc.beans.HitHighlights.maxOrExpansionThreshold: ${beq.orExpansionThreshold}
+doc.beans.HitHighlights.maxOrRangeIvarators: ${beq.maxOrRangeIvarators}
+doc.beans.HitHighlights.maxOrRangeThreshold: ${beq.orRangeThreshold}
+doc.beans.HitHighlights.maxPipelineCachedResults: ${beq.pipelineCachedResults}
+doc.beans.HitHighlights.maxRangesPerRangeIvarator: ${beq.maxRangesPerRangeIvarator}
+doc.beans.HitHighlights.maxUnfieldedExpansionThreshold: ${beq.unfieldedExpansionThreshold}
+doc.beans.HitHighlights.maxValueExpansionThreshold: ${beq.valueExpansionThreshold}
+doc.beans.HitHighlights.metadataTableName: ${metadata.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.HitHighlights.minimumSelectivity: ${datawave.query.logic.logics.HitHighlights.minimumSelectivity}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.HitHighlights.queryThreads: ${datawave.query.logic.logics.HitHighlights.queryThreads}
+doc.beans.HitHighlights.reverseIndexTableName: ${datawave.query.logic.logics.HitHighlights.reverseIndexTableName}
+doc.beans.HitHighlights.shardsPerDayThreshold: ${datawave.query.logic.logics.HitHighlights.shardsPerDayThreshold}
+doc.beans.HitHighlights.tableName: ${datawave.query.logic.logics.HitHighlights.tableName}
+=======
+doc.beans.HitHighlights.queryThreads: ${index.query.threads}
+doc.beans.HitHighlights.reverseIndexTableName: ${rindex.table.name}
+doc.beans.HitHighlights.shardsPerDayThreshold: ${beq.shardsPerDayThreshold}
+doc.beans.HitHighlights.tableName: ${shard.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.HitHighlights.useEnrichers: ${datawave.query.logic.logics.HitHighlights.useEnrichers}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.HitHighlights.zookeeperConfig: ${datawave.query.logic.logics.HitHighlights.zookeeperConfig}
+=======
+doc.beans.HitHighlights.zookeeperConfig: ${ivarator.zookeeper.hosts}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.IndexStatsQuery.auditType: ${datawave.query.logic.logics.IndexStatsQuery.auditType}
+doc.beans.InternalQueryMetricsQuery.collectQueryMetrics: ${datawave.query.logic.logics.InternalQueryMetricsQuery.collectQueryMetrics}
+doc.beans.LuceneUUIDEventQuery.auditType: ${datawave.query.logic.logics.LuceneUUIDEventQuery.auditType}
+doc.beans.LuceneUUIDEventQuery.checkpointable: ${datawave.query.logic.logics.LuceneUUIDEventQuery.checkpointable}
+doc.beans.LuceneUUIDEventQuery.logicDescription: ${datawave.query.logic.logics.LuceneUUIDEventQuery.logicDescription}
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.auditType: ${datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.auditType}
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.connPoolName: ${datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.connPoolName}
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.dateIndexTableName: ${datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.dateIndexTableName}
+=======
+doc.beans.IdTranslatorConfiguration.beginDate: ${lookup.uuid.beginDate}
+doc.beans.LookupUUIDConfiguration.beginDate: ${lookup.uuid.beginDate}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.indexTableName: ${datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.indexTableName}
+=======
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.indexTableName: ${error.index.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.logicDescription: ${datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.logicDescription}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.metadataTableName: ${datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.metadataTableName}
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.reverseIndexTableName: ${datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.reverseIndexTableName}
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.tableName: ${datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.tableName}
+=======
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.metadataTableName: ${error.metadata.table.name}
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.reverseIndexTableName: ${error.rindex.table.name}
+doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.tableName: ${error.shard.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.auditType: ${datawave.query.logic.logics.LuceneUUIDEventQuery.eventQuery.auditType}
+doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.connPoolName: ${datawave.query.logic.logics.LuceneUUIDEventQuery.eventQuery.connPoolName}
+doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.logicDescription: ${datawave.query.logic.logics.LuceneUUIDEventQuery.eventQuery.logicDescription}
+doc.beans.QueryMetricsQuery.auditType: ${datawave.query.logic.logics.QueryMetricsQuery.auditType}
+doc.beans.QueryMetricsQuery.checkpointable: ${datawave.query.logic.logics.QueryMetricsQuery.checkpointable}
+doc.beans.QueryMetricsQuery.collectQueryMetrics: ${datawave.query.logic.logics.QueryMetricsQuery.collectQueryMetrics}
+=======
+doc.beans.QueryLogicFactoryConfiguration.maxPageSize: ${query.max.page.size}
+doc.beans.QueryLogicFactoryConfiguration.pageByteTrigger: ${query.page.byte.trigger}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.QueryMetricsQuery.dateIndexTableName: ${datawave.query.logic.logics.QueryMetricsQuery.dateIndexTableName}
+=======
+doc.beans.QueryMetricsQuery.dateIndexTableName: ${querymetrics.dateindex.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.QueryMetricsQuery.includeHierarchyFields: ${datawave.query.logic.logics.QueryMetricsQuery.includeHierarchyFields}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.QueryMetricsQuery.indexTableName: ${datawave.query.logic.logics.QueryMetricsQuery.indexTableName}
+=======
+doc.beans.QueryMetricsQuery.indexTableName: ${querymetrics.index.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.QueryMetricsQuery.logicDescription: ${datawave.query.logic.logics.QueryMetricsQuery.logicDescription}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.QueryMetricsQuery.metadataTableName: ${datawave.query.logic.logics.QueryMetricsQuery.metadataTableName}
+=======
+doc.beans.QueryMetricsQuery.metadataTableName: ${querymetrics.metadata.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.QueryMetricsQuery.modelName: ${datawave.query.logic.logics.QueryMetricsQuery.modelName}
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.QueryMetricsQuery.modelTableName: ${datawave.query.logic.logics.QueryMetricsQuery.modelTableName}
+doc.beans.QueryMetricsQuery.reverseIndexTableName: ${datawave.query.logic.logics.QueryMetricsQuery.reverseIndexTableName}
+doc.beans.QueryMetricsQuery.tableName: ${datawave.query.logic.logics.QueryMetricsQuery.tableName}
+=======
+doc.beans.QueryMetricsQuery.modelTableName: ${querymetrics.metadata.table.name}
+doc.beans.QueryMetricsQuery.reverseIndexTableName: ${querymetrics.rindex.table.name}
+doc.beans.QueryMetricsQuery.tableName: ${querymetrics.shard.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.TermFrequencyQuery.auditType: ${datawave.query.logic.logics.TermFrequencyQuery.auditType}
+doc.beans.TermFrequencyQuery.logicDescription: ${datawave.query.logic.logics.TermFrequencyQuery.logicDescription}
+doc.beans.TermFrequencyQuery.maxResults: ${datawave.query.logic.logics.TermFrequencyQuery.maxResults}
+doc.beans.TermFrequencyQuery.maxWork: ${datawave.query.logic.logics.TermFrequencyQuery.maxWork}
+=======
+doc.beans.RemoteEventQuery.maxResults: ${event.query.max.results}
+doc.beans.RemoteEventQuery.tableName: ${shard.table.name}
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.TermFrequencyQuery.tableName: ${datawave.query.logic.logics.TermFrequencyQuery.tableName}
+=======
+doc.beans.TermFrequencyQuery.tableName: ${shard.table.name}
+>>>>>>> SECOND
+
+Values (key: value)
+----------------------------------------
+<<<<<<< FIRST
+=======
+doc.beans.AstValidator.validateFlatten: true
+doc.beans.AstValidator.validateJunctions: true
+doc.beans.AstValidator.validateLineage: true
+doc.beans.AstValidator.validateQueryPropertyMarkers: true
+doc.beans.AstValidator.validateReferenceExpressions: true
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.accumuloPassword: "${warehouse.accumulo.password}"
+doc.beans.BaseEventQuery.auditType: "ACTIVE"
+doc.beans.BaseEventQuery.baseIteratorPriority: "${warehouse.defaults.baseIteratorPriority}"
+=======
+doc.beans.BaseEventQuery.accumuloPassword: "${accumulo.user.password}"
+doc.beans.BaseEventQuery.auditType: "NONE"
+doc.beans.BaseEventQuery.baseIteratorPriority: 100
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.checkpointable: "${warehouse.defaults.checkpointable}"
+=======
+>>>>>>> SECOND
+doc.beans.BaseEventQuery.collapseUids: false
+doc.beans.BaseEventQuery.collapseUidsThreshold: -1
+doc.beans.BaseEventQuery.collectQueryMetrics: true
+<<<<<<< FIRST
+=======
+doc.beans.BaseEventQuery.contentFieldNames: "CONTENT"
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.dateIndexTableName: "${warehouse.tables.dateIndex.name}"
+doc.beans.BaseEventQuery.dateIndexThreads: "${warehouse.defaults.dateIndexThreads}"
+=======
+doc.beans.BaseEventQuery.dateIndexTableName: "${table.name.dateIndex}"
+doc.beans.BaseEventQuery.dateIndexThreads: 20
+>>>>>>> SECOND
+doc.beans.BaseEventQuery.defaultDateTypeName: "EVENT"
+doc.beans.BaseEventQuery.disableIndexOnlyDocuments: false
+<<<<<<< FIRST
+=======
+doc.beans.BaseEventQuery.docAggregationThresholdMs: -1
+doc.beans.BaseEventQuery.enricherClassNames: "datawave.query.enrich.DatawaveTermFrequencyEnricher"
+>>>>>>> SECOND
+doc.beans.BaseEventQuery.evaluationOnlyFields: ""
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.eventPerDayThreshold: "${warehouse.defaults.eventPerDayThreshold}"
+doc.beans.BaseEventQuery.finalMaxTermThreshold: "${warehouse.defaults.finalMaxTermThreshold}"
+doc.beans.BaseEventQuery.fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}"
+doc.beans.BaseEventQuery.hdfsSiteConfigURLs: "${warehouse.defaults.hdfsSiteConfigURLs}"
+=======
+doc.beans.BaseEventQuery.eventPerDayThreshold: 40000
+doc.beans.BaseEventQuery.finalMaxTermThreshold: 2000
+doc.beans.BaseEventQuery.fullTableScanEnabled: false
+doc.beans.BaseEventQuery.hdfsSiteConfigURLs: "file:///etc/hadoop/conf/core-site.xml,file:///etc/hadoop/conf/hdfs-site.xml"
+>>>>>>> SECOND
+doc.beans.BaseEventQuery.includeDataTypeAsField: false
+doc.beans.BaseEventQuery.includeHierarchyFields: false
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.indexLookupThreads: "${warehouse.defaults.indexLookupThreads}"
+=======
+doc.beans.BaseEventQuery.indexLookupThreads: 100
+>>>>>>> SECOND
+doc.beans.BaseEventQuery.indexOnlyFilterFunctionsEnabled: false
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.indexTableName: "${warehouse.tables.index.name}"
+doc.beans.BaseEventQuery.initialMaxTermThreshold: "${warehouse.defaults.initialMaxTermThreshold}"
+doc.beans.BaseEventQuery.ivaratorCacheBufferSize: "${warehouse.defaults.ivaratorCacheBufferSize}"
+doc.beans.BaseEventQuery.ivaratorCacheScanPersistThreshold: "${warehouse.defaults.ivaratorCacheScanPersistThreshold}"
+doc.beans.BaseEventQuery.ivaratorCacheScanTimeoutMinutes: "${warehouse.defaults.ivaratorCacheScanTimeoutMinutes}"
+doc.beans.BaseEventQuery.ivaratorFstHdfsBaseURIs: "${warehouse.defaults.ivaratorFstHdfsBaseURIs}"
+doc.beans.BaseEventQuery.ivaratorMaxOpenFiles: "${warehouse.defaults.ivaratorMaxOpenFiles}"
+=======
+doc.beans.BaseEventQuery.indexTableName: "${table.name.shardIndex}"
+doc.beans.BaseEventQuery.initialMaxTermThreshold: 2000
+doc.beans.BaseEventQuery.ivaratorCacheBufferSize: 10000
+doc.beans.BaseEventQuery.ivaratorCacheScanPersistThreshold: 100000
+doc.beans.BaseEventQuery.ivaratorCacheScanTimeoutMinutes: 60
+doc.beans.BaseEventQuery.ivaratorFstHdfsBaseURIs: "hdfs:///IvaratorCache"
+doc.beans.BaseEventQuery.ivaratorMaxOpenFiles: 100
+>>>>>>> SECOND
+<<<<<<< FIRST
+=======
+doc.beans.BaseEventQuery.lazySetMechanismEnabled: false
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.logTimingDetails: true
+=======
+doc.beans.BaseEventQuery.logTimingDetails: false
+>>>>>>> SECOND
+doc.beans.BaseEventQuery.logicDescription: "Retrieve sharded events/documents, leveraging the global index tables as needed"
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.maxConcurrentTasks: 10
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.maxDepthThreshold: "${warehouse.defaults.maxDepthThreshold}"
+doc.beans.BaseEventQuery.maxEvaluationPipelines: "${warehouse.defaults.maxEvaluationPipelines}"
+doc.beans.BaseEventQuery.maxFieldIndexRangeSplit: "${warehouse.defaults.maxFieldIndexRangeSplit}"
+doc.beans.BaseEventQuery.maxIndexScanTimeMillis: "${warehouse.defaults.maxIndexScanTimeMillis}"
+doc.beans.BaseEventQuery.maxIvaratorSources: "${warehouse.defaults.maxIvaratorSources}"
+doc.beans.BaseEventQuery.maxOrExpansionFstThreshold: "${warehouse.defaults.maxOrExpansionFstThreshold}"
+doc.beans.BaseEventQuery.maxOrExpansionThreshold: "${warehouse.defaults.maxOrExpansionThreshold}"
+doc.beans.BaseEventQuery.maxOrRangeThreshold: "${warehouse.defaults.maxOrRangeThreshold}"
+doc.beans.BaseEventQuery.maxPipelineCachedResults: "${warehouse.defaults.maxPipelineCachedResults}"
+=======
+doc.beans.BaseEventQuery.maxDepthThreshold: 2000
+doc.beans.BaseEventQuery.maxEvaluationPipelines: 16
+doc.beans.BaseEventQuery.maxFieldIndexRangeSplit: 16
+doc.beans.BaseEventQuery.maxIndexScanTimeMillis: 3.1536E10
+doc.beans.BaseEventQuery.maxIvaratorSources: 20
+doc.beans.BaseEventQuery.maxOrExpansionFstThreshold: 750
+doc.beans.BaseEventQuery.maxOrExpansionThreshold: 500
+doc.beans.BaseEventQuery.maxOrRangeThreshold: 10
+doc.beans.BaseEventQuery.maxPipelineCachedResults: 16
+>>>>>>> SECOND
+doc.beans.BaseEventQuery.maxResults: -1
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.maxUnfieldedExpansionThreshold: "${warehouse.defaults.maxUnfieldedExpansionThreshold}"
+doc.beans.BaseEventQuery.maxValueExpansionThreshold: "${warehouse.defaults.maxValueExpansionThreshold}"
+doc.beans.BaseEventQuery.metadataTableName: "${warehouse.tables.metadata.name}"
+=======
+doc.beans.BaseEventQuery.maxUnfieldedExpansionThreshold: 50
+doc.beans.BaseEventQuery.maxValueExpansionThreshold: 50
+doc.beans.BaseEventQuery.metadataTableName: "${table.name.metadata}"
+>>>>>>> SECOND
+doc.beans.BaseEventQuery.minimumSelectivity: 0.2
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.modelName: "${warehouse.defaults.modelName}"
+doc.beans.BaseEventQuery.modelTableName: "${warehouse.tables.model.name}"
+=======
+doc.beans.BaseEventQuery.modelName: "DATAWAVE"
+doc.beans.BaseEventQuery.modelTableName: "${table.name.metadata}"
+>>>>>>> SECOND
+<<<<<<< FIRST
+=======
+doc.beans.BaseEventQuery.querySyntaxParsers.JEXL: null
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.queryThreads: "${warehouse.defaults.queryThreads}"
+=======
+doc.beans.BaseEventQuery.queryThreads: 100
+>>>>>>> SECOND
+<<<<<<< FIRST
+=======
+doc.beans.BaseEventQuery.realmSuffixExclusionPatterns: "<.*>$"
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.reverseIndexTableName: "${warehouse.tables.reverseIndex.name}"
+=======
+doc.beans.BaseEventQuery.reverseIndexTableName: "${table.name.shardReverseIndex}"
+>>>>>>> SECOND
+doc.beans.BaseEventQuery.sendTimingToStatsd: false
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.shardsPerDayThreshold: "${warehouse.defaults.shardsPerDayThreshold}"
+doc.beans.BaseEventQuery.statsdHost: "${warehouse.statsd.host}"
+doc.beans.BaseEventQuery.statsdPort: "${warehouse.statsd.port}"
+doc.beans.BaseEventQuery.tableName: "${warehouse.tables.shard.name}"
+=======
+doc.beans.BaseEventQuery.shardsPerDayThreshold: 20
+doc.beans.BaseEventQuery.statsdHost: "localhost"
+doc.beans.BaseEventQuery.statsdPort: 8125
+doc.beans.BaseEventQuery.tableName: "${table.name.shard}"
+>>>>>>> SECOND
+<<<<<<< FIRST
+=======
+doc.beans.BaseEventQuery.tfAggregationThresholdMs: -1
+>>>>>>> SECOND
+doc.beans.BaseEventQuery.useEnrichers: true
+doc.beans.BaseEventQuery.useFilters: false
+<<<<<<< FIRST
+doc.beans.BaseEventQuery.zookeeperConfig: "${warehouse.accumulo.zookeepers}"
+=======
+doc.beans.BaseEventQuery.zookeeperConfig: ""
+>>>>>>> SECOND
+<<<<<<< FIRST
+=======
+doc.beans.BaseModelEventQuery.modelName: "DATAWAVE"
+doc.beans.BaseModelEventQuery.modelTableName: "${table.name.metadata}"
+>>>>>>> SECOND
+doc.beans.ContentQuery.auditType: "NONE"
+<<<<<<< FIRST
+doc.beans.ContentQuery.checkpointable: "${warehouse.defaults.checkpointable}"
+=======
+>>>>>>> SECOND
+doc.beans.ContentQuery.logicDescription: "Query that returns a document given the document identifier"
+doc.beans.ContentQuery.maxResults: -1
+doc.beans.ContentQuery.maxWork: -1
+<<<<<<< FIRST
+doc.beans.ContentQuery.tableName: "${warehouse.tables.shard.name}"
+=======
+doc.beans.ContentQuery.tableName: "${table.name.shard}"
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.CountQuery.checkpointable: "${warehouse.defaults.checkpointable}"
+=======
+>>>>>>> SECOND
+doc.beans.CountQuery.logicDescription: "Retrieve event/document counts based on your search criteria"
+doc.beans.DefaultQueryPlanner.compressOptionMappings: true
+doc.beans.DefaultQueryPlanner[0]: 2611
+doc.beans.DefaultQueryPlanner[1]: true
+doc.beans.DiscoveryQuery.allowLeadingWildcard: true
+doc.beans.DiscoveryQuery.auditType: "NONE"
+<<<<<<< FIRST
+doc.beans.DiscoveryQuery.checkpointable: "${warehouse.defaults.checkpointable}"
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.DiscoveryQuery.fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}"
+doc.beans.DiscoveryQuery.indexTableName: "${warehouse.tables.index.name}"
+=======
+doc.beans.DiscoveryQuery.fullTableScanEnabled: false
+doc.beans.DiscoveryQuery.indexTableName: "${table.name.shardIndex}"
+>>>>>>> SECOND
+doc.beans.DiscoveryQuery.logicDescription: "Discovery query that returns information from the index about the supplied term(s)"
+doc.beans.DiscoveryQuery.maxResults: -1
+doc.beans.DiscoveryQuery.maxWork: -1
+<<<<<<< FIRST
+doc.beans.DiscoveryQuery.metadataTableName: "${warehouse.tables.metadata.name}"
+doc.beans.DiscoveryQuery.modelName: "${warehouse.defaults.modelName}"
+doc.beans.DiscoveryQuery.modelTableName: "${warehouse.tables.model.name}"
+doc.beans.DiscoveryQuery.reverseIndexTableName: "${warehouse.tables.reverseIndex.name}"
+doc.beans.DiscoveryQuery.tableName: "${warehouse.tables.shard.name}"
+=======
+doc.beans.DiscoveryQuery.metadataTableName: "${table.name.metadata}"
+doc.beans.DiscoveryQuery.modelName: "DATAWAVE"
+doc.beans.DiscoveryQuery.modelTableName: "${table.name.metadata}"
+doc.beans.DiscoveryQuery.reverseIndexTableName: "${table.name.shardReverseIndex}"
+doc.beans.DiscoveryQuery.tableName: "${table.name.shardIndex}"
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.EdgeEventQuery.checkpointable: "${warehouse.defaults.checkpointable}"
+=======
+>>>>>>> SECOND
+doc.beans.EdgeEventQuery.edgeModelName: "DATAWAVE_EDGE"
+doc.beans.EdgeEventQuery.logicDescription: "Use results of an EdgeQuery to obtain events/documents that created the given edge"
+<<<<<<< FIRST
+doc.beans.EdgeEventQuery.modelTableName: "${warehouse.tables.model.name}"
+=======
+doc.beans.EdgeEventQuery.modelTableName: "${table.name.metadata}"
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorCountQuery.checkpointable: "${warehouse.defaults.checkpointable}"
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorCountQuery.indexTableName: "${warehouse.errorTables.index.name}"
+=======
+doc.beans.ErrorCountQuery.indexTableName: "${table.name.errors.shardIndex}"
+>>>>>>> SECOND
+doc.beans.ErrorCountQuery.logicDescription: "Retrieve counts of errored events based on your search criteria"
+<<<<<<< FIRST
+doc.beans.ErrorCountQuery.metadataTableName: "${warehouse.errorTables.metadata.name}"
+doc.beans.ErrorCountQuery.reverseIndexTableName: "${warehouse.errorTables.index.name}"
+doc.beans.ErrorCountQuery.tableName: "${warehouse.errorTables.shard.name}"
+=======
+doc.beans.ErrorCountQuery.metadataTableName: "${table.name.errors.metadata}"
+doc.beans.ErrorCountQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}"
+doc.beans.ErrorCountQuery.tableName: "${table.name.errors.shard}"
+>>>>>>> SECOND
+doc.beans.ErrorDiscoveryQuery.allowLeadingWildcard: true
+doc.beans.ErrorDiscoveryQuery.auditType: "NONE"
+<<<<<<< FIRST
+doc.beans.ErrorDiscoveryQuery.checkpointable: "${warehouse.defaults.checkpointable}"
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorDiscoveryQuery.fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}"
+doc.beans.ErrorDiscoveryQuery.indexTableName: "${warehouse.errorTables.index.name}"
+=======
+doc.beans.ErrorDiscoveryQuery.fullTableScanEnabled: false
+doc.beans.ErrorDiscoveryQuery.indexTableName: "${table.name.errors.shardIndex}"
+>>>>>>> SECOND
+doc.beans.ErrorDiscoveryQuery.logicDescription: "Discovery query that returns information from the ingest errors index about the supplied term(s)"
+doc.beans.ErrorDiscoveryQuery.maxResults: -1
+doc.beans.ErrorDiscoveryQuery.maxWork: -1
+<<<<<<< FIRST
+doc.beans.ErrorDiscoveryQuery.metadataTableName: "${warehouse.errorTables.metadata.name}"
+doc.beans.ErrorDiscoveryQuery.modelName: "${warehouse.defaults.modelName}"
+doc.beans.ErrorDiscoveryQuery.modelTableName: "${warehouse.errorTables.model.name}"
+doc.beans.ErrorDiscoveryQuery.reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}"
+doc.beans.ErrorDiscoveryQuery.tableName: "${warehouse.errorTables.shard.name}"
+=======
+doc.beans.ErrorDiscoveryQuery.metadataTableName: "${table.name.errors.metadata}"
+doc.beans.ErrorDiscoveryQuery.modelName: "DATAWAVE"
+doc.beans.ErrorDiscoveryQuery.modelTableName: "${table.name.metadata}"
+doc.beans.ErrorDiscoveryQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}"
+doc.beans.ErrorDiscoveryQuery.tableName: "${table.name.errors.shardIndex}"
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorEventQuery.checkpointable: "${warehouse.defaults.checkpointable}"
+=======
+>>>>>>> SECOND
+doc.beans.ErrorEventQuery.dateIndexTableName: ""
+doc.beans.ErrorEventQuery.includeHierarchyFields: false
+<<<<<<< FIRST
+doc.beans.ErrorEventQuery.indexTableName: "${warehouse.errorTables.index.name}"
+=======
+doc.beans.ErrorEventQuery.indexTableName: "${table.name.errors.shardIndex}"
+>>>>>>> SECOND
+doc.beans.ErrorEventQuery.logicDescription: "Retrieve events/documents that encountered one or more errors during ingest"
+<<<<<<< FIRST
+doc.beans.ErrorEventQuery.metadataTableName: "${warehouse.errorTables.metadata.name}"
+doc.beans.ErrorEventQuery.reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}"
+doc.beans.ErrorEventQuery.tableName: "${warehouse.errorTables.shard.name}"
+=======
+doc.beans.ErrorEventQuery.metadataTableName: "${table.name.errors.metadata}"
+doc.beans.ErrorEventQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}"
+doc.beans.ErrorEventQuery.tableName: "${table.name.errors.shard}"
+>>>>>>> SECOND
+doc.beans.ErrorFieldIndexCountQuery.auditType: "NONE"
+<<<<<<< FIRST
+doc.beans.ErrorFieldIndexCountQuery.checkpointable: false
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.ErrorFieldIndexCountQuery.indexTableName: "${warehouse.errorTables.index.name}"
+=======
+doc.beans.ErrorFieldIndexCountQuery.indexTableName: "${table.name.errors.shardIndex}"
+>>>>>>> SECOND
+doc.beans.ErrorFieldIndexCountQuery.logicDescription: "FieldIndex count query (experimental)"
+doc.beans.ErrorFieldIndexCountQuery.maxResults: -1
+doc.beans.ErrorFieldIndexCountQuery.maxUniqueValues: 20000
+doc.beans.ErrorFieldIndexCountQuery.maxWork: -1
+<<<<<<< FIRST
+doc.beans.ErrorFieldIndexCountQuery.metadataTableName: "${warehouse.errorTables.metadata.name}"
+=======
+doc.beans.ErrorFieldIndexCountQuery.metadataTableName: "${table.name.errors.metadata}"
+>>>>>>> SECOND
+doc.beans.ErrorFieldIndexCountQuery.modelName: "DATAWAVE"
+<<<<<<< FIRST
+doc.beans.ErrorFieldIndexCountQuery.modelTableName: "${warehouse.errorTables.model.name}"
+doc.beans.ErrorFieldIndexCountQuery.queryThreads: "${warehouse.defaults.queryThreads}"
+doc.beans.ErrorFieldIndexCountQuery.reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}"
+doc.beans.ErrorFieldIndexCountQuery.tableName: "${warehouse.errorTables.shard.name}"
+=======
+doc.beans.ErrorFieldIndexCountQuery.modelTableName: "${table.name.metadata}"
+doc.beans.ErrorFieldIndexCountQuery.queryThreads: 100
+doc.beans.ErrorFieldIndexCountQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}"
+doc.beans.ErrorFieldIndexCountQuery.tableName: "${table.name.errors.shard}"
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.EventQuery.checkpointable: "${warehouse.defaults.checkpointable}"
+=======
+>>>>>>> SECOND
+doc.beans.EventQuery.logicDescription: "Query the sharded event/document schema, leveraging the global index tables as needed"
+doc.beans.FacetedQuery.auditType: "NONE"
+<<<<<<< FIRST
+doc.beans.FacetedQuery.checkpointable: "${warehouse.defaults.checkpointable}"
+=======
+>>>>>>> SECOND
+doc.beans.FacetedQuery.facetHashTableName: "datawave.facetHashes"
+doc.beans.FacetedQuery.facetMetadataTableName: "datawave.facetMetadata"
+doc.beans.FacetedQuery.facetTableName: "datawave.facets"
+doc.beans.FacetedQuery.facetedSearchType: "FIELD_VALUE_FACETS"
+<<<<<<< FIRST
+=======
+doc.beans.FacetedQuery.fullTableScanEnabled: false
+>>>>>>> SECOND
+doc.beans.FacetedQuery.logicDescription: "Faceted search over indexed fields, returning aggregate counts for field values"
+doc.beans.FacetedQuery.maximumFacetGrouping: 200
+doc.beans.FacetedQuery.minimumFacet: 1
+<<<<<<< FIRST
+=======
+doc.beans.FacetedQuery.querySyntaxParsers.JEXL: null
+>>>>>>> SECOND
+doc.beans.FacetedQuery.streaming: true
+doc.beans.FieldIndexCountQuery.auditType: "NONE"
+<<<<<<< FIRST
+doc.beans.FieldIndexCountQuery.checkpointable: false
+=======
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.FieldIndexCountQuery.indexTableName: "${warehouse.tables.index.name}"
+=======
+doc.beans.FieldIndexCountQuery.indexTableName: "${table.name.shardIndex}"
+>>>>>>> SECOND
+doc.beans.FieldIndexCountQuery.logicDescription: "Indexed Fields Only: Given FIELDNAME returns counts for each unique value. Given FIELDNAME:FIELDVALUE returns count for only that value."
+doc.beans.FieldIndexCountQuery.maxResults: -1
+doc.beans.FieldIndexCountQuery.maxUniqueValues: 20000
+doc.beans.FieldIndexCountQuery.maxWork: -1
+<<<<<<< FIRST
+doc.beans.FieldIndexCountQuery.metadataTableName: "${warehouse.tables.metadata.name}"
+=======
+doc.beans.FieldIndexCountQuery.metadataTableName: "${table.name.metadata}"
+>>>>>>> SECOND
+doc.beans.FieldIndexCountQuery.modelName: "DATAWAVE"
+<<<<<<< FIRST
+doc.beans.FieldIndexCountQuery.modelTableName: "${warehouse.tables.model.name}"
+doc.beans.FieldIndexCountQuery.queryThreads: "${warehouse.defaults.queryThreads}"
+doc.beans.FieldIndexCountQuery.reverseIndexTableName: "${warehouse.tables.reverseIndex.name}"
+doc.beans.FieldIndexCountQuery.tableName: "${warehouse.tables.shard.name}"
+=======
+doc.beans.FieldIndexCountQuery.modelTableName: "${table.name.metadata}"
+doc.beans.FieldIndexCountQuery.queryThreads: 100
+doc.beans.FieldIndexCountQuery.reverseIndexTableName: "${table.name.shardReverseIndex}"
+doc.beans.FieldIndexCountQuery.tableName: "${table.name.shard}"
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.HitHighlights.accumuloPassword: "${warehouse.accumulo.password}"
+=======
+>>>>>>> SECOND
+doc.beans.HitHighlights.auditType: "NONE"
+<<<<<<< FIRST
+doc.beans.HitHighlights.checkpointable: "${warehouse.defaults.checkpointable}"
+doc.beans.HitHighlights.dateIndexTableName: "${warehouse.tables.dateIndex.name}"
+doc.beans.HitHighlights.defaultDateTypeName: "EVENT"
+=======
+>>>>>>> SECOND
+doc.beans.HitHighlights.eventPerDayThreshold: 40000
+<<<<<<< FIRST
+doc.beans.HitHighlights.finalMaxTermThreshold: "${warehouse.defaults.finalMaxTermThreshold}"
+doc.beans.HitHighlights.fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}"
+doc.beans.HitHighlights.hdfsSiteConfigURLs: "${warehouse.defaults.hdfsSiteConfigURLs}"
+=======
+doc.beans.HitHighlights.finalMaxTermThreshold: 2000
+doc.beans.HitHighlights.fullTableScanEnabled: false
+doc.beans.HitHighlights.hdfsSiteConfigURLs: "file:///etc/hadoop/conf/core-site.xml,file:///etc/hadoop/conf/hdfs-site.xml"
+>>>>>>> SECOND
+doc.beans.HitHighlights.includeDataTypeAsField: false
+doc.beans.HitHighlights.includeGroupingContext: false
+<<<<<<< FIRST
+doc.beans.HitHighlights.indexTableName: "${warehouse.tables.index.name}"
+doc.beans.HitHighlights.initialMaxTermThreshold: "${warehouse.defaults.initialMaxTermThreshold}"
+=======
+doc.beans.HitHighlights.indexTableName: "${table.name.shardIndex}"
+doc.beans.HitHighlights.initialMaxTermThreshold: 2000
+>>>>>>> SECOND
+doc.beans.HitHighlights.ivaratorCacheBufferSize: 10000
+doc.beans.HitHighlights.ivaratorCacheScanPersistThreshold: 100000
+<<<<<<< FIRST
+doc.beans.HitHighlights.ivaratorCacheScanTimeoutMinutes: "${warehouse.defaults.ivaratorCacheScanTimeoutMinutes}"
+doc.beans.HitHighlights.ivaratorFstHdfsBaseURIs: "${warehouse.defaults.ivaratorFstHdfsBaseURIs}"
+doc.beans.HitHighlights.ivaratorMaxOpenFiles: "${warehouse.defaults.ivaratorMaxOpenFiles}"
+=======
+doc.beans.HitHighlights.ivaratorCacheScanTimeoutMinutes: 60
+doc.beans.HitHighlights.ivaratorFstHdfsBaseURIs: "hdfs:///IvaratorCache"
+doc.beans.HitHighlights.ivaratorMaxOpenFiles: 100
+>>>>>>> SECOND
+doc.beans.HitHighlights.logicDescription: "Fast boolean query over indexed fields, only returning fields queried on"
+<<<<<<< FIRST
+doc.beans.HitHighlights.maxDepthThreshold: "${warehouse.defaults.maxDepthThreshold}"
+doc.beans.HitHighlights.maxEvaluationPipelines: "${warehouse.defaults.maxEvaluationPipelines}"
+doc.beans.HitHighlights.maxFieldIndexRangeSplit: "${warehouse.defaults.maxFieldIndexRangeSplit}"
+doc.beans.HitHighlights.maxOrExpansionFstThreshold: "${warehouse.defaults.maxOrExpansionFstThreshold}"
+doc.beans.HitHighlights.maxOrExpansionThreshold: "${warehouse.defaults.maxOrExpansionThreshold}"
+doc.beans.HitHighlights.maxOrRangeIvarators: "${warehouse.defaults.maxOrRangeIvarators}"
+doc.beans.HitHighlights.maxOrRangeThreshold: "${warehouse.defaults.maxOrRangeThreshold}"
+doc.beans.HitHighlights.maxPipelineCachedResults: "${warehouse.defaults.maxPipelineCachedResults}"
+doc.beans.HitHighlights.maxRangesPerRangeIvarator: "${warehouse.defaults.maxRangesPerRangeIvarator}"
+doc.beans.HitHighlights.maxUnfieldedExpansionThreshold: "${warehouse.defaults.maxUnfieldedExpansionThreshold}"
+doc.beans.HitHighlights.maxValueExpansionThreshold: "${warehouse.defaults.maxValueExpansionThreshold}"
+doc.beans.HitHighlights.metadataTableName: "${warehouse.tables.metadata.name}"
+=======
+doc.beans.HitHighlights.maxDepthThreshold: 2000
+doc.beans.HitHighlights.maxEvaluationPipelines: 16
+doc.beans.HitHighlights.maxFieldIndexRangeSplit: 16
+doc.beans.HitHighlights.maxOrExpansionFstThreshold: 750
+doc.beans.HitHighlights.maxOrExpansionThreshold: 500
+doc.beans.HitHighlights.maxOrRangeIvarators: 10
+doc.beans.HitHighlights.maxOrRangeThreshold: 10
+doc.beans.HitHighlights.maxPipelineCachedResults: 16
+doc.beans.HitHighlights.maxRangesPerRangeIvarator: 5
+doc.beans.HitHighlights.maxUnfieldedExpansionThreshold: 50
+doc.beans.HitHighlights.maxValueExpansionThreshold: 50
+doc.beans.HitHighlights.metadataTableName: "${table.name.metadata}"
+>>>>>>> SECOND
+doc.beans.HitHighlights.minimumSelectivity: 0.2
+<<<<<<< FIRST
+=======
+doc.beans.HitHighlights.querySyntaxParsers.JEXL: null
+>>>>>>> SECOND
+<<<<<<< FIRST
+doc.beans.HitHighlights.queryThreads: "${warehouse.defaults.indexLookupThreads}"
+doc.beans.HitHighlights.reverseIndexTableName: "${warehouse.tables.reverseIndex.name}"
+doc.beans.HitHighlights.shardsPerDayThreshold: "${warehouse.defaults.shardsPerDayThreshold}"
+doc.beans.HitHighlights.tableName: "${warehouse.tables.shard.name}"
+=======
+doc.beans.HitHighlights.queryThreads: 100
+doc.beans.HitHighlights.reverseIndexTableName: "${table.name.shardReverseIndex}"
+doc.beans.HitHighlights.shardsPerDayThreshold: 20
+doc.beans.HitHighlights.tableName: 
"${table.name.shard}" +>>>>>>> SECOND +doc.beans.HitHighlights.useEnrichers: false +<<<<<<< FIRST +doc.beans.HitHighlights.zookeeperConfig: "${warehouse.accumulo.zookeepers}" +======= +doc.beans.HitHighlights.zookeeperConfig: "" +>>>>>>> SECOND +<<<<<<< FIRST +======= +doc.beans.IdTranslatorConfiguration.beginDate: 20100101 +doc.beans.IdTranslatorConfiguration.columnVisibility: "" +>>>>>>> SECOND +doc.beans.IndexStatsQuery.auditType: "NONE" +doc.beans.IndexStatsQuery.selectorExtractor: null +<<<<<<< FIRST +doc.beans.InternalQueryMetricsQuery.collectQueryMetrics: false +======= +doc.beans.LookupUUIDConfiguration.beginDate: 20100101 +doc.beans.LookupUUIDConfiguration.columnVisibility: "" +>>>>>>> SECOND +doc.beans.LuceneUUIDEventQuery.auditType: "NONE" +<<<<<<< FIRST +doc.beans.LuceneUUIDEventQuery.checkpointable: "${warehouse.defaults.checkpointable}" +======= +>>>>>>> SECOND +doc.beans.LuceneUUIDEventQuery.logicDescription: "Composite query logic that retrieves records from the event and error tables, based on known UUID fields, ie, those configured via UUIDTypeList in QueryLogicFactory.xml" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.auditType: "NONE" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.connPoolName: "UUID" +<<<<<<< FIRST +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.dateIndexTableName: "${warehouse.errorTables.dateIndex.name}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.indexTableName: "${warehouse.errorTables.index.name}" +======= +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.dateIndexTableName: "" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.indexTableName: "${table.name.errors.shardIndex}" +>>>>>>> SECOND +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.logicDescription: "Lucene query for event/document UUIDs for events that encountered errors at ingest time" +<<<<<<< FIRST +======= +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.mandatoryQuerySyntax: "LUCENE-UUID" +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.metadataTableName: "${warehouse.errorTables.metadata.name}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.tableName: "${warehouse.errorTables.shard.name}" +======= +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.metadataTableName: "${table.name.errors.metadata}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.tableName: "${table.name.errors.shard}" +>>>>>>> SECOND +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.auditType: "NONE" +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.connPoolName: "UUID" +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.logicDescription: "Lucene query for event/document UUIDs" +<<<<<<< FIRST +======= +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.mandatoryQuerySyntax: "LUCENE-UUID" +>>>>>>> SECOND +doc.beans.LuceneUUIDEventQuery.selectorExtractor: null +<<<<<<< FIRST +======= +doc.beans.MyRemoteUserOps.queryServiceHost: "localhost" +doc.beans.MyRemoteUserOps.queryServicePort: 8443 +doc.beans.MyRemoteUserOps.queryServiceScheme: "https" +doc.beans.MyRemoteUserOps.queryServiceURI: "/DataWave/Security/User/" +doc.beans.QueryLogicFactoryConfiguration.maxPageSize: 10000 
+doc.beans.QueryLogicFactoryConfiguration.pageByteTrigger: 0 +>>>>>>> SECOND +doc.beans.QueryMetricsQuery.auditType: "NONE" +<<<<<<< FIRST +doc.beans.QueryMetricsQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.QueryMetricsQuery.collectQueryMetrics: true +======= +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.QueryMetricsQuery.dateIndexTableName: "${warehouse.metricTables.dateIndex.name}" +======= +doc.beans.QueryMetricsQuery.dateIndexTableName: "${table.name.queryMetrics.dateIndex}" +>>>>>>> SECOND +doc.beans.QueryMetricsQuery.includeHierarchyFields: false +<<<<<<< FIRST +doc.beans.QueryMetricsQuery.indexTableName: "${warehouse.metricTables.index.name}" +======= +doc.beans.QueryMetricsQuery.indexTableName: "${table.name.queryMetrics.shardIndex}" +>>>>>>> SECOND +doc.beans.QueryMetricsQuery.logicDescription: "Retrieve query metrics based on the given search term(s)" +<<<<<<< FIRST +doc.beans.QueryMetricsQuery.metadataTableName: "${warehouse.metricTables.metadata.name}" +======= +doc.beans.QueryMetricsQuery.metadataTableName: "${table.name.queryMetrics.metadata}" +>>>>>>> SECOND +doc.beans.QueryMetricsQuery.modelName: "NONE" +<<<<<<< FIRST +doc.beans.QueryMetricsQuery.modelTableName: "${warehouse.metricTables.model.name}" +doc.beans.QueryMetricsQuery.reverseIndexTableName: "${warehouse.metricTables.reverseIndex.name}" +doc.beans.QueryMetricsQuery.tableName: "${warehouse.metricTables.shard.name}" +======= +doc.beans.QueryMetricsQuery.modelTableName: "${table.name.queryMetrics.metadata}" +doc.beans.QueryMetricsQuery.reverseIndexTableName: "${table.name.queryMetrics.shardReverseIndex}" +doc.beans.QueryMetricsQuery.tableName: "${table.name.queryMetrics.shard}" +>>>>>>> SECOND +<<<<<<< FIRST +======= +doc.beans.RemoteEventQuery.auditType: "NONE" +doc.beans.RemoteEventQuery.checkpointable: false +doc.beans.RemoteEventQuery.logicDescription: "Retrieve sharded events/documents, leveraging the global index tables as needed" +doc.beans.RemoteEventQuery.maxResults: -1 +doc.beans.RemoteEventQuery.remoteQueryLogic: "EventQuery" +doc.beans.RemoteEventQuery.tableName: "${table.name.shard}" +doc.beans.RemoteQueryService.queryServiceHost: "query" +doc.beans.RemoteQueryService.queryServicePort: 8443 +doc.beans.RemoteQueryService.queryServiceScheme: "https" +doc.beans.RemoteQueryService.queryServiceURI: "/query/v1/" +doc.beans.TLDEventQuery.collapseUids: false +doc.beans.TLDEventQuery.indexFilteringClassNames: "datawave.query.function.NormalizedVersionPredicate" +>>>>>>> SECOND +doc.beans.TermFrequencyQuery.auditType: "NONE" +doc.beans.TermFrequencyQuery.logicDescription: "Query that returns data from the term frequency query table" +doc.beans.TermFrequencyQuery.maxResults: -1 +<<<<<<< FIRST +doc.beans.TermFrequencyQuery.maxWork: -14 +doc.beans.TermFrequencyQuery.tableName: "${warehouse.tables.shard.name}" +======= +doc.beans.TermFrequencyQuery.maxWork: -1 +doc.beans.TermFrequencyQuery.tableName: "${table.name.shard}" +>>>>>>> SECOND +<<<<<<< FIRST +======= +doc.beans.TimedVisitorManager.debugEnabled: false +doc.beans.TimedVisitorManager.validateAst: false +>>>>>>> SECOND +doc.beans.TokenizedLuceneToJexlQueryParser.tokenizeUnfieldedQueries: true +<<<<<<< FIRST +======= +doc.beans.skipTokenizeFields: "DOMETA" +doc.beans.tokenizeFields: "CONTENT" +>>>>>>> SECOND + +Refs (key: ref) +---------------------------------------- +<<<<<<< FIRST +doc.beans.BaseEventQuery.contentFieldNames: baseEventQueryContentFieldNames +======= +>>>>>>> SECOND +doc.beans.BaseEventQuery.dateIndexHelperFactory: 
dateIndexHelperFactory +<<<<<<< FIRST +doc.beans.BaseEventQuery.enricherClassNames: baseEventQueryEnricherClassNames +======= +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.BaseEventQuery.eventQueryDataDecoratorTransformer: baseEventQueryEventQueryDataDecoratorTransformer +======= +doc.beans.BaseEventQuery.eventQueryDataDecoratorTransformer: EventQueryDataDecoratorTransformer +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.BaseEventQuery.filterClassNames: baseEventQueryFilterClassNames +======= +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.BaseEventQuery.filterOptions: baseEventQueryFilterOptions +doc.beans.BaseEventQuery.hierarchyFieldOptions: baseEventQueryHierarchyFieldOptions +doc.beans.BaseEventQuery.ivaratorCacheDirConfigs: baseEventQueryIvaratorCacheDirConfigs +======= +doc.beans.BaseEventQuery.filterOptions: BaseEventQueryFilterOptions +doc.beans.BaseEventQuery.hierarchyFieldOptions: BaseEventQueryHierarchyFieldOptions +doc.beans.BaseEventQuery.ivaratorCacheDirConfigs: IvaratorCacheDirConfigs +>>>>>>> SECOND +doc.beans.BaseEventQuery.metadataHelperFactory: metadataHelperFactory +doc.beans.BaseEventQuery.queryPlanner: DefaultQueryPlanner +<<<<<<< FIRST +doc.beans.BaseEventQuery.querySyntaxParsers: baseEventQuerySyntaxParsers +doc.beans.BaseEventQuery.realmSuffixExclusionPatterns: baseEventQueryRealmSuffixExclusionPatterns +doc.beans.BaseEventQuery.requiredRoles: baseEventQueryRequiredRoles +======= +>>>>>>> SECOND +doc.beans.BaseEventQuery.selectorExtractor: DatawaveSelectorExtractor +<<<<<<< FIRST +======= +doc.beans.DatawaveSelectorExtractor.luceneToJexlQueryParser: LuceneToJexlQueryParser +>>>>>>> SECOND +doc.beans.DefaultQueryPlanner.queryModelProviderFactory: queryModelProviderFactory +<<<<<<< FIRST +======= +doc.beans.DefaultQueryPlanner.visitorManager: TimedVisitorManager +>>>>>>> SECOND +doc.beans.DiscoveryQuery.metadataHelperFactory: metadataHelperFactory +doc.beans.DiscoveryQuery.selectorExtractor: DatawaveSelectorExtractor +doc.beans.EdgeEventQuery.edgeDictionaryProvider: edgeDictionaryProvider +<<<<<<< FIRST +doc.beans.EdgeEventQuery.edgeModelFieldsFactory: edgeModelFieldsFactory +======= +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.EdgeEventQuery.edgeQueryModel: edgeQueryModel +======= +doc.beans.EdgeEventQuery.edgeQueryModel: edgeEventQueryModel +>>>>>>> SECOND +doc.beans.ErrorDiscoveryQuery.metadataHelperFactory: metadataHelperFactory +doc.beans.ErrorDiscoveryQuery.selectorExtractor: DatawaveSelectorExtractor +doc.beans.ErrorFieldIndexCountQuery.metadataHelperFactory: metadataHelperFactory +doc.beans.ErrorFieldIndexCountQuery.selectorExtractor: DatawaveSelectorExtractor +<<<<<<< FIRST +doc.beans.FacetedQuery.querySyntaxParsers: facetedQuerySyntaxParsers +======= +>>>>>>> SECOND +doc.beans.FieldIndexCountQuery.metadataHelperFactory: metadataHelperFactory +doc.beans.FieldIndexCountQuery.selectorExtractor: DatawaveSelectorExtractor +<<<<<<< FIRST +doc.beans.HitHighlights.dateIndexHelperFactory: dateIndexHelperFactory +======= +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.HitHighlights.ivaratorCacheDirConfigs: hitHighlightsIvaratorCacheDirConfigs +======= +doc.beans.HitHighlights.ivaratorCacheDirConfigs: IvaratorCacheDirConfigs +>>>>>>> SECOND +doc.beans.HitHighlights.metadataHelperFactory: metadataHelperFactory +<<<<<<< FIRST +doc.beans.HitHighlights.querySyntaxParsers: hitHighlightsSyntaxParsers +======= +>>>>>>> SECOND +doc.beans.HitHighlights.selectorExtractor: DatawaveSelectorExtractor +<<<<<<< FIRST +doc.beans.InternalQueryMetricsQuery.requiredRoles: 
internalQueryMetricsQueryRequiredRoles +======= +doc.beans.IdTranslatorConfiguration.uuidTypes: UUIDTypeList +doc.beans.LookupUUIDConfiguration.uuidTypes: UUIDTypeList +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.LuceneToJexlQueryParser.allowedFunctions: allowedFunctions +doc.beans.LuceneToJexlQueryParser.skipTokenizeUnfieldedFields: skipTokenizeUnfieldedFields +doc.beans.LuceneToJexlUUIDQueryParser.allowedFunctions: allowedFunctions +doc.beans.LuceneToJexlUUIDQueryParser.uuidTypes: uuidTypes +======= +doc.beans.LuceneToJexlQueryParser.allowedFunctions: allowedQueryFunctions +doc.beans.LuceneToJexlQueryParser.skipTokenizeUnfieldedFields: skipTokenizeFields +doc.beans.LuceneToJexlUUIDQueryParser.allowedFunctions: allowedQueryFunctions +doc.beans.LuceneToJexlUUIDQueryParser.uuidTypes: UUIDTypeList +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.mandatoryQuerySyntax: luceneUUIDEventQueryErrorEventQueryMandatoryQuerySyntax +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.mandatoryQuerySyntax: luceneUUIDEventQueryEventQueryMandatoryQuerySyntax +======= +doc.beans.MyRemoteUserOps.responseObjectFactory: responseObjectFactory +>>>>>>> SECOND +doc.beans.QueryMetricsQuery.metadataHelperFactory: metadataHelperFactory +<<<<<<< FIRST +======= +doc.beans.RemoteEventQuery.remoteQueryService: RemoteQueryService +doc.beans.RemoteEventQuery.selectorExtractor: DatawaveSelectorExtractor +doc.beans.RemoteEventQuery.userOperations: MyRemoteUserOps +doc.beans.RemoteQueryService.responseObjectFactory: responseObjectFactory +doc.beans.TimedVisitorManager.validator: AstValidator +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.TokenizedLuceneToJexlQueryParser.allowedFunctions: allowedFunctions +doc.beans.TokenizedLuceneToJexlQueryParser.skipTokenizeUnfieldedFields: skipTokenizeUnfieldedFields +doc.beans.TokenizedLuceneToJexlQueryParser.tokenizedFields: tokenizedFields +======= +doc.beans.TokenizedLuceneToJexlQueryParser.allowedFunctions: allowedQueryFunctions +doc.beans.TokenizedLuceneToJexlQueryParser.skipTokenizeUnfieldedFields: skipTokenizeFields +doc.beans.TokenizedLuceneToJexlQueryParser.tokenizedFields: tokenizeFields +>>>>>>> SECOND +<<<<<<< FIRST +doc.beans.allowedFunctions.parser: LuceneToJexlQueryParser +======= +doc.beans.allowedQueryFunctions.parser: LuceneToJexlQueryParser +>>>>>>> SECOND +doc.beans.baseQueryLogic.markingFunctions: markingFunctions +doc.beans.baseQueryLogic.responseObjectFactory: responseObjectFactory +<<<<<<< FIRST +======= +doc.beans.queryLogicFactory.queryLogicFactoryConfiguration: QueryLogicFactoryConfiguration +>>>>>>> SECOND \ No newline at end of file diff --git a/microservices/configcheck/src/test/resources/rendered/microservice/QueryLogicFactory.xml b/microservices/configcheck/src/test/resources/rendered/microservice/QueryLogicFactory.xml new file mode 100644 index 00000000000..a28dad51d0f --- /dev/null +++ b/microservices/configcheck/src/test/resources/rendered/microservice/QueryLogicFactory.xml @@ -0,0 +1,552 @@ [The 552 added lines of Spring XML were lost to markup stripping when this diff was captured; only two literal values survive, "2611" and "true", which analysis.txt below records as doc.beans.DefaultQueryPlanner[0] and doc.beans.DefaultQueryPlanner[1].]
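(Editorial sketch, not part of the diff.) The surviving literals line up with the DefaultQueryPlanner entries in analysis.txt, which records them as positional constructor arguments (doc.beans.DefaultQueryPlanner[0] and [1]) alongside doc.beans.DefaultQueryPlanner.compressOptionMappings: true. A minimal sketch of what the lost bean definition plausibly looked like, assuming standard Spring XML syntax; the bean id, class name, and attribute layout are inferred from those analysis.txt keys, not recovered from the file itself:

<!-- Hypothetical reconstruction inferred from analysis.txt keys; not the file's actual text -->
<bean id="DefaultQueryPlanner" class="datawave.query.planner.DefaultQueryPlanner">
    <!-- The two literals that survived extraction, surfaced by configcheck as
         doc.beans.DefaultQueryPlanner[0] and doc.beans.DefaultQueryPlanner[1] -->
    <constructor-arg value="2611"/>
    <constructor-arg value="true"/>
    <!-- Surfaced as doc.beans.DefaultQueryPlanner.compressOptionMappings -->
    <property name="compressOptionMappings" value="true"/>
</bean>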
diff --git a/microservices/configcheck/src/test/resources/rendered/microservice/analysis.txt b/microservices/configcheck/src/test/resources/rendered/microservice/analysis.txt new file mode 100644 index 00000000000..1601bee63b7 --- /dev/null +++ b/microservices/configcheck/src/test/resources/rendered/microservice/analysis.txt @@ -0,0 +1,225 @@ +Values (key: value) +---------------------------------------- +doc.beans.BaseEventQuery.accumuloPassword: "${warehouse.accumulo.password}" +doc.beans.BaseEventQuery.auditType: "ACTIVE" +doc.beans.BaseEventQuery.baseIteratorPriority: "${warehouse.defaults.baseIteratorPriority}" +doc.beans.BaseEventQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.BaseEventQuery.collapseUids: false +doc.beans.BaseEventQuery.collapseUidsThreshold: -1 +doc.beans.BaseEventQuery.collectQueryMetrics: true +doc.beans.BaseEventQuery.dateIndexTableName: "${warehouse.tables.dateIndex.name}" +doc.beans.BaseEventQuery.dateIndexThreads: "${warehouse.defaults.dateIndexThreads}" +doc.beans.BaseEventQuery.defaultDateTypeName: "EVENT" +doc.beans.BaseEventQuery.disableIndexOnlyDocuments: false +doc.beans.BaseEventQuery.evaluationOnlyFields: "" +doc.beans.BaseEventQuery.eventPerDayThreshold: "${warehouse.defaults.eventPerDayThreshold}" +doc.beans.BaseEventQuery.finalMaxTermThreshold: "${warehouse.defaults.finalMaxTermThreshold}" +doc.beans.BaseEventQuery.fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}" +doc.beans.BaseEventQuery.hdfsSiteConfigURLs: "${warehouse.defaults.hdfsSiteConfigURLs}" +doc.beans.BaseEventQuery.includeDataTypeAsField: false +doc.beans.BaseEventQuery.includeHierarchyFields: false +doc.beans.BaseEventQuery.indexLookupThreads: "${warehouse.defaults.indexLookupThreads}" +doc.beans.BaseEventQuery.indexOnlyFilterFunctionsEnabled: false +doc.beans.BaseEventQuery.indexTableName: "${warehouse.tables.index.name}" +doc.beans.BaseEventQuery.initialMaxTermThreshold: "${warehouse.defaults.initialMaxTermThreshold}" +doc.beans.BaseEventQuery.ivaratorCacheBufferSize: "${warehouse.defaults.ivaratorCacheBufferSize}" +doc.beans.BaseEventQuery.ivaratorCacheScanPersistThreshold: "${warehouse.defaults.ivaratorCacheScanPersistThreshold}" +doc.beans.BaseEventQuery.ivaratorCacheScanTimeoutMinutes: "${warehouse.defaults.ivaratorCacheScanTimeoutMinutes}" +doc.beans.BaseEventQuery.ivaratorFstHdfsBaseURIs: "${warehouse.defaults.ivaratorFstHdfsBaseURIs}" +doc.beans.BaseEventQuery.ivaratorMaxOpenFiles: "${warehouse.defaults.ivaratorMaxOpenFiles}" +doc.beans.BaseEventQuery.logTimingDetails: true +doc.beans.BaseEventQuery.logicDescription: "Retrieve sharded events/documents, leveraging the global index tables as needed" +doc.beans.BaseEventQuery.maxConcurrentTasks: 10 +doc.beans.BaseEventQuery.maxDepthThreshold: "${warehouse.defaults.maxDepthThreshold}" +doc.beans.BaseEventQuery.maxEvaluationPipelines: "${warehouse.defaults.maxEvaluationPipelines}" +doc.beans.BaseEventQuery.maxFieldIndexRangeSplit:
"${warehouse.defaults.maxFieldIndexRangeSplit}" +doc.beans.BaseEventQuery.maxIndexScanTimeMillis: "${warehouse.defaults.maxIndexScanTimeMillis}" +doc.beans.BaseEventQuery.maxIvaratorSources: "${warehouse.defaults.maxIvaratorSources}" +doc.beans.BaseEventQuery.maxOrExpansionFstThreshold: "${warehouse.defaults.maxOrExpansionFstThreshold}" +doc.beans.BaseEventQuery.maxOrExpansionThreshold: "${warehouse.defaults.maxOrExpansionThreshold}" +doc.beans.BaseEventQuery.maxOrRangeThreshold: "${warehouse.defaults.maxOrRangeThreshold}" +doc.beans.BaseEventQuery.maxPipelineCachedResults: "${warehouse.defaults.maxPipelineCachedResults}" +doc.beans.BaseEventQuery.maxResults: -1 +doc.beans.BaseEventQuery.maxUnfieldedExpansionThreshold: "${warehouse.defaults.maxUnfieldedExpansionThreshold}" +doc.beans.BaseEventQuery.maxValueExpansionThreshold: "${warehouse.defaults.maxValueExpansionThreshold}" +doc.beans.BaseEventQuery.metadataTableName: "${warehouse.tables.metadata.name}" +doc.beans.BaseEventQuery.minimumSelectivity: 0.2 +doc.beans.BaseEventQuery.modelName: "${warehouse.defaults.modelName}" +doc.beans.BaseEventQuery.modelTableName: "${warehouse.tables.model.name}" +doc.beans.BaseEventQuery.queryThreads: "${warehouse.defaults.queryThreads}" +doc.beans.BaseEventQuery.reverseIndexTableName: "${warehouse.tables.reverseIndex.name}" +doc.beans.BaseEventQuery.sendTimingToStatsd: false +doc.beans.BaseEventQuery.shardsPerDayThreshold: "${warehouse.defaults.shardsPerDayThreshold}" +doc.beans.BaseEventQuery.statsdHost: "${warehouse.statsd.host}" +doc.beans.BaseEventQuery.statsdPort: "${warehouse.statsd.port}" +doc.beans.BaseEventQuery.tableName: "${warehouse.tables.shard.name}" +doc.beans.BaseEventQuery.useEnrichers: true +doc.beans.BaseEventQuery.useFilters: false +doc.beans.BaseEventQuery.zookeeperConfig: "${warehouse.accumulo.zookeepers}" +doc.beans.ContentQuery.auditType: "NONE" +doc.beans.ContentQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.ContentQuery.logicDescription: "Query that returns a document given the document identifier" +doc.beans.ContentQuery.maxResults: -1 +doc.beans.ContentQuery.maxWork: -1 +doc.beans.ContentQuery.tableName: "${warehouse.tables.shard.name}" +doc.beans.CountQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.CountQuery.logicDescription: "Retrieve event/document counts based on your search criteria" +doc.beans.DefaultQueryPlanner.compressOptionMappings: true +doc.beans.DefaultQueryPlanner[0]: 2611 +doc.beans.DefaultQueryPlanner[1]: true +doc.beans.DiscoveryQuery.allowLeadingWildcard: true +doc.beans.DiscoveryQuery.auditType: "NONE" +doc.beans.DiscoveryQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.DiscoveryQuery.fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}" +doc.beans.DiscoveryQuery.indexTableName: "${warehouse.tables.index.name}" +doc.beans.DiscoveryQuery.logicDescription: "Discovery query that returns information from the index about the supplied term(s)" +doc.beans.DiscoveryQuery.maxResults: -1 +doc.beans.DiscoveryQuery.maxWork: -1 +doc.beans.DiscoveryQuery.metadataTableName: "${warehouse.tables.metadata.name}" +doc.beans.DiscoveryQuery.modelName: "${warehouse.defaults.modelName}" +doc.beans.DiscoveryQuery.modelTableName: "${warehouse.tables.model.name}" +doc.beans.DiscoveryQuery.reverseIndexTableName: "${warehouse.tables.reverseIndex.name}" +doc.beans.DiscoveryQuery.tableName: "${warehouse.tables.shard.name}" +doc.beans.EdgeEventQuery.checkpointable: "${warehouse.defaults.checkpointable}" 
+doc.beans.EdgeEventQuery.edgeModelName: "DATAWAVE_EDGE" +doc.beans.EdgeEventQuery.logicDescription: "Use results of an EdgeQuery to obtain events/documents that created the given edge" +doc.beans.EdgeEventQuery.modelTableName: "${warehouse.tables.model.name}" +doc.beans.ErrorCountQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.ErrorCountQuery.indexTableName: "${warehouse.errorTables.index.name}" +doc.beans.ErrorCountQuery.logicDescription: "Retrieve counts of errored events based on your search criteria" +doc.beans.ErrorCountQuery.metadataTableName: "${warehouse.errorTables.metadata.name}" +doc.beans.ErrorCountQuery.reverseIndexTableName: "${warehouse.errorTables.index.name}" +doc.beans.ErrorCountQuery.tableName: "${warehouse.errorTables.shard.name}" +doc.beans.ErrorDiscoveryQuery.allowLeadingWildcard: true +doc.beans.ErrorDiscoveryQuery.auditType: "NONE" +doc.beans.ErrorDiscoveryQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.ErrorDiscoveryQuery.fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}" +doc.beans.ErrorDiscoveryQuery.indexTableName: "${warehouse.errorTables.index.name}" +doc.beans.ErrorDiscoveryQuery.logicDescription: "Discovery query that returns information from the ingest errors index about the supplied term(s)" +doc.beans.ErrorDiscoveryQuery.maxResults: -1 +doc.beans.ErrorDiscoveryQuery.maxWork: -1 +doc.beans.ErrorDiscoveryQuery.metadataTableName: "${warehouse.errorTables.metadata.name}" +doc.beans.ErrorDiscoveryQuery.modelName: "${warehouse.defaults.modelName}" +doc.beans.ErrorDiscoveryQuery.modelTableName: "${warehouse.errorTables.model.name}" +doc.beans.ErrorDiscoveryQuery.reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}" +doc.beans.ErrorDiscoveryQuery.tableName: "${warehouse.errorTables.shard.name}" +doc.beans.ErrorEventQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.ErrorEventQuery.dateIndexTableName: "" +doc.beans.ErrorEventQuery.includeHierarchyFields: false +doc.beans.ErrorEventQuery.indexTableName: "${warehouse.errorTables.index.name}" +doc.beans.ErrorEventQuery.logicDescription: "Retrieve events/documents that encountered one or more errors during ingest" +doc.beans.ErrorEventQuery.metadataTableName: "${warehouse.errorTables.metadata.name}" +doc.beans.ErrorEventQuery.reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}" +doc.beans.ErrorEventQuery.tableName: "${warehouse.errorTables.shard.name}" +doc.beans.ErrorFieldIndexCountQuery.auditType: "NONE" +doc.beans.ErrorFieldIndexCountQuery.checkpointable: false +doc.beans.ErrorFieldIndexCountQuery.indexTableName: "${warehouse.errorTables.index.name}" +doc.beans.ErrorFieldIndexCountQuery.logicDescription: "FieldIndex count query (experimental)" +doc.beans.ErrorFieldIndexCountQuery.maxResults: -1 +doc.beans.ErrorFieldIndexCountQuery.maxUniqueValues: 20000 +doc.beans.ErrorFieldIndexCountQuery.maxWork: -1 +doc.beans.ErrorFieldIndexCountQuery.metadataTableName: "${warehouse.errorTables.metadata.name}" +doc.beans.ErrorFieldIndexCountQuery.modelName: "DATAWAVE" +doc.beans.ErrorFieldIndexCountQuery.modelTableName: "${warehouse.errorTables.model.name}" +doc.beans.ErrorFieldIndexCountQuery.queryThreads: "${warehouse.defaults.queryThreads}" +doc.beans.ErrorFieldIndexCountQuery.reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}" +doc.beans.ErrorFieldIndexCountQuery.tableName: "${warehouse.errorTables.shard.name}" +doc.beans.EventQuery.checkpointable: "${warehouse.defaults.checkpointable}" 
+doc.beans.EventQuery.logicDescription: "Query the sharded event/document schema, leveraging the global index tables as needed" +doc.beans.FacetedQuery.auditType: "NONE" +doc.beans.FacetedQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.FacetedQuery.facetHashTableName: "datawave.facetHashes" +doc.beans.FacetedQuery.facetMetadataTableName: "datawave.facetMetadata" +doc.beans.FacetedQuery.facetTableName: "datawave.facets" +doc.beans.FacetedQuery.facetedSearchType: "FIELD_VALUE_FACETS" +doc.beans.FacetedQuery.logicDescription: "Faceted search over indexed fields, returning aggregate counts for field values" +doc.beans.FacetedQuery.maximumFacetGrouping: 200 +doc.beans.FacetedQuery.minimumFacet: 1 +doc.beans.FacetedQuery.streaming: true +doc.beans.FieldIndexCountQuery.auditType: "NONE" +doc.beans.FieldIndexCountQuery.checkpointable: false +doc.beans.FieldIndexCountQuery.indexTableName: "${warehouse.tables.index.name}" +doc.beans.FieldIndexCountQuery.logicDescription: "Indexed Fields Only: Given FIELDNAME returns counts for each unique value. Given FIELDNAME:FIELDVALUE returns count for only that value." +doc.beans.FieldIndexCountQuery.maxResults: -1 +doc.beans.FieldIndexCountQuery.maxUniqueValues: 20000 +doc.beans.FieldIndexCountQuery.maxWork: -1 +doc.beans.FieldIndexCountQuery.metadataTableName: "${warehouse.tables.metadata.name}" +doc.beans.FieldIndexCountQuery.modelName: "DATAWAVE" +doc.beans.FieldIndexCountQuery.modelTableName: "${warehouse.tables.model.name}" +doc.beans.FieldIndexCountQuery.queryThreads: "${warehouse.defaults.queryThreads}" +doc.beans.FieldIndexCountQuery.reverseIndexTableName: "${warehouse.tables.reverseIndex.name}" +doc.beans.FieldIndexCountQuery.tableName: "${warehouse.tables.shard.name}" +doc.beans.HitHighlights.accumuloPassword: "${warehouse.accumulo.password}" +doc.beans.HitHighlights.auditType: "NONE" +doc.beans.HitHighlights.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.HitHighlights.dateIndexTableName: "${warehouse.tables.dateIndex.name}" +doc.beans.HitHighlights.defaultDateTypeName: "EVENT" +doc.beans.HitHighlights.eventPerDayThreshold: 40000 +doc.beans.HitHighlights.finalMaxTermThreshold: "${warehouse.defaults.finalMaxTermThreshold}" +doc.beans.HitHighlights.fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}" +doc.beans.HitHighlights.hdfsSiteConfigURLs: "${warehouse.defaults.hdfsSiteConfigURLs}" +doc.beans.HitHighlights.includeDataTypeAsField: false +doc.beans.HitHighlights.includeGroupingContext: false +doc.beans.HitHighlights.indexTableName: "${warehouse.tables.index.name}" +doc.beans.HitHighlights.initialMaxTermThreshold: "${warehouse.defaults.initialMaxTermThreshold}" +doc.beans.HitHighlights.ivaratorCacheBufferSize: 10000 +doc.beans.HitHighlights.ivaratorCacheScanPersistThreshold: 100000 +doc.beans.HitHighlights.ivaratorCacheScanTimeoutMinutes: "${warehouse.defaults.ivaratorCacheScanTimeoutMinutes}" +doc.beans.HitHighlights.ivaratorFstHdfsBaseURIs: "${warehouse.defaults.ivaratorFstHdfsBaseURIs}" +doc.beans.HitHighlights.ivaratorMaxOpenFiles: "${warehouse.defaults.ivaratorMaxOpenFiles}" +doc.beans.HitHighlights.logicDescription: "Fast boolean query over indexed fields, only returning fields queried on" +doc.beans.HitHighlights.maxDepthThreshold: "${warehouse.defaults.maxDepthThreshold}" +doc.beans.HitHighlights.maxEvaluationPipelines: "${warehouse.defaults.maxEvaluationPipelines}" +doc.beans.HitHighlights.maxFieldIndexRangeSplit: "${warehouse.defaults.maxFieldIndexRangeSplit}" 
+doc.beans.HitHighlights.maxOrExpansionFstThreshold: "${warehouse.defaults.maxOrExpansionFstThreshold}" +doc.beans.HitHighlights.maxOrExpansionThreshold: "${warehouse.defaults.maxOrExpansionThreshold}" +doc.beans.HitHighlights.maxOrRangeIvarators: "${warehouse.defaults.maxOrRangeIvarators}" +doc.beans.HitHighlights.maxOrRangeThreshold: "${warehouse.defaults.maxOrRangeThreshold}" +doc.beans.HitHighlights.maxPipelineCachedResults: "${warehouse.defaults.maxPipelineCachedResults}" +doc.beans.HitHighlights.maxRangesPerRangeIvarator: "${warehouse.defaults.maxRangesPerRangeIvarator}" +doc.beans.HitHighlights.maxUnfieldedExpansionThreshold: "${warehouse.defaults.maxUnfieldedExpansionThreshold}" +doc.beans.HitHighlights.maxValueExpansionThreshold: "${warehouse.defaults.maxValueExpansionThreshold}" +doc.beans.HitHighlights.metadataTableName: "${warehouse.tables.metadata.name}" +doc.beans.HitHighlights.minimumSelectivity: 0.2 +doc.beans.HitHighlights.queryThreads: "${warehouse.defaults.indexLookupThreads}" +doc.beans.HitHighlights.reverseIndexTableName: "${warehouse.tables.reverseIndex.name}" +doc.beans.HitHighlights.shardsPerDayThreshold: "${warehouse.defaults.shardsPerDayThreshold}" +doc.beans.HitHighlights.tableName: "${warehouse.tables.shard.name}" +doc.beans.HitHighlights.useEnrichers: false +doc.beans.HitHighlights.zookeeperConfig: "${warehouse.accumulo.zookeepers}" +doc.beans.IndexStatsQuery.auditType: "NONE" +doc.beans.IndexStatsQuery.selectorExtractor: null +doc.beans.InternalQueryMetricsQuery.collectQueryMetrics: false +doc.beans.LuceneUUIDEventQuery.auditType: "NONE" +doc.beans.LuceneUUIDEventQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.LuceneUUIDEventQuery.logicDescription: "Composite query logic that retrieves records from the event and error tables, based on known UUID fields, ie, those configured via UUIDTypeList in QueryLogicFactory.xml" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.auditType: "NONE" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.connPoolName: "UUID" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.dateIndexTableName: "${warehouse.errorTables.dateIndex.name}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.indexTableName: "${warehouse.errorTables.index.name}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.logicDescription: "Lucene query for event/document UUIDs for events that encountered errors at ingest time" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.metadataTableName: "${warehouse.errorTables.metadata.name}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.tableName: "${warehouse.errorTables.shard.name}" +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.auditType: "NONE" +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.connPoolName: "UUID" +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.logicDescription: "Lucene query for event/document UUIDs" +doc.beans.LuceneUUIDEventQuery.selectorExtractor: null +doc.beans.QueryMetricsQuery.auditType: "NONE" +doc.beans.QueryMetricsQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.QueryMetricsQuery.collectQueryMetrics: true +doc.beans.QueryMetricsQuery.dateIndexTableName: "${warehouse.metricTables.dateIndex.name}" +doc.beans.QueryMetricsQuery.includeHierarchyFields: false +doc.beans.QueryMetricsQuery.indexTableName: 
"${warehouse.metricTables.index.name}" +doc.beans.QueryMetricsQuery.logicDescription: "Retrieve query metrics based on the given search term(s)" +doc.beans.QueryMetricsQuery.metadataTableName: "${warehouse.metricTables.metadata.name}" +doc.beans.QueryMetricsQuery.modelName: "NONE" +doc.beans.QueryMetricsQuery.modelTableName: "${warehouse.metricTables.model.name}" +doc.beans.QueryMetricsQuery.reverseIndexTableName: "${warehouse.metricTables.reverseIndex.name}" +doc.beans.QueryMetricsQuery.tableName: "${warehouse.metricTables.shard.name}" +doc.beans.TermFrequencyQuery.auditType: "NONE" +doc.beans.TermFrequencyQuery.logicDescription: "Query that returns data from the term frequency query table" +doc.beans.TermFrequencyQuery.maxResults: -1 +doc.beans.TermFrequencyQuery.maxWork: -14 +doc.beans.TermFrequencyQuery.tableName: "${warehouse.tables.shard.name}" +doc.beans.TokenizedLuceneToJexlQueryParser.tokenizeUnfieldedQueries: true \ No newline at end of file diff --git a/microservices/configcheck/src/test/resources/rendered/microservice/fullReport.txt b/microservices/configcheck/src/test/resources/rendered/microservice/fullReport.txt new file mode 100644 index 00000000000..bec9e5d5d77 --- /dev/null +++ b/microservices/configcheck/src/test/resources/rendered/microservice/fullReport.txt @@ -0,0 +1,955 @@ +Placeholders (key: ${placeholder}) +---------------------------------------- +doc.beans.BaseEventQuery.accumuloPassword: ${datawave.query.logic.logics.BaseEventQuery.accumuloPassword} +doc.beans.BaseEventQuery.auditType: ${datawave.query.logic.logics.BaseEventQuery.auditType} +doc.beans.BaseEventQuery.baseIteratorPriority: ${datawave.query.logic.logics.BaseEventQuery.baseIteratorPriority} +doc.beans.BaseEventQuery.checkpointable: ${datawave.query.logic.logics.BaseEventQuery.checkpointable} +doc.beans.BaseEventQuery.collapseUids: ${datawave.query.logic.logics.BaseEventQuery.collapseUids} +doc.beans.BaseEventQuery.collapseUidsThreshold: ${datawave.query.logic.logics.BaseEventQuery.collapseUidsThreshold} +doc.beans.BaseEventQuery.collectQueryMetrics: ${datawave.query.logic.logics.BaseEventQuery.collectQueryMetrics} +doc.beans.BaseEventQuery.dateIndexTableName: ${datawave.query.logic.logics.BaseEventQuery.dateIndexTableName} +doc.beans.BaseEventQuery.dateIndexThreads: ${datawave.query.logic.logics.BaseEventQuery.dateIndexThreads} +doc.beans.BaseEventQuery.defaultDateTypeName: ${datawave.query.logic.logics.BaseEventQuery.defaultDateTypeName} +doc.beans.BaseEventQuery.disableIndexOnlyDocuments: ${datawave.query.logic.logics.BaseEventQuery.disableIndexOnlyDocuments} +doc.beans.BaseEventQuery.evaluationOnlyFields: ${datawave.query.logic.logics.BaseEventQuery.evaluationOnlyFields} +doc.beans.BaseEventQuery.eventPerDayThreshold: ${datawave.query.logic.logics.BaseEventQuery.eventPerDayThreshold} +doc.beans.BaseEventQuery.finalMaxTermThreshold: ${datawave.query.logic.logics.BaseEventQuery.finalMaxTermThreshold} +doc.beans.BaseEventQuery.fullTableScanEnabled: ${datawave.query.logic.logics.BaseEventQuery.fullTableScanEnabled} +doc.beans.BaseEventQuery.hdfsSiteConfigURLs: ${datawave.query.logic.logics.BaseEventQuery.hdfsSiteConfigURLs} +doc.beans.BaseEventQuery.includeDataTypeAsField: ${datawave.query.logic.logics.BaseEventQuery.includeDataTypeAsField} +doc.beans.BaseEventQuery.includeHierarchyFields: ${datawave.query.logic.logics.BaseEventQuery.includeHierarchyFields} +doc.beans.BaseEventQuery.indexLookupThreads: ${datawave.query.logic.logics.BaseEventQuery.indexLookupThreads} 
+doc.beans.BaseEventQuery.indexOnlyFilterFunctionsEnabled: ${datawave.query.logic.logics.BaseEventQuery.indexOnlyFilterFunctionsEnabled} +doc.beans.BaseEventQuery.indexTableName: ${datawave.query.logic.logics.BaseEventQuery.indexTableName} +doc.beans.BaseEventQuery.initialMaxTermThreshold: ${datawave.query.logic.logics.BaseEventQuery.initialMaxTermThreshold} +doc.beans.BaseEventQuery.ivaratorCacheBufferSize: ${datawave.query.logic.logics.BaseEventQuery.ivaratorCacheBufferSize} +doc.beans.BaseEventQuery.ivaratorCacheScanPersistThreshold: ${datawave.query.logic.logics.BaseEventQuery.ivaratorCacheScanPersistThreshold} +doc.beans.BaseEventQuery.ivaratorCacheScanTimeoutMinutes: ${datawave.query.logic.logics.BaseEventQuery.ivaratorCacheScanTimeoutMinutes} +doc.beans.BaseEventQuery.ivaratorFstHdfsBaseURIs: ${datawave.query.logic.logics.BaseEventQuery.ivaratorFstHdfsBaseURIs} +doc.beans.BaseEventQuery.ivaratorMaxOpenFiles: ${datawave.query.logic.logics.BaseEventQuery.ivaratorMaxOpenFiles} +doc.beans.BaseEventQuery.logTimingDetails: ${datawave.query.logic.logics.BaseEventQuery.logTimingDetails} +doc.beans.BaseEventQuery.logicDescription: ${datawave.query.logic.logics.BaseEventQuery.logicDescription} +doc.beans.BaseEventQuery.maxConcurrentTasks: ${datawave.query.logic.logics.BaseEventQuery.maxConcurrentTasks} +doc.beans.BaseEventQuery.maxDepthThreshold: ${datawave.query.logic.logics.BaseEventQuery.maxDepthThreshold} +doc.beans.BaseEventQuery.maxEvaluationPipelines: ${datawave.query.logic.logics.BaseEventQuery.maxEvaluationPipelines} +doc.beans.BaseEventQuery.maxFieldIndexRangeSplit: ${datawave.query.logic.logics.BaseEventQuery.maxFieldIndexRangeSplit} +doc.beans.BaseEventQuery.maxIndexScanTimeMillis: ${datawave.query.logic.logics.BaseEventQuery.maxIndexScanTimeMillis} +doc.beans.BaseEventQuery.maxIvaratorSources: ${datawave.query.logic.logics.BaseEventQuery.maxIvaratorSources} +doc.beans.BaseEventQuery.maxOrExpansionFstThreshold: ${datawave.query.logic.logics.BaseEventQuery.maxOrExpansionFstThreshold} +doc.beans.BaseEventQuery.maxOrExpansionThreshold: ${datawave.query.logic.logics.BaseEventQuery.maxOrExpansionThreshold} +doc.beans.BaseEventQuery.maxOrRangeThreshold: ${datawave.query.logic.logics.BaseEventQuery.maxOrRangeThreshold} +doc.beans.BaseEventQuery.maxPipelineCachedResults: ${datawave.query.logic.logics.BaseEventQuery.maxPipelineCachedResults} +doc.beans.BaseEventQuery.maxResults: ${datawave.query.logic.logics.BaseEventQuery.maxResults} +doc.beans.BaseEventQuery.maxUnfieldedExpansionThreshold: ${datawave.query.logic.logics.BaseEventQuery.maxUnfieldedExpansionThreshold} +doc.beans.BaseEventQuery.maxValueExpansionThreshold: ${datawave.query.logic.logics.BaseEventQuery.maxValueExpansionThreshold} +doc.beans.BaseEventQuery.metadataTableName: ${datawave.query.logic.logics.BaseEventQuery.metadataTableName} +doc.beans.BaseEventQuery.minimumSelectivity: ${datawave.query.logic.logics.BaseEventQuery.minimumSelectivity} +doc.beans.BaseEventQuery.modelName: ${datawave.query.logic.logics.BaseEventQuery.modelName} +doc.beans.BaseEventQuery.modelTableName: ${datawave.query.logic.logics.BaseEventQuery.modelTableName} +doc.beans.BaseEventQuery.queryThreads: ${datawave.query.logic.logics.BaseEventQuery.queryThreads} +doc.beans.BaseEventQuery.reverseIndexTableName: ${datawave.query.logic.logics.BaseEventQuery.reverseIndexTableName} +doc.beans.BaseEventQuery.sendTimingToStatsd: ${datawave.query.logic.logics.BaseEventQuery.sendTimingToStatsd} +doc.beans.BaseEventQuery.shardsPerDayThreshold: 
${datawave.query.logic.logics.BaseEventQuery.shardsPerDayThreshold} +doc.beans.BaseEventQuery.statsdHost: ${datawave.query.logic.logics.BaseEventQuery.statsdHost} +doc.beans.BaseEventQuery.statsdPort: ${datawave.query.logic.logics.BaseEventQuery.statsdPort} +doc.beans.BaseEventQuery.tableName: ${datawave.query.logic.logics.BaseEventQuery.tableName} +doc.beans.BaseEventQuery.useEnrichers: ${datawave.query.logic.logics.BaseEventQuery.useEnrichers} +doc.beans.BaseEventQuery.useFilters: ${datawave.query.logic.logics.BaseEventQuery.useFilters} +doc.beans.BaseEventQuery.zookeeperConfig: ${datawave.query.logic.logics.BaseEventQuery.zookeeperConfig} +doc.beans.ContentQuery.auditType: ${datawave.query.logic.logics.ContentQuery.auditType} +doc.beans.ContentQuery.checkpointable: ${datawave.query.logic.logics.ContentQuery.checkpointable} +doc.beans.ContentQuery.logicDescription: ${datawave.query.logic.logics.ContentQuery.logicDescription} +doc.beans.ContentQuery.maxResults: ${datawave.query.logic.logics.ContentQuery.maxResults} +doc.beans.ContentQuery.maxWork: ${datawave.query.logic.logics.ContentQuery.maxWork} +doc.beans.ContentQuery.tableName: ${datawave.query.logic.logics.ContentQuery.tableName} +doc.beans.CountQuery.checkpointable: ${datawave.query.logic.logics.CountQuery.checkpointable} +doc.beans.CountQuery.logicDescription: ${datawave.query.logic.logics.CountQuery.logicDescription} +doc.beans.DiscoveryQuery.allowLeadingWildcard: ${datawave.query.logic.logics.DiscoveryQuery.allowLeadingWildcard} +doc.beans.DiscoveryQuery.auditType: ${datawave.query.logic.logics.DiscoveryQuery.auditType} +doc.beans.DiscoveryQuery.checkpointable: ${datawave.query.logic.logics.DiscoveryQuery.checkpointable} +doc.beans.DiscoveryQuery.fullTableScanEnabled: ${datawave.query.logic.logics.DiscoveryQuery.fullTableScanEnabled} +doc.beans.DiscoveryQuery.indexTableName: ${datawave.query.logic.logics.DiscoveryQuery.indexTableName} +doc.beans.DiscoveryQuery.logicDescription: ${datawave.query.logic.logics.DiscoveryQuery.logicDescription} +doc.beans.DiscoveryQuery.maxResults: ${datawave.query.logic.logics.DiscoveryQuery.maxResults} +doc.beans.DiscoveryQuery.maxWork: ${datawave.query.logic.logics.DiscoveryQuery.maxWork} +doc.beans.DiscoveryQuery.metadataTableName: ${datawave.query.logic.logics.DiscoveryQuery.metadataTableName} +doc.beans.DiscoveryQuery.modelName: ${datawave.query.logic.logics.DiscoveryQuery.modelName} +doc.beans.DiscoveryQuery.modelTableName: ${datawave.query.logic.logics.DiscoveryQuery.modelTableName} +doc.beans.DiscoveryQuery.reverseIndexTableName: ${datawave.query.logic.logics.DiscoveryQuery.reverseIndexTableName} +doc.beans.DiscoveryQuery.tableName: ${datawave.query.logic.logics.DiscoveryQuery.tableName} +doc.beans.EdgeEventQuery.checkpointable: ${datawave.query.logic.logics.EdgeEventQuery.checkpointable} +doc.beans.EdgeEventQuery.edgeModelName: ${datawave.query.logic.logics.EdgeEventQuery.edgeModelName} +doc.beans.EdgeEventQuery.logicDescription: ${datawave.query.logic.logics.EdgeEventQuery.logicDescription} +doc.beans.EdgeEventQuery.modelTableName: ${datawave.query.logic.logics.EdgeEventQuery.modelTableName} +doc.beans.ErrorCountQuery.checkpointable: ${datawave.query.logic.logics.ErrorCountQuery.checkpointable} +doc.beans.ErrorCountQuery.indexTableName: ${datawave.query.logic.logics.ErrorCountQuery.indexTableName} +doc.beans.ErrorCountQuery.logicDescription: ${datawave.query.logic.logics.ErrorCountQuery.logicDescription} +doc.beans.ErrorCountQuery.metadataTableName: 
${datawave.query.logic.logics.ErrorCountQuery.metadataTableName} +doc.beans.ErrorCountQuery.reverseIndexTableName: ${datawave.query.logic.logics.ErrorCountQuery.indexTableName} +doc.beans.ErrorCountQuery.tableName: ${datawave.query.logic.logics.ErrorCountQuery.tableName} +doc.beans.ErrorDiscoveryQuery.allowLeadingWildcard: ${datawave.query.logic.logics.ErrorDiscoveryQuery.allowLeadingWildcard} +doc.beans.ErrorDiscoveryQuery.auditType: ${datawave.query.logic.logics.ErrorDiscoveryQuery.auditType} +doc.beans.ErrorDiscoveryQuery.checkpointable: ${datawave.query.logic.logics.ErrorDiscoveryQuery.checkpointable} +doc.beans.ErrorDiscoveryQuery.fullTableScanEnabled: ${datawave.query.logic.logics.ErrorDiscoveryQuery.fullTableScanEnabled} +doc.beans.ErrorDiscoveryQuery.indexTableName: ${datawave.query.logic.logics.ErrorDiscoveryQuery.indexTableName} +doc.beans.ErrorDiscoveryQuery.logicDescription: ${datawave.query.logic.logics.ErrorDiscoveryQuery.logicDescription} +doc.beans.ErrorDiscoveryQuery.maxResults: ${datawave.query.logic.logics.ErrorDiscoveryQuery.maxResults} +doc.beans.ErrorDiscoveryQuery.maxWork: ${datawave.query.logic.logics.ErrorDiscoveryQuery.maxWork} +doc.beans.ErrorDiscoveryQuery.metadataTableName: ${datawave.query.logic.logics.ErrorDiscoveryQuery.metadataTableName} +doc.beans.ErrorDiscoveryQuery.modelName: ${datawave.query.logic.logics.ErrorDiscoveryQuery.modelName} +doc.beans.ErrorDiscoveryQuery.modelTableName: ${datawave.query.logic.logics.ErrorDiscoveryQuery.modelTableName} +doc.beans.ErrorDiscoveryQuery.reverseIndexTableName: ${datawave.query.logic.logics.ErrorDiscoveryQuery.reverseIndexTableName} +doc.beans.ErrorDiscoveryQuery.tableName: ${datawave.query.logic.logics.ErrorDiscoveryQuery.tableName} +doc.beans.ErrorEventQuery.checkpointable: ${datawave.query.logic.logics.ErrorEventQuery.checkpointable} +doc.beans.ErrorEventQuery.dateIndexTableName: ${datawave.query.logic.logics.ErrorEventQuery.dateIndexTableName} +doc.beans.ErrorEventQuery.includeHierarchyFields: ${datawave.query.logic.logics.ErrorEventQuery.includeHierarchyFields} +doc.beans.ErrorEventQuery.indexTableName: ${datawave.query.logic.logics.ErrorEventQuery.indexTableName} +doc.beans.ErrorEventQuery.logicDescription: ${datawave.query.logic.logics.ErrorEventQuery.logicDescription} +doc.beans.ErrorEventQuery.metadataTableName: ${datawave.query.logic.logics.ErrorEventQuery.metadataTableName} +doc.beans.ErrorEventQuery.reverseIndexTableName: ${datawave.query.logic.logics.ErrorEventQuery.reverseIndexTableName} +doc.beans.ErrorEventQuery.tableName: ${datawave.query.logic.logics.ErrorEventQuery.tableName} +doc.beans.ErrorFieldIndexCountQuery.auditType: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.auditType} +doc.beans.ErrorFieldIndexCountQuery.checkpointable: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.checkpointable} +doc.beans.ErrorFieldIndexCountQuery.indexTableName: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.indexTableName} +doc.beans.ErrorFieldIndexCountQuery.logicDescription: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.logicDescription} +doc.beans.ErrorFieldIndexCountQuery.maxResults: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.maxResults} +doc.beans.ErrorFieldIndexCountQuery.maxUniqueValues: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.maxUniqueValues} +doc.beans.ErrorFieldIndexCountQuery.maxWork: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.maxWork} +doc.beans.ErrorFieldIndexCountQuery.metadataTableName: 
${datawave.query.logic.logics.ErrorFieldIndexCountQuery.metadataTableName} +doc.beans.ErrorFieldIndexCountQuery.modelName: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.modelName} +doc.beans.ErrorFieldIndexCountQuery.modelTableName: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.modelTableName} +doc.beans.ErrorFieldIndexCountQuery.queryThreads: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.queryThreads} +doc.beans.ErrorFieldIndexCountQuery.reverseIndexTableName: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.reverseIndexTableName} +doc.beans.ErrorFieldIndexCountQuery.tableName: ${datawave.query.logic.logics.ErrorFieldIndexCountQuery.tableName} +doc.beans.EventQuery.checkpointable: ${datawave.query.logic.logics.EventQuery.checkpointable} +doc.beans.EventQuery.logicDescription: ${datawave.query.logic.logics.EventQuery.logicDescription} +doc.beans.FacetedQuery.auditType: ${datawave.query.logic.logics.FacetedQuery.auditType} +doc.beans.FacetedQuery.checkpointable: ${datawave.query.logic.logics.FacetedQuery.checkpointable} +doc.beans.FacetedQuery.facetHashTableName: ${datawave.query.logic.logics.FacetedQuery.facetHashTableName} +doc.beans.FacetedQuery.facetMetadataTableName: ${datawave.query.logic.logics.FacetedQuery.facetMetadataTableName} +doc.beans.FacetedQuery.facetTableName: ${datawave.query.logic.logics.FacetedQuery.facetTableName} +doc.beans.FacetedQuery.facetedSearchType: ${datawave.query.logic.logics.FacetedQuery.facetedSearchType} +doc.beans.FacetedQuery.logicDescription: ${datawave.query.logic.logics.FacetedQuery.logicDescription} +doc.beans.FacetedQuery.maximumFacetGrouping: ${datawave.query.logic.logics.FacetedQuery.maximumFacetGrouping} +doc.beans.FacetedQuery.minimumFacet: ${datawave.query.logic.logics.FacetedQuery.minimumFacet} +doc.beans.FacetedQuery.streaming: ${datawave.query.logic.logics.FacetedQuery.streaming} +doc.beans.FieldIndexCountQuery.auditType: ${datawave.query.logic.logics.FieldIndexCountQuery.auditType} +doc.beans.FieldIndexCountQuery.checkpointable: ${datawave.query.logic.logics.FieldIndexCountQuery.checkpointable} +doc.beans.FieldIndexCountQuery.indexTableName: ${datawave.query.logic.logics.FieldIndexCountQuery.indexTableName} +doc.beans.FieldIndexCountQuery.logicDescription: ${datawave.query.logic.logics.FieldIndexCountQuery.logicDescription} +doc.beans.FieldIndexCountQuery.maxResults: ${datawave.query.logic.logics.FieldIndexCountQuery.maxResults} +doc.beans.FieldIndexCountQuery.maxUniqueValues: ${datawave.query.logic.logics.FieldIndexCountQuery.maxUniqueValues} +doc.beans.FieldIndexCountQuery.maxWork: ${datawave.query.logic.logics.FieldIndexCountQuery.maxWork} +doc.beans.FieldIndexCountQuery.metadataTableName: ${datawave.query.logic.logics.FieldIndexCountQuery.metadataTableName} +doc.beans.FieldIndexCountQuery.modelName: ${datawave.query.logic.logics.FieldIndexCountQuery.modelName} +doc.beans.FieldIndexCountQuery.modelTableName: ${datawave.query.logic.logics.FieldIndexCountQuery.modelTableName} +doc.beans.FieldIndexCountQuery.queryThreads: ${datawave.query.logic.logics.FieldIndexCountQuery.queryThreads} +doc.beans.FieldIndexCountQuery.reverseIndexTableName: ${datawave.query.logic.logics.FieldIndexCountQuery.reverseIndexTableName} +doc.beans.FieldIndexCountQuery.tableName: ${datawave.query.logic.logics.FieldIndexCountQuery.tableName} +doc.beans.HitHighlights.accumuloPassword: ${datawave.query.logic.logics.HitHighlights.accumuloPassword} +doc.beans.HitHighlights.auditType: 
${datawave.query.logic.logics.HitHighlights.auditType} +doc.beans.HitHighlights.checkpointable: ${datawave.query.logic.logics.HitHighlights.checkpointable} +doc.beans.HitHighlights.dateIndexTableName: ${datawave.query.logic.logics.HitHighlights.dateIndexTableName} +doc.beans.HitHighlights.defaultDateTypeName: ${datawave.query.logic.logics.HitHighlights.defaultDateTypeName} +doc.beans.HitHighlights.eventPerDayThreshold: ${datawave.query.logic.logics.HitHighlights.eventPerDayThreshold} +doc.beans.HitHighlights.finalMaxTermThreshold: ${datawave.query.logic.logics.BaseEventQuery.finalMaxTermThreshold} +doc.beans.HitHighlights.fullTableScanEnabled: ${datawave.query.logic.logics.HitHighlights.fullTableScanEnabled} +doc.beans.HitHighlights.hdfsSiteConfigURLs: ${datawave.query.logic.logics.HitHighlights.hdfsSiteConfigURLs} +doc.beans.HitHighlights.includeDataTypeAsField: ${datawave.query.logic.logics.HitHighlights.includeDataTypeAsField} +doc.beans.HitHighlights.includeGroupingContext: ${datawave.query.logic.logics.HitHighlights.includeGroupingContext} +doc.beans.HitHighlights.indexTableName: ${datawave.query.logic.logics.HitHighlights.indexTableName} +doc.beans.HitHighlights.initialMaxTermThreshold: ${datawave.query.logic.logics.BaseEventQuery.initialMaxTermThreshold} +doc.beans.HitHighlights.ivaratorCacheBufferSize: ${datawave.query.logic.logics.HitHighlights.ivaratorCacheBufferSize} +doc.beans.HitHighlights.ivaratorCacheScanPersistThreshold: ${datawave.query.logic.logics.HitHighlights.ivaratorCacheScanPersistThreshold} +doc.beans.HitHighlights.ivaratorCacheScanTimeoutMinutes: ${datawave.query.logic.logics.HitHighlights.ivaratorCacheScanTimeoutMinutes} +doc.beans.HitHighlights.ivaratorFstHdfsBaseURIs: ${datawave.query.logic.logics.HitHighlights.ivaratorFstHdfsBaseURIs} +doc.beans.HitHighlights.ivaratorMaxOpenFiles: ${datawave.query.logic.logics.HitHighlights.ivaratorMaxOpenFiles} +doc.beans.HitHighlights.logicDescription: ${datawave.query.logic.logics.HitHighlights.logicDescription} +doc.beans.HitHighlights.maxDepthThreshold: ${datawave.query.logic.logics.HitHighlights.maxDepthThreshold} +doc.beans.HitHighlights.maxEvaluationPipelines: ${datawave.query.logic.logics.HitHighlights.maxEvaluationPipelines} +doc.beans.HitHighlights.maxFieldIndexRangeSplit: ${datawave.query.logic.logics.HitHighlights.maxFieldIndexRangeSplit} +doc.beans.HitHighlights.maxOrExpansionFstThreshold: ${datawave.query.logic.logics.HitHighlights.maxOrExpansionFstThreshold} +doc.beans.HitHighlights.maxOrExpansionThreshold: ${datawave.query.logic.logics.HitHighlights.maxOrExpansionThreshold} +doc.beans.HitHighlights.maxOrRangeIvarators: ${datawave.query.logic.logics.HitHighlights.maxOrRangeIvarators} +doc.beans.HitHighlights.maxOrRangeThreshold: ${datawave.query.logic.logics.HitHighlights.maxOrRangeThreshold} +doc.beans.HitHighlights.maxPipelineCachedResults: ${datawave.query.logic.logics.HitHighlights.maxPipelineCachedResults} +doc.beans.HitHighlights.maxRangesPerRangeIvarator: ${datawave.query.logic.logics.HitHighlights.maxRangesPerRangeIvarator} +doc.beans.HitHighlights.maxUnfieldedExpansionThreshold: ${datawave.query.logic.logics.HitHighlights.maxUnfieldedExpansionThreshold} +doc.beans.HitHighlights.maxValueExpansionThreshold: ${datawave.query.logic.logics.HitHighlights.maxValueExpansionThreshold} +doc.beans.HitHighlights.metadataTableName: ${datawave.query.logic.logics.HitHighlights.metadataTableName} +doc.beans.HitHighlights.minimumSelectivity: ${datawave.query.logic.logics.HitHighlights.minimumSelectivity} 
+doc.beans.HitHighlights.queryThreads: ${datawave.query.logic.logics.HitHighlights.queryThreads} +doc.beans.HitHighlights.reverseIndexTableName: ${datawave.query.logic.logics.HitHighlights.reverseIndexTableName} +doc.beans.HitHighlights.shardsPerDayThreshold: ${datawave.query.logic.logics.HitHighlights.shardsPerDayThreshold} +doc.beans.HitHighlights.tableName: ${datawave.query.logic.logics.HitHighlights.tableName} +doc.beans.HitHighlights.useEnrichers: ${datawave.query.logic.logics.HitHighlights.useEnrichers} +doc.beans.HitHighlights.zookeeperConfig: ${datawave.query.logic.logics.HitHighlights.zookeeperConfig} +doc.beans.IndexStatsQuery.auditType: ${datawave.query.logic.logics.IndexStatsQuery.auditType} +doc.beans.InternalQueryMetricsQuery.collectQueryMetrics: ${datawave.query.logic.logics.InternalQueryMetricsQuery.collectQueryMetrics} +doc.beans.LuceneUUIDEventQuery.auditType: ${datawave.query.logic.logics.LuceneUUIDEventQuery.auditType} +doc.beans.LuceneUUIDEventQuery.checkpointable: ${datawave.query.logic.logics.LuceneUUIDEventQuery.checkpointable} +doc.beans.LuceneUUIDEventQuery.logicDescription: ${datawave.query.logic.logics.LuceneUUIDEventQuery.logicDescription} +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.auditType: ${datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.auditType} +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.connPoolName: ${datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.connPoolName} +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.dateIndexTableName: ${datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.dateIndexTableName} +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.indexTableName: ${datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.indexTableName} +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.logicDescription: ${datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.logicDescription} +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.metadataTableName: ${datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.metadataTableName} +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.reverseIndexTableName: ${datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.reverseIndexTableName} +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.tableName: ${datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.tableName} +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.auditType: ${datawave.query.logic.logics.LuceneUUIDEventQuery.eventQuery.auditType} +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.connPoolName: ${datawave.query.logic.logics.LuceneUUIDEventQuery.eventQuery.connPoolName} +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.logicDescription: ${datawave.query.logic.logics.LuceneUUIDEventQuery.eventQuery.logicDescription} +doc.beans.QueryMetricsQuery.auditType: ${datawave.query.logic.logics.QueryMetricsQuery.auditType} +doc.beans.QueryMetricsQuery.checkpointable: ${datawave.query.logic.logics.QueryMetricsQuery.checkpointable} +doc.beans.QueryMetricsQuery.collectQueryMetrics: ${datawave.query.logic.logics.QueryMetricsQuery.collectQueryMetrics} +doc.beans.QueryMetricsQuery.dateIndexTableName: ${datawave.query.logic.logics.QueryMetricsQuery.dateIndexTableName} +doc.beans.QueryMetricsQuery.includeHierarchyFields: ${datawave.query.logic.logics.QueryMetricsQuery.includeHierarchyFields} +doc.beans.QueryMetricsQuery.indexTableName: 
${datawave.query.logic.logics.QueryMetricsQuery.indexTableName} +doc.beans.QueryMetricsQuery.logicDescription: ${datawave.query.logic.logics.QueryMetricsQuery.logicDescription} +doc.beans.QueryMetricsQuery.metadataTableName: ${datawave.query.logic.logics.QueryMetricsQuery.metadataTableName} +doc.beans.QueryMetricsQuery.modelName: ${datawave.query.logic.logics.QueryMetricsQuery.modelName} +doc.beans.QueryMetricsQuery.modelTableName: ${datawave.query.logic.logics.QueryMetricsQuery.modelTableName} +doc.beans.QueryMetricsQuery.reverseIndexTableName: ${datawave.query.logic.logics.QueryMetricsQuery.reverseIndexTableName} +doc.beans.QueryMetricsQuery.tableName: ${datawave.query.logic.logics.QueryMetricsQuery.tableName} +doc.beans.TermFrequencyQuery.auditType: ${datawave.query.logic.logics.TermFrequencyQuery.auditType} +doc.beans.TermFrequencyQuery.logicDescription: ${datawave.query.logic.logics.TermFrequencyQuery.logicDescription} +doc.beans.TermFrequencyQuery.maxResults: ${datawave.query.logic.logics.TermFrequencyQuery.maxResults} +doc.beans.TermFrequencyQuery.maxWork: ${datawave.query.logic.logics.TermFrequencyQuery.maxWork} +doc.beans.TermFrequencyQuery.tableName: ${datawave.query.logic.logics.TermFrequencyQuery.tableName} + +Values (key: value) +---------------------------------------- +doc.beans.BaseEventQuery.accumuloPassword: "${warehouse.accumulo.password}" +doc.beans.BaseEventQuery.auditType: "ACTIVE" +doc.beans.BaseEventQuery.baseIteratorPriority: "${warehouse.defaults.baseIteratorPriority}" +doc.beans.BaseEventQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.BaseEventQuery.collapseUids: false +doc.beans.BaseEventQuery.collapseUidsThreshold: -1 +doc.beans.BaseEventQuery.collectQueryMetrics: true +doc.beans.BaseEventQuery.dateIndexTableName: "${warehouse.tables.dateIndex.name}" +doc.beans.BaseEventQuery.dateIndexThreads: "${warehouse.defaults.dateIndexThreads}" +doc.beans.BaseEventQuery.defaultDateTypeName: "EVENT" +doc.beans.BaseEventQuery.disableIndexOnlyDocuments: false +doc.beans.BaseEventQuery.evaluationOnlyFields: "" +doc.beans.BaseEventQuery.eventPerDayThreshold: "${warehouse.defaults.eventPerDayThreshold}" +doc.beans.BaseEventQuery.finalMaxTermThreshold: "${warehouse.defaults.finalMaxTermThreshold}" +doc.beans.BaseEventQuery.fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}" +doc.beans.BaseEventQuery.hdfsSiteConfigURLs: "${warehouse.defaults.hdfsSiteConfigURLs}" +doc.beans.BaseEventQuery.includeDataTypeAsField: false +doc.beans.BaseEventQuery.includeHierarchyFields: false +doc.beans.BaseEventQuery.indexLookupThreads: "${warehouse.defaults.indexLookupThreads}" +doc.beans.BaseEventQuery.indexOnlyFilterFunctionsEnabled: false +doc.beans.BaseEventQuery.indexTableName: "${warehouse.tables.index.name}" +doc.beans.BaseEventQuery.initialMaxTermThreshold: "${warehouse.defaults.initialMaxTermThreshold}" +doc.beans.BaseEventQuery.ivaratorCacheBufferSize: "${warehouse.defaults.ivaratorCacheBufferSize}" +doc.beans.BaseEventQuery.ivaratorCacheScanPersistThreshold: "${warehouse.defaults.ivaratorCacheScanPersistThreshold}" +doc.beans.BaseEventQuery.ivaratorCacheScanTimeoutMinutes: "${warehouse.defaults.ivaratorCacheScanTimeoutMinutes}" +doc.beans.BaseEventQuery.ivaratorFstHdfsBaseURIs: "${warehouse.defaults.ivaratorFstHdfsBaseURIs}" +doc.beans.BaseEventQuery.ivaratorMaxOpenFiles: "${warehouse.defaults.ivaratorMaxOpenFiles}" +doc.beans.BaseEventQuery.logTimingDetails: true +doc.beans.BaseEventQuery.logicDescription: "Retrieve sharded events/documents, 
leveraging the global index tables as needed" +doc.beans.BaseEventQuery.maxConcurrentTasks: 10 +doc.beans.BaseEventQuery.maxDepthThreshold: "${warehouse.defaults.maxDepthThreshold}" +doc.beans.BaseEventQuery.maxEvaluationPipelines: "${warehouse.defaults.maxEvaluationPipelines}" +doc.beans.BaseEventQuery.maxFieldIndexRangeSplit: "${warehouse.defaults.maxFieldIndexRangeSplit}" +doc.beans.BaseEventQuery.maxIndexScanTimeMillis: "${warehouse.defaults.maxIndexScanTimeMillis}" +doc.beans.BaseEventQuery.maxIvaratorSources: "${warehouse.defaults.maxIvaratorSources}" +doc.beans.BaseEventQuery.maxOrExpansionFstThreshold: "${warehouse.defaults.maxOrExpansionFstThreshold}" +doc.beans.BaseEventQuery.maxOrExpansionThreshold: "${warehouse.defaults.maxOrExpansionThreshold}" +doc.beans.BaseEventQuery.maxOrRangeThreshold: "${warehouse.defaults.maxOrRangeThreshold}" +doc.beans.BaseEventQuery.maxPipelineCachedResults: "${warehouse.defaults.maxPipelineCachedResults}" +doc.beans.BaseEventQuery.maxResults: -1 +doc.beans.BaseEventQuery.maxUnfieldedExpansionThreshold: "${warehouse.defaults.maxUnfieldedExpansionThreshold}" +doc.beans.BaseEventQuery.maxValueExpansionThreshold: "${warehouse.defaults.maxValueExpansionThreshold}" +doc.beans.BaseEventQuery.metadataTableName: "${warehouse.tables.metadata.name}" +doc.beans.BaseEventQuery.minimumSelectivity: 0.2 +doc.beans.BaseEventQuery.modelName: "${warehouse.defaults.modelName}" +doc.beans.BaseEventQuery.modelTableName: "${warehouse.tables.model.name}" +doc.beans.BaseEventQuery.queryThreads: "${warehouse.defaults.queryThreads}" +doc.beans.BaseEventQuery.reverseIndexTableName: "${warehouse.tables.reverseIndex.name}" +doc.beans.BaseEventQuery.sendTimingToStatsd: false +doc.beans.BaseEventQuery.shardsPerDayThreshold: "${warehouse.defaults.shardsPerDayThreshold}" +doc.beans.BaseEventQuery.statsdHost: "${warehouse.statsd.host}" +doc.beans.BaseEventQuery.statsdPort: "${warehouse.statsd.port}" +doc.beans.BaseEventQuery.tableName: "${warehouse.tables.shard.name}" +doc.beans.BaseEventQuery.useEnrichers: true +doc.beans.BaseEventQuery.useFilters: false +doc.beans.BaseEventQuery.zookeeperConfig: "${warehouse.accumulo.zookeepers}" +doc.beans.ContentQuery.auditType: "NONE" +doc.beans.ContentQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.ContentQuery.logicDescription: "Query that returns a document given the document identifier" +doc.beans.ContentQuery.maxResults: -1 +doc.beans.ContentQuery.maxWork: -1 +doc.beans.ContentQuery.tableName: "${warehouse.tables.shard.name}" +doc.beans.CountQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.CountQuery.logicDescription: "Retrieve event/document counts based on your search criteria" +doc.beans.DefaultQueryPlanner.compressOptionMappings: true +doc.beans.DefaultQueryPlanner[0]: 2611 +doc.beans.DefaultQueryPlanner[1]: true +doc.beans.DiscoveryQuery.allowLeadingWildcard: true +doc.beans.DiscoveryQuery.auditType: "NONE" +doc.beans.DiscoveryQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.DiscoveryQuery.fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}" +doc.beans.DiscoveryQuery.indexTableName: "${warehouse.tables.index.name}" +doc.beans.DiscoveryQuery.logicDescription: "Discovery query that returns information from the index about the supplied term(s)" +doc.beans.DiscoveryQuery.maxResults: -1 +doc.beans.DiscoveryQuery.maxWork: -1 +doc.beans.DiscoveryQuery.metadataTableName: "${warehouse.tables.metadata.name}" +doc.beans.DiscoveryQuery.modelName: 
"${warehouse.defaults.modelName}" +doc.beans.DiscoveryQuery.modelTableName: "${warehouse.tables.model.name}" +doc.beans.DiscoveryQuery.reverseIndexTableName: "${warehouse.tables.reverseIndex.name}" +doc.beans.DiscoveryQuery.tableName: "${warehouse.tables.shard.name}" +doc.beans.EdgeEventQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.EdgeEventQuery.edgeModelName: "DATAWAVE_EDGE" +doc.beans.EdgeEventQuery.logicDescription: "Use results of an EdgeQuery to obtain events/documents that created the given edge" +doc.beans.EdgeEventQuery.modelTableName: "${warehouse.tables.model.name}" +doc.beans.ErrorCountQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.ErrorCountQuery.indexTableName: "${warehouse.errorTables.index.name}" +doc.beans.ErrorCountQuery.logicDescription: "Retrieve counts of errored events based on your search criteria" +doc.beans.ErrorCountQuery.metadataTableName: "${warehouse.errorTables.metadata.name}" +doc.beans.ErrorCountQuery.reverseIndexTableName: "${warehouse.errorTables.index.name}" +doc.beans.ErrorCountQuery.tableName: "${warehouse.errorTables.shard.name}" +doc.beans.ErrorDiscoveryQuery.allowLeadingWildcard: true +doc.beans.ErrorDiscoveryQuery.auditType: "NONE" +doc.beans.ErrorDiscoveryQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.ErrorDiscoveryQuery.fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}" +doc.beans.ErrorDiscoveryQuery.indexTableName: "${warehouse.errorTables.index.name}" +doc.beans.ErrorDiscoveryQuery.logicDescription: "Discovery query that returns information from the ingest errors index about the supplied term(s)" +doc.beans.ErrorDiscoveryQuery.maxResults: -1 +doc.beans.ErrorDiscoveryQuery.maxWork: -1 +doc.beans.ErrorDiscoveryQuery.metadataTableName: "${warehouse.errorTables.metadata.name}" +doc.beans.ErrorDiscoveryQuery.modelName: "${warehouse.defaults.modelName}" +doc.beans.ErrorDiscoveryQuery.modelTableName: "${warehouse.errorTables.model.name}" +doc.beans.ErrorDiscoveryQuery.reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}" +doc.beans.ErrorDiscoveryQuery.tableName: "${warehouse.errorTables.shard.name}" +doc.beans.ErrorEventQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.ErrorEventQuery.dateIndexTableName: "" +doc.beans.ErrorEventQuery.includeHierarchyFields: false +doc.beans.ErrorEventQuery.indexTableName: "${warehouse.errorTables.index.name}" +doc.beans.ErrorEventQuery.logicDescription: "Retrieve events/documents that encountered one or more errors during ingest" +doc.beans.ErrorEventQuery.metadataTableName: "${warehouse.errorTables.metadata.name}" +doc.beans.ErrorEventQuery.reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}" +doc.beans.ErrorEventQuery.tableName: "${warehouse.errorTables.shard.name}" +doc.beans.ErrorFieldIndexCountQuery.auditType: "NONE" +doc.beans.ErrorFieldIndexCountQuery.checkpointable: false +doc.beans.ErrorFieldIndexCountQuery.indexTableName: "${warehouse.errorTables.index.name}" +doc.beans.ErrorFieldIndexCountQuery.logicDescription: "FieldIndex count query (experimental)" +doc.beans.ErrorFieldIndexCountQuery.maxResults: -1 +doc.beans.ErrorFieldIndexCountQuery.maxUniqueValues: 20000 +doc.beans.ErrorFieldIndexCountQuery.maxWork: -1 +doc.beans.ErrorFieldIndexCountQuery.metadataTableName: "${warehouse.errorTables.metadata.name}" +doc.beans.ErrorFieldIndexCountQuery.modelName: "DATAWAVE" +doc.beans.ErrorFieldIndexCountQuery.modelTableName: "${warehouse.errorTables.model.name}" 
+doc.beans.ErrorFieldIndexCountQuery.queryThreads: "${warehouse.defaults.queryThreads}" +doc.beans.ErrorFieldIndexCountQuery.reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}" +doc.beans.ErrorFieldIndexCountQuery.tableName: "${warehouse.errorTables.shard.name}" +doc.beans.EventQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.EventQuery.logicDescription: "Query the sharded event/document schema, leveraging the global index tables as needed" +doc.beans.FacetedQuery.auditType: "NONE" +doc.beans.FacetedQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.FacetedQuery.facetHashTableName: "datawave.facetHashes" +doc.beans.FacetedQuery.facetMetadataTableName: "datawave.facetMetadata" +doc.beans.FacetedQuery.facetTableName: "datawave.facets" +doc.beans.FacetedQuery.facetedSearchType: "FIELD_VALUE_FACETS" +doc.beans.FacetedQuery.logicDescription: "Faceted search over indexed fields, returning aggregate counts for field values" +doc.beans.FacetedQuery.maximumFacetGrouping: 200 +doc.beans.FacetedQuery.minimumFacet: 1 +doc.beans.FacetedQuery.streaming: true +doc.beans.FieldIndexCountQuery.auditType: "NONE" +doc.beans.FieldIndexCountQuery.checkpointable: false +doc.beans.FieldIndexCountQuery.indexTableName: "${warehouse.tables.index.name}" +doc.beans.FieldIndexCountQuery.logicDescription: "Indexed Fields Only: Given FIELDNAME returns counts for each unique value. Given FIELDNAME:FIELDVALUE returns count for only that value." +doc.beans.FieldIndexCountQuery.maxResults: -1 +doc.beans.FieldIndexCountQuery.maxUniqueValues: 20000 +doc.beans.FieldIndexCountQuery.maxWork: -1 +doc.beans.FieldIndexCountQuery.metadataTableName: "${warehouse.tables.metadata.name}" +doc.beans.FieldIndexCountQuery.modelName: "DATAWAVE" +doc.beans.FieldIndexCountQuery.modelTableName: "${warehouse.tables.model.name}" +doc.beans.FieldIndexCountQuery.queryThreads: "${warehouse.defaults.queryThreads}" +doc.beans.FieldIndexCountQuery.reverseIndexTableName: "${warehouse.tables.reverseIndex.name}" +doc.beans.FieldIndexCountQuery.tableName: "${warehouse.tables.shard.name}" +doc.beans.HitHighlights.accumuloPassword: "${warehouse.accumulo.password}" +doc.beans.HitHighlights.auditType: "NONE" +doc.beans.HitHighlights.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.HitHighlights.dateIndexTableName: "${warehouse.tables.dateIndex.name}" +doc.beans.HitHighlights.defaultDateTypeName: "EVENT" +doc.beans.HitHighlights.eventPerDayThreshold: 40000 +doc.beans.HitHighlights.finalMaxTermThreshold: "${warehouse.defaults.finalMaxTermThreshold}" +doc.beans.HitHighlights.fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}" +doc.beans.HitHighlights.hdfsSiteConfigURLs: "${warehouse.defaults.hdfsSiteConfigURLs}" +doc.beans.HitHighlights.includeDataTypeAsField: false +doc.beans.HitHighlights.includeGroupingContext: false +doc.beans.HitHighlights.indexTableName: "${warehouse.tables.index.name}" +doc.beans.HitHighlights.initialMaxTermThreshold: "${warehouse.defaults.initialMaxTermThreshold}" +doc.beans.HitHighlights.ivaratorCacheBufferSize: 10000 +doc.beans.HitHighlights.ivaratorCacheScanPersistThreshold: 100000 +doc.beans.HitHighlights.ivaratorCacheScanTimeoutMinutes: "${warehouse.defaults.ivaratorCacheScanTimeoutMinutes}" +doc.beans.HitHighlights.ivaratorFstHdfsBaseURIs: "${warehouse.defaults.ivaratorFstHdfsBaseURIs}" +doc.beans.HitHighlights.ivaratorMaxOpenFiles: "${warehouse.defaults.ivaratorMaxOpenFiles}" +doc.beans.HitHighlights.logicDescription: "Fast boolean query 
over indexed fields, only returning fields queried on" +doc.beans.HitHighlights.maxDepthThreshold: "${warehouse.defaults.maxDepthThreshold}" +doc.beans.HitHighlights.maxEvaluationPipelines: "${warehouse.defaults.maxEvaluationPipelines}" +doc.beans.HitHighlights.maxFieldIndexRangeSplit: "${warehouse.defaults.maxFieldIndexRangeSplit}" +doc.beans.HitHighlights.maxOrExpansionFstThreshold: "${warehouse.defaults.maxOrExpansionFstThreshold}" +doc.beans.HitHighlights.maxOrExpansionThreshold: "${warehouse.defaults.maxOrExpansionThreshold}" +doc.beans.HitHighlights.maxOrRangeIvarators: "${warehouse.defaults.maxOrRangeIvarators}" +doc.beans.HitHighlights.maxOrRangeThreshold: "${warehouse.defaults.maxOrRangeThreshold}" +doc.beans.HitHighlights.maxPipelineCachedResults: "${warehouse.defaults.maxPipelineCachedResults}" +doc.beans.HitHighlights.maxRangesPerRangeIvarator: "${warehouse.defaults.maxRangesPerRangeIvarator}" +doc.beans.HitHighlights.maxUnfieldedExpansionThreshold: "${warehouse.defaults.maxUnfieldedExpansionThreshold}" +doc.beans.HitHighlights.maxValueExpansionThreshold: "${warehouse.defaults.maxValueExpansionThreshold}" +doc.beans.HitHighlights.metadataTableName: "${warehouse.tables.metadata.name}" +doc.beans.HitHighlights.minimumSelectivity: 0.2 +doc.beans.HitHighlights.queryThreads: "${warehouse.defaults.indexLookupThreads}" +doc.beans.HitHighlights.reverseIndexTableName: "${warehouse.tables.reverseIndex.name}" +doc.beans.HitHighlights.shardsPerDayThreshold: "${warehouse.defaults.shardsPerDayThreshold}" +doc.beans.HitHighlights.tableName: "${warehouse.tables.shard.name}" +doc.beans.HitHighlights.useEnrichers: false +doc.beans.HitHighlights.zookeeperConfig: "${warehouse.accumulo.zookeepers}" +doc.beans.IndexStatsQuery.auditType: "NONE" +doc.beans.IndexStatsQuery.selectorExtractor: null +doc.beans.InternalQueryMetricsQuery.collectQueryMetrics: false +doc.beans.LuceneUUIDEventQuery.auditType: "NONE" +doc.beans.LuceneUUIDEventQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.LuceneUUIDEventQuery.logicDescription: "Composite query logic that retrieves records from the event and error tables, based on known UUID fields, ie, those configured via UUIDTypeList in QueryLogicFactory.xml" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.auditType: "NONE" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.connPoolName: "UUID" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.dateIndexTableName: "${warehouse.errorTables.dateIndex.name}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.indexTableName: "${warehouse.errorTables.index.name}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.logicDescription: "Lucene query for event/document UUIDs for events that encountered errors at ingest time" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.metadataTableName: "${warehouse.errorTables.metadata.name}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.tableName: "${warehouse.errorTables.shard.name}" +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.auditType: "NONE" +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.connPoolName: "UUID" +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.logicDescription: "Lucene query for event/document UUIDs" +doc.beans.LuceneUUIDEventQuery.selectorExtractor: null +doc.beans.QueryMetricsQuery.auditType: "NONE" 
+doc.beans.QueryMetricsQuery.checkpointable: "${warehouse.defaults.checkpointable}" +doc.beans.QueryMetricsQuery.collectQueryMetrics: true +doc.beans.QueryMetricsQuery.dateIndexTableName: "${warehouse.metricTables.dateIndex.name}" +doc.beans.QueryMetricsQuery.includeHierarchyFields: false +doc.beans.QueryMetricsQuery.indexTableName: "${warehouse.metricTables.index.name}" +doc.beans.QueryMetricsQuery.logicDescription: "Retrieve query metrics based on the given search term(s)" +doc.beans.QueryMetricsQuery.metadataTableName: "${warehouse.metricTables.metadata.name}" +doc.beans.QueryMetricsQuery.modelName: "NONE" +doc.beans.QueryMetricsQuery.modelTableName: "${warehouse.metricTables.model.name}" +doc.beans.QueryMetricsQuery.reverseIndexTableName: "${warehouse.metricTables.reverseIndex.name}" +doc.beans.QueryMetricsQuery.tableName: "${warehouse.metricTables.shard.name}" +doc.beans.TermFrequencyQuery.auditType: "NONE" +doc.beans.TermFrequencyQuery.logicDescription: "Query that returns data from the term frequency query table" +doc.beans.TermFrequencyQuery.maxResults: -1 +doc.beans.TermFrequencyQuery.maxWork: -14 +doc.beans.TermFrequencyQuery.tableName: "${warehouse.tables.shard.name}" +doc.beans.TokenizedLuceneToJexlQueryParser.tokenizeUnfieldedQueries: true + +Refs (key: ref) +---------------------------------------- +doc.beans.BaseEventQuery.contentFieldNames: baseEventQueryContentFieldNames +doc.beans.BaseEventQuery.dateIndexHelperFactory: dateIndexHelperFactory +doc.beans.BaseEventQuery.enricherClassNames: baseEventQueryEnricherClassNames +doc.beans.BaseEventQuery.eventQueryDataDecoratorTransformer: baseEventQueryEventQueryDataDecoratorTransformer +doc.beans.BaseEventQuery.filterClassNames: baseEventQueryFilterClassNames +doc.beans.BaseEventQuery.filterOptions: baseEventQueryFilterOptions +doc.beans.BaseEventQuery.hierarchyFieldOptions: baseEventQueryHierarchyFieldOptions +doc.beans.BaseEventQuery.ivaratorCacheDirConfigs: baseEventQueryIvaratorCacheDirConfigs +doc.beans.BaseEventQuery.metadataHelperFactory: metadataHelperFactory +doc.beans.BaseEventQuery.queryPlanner: DefaultQueryPlanner +doc.beans.BaseEventQuery.querySyntaxParsers: baseEventQuerySyntaxParsers +doc.beans.BaseEventQuery.realmSuffixExclusionPatterns: baseEventQueryRealmSuffixExclusionPatterns +doc.beans.BaseEventQuery.requiredRoles: baseEventQueryRequiredRoles +doc.beans.BaseEventQuery.selectorExtractor: DatawaveSelectorExtractor +doc.beans.DefaultQueryPlanner.queryModelProviderFactory: queryModelProviderFactory +doc.beans.DiscoveryQuery.metadataHelperFactory: metadataHelperFactory +doc.beans.DiscoveryQuery.selectorExtractor: DatawaveSelectorExtractor +doc.beans.EdgeEventQuery.edgeDictionaryProvider: edgeDictionaryProvider +doc.beans.EdgeEventQuery.edgeModelFieldsFactory: edgeModelFieldsFactory +doc.beans.EdgeEventQuery.edgeQueryModel: edgeQueryModel +doc.beans.ErrorDiscoveryQuery.metadataHelperFactory: metadataHelperFactory +doc.beans.ErrorDiscoveryQuery.selectorExtractor: DatawaveSelectorExtractor +doc.beans.ErrorFieldIndexCountQuery.metadataHelperFactory: metadataHelperFactory +doc.beans.ErrorFieldIndexCountQuery.selectorExtractor: DatawaveSelectorExtractor +doc.beans.FacetedQuery.querySyntaxParsers: facetedQuerySyntaxParsers +doc.beans.FieldIndexCountQuery.metadataHelperFactory: metadataHelperFactory +doc.beans.FieldIndexCountQuery.selectorExtractor: DatawaveSelectorExtractor +doc.beans.HitHighlights.dateIndexHelperFactory: dateIndexHelperFactory +doc.beans.HitHighlights.ivaratorCacheDirConfigs: 
hitHighlightsIvaratorCacheDirConfigs +doc.beans.HitHighlights.metadataHelperFactory: metadataHelperFactory +doc.beans.HitHighlights.querySyntaxParsers: hitHighlightsSyntaxParsers +doc.beans.HitHighlights.selectorExtractor: DatawaveSelectorExtractor +doc.beans.InternalQueryMetricsQuery.requiredRoles: internalQueryMetricsQueryRequiredRoles +doc.beans.LuceneToJexlQueryParser.allowedFunctions: allowedFunctions +doc.beans.LuceneToJexlQueryParser.skipTokenizeUnfieldedFields: skipTokenizeUnfieldedFields +doc.beans.LuceneToJexlUUIDQueryParser.allowedFunctions: allowedFunctions +doc.beans.LuceneToJexlUUIDQueryParser.uuidTypes: uuidTypes +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.mandatoryQuerySyntax: luceneUUIDEventQueryErrorEventQueryMandatoryQuerySyntax +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.mandatoryQuerySyntax: luceneUUIDEventQueryEventQueryMandatoryQuerySyntax +doc.beans.QueryMetricsQuery.metadataHelperFactory: metadataHelperFactory +doc.beans.TokenizedLuceneToJexlQueryParser.allowedFunctions: allowedFunctions +doc.beans.TokenizedLuceneToJexlQueryParser.skipTokenizeUnfieldedFields: skipTokenizeUnfieldedFields +doc.beans.TokenizedLuceneToJexlQueryParser.tokenizedFields: tokenizedFields +doc.beans.allowedFunctions.parser: LuceneToJexlQueryParser +doc.beans.baseQueryLogic.markingFunctions: markingFunctions +doc.beans.baseQueryLogic.responseObjectFactory: responseObjectFactory + +Effective Properties (name=value) +---------------------------------------- +datawave.query.logic.logics.BaseEventQuery.checkpointable=${warehouse.defaults.checkpointable} +datawave.query.logic.logics.BaseEventQuery.accumuloPassword=${warehouse.accumulo.password} +datawave.query.logic.logics.BaseEventQuery.tableName=${warehouse.tables.shard.name} +datawave.query.logic.logics.BaseEventQuery.dateIndexTableName=${warehouse.tables.dateIndex.name} +datawave.query.logic.logics.BaseEventQuery.defaultDateTypeName=EVENT +datawave.query.logic.logics.BaseEventQuery.metadataTableName=${warehouse.tables.metadata.name} +datawave.query.logic.logics.BaseEventQuery.indexTableName=${warehouse.tables.index.name} +datawave.query.logic.logics.BaseEventQuery.reverseIndexTableName=${warehouse.tables.reverseIndex.name} +datawave.query.logic.logics.BaseEventQuery.maxResults=-1 +datawave.query.logic.logics.BaseEventQuery.queryThreads=${warehouse.defaults.queryThreads} +datawave.query.logic.logics.BaseEventQuery.indexLookupThreads=${warehouse.defaults.indexLookupThreads} +datawave.query.logic.logics.BaseEventQuery.dateIndexThreads=${warehouse.defaults.dateIndexThreads} +datawave.query.logic.logics.BaseEventQuery.fullTableScanEnabled=${warehouse.defaults.fullTableScanEnabled} +datawave.query.logic.logics.BaseEventQuery.includeDataTypeAsField=false +datawave.query.logic.logics.BaseEventQuery.disableIndexOnlyDocuments=false +datawave.query.logic.logics.BaseEventQuery.indexOnlyFilterFunctionsEnabled=false +datawave.query.logic.logics.BaseEventQuery.includeHierarchyFields=false +datawave.query.logic.logics.BaseEventQuery.baseIteratorPriority=${warehouse.defaults.baseIteratorPriority} +datawave.query.logic.logics.BaseEventQuery.maxIndexScanTimeMillis=${warehouse.defaults.maxIndexScanTimeMillis} +datawave.query.logic.logics.BaseEventQuery.collapseUids=false +datawave.query.logic.logics.BaseEventQuery.collapseUidsThreshold=-1 +datawave.query.logic.logics.BaseEventQuery.useEnrichers=true +datawave.query.logic.logics.BaseEventQuery.minimumSelectivity=0.2 +datawave.query.logic.logics.BaseEventQuery.useFilters=false 
+datawave.query.logic.logics.BaseEventQuery.auditType=ACTIVE +datawave.query.logic.logics.BaseEventQuery.logicDescription=Retrieve sharded events/documents, leveraging the global index tables as needed +datawave.query.logic.logics.BaseEventQuery.eventPerDayThreshold=${warehouse.defaults.eventPerDayThreshold} +datawave.query.logic.logics.BaseEventQuery.shardsPerDayThreshold=${warehouse.defaults.shardsPerDayThreshold} +datawave.query.logic.logics.BaseEventQuery.initialMaxTermThreshold=${warehouse.defaults.initialMaxTermThreshold} +datawave.query.logic.logics.BaseEventQuery.finalMaxTermThreshold=${warehouse.defaults.finalMaxTermThreshold} +datawave.query.logic.logics.BaseEventQuery.maxDepthThreshold=${warehouse.defaults.maxDepthThreshold} +datawave.query.logic.logics.BaseEventQuery.maxUnfieldedExpansionThreshold=${warehouse.defaults.maxUnfieldedExpansionThreshold} +datawave.query.logic.logics.BaseEventQuery.maxValueExpansionThreshold=${warehouse.defaults.maxValueExpansionThreshold} +datawave.query.logic.logics.BaseEventQuery.maxOrExpansionThreshold=${warehouse.defaults.maxOrExpansionThreshold} +datawave.query.logic.logics.BaseEventQuery.maxOrRangeThreshold=${warehouse.defaults.maxOrRangeThreshold} +datawave.query.logic.logics.BaseEventQuery.maxOrExpansionFstThreshold=${warehouse.defaults.maxOrExpansionFstThreshold} +datawave.query.logic.logics.BaseEventQuery.maxFieldIndexRangeSplit=${warehouse.defaults.maxFieldIndexRangeSplit} +datawave.query.logic.logics.BaseEventQuery.maxIvaratorSources=${warehouse.defaults.maxIvaratorSources} +datawave.query.logic.logics.BaseEventQuery.maxEvaluationPipelines=${warehouse.defaults.maxEvaluationPipelines} +datawave.query.logic.logics.BaseEventQuery.maxPipelineCachedResults=${warehouse.defaults.maxPipelineCachedResults} +datawave.query.logic.logics.BaseEventQuery.hdfsSiteConfigURLs=${warehouse.defaults.hdfsSiteConfigURLs} +datawave.query.logic.logics.BaseEventQuery.zookeeperConfig=${warehouse.accumulo.zookeepers} +datawave.query.logic.logics.BaseEventQuery.ivaratorFstHdfsBaseURIs=${warehouse.defaults.ivaratorFstHdfsBaseURIs} +datawave.query.logic.logics.BaseEventQuery.ivaratorCacheBufferSize=${warehouse.defaults.ivaratorCacheBufferSize} +datawave.query.logic.logics.BaseEventQuery.ivaratorMaxOpenFiles=${warehouse.defaults.ivaratorMaxOpenFiles} +datawave.query.logic.logics.BaseEventQuery.ivaratorCacheScanPersistThreshold=${warehouse.defaults.ivaratorCacheScanPersistThreshold} +datawave.query.logic.logics.BaseEventQuery.ivaratorCacheScanTimeoutMinutes=${warehouse.defaults.ivaratorCacheScanTimeoutMinutes} +datawave.query.logic.logics.BaseEventQuery.modelTableName=${warehouse.tables.model.name} +datawave.query.logic.logics.BaseEventQuery.modelName=${warehouse.defaults.modelName} +datawave.query.logic.logics.BaseEventQuery.sendTimingToStatsd=false +datawave.query.logic.logics.BaseEventQuery.collectQueryMetrics=true +datawave.query.logic.logics.BaseEventQuery.logTimingDetails=true +datawave.query.logic.logics.BaseEventQuery.statsdHost=${warehouse.statsd.host} +datawave.query.logic.logics.BaseEventQuery.statsdPort=${warehouse.statsd.port} +datawave.query.logic.logics.BaseEventQuery.evaluationOnlyFields= +datawave.query.logic.logics.BaseEventQuery.maxConcurrentTasks=10 +datawave.query.logic.logics.LuceneUUIDEventQuery.logicDescription=Composite query logic that retrieves records from the event and error tables, based on known UUID fields, ie, those configured via UUIDTypeList in QueryLogicFactory.xml +datawave.query.logic.logics.LuceneUUIDEventQuery.auditType=NONE 
+datawave.query.logic.logics.LuceneUUIDEventQuery.eventQuery.auditType=NONE +datawave.query.logic.logics.LuceneUUIDEventQuery.eventQuery.logicDescription=Lucene query for event/document UUIDs +datawave.query.logic.logics.LuceneUUIDEventQuery.eventQuery.connPoolName=UUID +datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.tableName=${warehouse.errorTables.shard.name} +datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.dateIndexTableName=${warehouse.errorTables.dateIndex.name} +datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.metadataTableName=${warehouse.errorTables.metadata.name} +datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.indexTableName=${warehouse.errorTables.index.name} +datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.reverseIndexTableName=${warehouse.errorTables.reverseIndex.name} +datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.auditType=NONE +datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.logicDescription=Lucene query for event/document UUIDs for events that encountered errors at ingest time +datawave.query.logic.logics.LuceneUUIDEventQuery.errorEventQuery.connPoolName=UUID +datawave.query.logic.logics.LuceneUUIDEventQuery.checkpointable=${warehouse.defaults.checkpointable} +datawave.query.logic.logics.EventQuery.checkpointable=${warehouse.defaults.checkpointable} +datawave.query.logic.logics.EventQuery.logicDescription=Query the sharded event/document schema, leveraging the global index tables as needed +datawave.query.logic.logics.ErrorEventQuery.checkpointable=${warehouse.defaults.checkpointable} +datawave.query.logic.logics.ErrorEventQuery.logicDescription=Retrieve events/documents that encountered one or more errors during ingest +datawave.query.logic.logics.ErrorEventQuery.tableName=${warehouse.errorTables.shard.name} +datawave.query.logic.logics.ErrorEventQuery.metadataTableName=${warehouse.errorTables.metadata.name} +datawave.query.logic.logics.ErrorEventQuery.dateIndexTableName= +datawave.query.logic.logics.ErrorEventQuery.indexTableName=${warehouse.errorTables.index.name} +datawave.query.logic.logics.ErrorEventQuery.reverseIndexTableName=${warehouse.errorTables.reverseIndex.name} +datawave.query.logic.logics.ErrorEventQuery.includeHierarchyFields=false +datawave.query.logic.logics.DiscoveryQuery.checkpointable=${warehouse.defaults.checkpointable} +datawave.query.logic.logics.DiscoveryQuery.tableName=${warehouse.tables.shard.name} +datawave.query.logic.logics.DiscoveryQuery.indexTableName=${warehouse.tables.index.name} +datawave.query.logic.logics.DiscoveryQuery.reverseIndexTableName=${warehouse.tables.reverseIndex.name} +datawave.query.logic.logics.DiscoveryQuery.maxResults=-1 +datawave.query.logic.logics.DiscoveryQuery.maxWork=-1 +datawave.query.logic.logics.DiscoveryQuery.metadataTableName=${warehouse.tables.metadata.name} +datawave.query.logic.logics.DiscoveryQuery.modelTableName=${warehouse.tables.model.name} +datawave.query.logic.logics.DiscoveryQuery.modelName=${warehouse.defaults.modelName} +datawave.query.logic.logics.DiscoveryQuery.fullTableScanEnabled=${warehouse.defaults.fullTableScanEnabled} +datawave.query.logic.logics.DiscoveryQuery.allowLeadingWildcard=true +datawave.query.logic.logics.DiscoveryQuery.auditType=NONE +datawave.query.logic.logics.DiscoveryQuery.logicDescription=Discovery query that returns information from the index about the supplied term(s) 
+datawave.query.logic.logics.ErrorDiscoveryQuery.checkpointable=${warehouse.defaults.checkpointable} +datawave.query.logic.logics.ErrorDiscoveryQuery.tableName=${warehouse.errorTables.shard.name} +datawave.query.logic.logics.ErrorDiscoveryQuery.indexTableName=${warehouse.errorTables.index.name} +datawave.query.logic.logics.ErrorDiscoveryQuery.reverseIndexTableName=${warehouse.errorTables.reverseIndex.name} +datawave.query.logic.logics.ErrorDiscoveryQuery.maxResults=-1 +datawave.query.logic.logics.ErrorDiscoveryQuery.maxWork=-1 +datawave.query.logic.logics.ErrorDiscoveryQuery.metadataTableName=${warehouse.errorTables.metadata.name} +datawave.query.logic.logics.ErrorDiscoveryQuery.modelTableName=${warehouse.errorTables.model.name} +datawave.query.logic.logics.ErrorDiscoveryQuery.modelName=${warehouse.defaults.modelName} +datawave.query.logic.logics.ErrorDiscoveryQuery.fullTableScanEnabled=${warehouse.defaults.fullTableScanEnabled} +datawave.query.logic.logics.ErrorDiscoveryQuery.allowLeadingWildcard=true +datawave.query.logic.logics.ErrorDiscoveryQuery.auditType=NONE +datawave.query.logic.logics.ErrorDiscoveryQuery.logicDescription=Discovery query that returns information from the ingest errors index about the supplied term(s) +datawave.query.logic.logics.ContentQuery.checkpointable=${warehouse.defaults.checkpointable} +datawave.query.logic.logics.ContentQuery.tableName=${warehouse.tables.shard.name} +datawave.query.logic.logics.ContentQuery.maxResults=-1 +datawave.query.logic.logics.ContentQuery.maxWork=-1 +datawave.query.logic.logics.ContentQuery.auditType=NONE +datawave.query.logic.logics.ContentQuery.logicDescription=Query that returns a document given the document identifier +datawave.query.logic.logics.CountQuery.checkpointable=${warehouse.defaults.checkpointable} +datawave.query.logic.logics.CountQuery.logicDescription=Retrieve event/document counts based on your search criteria +datawave.query.logic.logics.ErrorCountQuery.checkpointable=${warehouse.defaults.checkpointable} +datawave.query.logic.logics.ErrorCountQuery.logicDescription=Retrieve counts of errored events based on your search criteria +datawave.query.logic.logics.ErrorCountQuery.tableName=${warehouse.errorTables.shard.name} +datawave.query.logic.logics.ErrorCountQuery.metadataTableName=${warehouse.errorTables.metadata.name} +datawave.query.logic.logics.ErrorCountQuery.indexTableName=${warehouse.errorTables.index.name} +datawave.query.logic.logics.ErrorCountQuery.reverseIndexTableName=${warehouse.errorTables.index.name} +datawave.query.logic.logics.FieldIndexCountQuery.checkpointable=false +datawave.query.logic.logics.FieldIndexCountQuery.tableName=${warehouse.tables.shard.name} +datawave.query.logic.logics.FieldIndexCountQuery.indexTableName=${warehouse.tables.index.name} +datawave.query.logic.logics.FieldIndexCountQuery.reverseIndexTableName=${warehouse.tables.reverseIndex.name} +datawave.query.logic.logics.FieldIndexCountQuery.metadataTableName=${warehouse.tables.metadata.name} +datawave.query.logic.logics.FieldIndexCountQuery.maxResults=-1 +datawave.query.logic.logics.FieldIndexCountQuery.maxWork=-1 +datawave.query.logic.logics.FieldIndexCountQuery.queryThreads=${warehouse.defaults.queryThreads} +datawave.query.logic.logics.FieldIndexCountQuery.modelTableName=${warehouse.tables.model.name} +datawave.query.logic.logics.FieldIndexCountQuery.modelName=DATAWAVE +datawave.query.logic.logics.FieldIndexCountQuery.maxUniqueValues=20000 +datawave.query.logic.logics.FieldIndexCountQuery.auditType=NONE
+datawave.query.logic.logics.FieldIndexCountQuery.logicDescription=Indexed Fields Only\: Given FIELDNAME returns counts for each unique value. Given FIELDNAME\:FIELDVALUE returns count for only that value. +datawave.query.logic.logics.ErrorFieldIndexCountQuery.checkpointable=false +datawave.query.logic.logics.ErrorFieldIndexCountQuery.tableName=${warehouse.errorTables.shard.name} +datawave.query.logic.logics.ErrorFieldIndexCountQuery.indexTableName=${warehouse.errorTables.index.name} +datawave.query.logic.logics.ErrorFieldIndexCountQuery.reverseIndexTableName=${warehouse.errorTables.reverseIndex.name} +datawave.query.logic.logics.ErrorFieldIndexCountQuery.metadataTableName=${warehouse.errorTables.metadata.name} +datawave.query.logic.logics.ErrorFieldIndexCountQuery.maxResults=-1 +datawave.query.logic.logics.ErrorFieldIndexCountQuery.maxWork=-1 +datawave.query.logic.logics.ErrorFieldIndexCountQuery.queryThreads=${warehouse.defaults.queryThreads} +datawave.query.logic.logics.ErrorFieldIndexCountQuery.modelTableName=${warehouse.errorTables.model.name} +datawave.query.logic.logics.ErrorFieldIndexCountQuery.modelName=DATAWAVE +datawave.query.logic.logics.ErrorFieldIndexCountQuery.maxUniqueValues=20000 +datawave.query.logic.logics.ErrorFieldIndexCountQuery.auditType=NONE +datawave.query.logic.logics.ErrorFieldIndexCountQuery.logicDescription=FieldIndex count query (experimental) +datawave.query.logic.logics.TermFrequencyQuery.tableName=${warehouse.tables.shard.name} +datawave.query.logic.logics.TermFrequencyQuery.maxResults=-1 +datawave.query.logic.logics.TermFrequencyQuery.maxWork=-14 +datawave.query.logic.logics.TermFrequencyQuery.auditType=NONE +datawave.query.logic.logics.TermFrequencyQuery.logicDescription=Query that returns data from the term frequency query table +datawave.query.logic.logics.IndexStatsQuery.auditType=NONE +datawave.query.logic.logics.QueryMetricsQuery.checkpointable=${warehouse.defaults.checkpointable} +datawave.query.logic.logics.QueryMetricsQuery.logicDescription=Retrieve query metrics based on the given search term(s) +datawave.query.logic.logics.QueryMetricsQuery.includeHierarchyFields=false +datawave.query.logic.logics.QueryMetricsQuery.modelTableName=${warehouse.metricTables.model.name} +datawave.query.logic.logics.QueryMetricsQuery.modelName=NONE +datawave.query.logic.logics.QueryMetricsQuery.tableName=${warehouse.metricTables.shard.name} +datawave.query.logic.logics.QueryMetricsQuery.dateIndexTableName=${warehouse.metricTables.dateIndex.name} +datawave.query.logic.logics.QueryMetricsQuery.metadataTableName=${warehouse.metricTables.metadata.name} +datawave.query.logic.logics.QueryMetricsQuery.indexTableName=${warehouse.metricTables.index.name} +datawave.query.logic.logics.QueryMetricsQuery.reverseIndexTableName=${warehouse.metricTables.reverseIndex.name} +datawave.query.logic.logics.QueryMetricsQuery.auditType=NONE +datawave.query.logic.logics.QueryMetricsQuery.collectQueryMetrics=true +datawave.query.logic.logics.InternalQueryMetricsQuery.collectQueryMetrics=false +datawave.query.logic.logics.FacetedQuery.checkpointable=${warehouse.defaults.checkpointable} +datawave.query.logic.logics.FacetedQuery.auditType=NONE +datawave.query.logic.logics.FacetedQuery.logicDescription=Faceted search over indexed fields, returning aggregate counts for field values +datawave.query.logic.logics.FacetedQuery.facetedSearchType=FIELD_VALUE_FACETS +datawave.query.logic.logics.FacetedQuery.facetTableName=datawave.facets 
+datawave.query.logic.logics.FacetedQuery.facetMetadataTableName=datawave.facetMetadata +datawave.query.logic.logics.FacetedQuery.facetHashTableName=datawave.facetHashes +datawave.query.logic.logics.FacetedQuery.maximumFacetGrouping=200 +datawave.query.logic.logics.FacetedQuery.minimumFacet=1 +datawave.query.logic.logics.FacetedQuery.streaming=true +datawave.query.logic.logics.HitHighlights.checkpointable=${warehouse.defaults.checkpointable} +datawave.query.logic.logics.HitHighlights.accumuloPassword=${warehouse.accumulo.password} +datawave.query.logic.logics.HitHighlights.tableName=${warehouse.tables.shard.name} +datawave.query.logic.logics.HitHighlights.dateIndexTableName=${warehouse.tables.dateIndex.name} +datawave.query.logic.logics.HitHighlights.defaultDateTypeName=EVENT +datawave.query.logic.logics.HitHighlights.metadataTableName=${warehouse.tables.metadata.name} +datawave.query.logic.logics.HitHighlights.indexTableName=${warehouse.tables.index.name} +datawave.query.logic.logics.HitHighlights.reverseIndexTableName=${warehouse.tables.reverseIndex.name} +datawave.query.logic.logics.HitHighlights.queryThreads=${warehouse.defaults.indexLookupThreads} +datawave.query.logic.logics.HitHighlights.fullTableScanEnabled=${warehouse.defaults.fullTableScanEnabled} +datawave.query.logic.logics.HitHighlights.minimumSelectivity=0.2 +datawave.query.logic.logics.HitHighlights.includeDataTypeAsField=false +datawave.query.logic.logics.HitHighlights.includeGroupingContext=false +datawave.query.logic.logics.HitHighlights.useEnrichers=false +datawave.query.logic.logics.HitHighlights.auditType=NONE +datawave.query.logic.logics.HitHighlights.logicDescription=Fast boolean query over indexed fields, only returning fields queried on +datawave.query.logic.logics.HitHighlights.eventPerDayThreshold=40000 +datawave.query.logic.logics.HitHighlights.shardsPerDayThreshold=${warehouse.defaults.shardsPerDayThreshold} +datawave.query.logic.logics.BaseEventQuery.initialMaxTermThreshold=${warehouse.defaults.initialMaxTermThreshold} +datawave.query.logic.logics.BaseEventQuery.finalMaxTermThreshold=${warehouse.defaults.finalMaxTermThreshold} +datawave.query.logic.logics.HitHighlights.maxDepthThreshold=${warehouse.defaults.maxDepthThreshold} +datawave.query.logic.logics.HitHighlights.maxUnfieldedExpansionThreshold=${warehouse.defaults.maxUnfieldedExpansionThreshold} +datawave.query.logic.logics.HitHighlights.maxValueExpansionThreshold=${warehouse.defaults.maxValueExpansionThreshold} +datawave.query.logic.logics.HitHighlights.maxOrExpansionThreshold=${warehouse.defaults.maxOrExpansionThreshold} +datawave.query.logic.logics.HitHighlights.maxOrRangeThreshold=${warehouse.defaults.maxOrRangeThreshold} +datawave.query.logic.logics.HitHighlights.maxRangesPerRangeIvarator=${warehouse.defaults.maxRangesPerRangeIvarator} +datawave.query.logic.logics.HitHighlights.maxOrRangeIvarators=${warehouse.defaults.maxOrRangeIvarators} +datawave.query.logic.logics.HitHighlights.maxOrExpansionFstThreshold=${warehouse.defaults.maxOrExpansionFstThreshold} +datawave.query.logic.logics.HitHighlights.maxFieldIndexRangeSplit=${warehouse.defaults.maxFieldIndexRangeSplit} +datawave.query.logic.logics.HitHighlights.maxEvaluationPipelines=${warehouse.defaults.maxEvaluationPipelines} +datawave.query.logic.logics.HitHighlights.maxPipelineCachedResults=${warehouse.defaults.maxPipelineCachedResults} +datawave.query.logic.logics.HitHighlights.hdfsSiteConfigURLs=${warehouse.defaults.hdfsSiteConfigURLs} 
+datawave.query.logic.logics.HitHighlights.zookeeperConfig=${warehouse.accumulo.zookeepers} +datawave.query.logic.logics.HitHighlights.ivaratorFstHdfsBaseURIs=${warehouse.defaults.ivaratorFstHdfsBaseURIs} +datawave.query.logic.logics.HitHighlights.ivaratorCacheBufferSize=10000 +datawave.query.logic.logics.HitHighlights.ivaratorMaxOpenFiles=${warehouse.defaults.ivaratorMaxOpenFiles} +datawave.query.logic.logics.HitHighlights.ivaratorCacheScanPersistThreshold=100000 +datawave.query.logic.logics.HitHighlights.ivaratorCacheScanTimeoutMinutes=${warehouse.defaults.ivaratorCacheScanTimeoutMinutes} +datawave.query.logic.logics.EdgeEventQuery.checkpointable=${warehouse.defaults.checkpointable} +datawave.query.logic.logics.EdgeEventQuery.logicDescription=Use results of an EdgeQuery to obtain events/documents that created the given edge +datawave.query.logic.logics.EdgeEventQuery.edgeModelName=DATAWAVE_EDGE +datawave.query.logic.logics.EdgeEventQuery.modelTableName=${warehouse.tables.model.name} + +Effective Yml +---------------------------------------- +datawave: + query: + logic: + logics: + BaseEventQuery: + checkpointable: "${warehouse.defaults.checkpointable}" + accumuloPassword: "${warehouse.accumulo.password}" + tableName: "${warehouse.tables.shard.name}" + dateIndexTableName: "${warehouse.tables.dateIndex.name}" + defaultDateTypeName: "EVENT" + metadataTableName: "${warehouse.tables.metadata.name}" + indexTableName: "${warehouse.tables.index.name}" + reverseIndexTableName: "${warehouse.tables.reverseIndex.name}" + maxResults: -1 + queryThreads: "${warehouse.defaults.queryThreads}" + indexLookupThreads: "${warehouse.defaults.indexLookupThreads}" + dateIndexThreads: "${warehouse.defaults.dateIndexThreads}" + fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}" + includeDataTypeAsField: false + disableIndexOnlyDocuments: false + indexOnlyFilterFunctionsEnabled: false + includeHierarchyFields: false + baseIteratorPriority: "${warehouse.defaults.baseIteratorPriority}" + maxIndexScanTimeMillis: "${warehouse.defaults.maxIndexScanTimeMillis}" + collapseUids: false + collapseUidsThreshold: -1 + useEnrichers: true + minimumSelectivity: 0.2 + useFilters: false + auditType: "ACTIVE" + logicDescription: "Retrieve sharded events/documents, leveraging the global index tables as needed" + eventPerDayThreshold: "${warehouse.defaults.eventPerDayThreshold}" + shardsPerDayThreshold: "${warehouse.defaults.shardsPerDayThreshold}" + initialMaxTermThreshold: "${warehouse.defaults.initialMaxTermThreshold}" + finalMaxTermThreshold: "${warehouse.defaults.finalMaxTermThreshold}" + maxDepthThreshold: "${warehouse.defaults.maxDepthThreshold}" + maxUnfieldedExpansionThreshold: "${warehouse.defaults.maxUnfieldedExpansionThreshold}" + maxValueExpansionThreshold: "${warehouse.defaults.maxValueExpansionThreshold}" + maxOrExpansionThreshold: "${warehouse.defaults.maxOrExpansionThreshold}" + maxOrRangeThreshold: "${warehouse.defaults.maxOrRangeThreshold}" + maxOrExpansionFstThreshold: "${warehouse.defaults.maxOrExpansionFstThreshold}" + maxFieldIndexRangeSplit: "${warehouse.defaults.maxFieldIndexRangeSplit}" + maxIvaratorSources: "${warehouse.defaults.maxIvaratorSources}" + maxEvaluationPipelines: "${warehouse.defaults.maxEvaluationPipelines}" + maxPipelineCachedResults: "${warehouse.defaults.maxPipelineCachedResults}" + hdfsSiteConfigURLs: "${warehouse.defaults.hdfsSiteConfigURLs}" + zookeeperConfig: "${warehouse.accumulo.zookeepers}" + ivaratorFstHdfsBaseURIs: "${warehouse.defaults.ivaratorFstHdfsBaseURIs}" + 
ivaratorCacheBufferSize: "${warehouse.defaults.ivaratorCacheBufferSize}" + ivaratorMaxOpenFiles: "${warehouse.defaults.ivaratorMaxOpenFiles}" + ivaratorCacheScanPersistThreshold: "${warehouse.defaults.ivaratorCacheScanPersistThreshold}" + ivaratorCacheScanTimeoutMinutes: "${warehouse.defaults.ivaratorCacheScanTimeoutMinutes}" + modelTableName: "${warehouse.tables.model.name}" + modelName: "${warehouse.defaults.modelName}" + sendTimingToStatsd: false + collectQueryMetrics: true + logTimingDetails: true + statsdHost: "${warehouse.statsd.host}" + statsdPort: "${warehouse.statsd.port}" + evaluationOnlyFields: "" + maxConcurrentTasks: 10 + LuceneUUIDEventQuery: + logicDescription: "Composite query logic that retrieves records from the event and error tables, based on known UUID fields, ie, those configured via UUIDTypeList in QueryLogicFactory.xml" + auditType: "NONE" + eventQuery: + auditType: "NONE" + logicDescription: "Lucene query for event/document UUIDs" + connPoolName: "UUID" + errorEventQuery: + tableName: "${warehouse.errorTables.shard.name}" + dateIndexTableName: "${warehouse.errorTables.dateIndex.name}" + metadataTableName: "${warehouse.errorTables.metadata.name}" + indexTableName: "${warehouse.errorTables.index.name}" + reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}" + auditType: "NONE" + logicDescription: "Lucene query for event/document UUIDs for events that encountered errors at ingest time" + connPoolName: "UUID" + checkpointable: "${warehouse.defaults.checkpointable}" + EventQuery: + checkpointable: "${warehouse.defaults.checkpointable}" + logicDescription: "Query the sharded event/document schema, leveraging the global index tables as needed" + ErrorEventQuery: + checkpointable: "${warehouse.defaults.checkpointable}" + logicDescription: "Retrieve events/documents that encountered one or more errors during ingest" + tableName: "${warehouse.errorTables.shard.name}" + metadataTableName: "${warehouse.errorTables.metadata.name}" + dateIndexTableName: "" + indexTableName: "${warehouse.errorTables.index.name}" + reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}" + includeHierarchyFields: false + DiscoveryQuery: + checkpointable: "${warehouse.defaults.checkpointable}" + tableName: "${warehouse.tables.shard.name}" + indexTableName: "${warehouse.tables.index.name}" + reverseIndexTableName: "${warehouse.tables.reverseIndex.name}" + maxResults: -1 + maxWork: -1 + metadataTableName: "${warehouse.tables.metadata.name}" + modelTableName: "${warehouse.tables.model.name}" + modelName: "${warehouse.defaults.modelName}" + fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}" + allowLeadingWildcard: true + auditType: "NONE" + logicDescription: "Discovery query that returns information from the index about the supplied term(s)" + ErrorDiscoveryQuery: + checkpointable: "${warehouse.defaults.checkpointable}" + tableName: "${warehouse.errorTables.shard.name}" + indexTableName: "${warehouse.errorTables.index.name}" + reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}" + maxResults: -1 + maxWork: -1 + metadataTableName: "${warehouse.errorTables.metadata.name}" + modelTableName: "${warehouse.errorTables.model.name}" + modelName: "${warehouse.defaults.modelName}" + fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}" + allowLeadingWildcard: true + auditType: "NONE" + logicDescription: "Discovery query that returns information from the ingest errors index about the supplied term(s)" + ContentQuery: + checkpointable: 
"${warehouse.defaults.checkpointable}" + tableName: "${warehouse.tables.shard.name}" + maxResults: -1 + maxWork: -1 + auditType: "NONE" + logicDescription: "Query that returns a document given the document identifier" + CountQuery: + checkpointable: "${warehouse.defaults.checkpointable}" + logicDescription: "Retrieve event/document counts based on your search criteria" + ErrorCountQuery: + checkpointable: "${warehouse.defaults.checkpointable}" + logicDescription: "Retrieve counts of errored events based on your search criteria" + tableName: "${warehouse.errorTables.shard.name}" + metadataTableName: "${warehouse.errorTables.metadata.name}" + indexTableName: "${warehouse.errorTables.index.name}" + FieldIndexCountQuery: + checkpointable: false + tableName: "${warehouse.tables.shard.name}" + indexTableName: "${warehouse.tables.index.name}" + reverseIndexTableName: "${warehouse.tables.reverseIndex.name}" + metadataTableName: "${warehouse.tables.metadata.name}" + maxResults: -1 + maxWork: -1 + queryThreads: "${warehouse.defaults.queryThreads}" + modelTableName: "${warehouse.tables.model.name}" + modelName: "DATAWAVE" + maxUniqueValues: 20000 + auditType: "NONE" + logicDescription: "Indexed Fields Only: Given FIELDNAME returns counts for each unique value. Given FIELDNAME:FIELDVALUE returns count for only that value." + ErrorFieldIndexCountQuery: + checkpointable: false + tableName: "${warehouse.errorTables.shard.name}" + indexTableName: "${warehouse.errorTables.index.name}" + reverseIndexTableName: "${warehouse.errorTables.reverseIndex.name}" + metadataTableName: "${warehouse.errorTables.metadata.name}" + maxResults: -1 + maxWork: -1 + queryThreads: "${warehouse.defaults.queryThreads}" + modelTableName: "${warehouse.errorTables.model.name}" + modelName: "DATAWAVE" + maxUniqueValues: 20000 + auditType: "NONE" + logicDescription: "FieldIndex count query (experimental)" + TermFrequencyQuery: + tableName: "${warehouse.tables.shard.name}" + maxResults: -1 + maxWork: -14 + auditType: "NONE" + logicDescription: "Query that returns data from the term frequency query table" + IndexStatsQuery: + auditType: "NONE" + QueryMetricsQuery: + checkpointable: "${warehouse.defaults.checkpointable}" + logicDescription: "Retrieve query metrics based on the given search term(s)" + includeHierarchyFields: false + modelTableName: "${warehouse.metricTables.model.name}" + modelName: "NONE" + tableName: "${warehouse.metricTables.shard.name}" + dateIndexTableName: "${warehouse.metricTables.dateIndex.name}" + metadataTableName: "${warehouse.metricTables.metadata.name}" + indexTableName: "${warehouse.metricTables.index.name}" + reverseIndexTableName: "${warehouse.metricTables.reverseIndex.name}" + auditType: "NONE" + collectQueryMetrics: true + InternalQueryMetricsQuery: + collectQueryMetrics: false + FacetedQuery: + checkpointable: "${warehouse.defaults.checkpointable}" + auditType: "NONE" + logicDescription: "Faceted search over indexed fields, returning aggregate counts for field values" + facetedSearchType: "FIELD_VALUE_FACETS" + facetTableName: "datawave.facets" + facetMetadataTableName: "datawave.facetMetadata" + facetHashTableName: "datawave.facetHashes" + maximumFacetGrouping: 200 + minimumFacet: 1 + streaming: true + HitHighlights: + checkpointable: "${warehouse.defaults.checkpointable}" + accumuloPassword: "${warehouse.accumulo.password}" + tableName: "${warehouse.tables.shard.name}" + dateIndexTableName: "${warehouse.tables.dateIndex.name}" + defaultDateTypeName: "EVENT" + metadataTableName: 
"${warehouse.tables.metadata.name}" + indexTableName: "${warehouse.tables.index.name}" + reverseIndexTableName: "${warehouse.tables.reverseIndex.name}" + queryThreads: "${warehouse.defaults.indexLookupThreads}" + fullTableScanEnabled: "${warehouse.defaults.fullTableScanEnabled}" + minimumSelectivity: 0.2 + includeDataTypeAsField: false + includeGroupingContext: false + useEnrichers: false + auditType: "NONE" + logicDescription: "Fast boolean query over indexed fields, only returning fields queried on" + eventPerDayThreshold: 40000 + shardsPerDayThreshold: "${warehouse.defaults.shardsPerDayThreshold}" + maxDepthThreshold: "${warehouse.defaults.maxDepthThreshold}" + maxUnfieldedExpansionThreshold: "${warehouse.defaults.maxUnfieldedExpansionThreshold}" + maxValueExpansionThreshold: "${warehouse.defaults.maxValueExpansionThreshold}" + maxOrExpansionThreshold: "${warehouse.defaults.maxOrExpansionThreshold}" + maxOrRangeThreshold: "${warehouse.defaults.maxOrRangeThreshold}" + maxRangesPerRangeIvarator: "${warehouse.defaults.maxRangesPerRangeIvarator}" + maxOrRangeIvarators: "${warehouse.defaults.maxOrRangeIvarators}" + maxOrExpansionFstThreshold: "${warehouse.defaults.maxOrExpansionFstThreshold}" + maxFieldIndexRangeSplit: "${warehouse.defaults.maxFieldIndexRangeSplit}" + maxEvaluationPipelines: "${warehouse.defaults.maxEvaluationPipelines}" + maxPipelineCachedResults: "${warehouse.defaults.maxPipelineCachedResults}" + hdfsSiteConfigURLs: "${warehouse.defaults.hdfsSiteConfigURLs}" + zookeeperConfig: "${warehouse.accumulo.zookeepers}" + ivaratorFstHdfsBaseURIs: "${warehouse.defaults.ivaratorFstHdfsBaseURIs}" + ivaratorCacheBufferSize: 10000 + ivaratorMaxOpenFiles: "${warehouse.defaults.ivaratorMaxOpenFiles}" + ivaratorCacheScanPersistThreshold: 100000 + ivaratorCacheScanTimeoutMinutes: "${warehouse.defaults.ivaratorCacheScanTimeoutMinutes}" + EdgeEventQuery: + checkpointable: "${warehouse.defaults.checkpointable}" + logicDescription: "Use results of an EdgeQuery to obtain events/documents that created the given edge" + edgeModelName: "DATAWAVE_EDGE" + modelTableName: "${warehouse.tables.model.name}" \ No newline at end of file diff --git a/microservices/configcheck/src/test/resources/rendered/webservice/QueryLogicFactory.xml b/microservices/configcheck/src/test/resources/rendered/webservice/QueryLogicFactory.xml new file mode 100644 index 00000000000..558d4b05073 --- /dev/null +++ b/microservices/configcheck/src/test/resources/rendered/webservice/QueryLogicFactory.xml @@ -0,0 +1,690 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + DOMETA + + + + + CONTENT + + + + + + + + + + + + + + + + + + + + + + + + + + 2611 + + + + true + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + CONTENT + + + + + <.*>$ + + + + + + datawave.query.enrich.DatawaveTermFrequencyEnricher + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + datawave.query.function.NormalizedVersionPredicate + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
diff --git a/microservices/configcheck/src/test/resources/rendered/webservice/analysis.txt b/microservices/configcheck/src/test/resources/rendered/webservice/analysis.txt new file mode 100644 index 00000000000..88d63536104 --- /dev/null +++ b/microservices/configcheck/src/test/resources/rendered/webservice/analysis.txt @@ -0,0 +1,249 @@ +Values (key: value) +---------------------------------------- +doc.beans.AstValidator.validateFlatten: true +doc.beans.AstValidator.validateJunctions: true +doc.beans.AstValidator.validateLineage: true +doc.beans.AstValidator.validateQueryPropertyMarkers: true +doc.beans.AstValidator.validateReferenceExpressions: true +doc.beans.BaseEventQuery.accumuloPassword: "${accumulo.user.password}" +doc.beans.BaseEventQuery.auditType: "NONE" +doc.beans.BaseEventQuery.baseIteratorPriority: 100 +doc.beans.BaseEventQuery.collapseUids: false +doc.beans.BaseEventQuery.collapseUidsThreshold: -1 +doc.beans.BaseEventQuery.collectQueryMetrics: true +doc.beans.BaseEventQuery.contentFieldNames: "CONTENT" +doc.beans.BaseEventQuery.dateIndexTableName: "${table.name.dateIndex}" +doc.beans.BaseEventQuery.dateIndexThreads: 20 +doc.beans.BaseEventQuery.defaultDateTypeName: "EVENT" +doc.beans.BaseEventQuery.disableIndexOnlyDocuments: false +doc.beans.BaseEventQuery.docAggregationThresholdMs: -1 +doc.beans.BaseEventQuery.enricherClassNames: "datawave.query.enrich.DatawaveTermFrequencyEnricher" +doc.beans.BaseEventQuery.evaluationOnlyFields: "" +doc.beans.BaseEventQuery.eventPerDayThreshold: 40000 +doc.beans.BaseEventQuery.finalMaxTermThreshold: 2000 +doc.beans.BaseEventQuery.fullTableScanEnabled: false +doc.beans.BaseEventQuery.hdfsSiteConfigURLs: "file:///etc/hadoop/conf/core-site.xml,file:///etc/hadoop/conf/hdfs-site.xml" +doc.beans.BaseEventQuery.includeDataTypeAsField: false +doc.beans.BaseEventQuery.includeHierarchyFields: false +doc.beans.BaseEventQuery.indexLookupThreads: 100 +doc.beans.BaseEventQuery.indexOnlyFilterFunctionsEnabled: false +doc.beans.BaseEventQuery.indexTableName: "${table.name.shardIndex}" +doc.beans.BaseEventQuery.initialMaxTermThreshold: 2000 +doc.beans.BaseEventQuery.ivaratorCacheBufferSize: 10000 +doc.beans.BaseEventQuery.ivaratorCacheScanPersistThreshold: 100000 +doc.beans.BaseEventQuery.ivaratorCacheScanTimeoutMinutes: 60 +doc.beans.BaseEventQuery.ivaratorFstHdfsBaseURIs: "hdfs:///IvaratorCache" +doc.beans.BaseEventQuery.ivaratorMaxOpenFiles: 100 +doc.beans.BaseEventQuery.lazySetMechanismEnabled: false +doc.beans.BaseEventQuery.logTimingDetails: false +doc.beans.BaseEventQuery.logicDescription: "Retrieve sharded events/documents, leveraging the global index tables as needed" +doc.beans.BaseEventQuery.maxDepthThreshold: 2000 +doc.beans.BaseEventQuery.maxEvaluationPipelines: 16 +doc.beans.BaseEventQuery.maxFieldIndexRangeSplit: 16 +doc.beans.BaseEventQuery.maxIndexScanTimeMillis: 3.1536E10 +doc.beans.BaseEventQuery.maxIvaratorSources: 20 +doc.beans.BaseEventQuery.maxOrExpansionFstThreshold: 750 +doc.beans.BaseEventQuery.maxOrExpansionThreshold: 500 +doc.beans.BaseEventQuery.maxOrRangeThreshold:
10 +doc.beans.BaseEventQuery.maxPipelineCachedResults: 16 +doc.beans.BaseEventQuery.maxResults: -1 +doc.beans.BaseEventQuery.maxUnfieldedExpansionThreshold: 50 +doc.beans.BaseEventQuery.maxValueExpansionThreshold: 50 +doc.beans.BaseEventQuery.metadataTableName: "${table.name.metadata}" +doc.beans.BaseEventQuery.minimumSelectivity: 0.2 +doc.beans.BaseEventQuery.modelName: "DATAWAVE" +doc.beans.BaseEventQuery.modelTableName: "${table.name.metadata}" +doc.beans.BaseEventQuery.querySyntaxParsers.JEXL: null +doc.beans.BaseEventQuery.queryThreads: 100 +doc.beans.BaseEventQuery.realmSuffixExclusionPatterns: "<.*>$" +doc.beans.BaseEventQuery.reverseIndexTableName: "${table.name.shardReverseIndex}" +doc.beans.BaseEventQuery.sendTimingToStatsd: false +doc.beans.BaseEventQuery.shardsPerDayThreshold: 20 +doc.beans.BaseEventQuery.statsdHost: "localhost" +doc.beans.BaseEventQuery.statsdPort: 8125 +doc.beans.BaseEventQuery.tableName: "${table.name.shard}" +doc.beans.BaseEventQuery.tfAggregationThresholdMs: -1 +doc.beans.BaseEventQuery.useEnrichers: true +doc.beans.BaseEventQuery.useFilters: false +doc.beans.BaseEventQuery.zookeeperConfig: "" +doc.beans.BaseModelEventQuery.modelName: "DATAWAVE" +doc.beans.BaseModelEventQuery.modelTableName: "${table.name.metadata}" +doc.beans.ContentQuery.auditType: "NONE" +doc.beans.ContentQuery.logicDescription: "Query that returns a document given the document identifier" +doc.beans.ContentQuery.maxResults: -1 +doc.beans.ContentQuery.maxWork: -1 +doc.beans.ContentQuery.tableName: "${table.name.shard}" +doc.beans.CountQuery.logicDescription: "Retrieve event/document counts based on your search criteria" +doc.beans.DefaultQueryPlanner.compressOptionMappings: true +doc.beans.DefaultQueryPlanner[0]: 2611 +doc.beans.DefaultQueryPlanner[1]: true +doc.beans.DiscoveryQuery.allowLeadingWildcard: true +doc.beans.DiscoveryQuery.auditType: "NONE" +doc.beans.DiscoveryQuery.fullTableScanEnabled: false +doc.beans.DiscoveryQuery.indexTableName: "${table.name.shardIndex}" +doc.beans.DiscoveryQuery.logicDescription: "Discovery query that returns information from the index about the supplied term(s)" +doc.beans.DiscoveryQuery.maxResults: -1 +doc.beans.DiscoveryQuery.maxWork: -1 +doc.beans.DiscoveryQuery.metadataTableName: "${table.name.metadata}" +doc.beans.DiscoveryQuery.modelName: "DATAWAVE" +doc.beans.DiscoveryQuery.modelTableName: "${table.name.metadata}" +doc.beans.DiscoveryQuery.reverseIndexTableName: "${table.name.shardReverseIndex}" +doc.beans.DiscoveryQuery.tableName: "${table.name.shardIndex}" +doc.beans.EdgeEventQuery.edgeModelName: "DATAWAVE_EDGE" +doc.beans.EdgeEventQuery.logicDescription: "Use results of an EdgeQuery to obtain events/documents that created the given edge" +doc.beans.EdgeEventQuery.modelTableName: "${table.name.metadata}" +doc.beans.ErrorCountQuery.indexTableName: "${table.name.errors.shardIndex}" +doc.beans.ErrorCountQuery.logicDescription: "Retrieve counts of errored events based on your search criteria" +doc.beans.ErrorCountQuery.metadataTableName: "${table.name.errors.metadata}" +doc.beans.ErrorCountQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}" +doc.beans.ErrorCountQuery.tableName: "${table.name.errors.shard}" +doc.beans.ErrorDiscoveryQuery.allowLeadingWildcard: true +doc.beans.ErrorDiscoveryQuery.auditType: "NONE" +doc.beans.ErrorDiscoveryQuery.fullTableScanEnabled: false +doc.beans.ErrorDiscoveryQuery.indexTableName: "${table.name.errors.shardIndex}" +doc.beans.ErrorDiscoveryQuery.logicDescription: "Discovery query that returns 
information from the ingest errors index about the supplied term(s)" +doc.beans.ErrorDiscoveryQuery.maxResults: -1 +doc.beans.ErrorDiscoveryQuery.maxWork: -1 +doc.beans.ErrorDiscoveryQuery.metadataTableName: "${table.name.errors.metadata}" +doc.beans.ErrorDiscoveryQuery.modelName: "DATAWAVE" +doc.beans.ErrorDiscoveryQuery.modelTableName: "${table.name.metadata}" +doc.beans.ErrorDiscoveryQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}" +doc.beans.ErrorDiscoveryQuery.tableName: "${table.name.errors.shardIndex}" +doc.beans.ErrorEventQuery.dateIndexTableName: "" +doc.beans.ErrorEventQuery.includeHierarchyFields: false +doc.beans.ErrorEventQuery.indexTableName: "${table.name.errors.shardIndex}" +doc.beans.ErrorEventQuery.logicDescription: "Retrieve events/documents that encountered one or more errors during ingest" +doc.beans.ErrorEventQuery.metadataTableName: "${table.name.errors.metadata}" +doc.beans.ErrorEventQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}" +doc.beans.ErrorEventQuery.tableName: "${table.name.errors.shard}" +doc.beans.ErrorFieldIndexCountQuery.auditType: "NONE" +doc.beans.ErrorFieldIndexCountQuery.indexTableName: "${table.name.errors.shardIndex}" +doc.beans.ErrorFieldIndexCountQuery.logicDescription: "FieldIndex count query (experimental)" +doc.beans.ErrorFieldIndexCountQuery.maxResults: -1 +doc.beans.ErrorFieldIndexCountQuery.maxUniqueValues: 20000 +doc.beans.ErrorFieldIndexCountQuery.maxWork: -1 +doc.beans.ErrorFieldIndexCountQuery.metadataTableName: "${table.name.errors.metadata}" +doc.beans.ErrorFieldIndexCountQuery.modelName: "DATAWAVE" +doc.beans.ErrorFieldIndexCountQuery.modelTableName: "${table.name.metadata}" +doc.beans.ErrorFieldIndexCountQuery.queryThreads: 100 +doc.beans.ErrorFieldIndexCountQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}" +doc.beans.ErrorFieldIndexCountQuery.tableName: "${table.name.errors.shard}" +doc.beans.EventQuery.logicDescription: "Query the sharded event/document schema, leveraging the global index tables as needed" +doc.beans.FacetedQuery.auditType: "NONE" +doc.beans.FacetedQuery.facetHashTableName: "datawave.facetHashes" +doc.beans.FacetedQuery.facetMetadataTableName: "datawave.facetMetadata" +doc.beans.FacetedQuery.facetTableName: "datawave.facets" +doc.beans.FacetedQuery.facetedSearchType: "FIELD_VALUE_FACETS" +doc.beans.FacetedQuery.fullTableScanEnabled: false +doc.beans.FacetedQuery.logicDescription: "Faceted search over indexed fields, returning aggregate counts for field values" +doc.beans.FacetedQuery.maximumFacetGrouping: 200 +doc.beans.FacetedQuery.minimumFacet: 1 +doc.beans.FacetedQuery.querySyntaxParsers.JEXL: null +doc.beans.FacetedQuery.streaming: true +doc.beans.FieldIndexCountQuery.auditType: "NONE" +doc.beans.FieldIndexCountQuery.indexTableName: "${table.name.shardIndex}" +doc.beans.FieldIndexCountQuery.logicDescription: "Indexed Fields Only: Given FIELDNAME returns counts for each unique value. Given FIELDNAME:FIELDVALUE returns count for only that value." 
+doc.beans.FieldIndexCountQuery.maxResults: -1 +doc.beans.FieldIndexCountQuery.maxUniqueValues: 20000 +doc.beans.FieldIndexCountQuery.maxWork: -1 +doc.beans.FieldIndexCountQuery.metadataTableName: "${table.name.metadata}" +doc.beans.FieldIndexCountQuery.modelName: "DATAWAVE" +doc.beans.FieldIndexCountQuery.modelTableName: "${table.name.metadata}" +doc.beans.FieldIndexCountQuery.queryThreads: 100 +doc.beans.FieldIndexCountQuery.reverseIndexTableName: "${table.name.shardReverseIndex}" +doc.beans.FieldIndexCountQuery.tableName: "${table.name.shard}" +doc.beans.HitHighlights.auditType: "NONE" +doc.beans.HitHighlights.eventPerDayThreshold: 40000 +doc.beans.HitHighlights.finalMaxTermThreshold: 2000 +doc.beans.HitHighlights.fullTableScanEnabled: false +doc.beans.HitHighlights.hdfsSiteConfigURLs: "file:///etc/hadoop/conf/core-site.xml,file:///etc/hadoop/conf/hdfs-site.xml" +doc.beans.HitHighlights.includeDataTypeAsField: false +doc.beans.HitHighlights.includeGroupingContext: false +doc.beans.HitHighlights.indexTableName: "${table.name.shardIndex}" +doc.beans.HitHighlights.initialMaxTermThreshold: 2000 +doc.beans.HitHighlights.ivaratorCacheBufferSize: 10000 +doc.beans.HitHighlights.ivaratorCacheScanPersistThreshold: 100000 +doc.beans.HitHighlights.ivaratorCacheScanTimeoutMinutes: 60 +doc.beans.HitHighlights.ivaratorFstHdfsBaseURIs: "hdfs:///IvaratorCache" +doc.beans.HitHighlights.ivaratorMaxOpenFiles: 100 +doc.beans.HitHighlights.logicDescription: "Fast boolean query over indexed fields, only returning fields queried on" +doc.beans.HitHighlights.maxDepthThreshold: 2000 +doc.beans.HitHighlights.maxEvaluationPipelines: 16 +doc.beans.HitHighlights.maxFieldIndexRangeSplit: 16 +doc.beans.HitHighlights.maxOrExpansionFstThreshold: 750 +doc.beans.HitHighlights.maxOrExpansionThreshold: 500 +doc.beans.HitHighlights.maxOrRangeIvarators: 10 +doc.beans.HitHighlights.maxOrRangeThreshold: 10 +doc.beans.HitHighlights.maxPipelineCachedResults: 16 +doc.beans.HitHighlights.maxRangesPerRangeIvarator: 5 +doc.beans.HitHighlights.maxUnfieldedExpansionThreshold: 50 +doc.beans.HitHighlights.maxValueExpansionThreshold: 50 +doc.beans.HitHighlights.metadataTableName: "${table.name.metadata}" +doc.beans.HitHighlights.minimumSelectivity: 0.2 +doc.beans.HitHighlights.querySyntaxParsers.JEXL: null +doc.beans.HitHighlights.queryThreads: 100 +doc.beans.HitHighlights.reverseIndexTableName: "${table.name.shardReverseIndex}" +doc.beans.HitHighlights.shardsPerDayThreshold: 20 +doc.beans.HitHighlights.tableName: "${table.name.shard}" +doc.beans.HitHighlights.useEnrichers: false +doc.beans.HitHighlights.zookeeperConfig: "" +doc.beans.IdTranslatorConfiguration.beginDate: 20100101 +doc.beans.IdTranslatorConfiguration.columnVisibility: "" +doc.beans.IndexStatsQuery.auditType: "NONE" +doc.beans.IndexStatsQuery.selectorExtractor: null +doc.beans.LookupUUIDConfiguration.beginDate: 20100101 +doc.beans.LookupUUIDConfiguration.columnVisibility: "" +doc.beans.LuceneUUIDEventQuery.auditType: "NONE" +doc.beans.LuceneUUIDEventQuery.logicDescription: "Composite query logic that retrieves records from the event and error tables, based on known UUID fields, ie, those configured via UUIDTypeList in QueryLogicFactory.xml" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.auditType: "NONE" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.connPoolName: "UUID" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.dateIndexTableName: "" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.indexTableName: 
"${table.name.errors.shardIndex}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.logicDescription: "Lucene query for event/document UUIDs for events that encountered errors at ingest time" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.mandatoryQuerySyntax: "LUCENE-UUID" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.metadataTableName: "${table.name.errors.metadata}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.tableName: "${table.name.errors.shard}" +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.auditType: "NONE" +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.connPoolName: "UUID" +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.logicDescription: "Lucene query for event/document UUIDs" +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.mandatoryQuerySyntax: "LUCENE-UUID" +doc.beans.LuceneUUIDEventQuery.selectorExtractor: null +doc.beans.MyRemoteUserOps.queryServiceHost: "localhost" +doc.beans.MyRemoteUserOps.queryServicePort: 8443 +doc.beans.MyRemoteUserOps.queryServiceScheme: "https" +doc.beans.MyRemoteUserOps.queryServiceURI: "/DataWave/Security/User/" +doc.beans.QueryLogicFactoryConfiguration.maxPageSize: 10000 +doc.beans.QueryLogicFactoryConfiguration.pageByteTrigger: 0 +doc.beans.QueryMetricsQuery.auditType: "NONE" +doc.beans.QueryMetricsQuery.dateIndexTableName: "${table.name.queryMetrics.dateIndex}" +doc.beans.QueryMetricsQuery.includeHierarchyFields: false +doc.beans.QueryMetricsQuery.indexTableName: "${table.name.queryMetrics.shardIndex}" +doc.beans.QueryMetricsQuery.logicDescription: "Retrieve query metrics based on the given search term(s)" +doc.beans.QueryMetricsQuery.metadataTableName: "${table.name.queryMetrics.metadata}" +doc.beans.QueryMetricsQuery.modelName: "NONE" +doc.beans.QueryMetricsQuery.modelTableName: "${table.name.queryMetrics.metadata}" +doc.beans.QueryMetricsQuery.reverseIndexTableName: "${table.name.queryMetrics.shardReverseIndex}" +doc.beans.QueryMetricsQuery.tableName: "${table.name.queryMetrics.shard}" +doc.beans.RemoteEventQuery.auditType: "NONE" +doc.beans.RemoteEventQuery.checkpointable: false +doc.beans.RemoteEventQuery.logicDescription: "Retrieve sharded events/documents, leveraging the global index tables as needed" +doc.beans.RemoteEventQuery.maxResults: -1 +doc.beans.RemoteEventQuery.remoteQueryLogic: "EventQuery" +doc.beans.RemoteEventQuery.tableName: "${table.name.shard}" +doc.beans.RemoteQueryService.queryServiceHost: "query" +doc.beans.RemoteQueryService.queryServicePort: 8443 +doc.beans.RemoteQueryService.queryServiceScheme: "https" +doc.beans.RemoteQueryService.queryServiceURI: "/query/v1/" +doc.beans.TLDEventQuery.collapseUids: false +doc.beans.TLDEventQuery.indexFilteringClassNames: "datawave.query.function.NormalizedVersionPredicate" +doc.beans.TermFrequencyQuery.auditType: "NONE" +doc.beans.TermFrequencyQuery.logicDescription: "Query that returns data from the term frequency query table" +doc.beans.TermFrequencyQuery.maxResults: -1 +doc.beans.TermFrequencyQuery.maxWork: -1 +doc.beans.TermFrequencyQuery.tableName: "${table.name.shard}" +doc.beans.TimedVisitorManager.debugEnabled: false +doc.beans.TimedVisitorManager.validateAst: false +doc.beans.TokenizedLuceneToJexlQueryParser.tokenizeUnfieldedQueries: true +doc.beans.skipTokenizeFields: "DOMETA" +doc.beans.tokenizeFields: "CONTENT" \ No newline at end of file diff --git 
a/microservices/configcheck/src/test/resources/rendered/webservice/fullReport.txt b/microservices/configcheck/src/test/resources/rendered/webservice/fullReport.txt new file mode 100644 index 00000000000..25cf48e7bae --- /dev/null +++ b/microservices/configcheck/src/test/resources/rendered/webservice/fullReport.txt @@ -0,0 +1,674 @@ +Placeholders (key: ${placeholder}) +---------------------------------------- +doc.beans.BaseEventQuery.accumuloPassword: ${accumulo.user.password} +doc.beans.BaseEventQuery.baseIteratorPriority: ${beq.baseIteratorPriority} +doc.beans.BaseEventQuery.collapseUids: ${query.collapse.uids} +doc.beans.BaseEventQuery.collapseUidsThreshold: ${query.collapse.uids.threshold} +doc.beans.BaseEventQuery.dateIndexTableName: ${date.index.table.name} +doc.beans.BaseEventQuery.dateIndexThreads: ${date.index.threads} +doc.beans.BaseEventQuery.defaultDateTypeName: ${default.date.type.name} +doc.beans.BaseEventQuery.disableIndexOnlyDocuments: ${disable.index.only.documents} +doc.beans.BaseEventQuery.evaluationOnlyFields: ${evaluation.only.fields} +doc.beans.BaseEventQuery.finalMaxTermThreshold: ${beq.finalMaxTermThreshold} +doc.beans.BaseEventQuery.fullTableScanEnabled: ${beq.fullTableScanEnabled} +doc.beans.BaseEventQuery.hdfsSiteConfigURLs: ${hdfs.site.config.urls} +doc.beans.BaseEventQuery.includeHierarchyFields: ${include.hierarchy.fields} +doc.beans.BaseEventQuery.indexLookupThreads: ${index.query.threads} +doc.beans.BaseEventQuery.indexOnlyFilterFunctionsEnabled: ${enable.index.only.filter.functions} +doc.beans.BaseEventQuery.indexTableName: ${index.table.name} +doc.beans.BaseEventQuery.initialMaxTermThreshold: ${beq.initialMaxTermThreshold} +doc.beans.BaseEventQuery.ivaratorCacheScanTimeoutMinutes: ${query.max.call.time.minutes} +doc.beans.BaseEventQuery.ivaratorFstHdfsBaseURIs: ${ivarator.fst.hdfs.base.uris} +doc.beans.BaseEventQuery.ivaratorMaxOpenFiles: ${beq.maxIvaratorOpenFiles} +doc.beans.BaseEventQuery.maxDepthThreshold: ${beq.maxDepthThreshold} +doc.beans.BaseEventQuery.maxEvaluationPipelines: ${beq.evaluationPipelines} +doc.beans.BaseEventQuery.maxFieldIndexRangeSplit: ${beq.fieldIndexRangeSplit} +doc.beans.BaseEventQuery.maxIndexScanTimeMillis: ${query.max.index.scan.ms} +doc.beans.BaseEventQuery.maxIvaratorSources: ${beq.maxIvaratorSources} +doc.beans.BaseEventQuery.maxOrExpansionFstThreshold: ${beq.orExpansionFstThreshold} +doc.beans.BaseEventQuery.maxOrExpansionThreshold: ${beq.orExpansionThreshold} +doc.beans.BaseEventQuery.maxOrRangeThreshold: ${beq.orRangeThreshold} +doc.beans.BaseEventQuery.maxPipelineCachedResults: ${beq.pipelineCachedResults} +doc.beans.BaseEventQuery.maxResults: ${event.query.max.results} +doc.beans.BaseEventQuery.maxUnfieldedExpansionThreshold: ${beq.unfieldedExpansionThreshold} +doc.beans.BaseEventQuery.maxValueExpansionThreshold: ${beq.valueExpansionThreshold} +doc.beans.BaseEventQuery.metadataTableName: ${metadata.table.name} +doc.beans.BaseEventQuery.modelTableName: ${metadata.table.name} +doc.beans.BaseEventQuery.queryThreads: ${shard.query.threads} +doc.beans.BaseEventQuery.reverseIndexTableName: ${rindex.table.name} +doc.beans.BaseEventQuery.shardsPerDayThreshold: ${beq.shardsPerDayThreshold} +doc.beans.BaseEventQuery.tableName: ${shard.table.name} +doc.beans.BaseEventQuery.useFilters: ${event.query.filters.enabled} +doc.beans.BaseEventQuery.zookeeperConfig: ${ivarator.zookeeper.hosts} +doc.beans.BaseModelEventQuery.modelTableName: ${metadata.table.name} +doc.beans.ContentQuery.tableName: ${shard.table.name} 
+doc.beans.DiscoveryQuery.indexTableName: ${index.table.name} +doc.beans.DiscoveryQuery.metadataTableName: ${metadata.table.name} +doc.beans.DiscoveryQuery.modelTableName: ${metadata.table.name} +doc.beans.DiscoveryQuery.reverseIndexTableName: ${rindex.table.name} +doc.beans.DiscoveryQuery.tableName: ${index.table.name} +doc.beans.EdgeEventQuery.modelTableName: ${metadata.table.name} +doc.beans.ErrorCountQuery.indexTableName: ${error.index.table.name} +doc.beans.ErrorCountQuery.metadataTableName: ${error.metadata.table.name} +doc.beans.ErrorCountQuery.reverseIndexTableName: ${error.rindex.table.name} +doc.beans.ErrorCountQuery.tableName: ${error.shard.table.name} +doc.beans.ErrorDiscoveryQuery.indexTableName: ${error.index.table.name} +doc.beans.ErrorDiscoveryQuery.metadataTableName: ${error.metadata.table.name} +doc.beans.ErrorDiscoveryQuery.modelTableName: ${metadata.table.name} +doc.beans.ErrorDiscoveryQuery.reverseIndexTableName: ${error.rindex.table.name} +doc.beans.ErrorDiscoveryQuery.tableName: ${error.index.table.name} +doc.beans.ErrorEventQuery.indexTableName: ${error.index.table.name} +doc.beans.ErrorEventQuery.metadataTableName: ${error.metadata.table.name} +doc.beans.ErrorEventQuery.reverseIndexTableName: ${error.rindex.table.name} +doc.beans.ErrorEventQuery.tableName: ${error.shard.table.name} +doc.beans.ErrorFieldIndexCountQuery.indexTableName: ${error.index.table.name} +doc.beans.ErrorFieldIndexCountQuery.metadataTableName: ${error.metadata.table.name} +doc.beans.ErrorFieldIndexCountQuery.modelTableName: ${metadata.table.name} +doc.beans.ErrorFieldIndexCountQuery.queryThreads: ${shard.query.threads} +doc.beans.ErrorFieldIndexCountQuery.reverseIndexTableName: ${error.rindex.table.name} +doc.beans.ErrorFieldIndexCountQuery.tableName: ${error.shard.table.name} +doc.beans.FacetedQuery.facetHashTableName: ${table.name.facet.hashes} +doc.beans.FacetedQuery.facetMetadataTableName: ${table.name.facet.metadata} +doc.beans.FacetedQuery.facetTableName: ${table.name.facet} +doc.beans.FieldIndexCountQuery.indexTableName: ${index.table.name} +doc.beans.FieldIndexCountQuery.metadataTableName: ${metadata.table.name} +doc.beans.FieldIndexCountQuery.modelTableName: ${metadata.table.name} +doc.beans.FieldIndexCountQuery.queryThreads: ${shard.query.threads} +doc.beans.FieldIndexCountQuery.reverseIndexTableName: ${rindex.table.name} +doc.beans.FieldIndexCountQuery.tableName: ${shard.table.name} +doc.beans.HitHighlights.finalMaxTermThreshold: ${beq.finalMaxTermThreshold} +doc.beans.HitHighlights.hdfsSiteConfigURLs: ${hdfs.site.config.urls} +doc.beans.HitHighlights.indexTableName: ${index.table.name} +doc.beans.HitHighlights.initialMaxTermThreshold: ${beq.initialMaxTermThreshold} +doc.beans.HitHighlights.ivaratorCacheScanTimeoutMinutes: ${query.max.call.time.minutes} +doc.beans.HitHighlights.ivaratorFstHdfsBaseURIs: ${ivarator.fst.hdfs.base.uris} +doc.beans.HitHighlights.ivaratorMaxOpenFiles: ${beq.maxIvaratorOpenFiles} +doc.beans.HitHighlights.maxDepthThreshold: ${beq.maxDepthThreshold} +doc.beans.HitHighlights.maxEvaluationPipelines: ${beq.evaluationPipelines} +doc.beans.HitHighlights.maxFieldIndexRangeSplit: ${beq.fieldIndexRangeSplit} +doc.beans.HitHighlights.maxOrExpansionFstThreshold: ${beq.orExpansionFstThreshold} +doc.beans.HitHighlights.maxOrExpansionThreshold: ${beq.orExpansionThreshold} +doc.beans.HitHighlights.maxOrRangeIvarators: ${beq.maxOrRangeIvarators} +doc.beans.HitHighlights.maxOrRangeThreshold: ${beq.orRangeThreshold} +doc.beans.HitHighlights.maxPipelineCachedResults: 
${beq.pipelineCachedResults} +doc.beans.HitHighlights.maxRangesPerRangeIvarator: ${beq.maxRangesPerRangeIvarator} +doc.beans.HitHighlights.maxUnfieldedExpansionThreshold: ${beq.unfieldedExpansionThreshold} +doc.beans.HitHighlights.maxValueExpansionThreshold: ${beq.valueExpansionThreshold} +doc.beans.HitHighlights.metadataTableName: ${metadata.table.name} +doc.beans.HitHighlights.queryThreads: ${index.query.threads} +doc.beans.HitHighlights.reverseIndexTableName: ${rindex.table.name} +doc.beans.HitHighlights.shardsPerDayThreshold: ${beq.shardsPerDayThreshold} +doc.beans.HitHighlights.tableName: ${shard.table.name} +doc.beans.HitHighlights.zookeeperConfig: ${ivarator.zookeeper.hosts} +doc.beans.IdTranslatorConfiguration.beginDate: ${lookup.uuid.beginDate} +doc.beans.LookupUUIDConfiguration.beginDate: ${lookup.uuid.beginDate} +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.indexTableName: ${error.index.table.name} +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.metadataTableName: ${error.metadata.table.name} +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.reverseIndexTableName: ${error.rindex.table.name} +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.tableName: ${error.shard.table.name} +doc.beans.QueryLogicFactoryConfiguration.maxPageSize: ${query.max.page.size} +doc.beans.QueryLogicFactoryConfiguration.pageByteTrigger: ${query.page.byte.trigger} +doc.beans.QueryMetricsQuery.dateIndexTableName: ${querymetrics.dateindex.table.name} +doc.beans.QueryMetricsQuery.indexTableName: ${querymetrics.index.table.name} +doc.beans.QueryMetricsQuery.metadataTableName: ${querymetrics.metadata.table.name} +doc.beans.QueryMetricsQuery.modelTableName: ${querymetrics.metadata.table.name} +doc.beans.QueryMetricsQuery.reverseIndexTableName: ${querymetrics.rindex.table.name} +doc.beans.QueryMetricsQuery.tableName: ${querymetrics.shard.table.name} +doc.beans.RemoteEventQuery.maxResults: ${event.query.max.results} +doc.beans.RemoteEventQuery.tableName: ${shard.table.name} +doc.beans.TermFrequencyQuery.tableName: ${shard.table.name} + +Values (key: value) +---------------------------------------- +doc.beans.AstValidator.validateFlatten: true +doc.beans.AstValidator.validateJunctions: true +doc.beans.AstValidator.validateLineage: true +doc.beans.AstValidator.validateQueryPropertyMarkers: true +doc.beans.AstValidator.validateReferenceExpressions: true +doc.beans.BaseEventQuery.accumuloPassword: "${accumulo.user.password}" +doc.beans.BaseEventQuery.auditType: "NONE" +doc.beans.BaseEventQuery.baseIteratorPriority: 100 +doc.beans.BaseEventQuery.collapseUids: false +doc.beans.BaseEventQuery.collapseUidsThreshold: -1 +doc.beans.BaseEventQuery.collectQueryMetrics: true +doc.beans.BaseEventQuery.contentFieldNames: "CONTENT" +doc.beans.BaseEventQuery.dateIndexTableName: "${table.name.dateIndex}" +doc.beans.BaseEventQuery.dateIndexThreads: 20 +doc.beans.BaseEventQuery.defaultDateTypeName: "EVENT" +doc.beans.BaseEventQuery.disableIndexOnlyDocuments: false +doc.beans.BaseEventQuery.docAggregationThresholdMs: -1 +doc.beans.BaseEventQuery.enricherClassNames: "datawave.query.enrich.DatawaveTermFrequencyEnricher" +doc.beans.BaseEventQuery.evaluationOnlyFields: "" +doc.beans.BaseEventQuery.eventPerDayThreshold: 40000 +doc.beans.BaseEventQuery.finalMaxTermThreshold: 2000 +doc.beans.BaseEventQuery.fullTableScanEnabled: false +doc.beans.BaseEventQuery.hdfsSiteConfigURLs: "file:///etc/hadoop/conf/core-site.xml,file:///etc/hadoop/conf/hdfs-site.xml" 
+doc.beans.BaseEventQuery.includeDataTypeAsField: false +doc.beans.BaseEventQuery.includeHierarchyFields: false +doc.beans.BaseEventQuery.indexLookupThreads: 100 +doc.beans.BaseEventQuery.indexOnlyFilterFunctionsEnabled: false +doc.beans.BaseEventQuery.indexTableName: "${table.name.shardIndex}" +doc.beans.BaseEventQuery.initialMaxTermThreshold: 2000 +doc.beans.BaseEventQuery.ivaratorCacheBufferSize: 10000 +doc.beans.BaseEventQuery.ivaratorCacheScanPersistThreshold: 100000 +doc.beans.BaseEventQuery.ivaratorCacheScanTimeoutMinutes: 60 +doc.beans.BaseEventQuery.ivaratorFstHdfsBaseURIs: "hdfs:///IvaratorCache" +doc.beans.BaseEventQuery.ivaratorMaxOpenFiles: 100 +doc.beans.BaseEventQuery.lazySetMechanismEnabled: false +doc.beans.BaseEventQuery.logTimingDetails: false +doc.beans.BaseEventQuery.logicDescription: "Retrieve sharded events/documents, leveraging the global index tables as needed" +doc.beans.BaseEventQuery.maxDepthThreshold: 2000 +doc.beans.BaseEventQuery.maxEvaluationPipelines: 16 +doc.beans.BaseEventQuery.maxFieldIndexRangeSplit: 16 +doc.beans.BaseEventQuery.maxIndexScanTimeMillis: 3.1536E10 +doc.beans.BaseEventQuery.maxIvaratorSources: 20 +doc.beans.BaseEventQuery.maxOrExpansionFstThreshold: 750 +doc.beans.BaseEventQuery.maxOrExpansionThreshold: 500 +doc.beans.BaseEventQuery.maxOrRangeThreshold: 10 +doc.beans.BaseEventQuery.maxPipelineCachedResults: 16 +doc.beans.BaseEventQuery.maxResults: -1 +doc.beans.BaseEventQuery.maxUnfieldedExpansionThreshold: 50 +doc.beans.BaseEventQuery.maxValueExpansionThreshold: 50 +doc.beans.BaseEventQuery.metadataTableName: "${table.name.metadata}" +doc.beans.BaseEventQuery.minimumSelectivity: 0.2 +doc.beans.BaseEventQuery.modelName: "DATAWAVE" +doc.beans.BaseEventQuery.modelTableName: "${table.name.metadata}" +doc.beans.BaseEventQuery.querySyntaxParsers.JEXL: null +doc.beans.BaseEventQuery.queryThreads: 100 +doc.beans.BaseEventQuery.realmSuffixExclusionPatterns: "<.*>$" +doc.beans.BaseEventQuery.reverseIndexTableName: "${table.name.shardReverseIndex}" +doc.beans.BaseEventQuery.sendTimingToStatsd: false +doc.beans.BaseEventQuery.shardsPerDayThreshold: 20 +doc.beans.BaseEventQuery.statsdHost: "localhost" +doc.beans.BaseEventQuery.statsdPort: 8125 +doc.beans.BaseEventQuery.tableName: "${table.name.shard}" +doc.beans.BaseEventQuery.tfAggregationThresholdMs: -1 +doc.beans.BaseEventQuery.useEnrichers: true +doc.beans.BaseEventQuery.useFilters: false +doc.beans.BaseEventQuery.zookeeperConfig: "" +doc.beans.BaseModelEventQuery.modelName: "DATAWAVE" +doc.beans.BaseModelEventQuery.modelTableName: "${table.name.metadata}" +doc.beans.ContentQuery.auditType: "NONE" +doc.beans.ContentQuery.logicDescription: "Query that returns a document given the document identifier" +doc.beans.ContentQuery.maxResults: -1 +doc.beans.ContentQuery.maxWork: -1 +doc.beans.ContentQuery.tableName: "${table.name.shard}" +doc.beans.CountQuery.logicDescription: "Retrieve event/document counts based on your search criteria" +doc.beans.DefaultQueryPlanner.compressOptionMappings: true +doc.beans.DefaultQueryPlanner[0]: 2611 +doc.beans.DefaultQueryPlanner[1]: true +doc.beans.DiscoveryQuery.allowLeadingWildcard: true +doc.beans.DiscoveryQuery.auditType: "NONE" +doc.beans.DiscoveryQuery.fullTableScanEnabled: false +doc.beans.DiscoveryQuery.indexTableName: "${table.name.shardIndex}" +doc.beans.DiscoveryQuery.logicDescription: "Discovery query that returns information from the index about the supplied term(s)" +doc.beans.DiscoveryQuery.maxResults: -1 +doc.beans.DiscoveryQuery.maxWork: -1 
+doc.beans.DiscoveryQuery.metadataTableName: "${table.name.metadata}" +doc.beans.DiscoveryQuery.modelName: "DATAWAVE" +doc.beans.DiscoveryQuery.modelTableName: "${table.name.metadata}" +doc.beans.DiscoveryQuery.reverseIndexTableName: "${table.name.shardReverseIndex}" +doc.beans.DiscoveryQuery.tableName: "${table.name.shardIndex}" +doc.beans.EdgeEventQuery.edgeModelName: "DATAWAVE_EDGE" +doc.beans.EdgeEventQuery.logicDescription: "Use results of an EdgeQuery to obtain events/documents that created the given edge" +doc.beans.EdgeEventQuery.modelTableName: "${table.name.metadata}" +doc.beans.ErrorCountQuery.indexTableName: "${table.name.errors.shardIndex}" +doc.beans.ErrorCountQuery.logicDescription: "Retrieve counts of errored events based on your search criteria" +doc.beans.ErrorCountQuery.metadataTableName: "${table.name.errors.metadata}" +doc.beans.ErrorCountQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}" +doc.beans.ErrorCountQuery.tableName: "${table.name.errors.shard}" +doc.beans.ErrorDiscoveryQuery.allowLeadingWildcard: true +doc.beans.ErrorDiscoveryQuery.auditType: "NONE" +doc.beans.ErrorDiscoveryQuery.fullTableScanEnabled: false +doc.beans.ErrorDiscoveryQuery.indexTableName: "${table.name.errors.shardIndex}" +doc.beans.ErrorDiscoveryQuery.logicDescription: "Discovery query that returns information from the ingest errors index about the supplied term(s)" +doc.beans.ErrorDiscoveryQuery.maxResults: -1 +doc.beans.ErrorDiscoveryQuery.maxWork: -1 +doc.beans.ErrorDiscoveryQuery.metadataTableName: "${table.name.errors.metadata}" +doc.beans.ErrorDiscoveryQuery.modelName: "DATAWAVE" +doc.beans.ErrorDiscoveryQuery.modelTableName: "${table.name.metadata}" +doc.beans.ErrorDiscoveryQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}" +doc.beans.ErrorDiscoveryQuery.tableName: "${table.name.errors.shardIndex}" +doc.beans.ErrorEventQuery.dateIndexTableName: "" +doc.beans.ErrorEventQuery.includeHierarchyFields: false +doc.beans.ErrorEventQuery.indexTableName: "${table.name.errors.shardIndex}" +doc.beans.ErrorEventQuery.logicDescription: "Retrieve events/documents that encountered one or more errors during ingest" +doc.beans.ErrorEventQuery.metadataTableName: "${table.name.errors.metadata}" +doc.beans.ErrorEventQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}" +doc.beans.ErrorEventQuery.tableName: "${table.name.errors.shard}" +doc.beans.ErrorFieldIndexCountQuery.auditType: "NONE" +doc.beans.ErrorFieldIndexCountQuery.indexTableName: "${table.name.errors.shardIndex}" +doc.beans.ErrorFieldIndexCountQuery.logicDescription: "FieldIndex count query (experimental)" +doc.beans.ErrorFieldIndexCountQuery.maxResults: -1 +doc.beans.ErrorFieldIndexCountQuery.maxUniqueValues: 20000 +doc.beans.ErrorFieldIndexCountQuery.maxWork: -1 +doc.beans.ErrorFieldIndexCountQuery.metadataTableName: "${table.name.errors.metadata}" +doc.beans.ErrorFieldIndexCountQuery.modelName: "DATAWAVE" +doc.beans.ErrorFieldIndexCountQuery.modelTableName: "${table.name.metadata}" +doc.beans.ErrorFieldIndexCountQuery.queryThreads: 100 +doc.beans.ErrorFieldIndexCountQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}" +doc.beans.ErrorFieldIndexCountQuery.tableName: "${table.name.errors.shard}" +doc.beans.EventQuery.logicDescription: "Query the sharded event/document schema, leveraging the global index tables as needed" +doc.beans.FacetedQuery.auditType: "NONE" +doc.beans.FacetedQuery.facetHashTableName: "datawave.facetHashes" +doc.beans.FacetedQuery.facetMetadataTableName: 
"datawave.facetMetadata" +doc.beans.FacetedQuery.facetTableName: "datawave.facets" +doc.beans.FacetedQuery.facetedSearchType: "FIELD_VALUE_FACETS" +doc.beans.FacetedQuery.fullTableScanEnabled: false +doc.beans.FacetedQuery.logicDescription: "Faceted search over indexed fields, returning aggregate counts for field values" +doc.beans.FacetedQuery.maximumFacetGrouping: 200 +doc.beans.FacetedQuery.minimumFacet: 1 +doc.beans.FacetedQuery.querySyntaxParsers.JEXL: null +doc.beans.FacetedQuery.streaming: true +doc.beans.FieldIndexCountQuery.auditType: "NONE" +doc.beans.FieldIndexCountQuery.indexTableName: "${table.name.shardIndex}" +doc.beans.FieldIndexCountQuery.logicDescription: "Indexed Fields Only: Given FIELDNAME returns counts for each unique value. Given FIELDNAME:FIELDVALUE returns count for only that value." +doc.beans.FieldIndexCountQuery.maxResults: -1 +doc.beans.FieldIndexCountQuery.maxUniqueValues: 20000 +doc.beans.FieldIndexCountQuery.maxWork: -1 +doc.beans.FieldIndexCountQuery.metadataTableName: "${table.name.metadata}" +doc.beans.FieldIndexCountQuery.modelName: "DATAWAVE" +doc.beans.FieldIndexCountQuery.modelTableName: "${table.name.metadata}" +doc.beans.FieldIndexCountQuery.queryThreads: 100 +doc.beans.FieldIndexCountQuery.reverseIndexTableName: "${table.name.shardReverseIndex}" +doc.beans.FieldIndexCountQuery.tableName: "${table.name.shard}" +doc.beans.HitHighlights.auditType: "NONE" +doc.beans.HitHighlights.eventPerDayThreshold: 40000 +doc.beans.HitHighlights.finalMaxTermThreshold: 2000 +doc.beans.HitHighlights.fullTableScanEnabled: false +doc.beans.HitHighlights.hdfsSiteConfigURLs: "file:///etc/hadoop/conf/core-site.xml,file:///etc/hadoop/conf/hdfs-site.xml" +doc.beans.HitHighlights.includeDataTypeAsField: false +doc.beans.HitHighlights.includeGroupingContext: false +doc.beans.HitHighlights.indexTableName: "${table.name.shardIndex}" +doc.beans.HitHighlights.initialMaxTermThreshold: 2000 +doc.beans.HitHighlights.ivaratorCacheBufferSize: 10000 +doc.beans.HitHighlights.ivaratorCacheScanPersistThreshold: 100000 +doc.beans.HitHighlights.ivaratorCacheScanTimeoutMinutes: 60 +doc.beans.HitHighlights.ivaratorFstHdfsBaseURIs: "hdfs:///IvaratorCache" +doc.beans.HitHighlights.ivaratorMaxOpenFiles: 100 +doc.beans.HitHighlights.logicDescription: "Fast boolean query over indexed fields, only returning fields queried on" +doc.beans.HitHighlights.maxDepthThreshold: 2000 +doc.beans.HitHighlights.maxEvaluationPipelines: 16 +doc.beans.HitHighlights.maxFieldIndexRangeSplit: 16 +doc.beans.HitHighlights.maxOrExpansionFstThreshold: 750 +doc.beans.HitHighlights.maxOrExpansionThreshold: 500 +doc.beans.HitHighlights.maxOrRangeIvarators: 10 +doc.beans.HitHighlights.maxOrRangeThreshold: 10 +doc.beans.HitHighlights.maxPipelineCachedResults: 16 +doc.beans.HitHighlights.maxRangesPerRangeIvarator: 5 +doc.beans.HitHighlights.maxUnfieldedExpansionThreshold: 50 +doc.beans.HitHighlights.maxValueExpansionThreshold: 50 +doc.beans.HitHighlights.metadataTableName: "${table.name.metadata}" +doc.beans.HitHighlights.minimumSelectivity: 0.2 +doc.beans.HitHighlights.querySyntaxParsers.JEXL: null +doc.beans.HitHighlights.queryThreads: 100 +doc.beans.HitHighlights.reverseIndexTableName: "${table.name.shardReverseIndex}" +doc.beans.HitHighlights.shardsPerDayThreshold: 20 +doc.beans.HitHighlights.tableName: "${table.name.shard}" +doc.beans.HitHighlights.useEnrichers: false +doc.beans.HitHighlights.zookeeperConfig: "" +doc.beans.IdTranslatorConfiguration.beginDate: 20100101 +doc.beans.IdTranslatorConfiguration.columnVisibility: 
"" +doc.beans.IndexStatsQuery.auditType: "NONE" +doc.beans.IndexStatsQuery.selectorExtractor: null +doc.beans.LookupUUIDConfiguration.beginDate: 20100101 +doc.beans.LookupUUIDConfiguration.columnVisibility: "" +doc.beans.LuceneUUIDEventQuery.auditType: "NONE" +doc.beans.LuceneUUIDEventQuery.logicDescription: "Composite query logic that retrieves records from the event and error tables, based on known UUID fields, ie, those configured via UUIDTypeList in QueryLogicFactory.xml" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.auditType: "NONE" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.connPoolName: "UUID" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.dateIndexTableName: "" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.indexTableName: "${table.name.errors.shardIndex}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.logicDescription: "Lucene query for event/document UUIDs for events that encountered errors at ingest time" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.mandatoryQuerySyntax: "LUCENE-UUID" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.metadataTableName: "${table.name.errors.metadata}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.reverseIndexTableName: "${table.name.errors.shardReverseIndex}" +doc.beans.LuceneUUIDEventQuery.queryLogics.ErrorEventQuery.tableName: "${table.name.errors.shard}" +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.auditType: "NONE" +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.connPoolName: "UUID" +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.logicDescription: "Lucene query for event/document UUIDs" +doc.beans.LuceneUUIDEventQuery.queryLogics.EventQuery.mandatoryQuerySyntax: "LUCENE-UUID" +doc.beans.LuceneUUIDEventQuery.selectorExtractor: null +doc.beans.MyRemoteUserOps.queryServiceHost: "localhost" +doc.beans.MyRemoteUserOps.queryServicePort: 8443 +doc.beans.MyRemoteUserOps.queryServiceScheme: "https" +doc.beans.MyRemoteUserOps.queryServiceURI: "/DataWave/Security/User/" +doc.beans.QueryLogicFactoryConfiguration.maxPageSize: 10000 +doc.beans.QueryLogicFactoryConfiguration.pageByteTrigger: 0 +doc.beans.QueryMetricsQuery.auditType: "NONE" +doc.beans.QueryMetricsQuery.dateIndexTableName: "${table.name.queryMetrics.dateIndex}" +doc.beans.QueryMetricsQuery.includeHierarchyFields: false +doc.beans.QueryMetricsQuery.indexTableName: "${table.name.queryMetrics.shardIndex}" +doc.beans.QueryMetricsQuery.logicDescription: "Retrieve query metrics based on the given search term(s)" +doc.beans.QueryMetricsQuery.metadataTableName: "${table.name.queryMetrics.metadata}" +doc.beans.QueryMetricsQuery.modelName: "NONE" +doc.beans.QueryMetricsQuery.modelTableName: "${table.name.queryMetrics.metadata}" +doc.beans.QueryMetricsQuery.reverseIndexTableName: "${table.name.queryMetrics.shardReverseIndex}" +doc.beans.QueryMetricsQuery.tableName: "${table.name.queryMetrics.shard}" +doc.beans.RemoteEventQuery.auditType: "NONE" +doc.beans.RemoteEventQuery.checkpointable: false +doc.beans.RemoteEventQuery.logicDescription: "Retrieve sharded events/documents, leveraging the global index tables as needed" +doc.beans.RemoteEventQuery.maxResults: -1 +doc.beans.RemoteEventQuery.remoteQueryLogic: "EventQuery" +doc.beans.RemoteEventQuery.tableName: "${table.name.shard}" +doc.beans.RemoteQueryService.queryServiceHost: "query" +doc.beans.RemoteQueryService.queryServicePort: 8443 +doc.beans.RemoteQueryService.queryServiceScheme: "https" 
+doc.beans.RemoteQueryService.queryServiceURI: "/query/v1/" +doc.beans.TLDEventQuery.collapseUids: false +doc.beans.TLDEventQuery.indexFilteringClassNames: "datawave.query.function.NormalizedVersionPredicate" +doc.beans.TermFrequencyQuery.auditType: "NONE" +doc.beans.TermFrequencyQuery.logicDescription: "Query that returns data from the term frequency query table" +doc.beans.TermFrequencyQuery.maxResults: -1 +doc.beans.TermFrequencyQuery.maxWork: -1 +doc.beans.TermFrequencyQuery.tableName: "${table.name.shard}" +doc.beans.TimedVisitorManager.debugEnabled: false +doc.beans.TimedVisitorManager.validateAst: false +doc.beans.TokenizedLuceneToJexlQueryParser.tokenizeUnfieldedQueries: true +doc.beans.skipTokenizeFields: "DOMETA" +doc.beans.tokenizeFields: "CONTENT" + +Refs (key: ref) +---------------------------------------- +doc.beans.BaseEventQuery.dateIndexHelperFactory: dateIndexHelperFactory +doc.beans.BaseEventQuery.eventQueryDataDecoratorTransformer: EventQueryDataDecoratorTransformer +doc.beans.BaseEventQuery.filterOptions: BaseEventQueryFilterOptions +doc.beans.BaseEventQuery.hierarchyFieldOptions: BaseEventQueryHierarchyFieldOptions +doc.beans.BaseEventQuery.ivaratorCacheDirConfigs: IvaratorCacheDirConfigs +doc.beans.BaseEventQuery.metadataHelperFactory: metadataHelperFactory +doc.beans.BaseEventQuery.queryPlanner: DefaultQueryPlanner +doc.beans.BaseEventQuery.selectorExtractor: DatawaveSelectorExtractor +doc.beans.DatawaveSelectorExtractor.luceneToJexlQueryParser: LuceneToJexlQueryParser +doc.beans.DefaultQueryPlanner.queryModelProviderFactory: queryModelProviderFactory +doc.beans.DefaultQueryPlanner.visitorManager: TimedVisitorManager +doc.beans.DiscoveryQuery.metadataHelperFactory: metadataHelperFactory +doc.beans.DiscoveryQuery.selectorExtractor: DatawaveSelectorExtractor +doc.beans.EdgeEventQuery.edgeDictionaryProvider: edgeDictionaryProvider +doc.beans.EdgeEventQuery.edgeQueryModel: edgeEventQueryModel +doc.beans.ErrorDiscoveryQuery.metadataHelperFactory: metadataHelperFactory +doc.beans.ErrorDiscoveryQuery.selectorExtractor: DatawaveSelectorExtractor +doc.beans.ErrorFieldIndexCountQuery.metadataHelperFactory: metadataHelperFactory +doc.beans.ErrorFieldIndexCountQuery.selectorExtractor: DatawaveSelectorExtractor +doc.beans.FieldIndexCountQuery.metadataHelperFactory: metadataHelperFactory +doc.beans.FieldIndexCountQuery.selectorExtractor: DatawaveSelectorExtractor +doc.beans.HitHighlights.ivaratorCacheDirConfigs: IvaratorCacheDirConfigs +doc.beans.HitHighlights.metadataHelperFactory: metadataHelperFactory +doc.beans.HitHighlights.selectorExtractor: DatawaveSelectorExtractor +doc.beans.IdTranslatorConfiguration.uuidTypes: UUIDTypeList +doc.beans.LookupUUIDConfiguration.uuidTypes: UUIDTypeList +doc.beans.LuceneToJexlQueryParser.allowedFunctions: allowedQueryFunctions +doc.beans.LuceneToJexlQueryParser.skipTokenizeUnfieldedFields: skipTokenizeFields +doc.beans.LuceneToJexlUUIDQueryParser.allowedFunctions: allowedQueryFunctions +doc.beans.LuceneToJexlUUIDQueryParser.uuidTypes: UUIDTypeList +doc.beans.MyRemoteUserOps.responseObjectFactory: responseObjectFactory +doc.beans.QueryMetricsQuery.metadataHelperFactory: metadataHelperFactory +doc.beans.RemoteEventQuery.remoteQueryService: RemoteQueryService +doc.beans.RemoteEventQuery.selectorExtractor: DatawaveSelectorExtractor +doc.beans.RemoteEventQuery.userOperations: MyRemoteUserOps +doc.beans.RemoteQueryService.responseObjectFactory: responseObjectFactory +doc.beans.TimedVisitorManager.validator: AstValidator 
+doc.beans.TokenizedLuceneToJexlQueryParser.allowedFunctions: allowedQueryFunctions +doc.beans.TokenizedLuceneToJexlQueryParser.skipTokenizeUnfieldedFields: skipTokenizeFields +doc.beans.TokenizedLuceneToJexlQueryParser.tokenizedFields: tokenizeFields +doc.beans.allowedQueryFunctions.parser: LuceneToJexlQueryParser +doc.beans.baseQueryLogic.markingFunctions: markingFunctions +doc.beans.baseQueryLogic.responseObjectFactory: responseObjectFactory +doc.beans.queryLogicFactory.queryLogicFactoryConfiguration: QueryLogicFactoryConfiguration + +Effective Properties (name=value) +---------------------------------------- +shard.table.name=${table.name.shard} +event.query.max.results=-1 +accumulo.user.password=${accumulo.user.password} +shard.table.name=${table.name.shard} +date.index.table.name=${table.name.dateIndex} +default.date.type.name=EVENT +metadata.table.name=${table.name.metadata} +index.table.name=${table.name.shardIndex} +rindex.table.name=${table.name.shardReverseIndex} +event.query.max.results=-1 +shard.query.threads=100 +index.query.threads=100 +date.index.threads=20 +beq.fullTableScanEnabled=false +disable.index.only.documents=false +enable.index.only.filter.functions=false +include.hierarchy.fields=false +beq.baseIteratorPriority=100 +query.max.index.scan.ms=31536000000 +query.collapse.uids=false +query.collapse.uids.threshold=-1 +event.query.filters.enabled=false +beq.shardsPerDayThreshold=20 +beq.initialMaxTermThreshold=2000 +beq.finalMaxTermThreshold=2000 +beq.maxDepthThreshold=2000 +beq.unfieldedExpansionThreshold=50 +beq.valueExpansionThreshold=50 +beq.orExpansionThreshold=500 +beq.orRangeThreshold=10 +beq.orExpansionFstThreshold=750 +beq.fieldIndexRangeSplit=16 +beq.maxIvaratorSources=20 +beq.evaluationPipelines=16 +beq.pipelineCachedResults=16 +hdfs.site.config.urls=file\:///etc/hadoop/conf/core-site.xml,file\:///etc/hadoop/conf/hdfs-site.xml +ivarator.zookeeper.hosts= +ivarator.fst.hdfs.base.uris=hdfs\:///IvaratorCache +beq.maxIvaratorOpenFiles=100 +query.max.call.time.minutes=60 +metadata.table.name=${table.name.metadata} +evaluation.only.fields= +metadata.table.name=${table.name.metadata} +lookup.uuid.beginDate=20100101 +lookup.uuid.beginDate=20100101 +query.max.page.size=10000 +query.page.byte.trigger=0 +shard.table.name=${table.name.shard} +index.table.name=${table.name.shardIndex} +rindex.table.name=${table.name.shardReverseIndex} +metadata.table.name=${table.name.metadata} +shard.query.threads=100 +metadata.table.name=${table.name.metadata} +shard.table.name=${table.name.shard} +shard.table.name=${table.name.shard} +error.shard.table.name=${table.name.errors.shard} +error.index.table.name=${table.name.errors.shardIndex} +error.rindex.table.name=${table.name.errors.shardReverseIndex} +error.metadata.table.name=${table.name.errors.metadata} +shard.query.threads=100 +metadata.table.name=${table.name.metadata} +error.shard.table.name=${table.name.errors.shard} +error.metadata.table.name=${table.name.errors.metadata} +error.index.table.name=${table.name.errors.shardIndex} +error.rindex.table.name=${table.name.errors.shardReverseIndex} +error.shard.table.name=${table.name.errors.shard} +error.metadata.table.name=${table.name.errors.metadata} +error.index.table.name=${table.name.errors.shardIndex} +error.rindex.table.name=${table.name.errors.shardReverseIndex} +querymetrics.metadata.table.name=${table.name.queryMetrics.metadata} +querymetrics.shard.table.name=${table.name.queryMetrics.shard} +querymetrics.dateindex.table.name=${table.name.queryMetrics.dateIndex} 
+querymetrics.metadata.table.name=${table.name.queryMetrics.metadata} +querymetrics.index.table.name=${table.name.queryMetrics.shardIndex} +querymetrics.rindex.table.name=${table.name.queryMetrics.shardReverseIndex} +index.table.name=${table.name.shardIndex} +index.table.name=${table.name.shardIndex} +rindex.table.name=${table.name.shardReverseIndex} +metadata.table.name=${table.name.metadata} +metadata.table.name=${table.name.metadata} +error.index.table.name=${table.name.errors.shardIndex} +error.index.table.name=${table.name.errors.shardIndex} +error.rindex.table.name=${table.name.errors.shardReverseIndex} +metadata.table.name=${table.name.metadata} +error.metadata.table.name=${table.name.errors.metadata} +error.shard.table.name=${table.name.errors.shard} +error.metadata.table.name=${table.name.errors.metadata} +error.index.table.name=${table.name.errors.shardIndex} +error.rindex.table.name=${table.name.errors.shardReverseIndex} +table.name.facet=datawave.facets +table.name.facet.metadata=datawave.facetMetadata +table.name.facet.hashes=datawave.facetHashes +shard.table.name=${table.name.shard} +metadata.table.name=${table.name.metadata} +index.table.name=${table.name.shardIndex} +rindex.table.name=${table.name.shardReverseIndex} +index.query.threads=100 +beq.shardsPerDayThreshold=20 +beq.initialMaxTermThreshold=2000 +beq.finalMaxTermThreshold=2000 +beq.maxDepthThreshold=2000 +beq.unfieldedExpansionThreshold=50 +beq.valueExpansionThreshold=50 +beq.orExpansionThreshold=500 +beq.orRangeThreshold=10 +beq.maxRangesPerRangeIvarator=5 +beq.maxOrRangeIvarators=10 +beq.orExpansionFstThreshold=750 +beq.fieldIndexRangeSplit=16 +beq.evaluationPipelines=16 +beq.pipelineCachedResults=16 +hdfs.site.config.urls=file\:///etc/hadoop/conf/core-site.xml,file\:///etc/hadoop/conf/hdfs-site.xml +ivarator.zookeeper.hosts= +ivarator.fst.hdfs.base.uris=hdfs\:///IvaratorCache +beq.maxIvaratorOpenFiles=100 +query.max.call.time.minutes=60 +metadata.table.name=${table.name.metadata} + +Effective Yml +---------------------------------------- +shard: + table: + name: "${table.name.shard}" + query: + threads: "100" +event: + query: + max: + results: "-1" + filters: + enabled: "false" +accumulo: + user: + password: "${accumulo.user.password}" +date: + index: + table: + name: "${table.name.dateIndex}" + threads: "20" +default: + date: + type: + name: "EVENT" +metadata: + table: + name: "${table.name.metadata}" +index: + table: + name: "${table.name.shardIndex}" + query: + threads: "100" +rindex: + table: + name: "${table.name.shardReverseIndex}" +beq: + fullTableScanEnabled: "false" + baseIteratorPriority: "100" + shardsPerDayThreshold: "20" + initialMaxTermThreshold: "2000" + finalMaxTermThreshold: "2000" + maxDepthThreshold: "2000" + unfieldedExpansionThreshold: "50" + valueExpansionThreshold: "50" + orExpansionThreshold: "500" + orRangeThreshold: "10" + orExpansionFstThreshold: "750" + fieldIndexRangeSplit: "16" + maxIvaratorSources: "20" + evaluationPipelines: "16" + pipelineCachedResults: "16" + maxIvaratorOpenFiles: "100" + maxRangesPerRangeIvarator: "5" + maxOrRangeIvarators: "10" +disable: + index: + only: + documents: "false" +enable: + index: + only: + filter: + functions: "false" +include: + hierarchy: + fields: "false" +query: + max: + index: + scan: + ms: "31536000000" + call: + time: + minutes: "60" + page: + size: "10000" + collapse: + uids: "false" + '[uids.threshold]': "-1" + threshold: "-1" + page: + byte: + trigger: "0" +hdfs: + site: + config: + urls: 
"file:///etc/hadoop/conf/core-site.xml,file:///etc/hadoop/conf/hdfs-site.xml" +ivarator: + zookeeper: + hosts: "" + fst: + hdfs: + base: + uris: "hdfs:///IvaratorCache" +evaluation: + only: + fields: "" +lookup: + uuid: + beginDate: "20100101" +error: + shard: + table: + name: "${table.name.errors.shard}" + index: + table: + name: "${table.name.errors.shardIndex}" + rindex: + table: + name: "${table.name.errors.shardReverseIndex}" + metadata: + table: + name: "${table.name.errors.metadata}" +querymetrics: + metadata: + table: + name: "${table.name.queryMetrics.metadata}" + shard: + table: + name: "${table.name.queryMetrics.shard}" + dateindex: + table: + name: "${table.name.queryMetrics.dateIndex}" + index: + table: + name: "${table.name.queryMetrics.shardIndex}" + rindex: + table: + name: "${table.name.queryMetrics.shardReverseIndex}" +table: + name: + facet: "datawave.facets" + '[facet.metadata]': "datawave.facetMetadata" + metadata: "datawave.facetMetadata" + '[facet.hashes]': "datawave.facetHashes" + hashes: "datawave.facetHashes" \ No newline at end of file diff --git a/microservices/microservice-parent b/microservices/microservice-parent index 11435518737..6207c9d6576 160000 --- a/microservices/microservice-parent +++ b/microservices/microservice-parent @@ -1 +1 @@ -Subproject commit 114355187371a069b66cc3d56b7f44eb7cb55ca2 +Subproject commit 6207c9d65768c191773099a6f39f2b935aa52acd diff --git a/microservices/microservice-service-parent b/microservices/microservice-service-parent index f0644113ab9..8064d20ccf5 160000 --- a/microservices/microservice-service-parent +++ b/microservices/microservice-service-parent @@ -1 +1 @@ -Subproject commit f0644113ab913c77e5b19b1a2210e28d841390d6 +Subproject commit 8064d20ccf5fb48dbf1d309503ffafa8ddaafb6c diff --git a/microservices/pom.xml b/microservices/pom.xml index 3d3c7a0611e..396af33b295 100644 --- a/microservices/pom.xml +++ b/microservices/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave datawave-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT gov.nsa.datawave.microservice datawave-microservice-build-parent @@ -37,5 +37,16 @@ microservice-service-parent + + configcheck + + + configcheck + + + + configcheck + + diff --git a/microservices/services/accumulo b/microservices/services/accumulo index e8f790d304a..7ecb4dd5602 160000 --- a/microservices/services/accumulo +++ b/microservices/services/accumulo @@ -1 +1 @@ -Subproject commit e8f790d304ac240680a11255924c01837dc0220b +Subproject commit 7ecb4dd5602395e4f41a48673cd5cc9e3e966f10 diff --git a/microservices/services/audit b/microservices/services/audit index f66cc4a9abe..2714c97b19f 160000 --- a/microservices/services/audit +++ b/microservices/services/audit @@ -1 +1 @@ -Subproject commit f66cc4a9abe75d4d95aeadf2e58097c7739c83c0 +Subproject commit 2714c97b19fdc9635fc7f18e3d1489e6d92a017d diff --git a/microservices/services/authorization b/microservices/services/authorization index a730d64c58b..292be85b633 160000 --- a/microservices/services/authorization +++ b/microservices/services/authorization @@ -1 +1 @@ -Subproject commit a730d64c58b0495963a1b0e188cb1d8cb3cb9fa5 +Subproject commit 292be85b633c6f0f99ae9b3b382a4373ffc720a7 diff --git a/microservices/services/config b/microservices/services/config index 5155789b277..fc187c0e4b9 160000 --- a/microservices/services/config +++ b/microservices/services/config @@ -1 +1 @@ -Subproject commit 5155789b2775cbf754d4634cc1a00d8c0a7ea8ad +Subproject commit fc187c0e4b90c3eaea16dad3ba9ba5330262bcf7 diff --git a/microservices/services/dictionary 
b/microservices/services/dictionary index 1843b7a44ce..653cf5b5963 160000 --- a/microservices/services/dictionary +++ b/microservices/services/dictionary @@ -1 +1 @@ -Subproject commit 1843b7a44ceb3321cc26bc691c830be31a29183e +Subproject commit 653cf5b59634151b3eea881133fe604a0050df6e diff --git a/microservices/services/file-provider b/microservices/services/file-provider new file mode 160000 index 00000000000..27080f3b943 --- /dev/null +++ b/microservices/services/file-provider @@ -0,0 +1 @@ +Subproject commit 27080f3b943722f84aaa8af93e4fda7b41b50bd9 diff --git a/microservices/services/hazelcast b/microservices/services/hazelcast index 1dc8d74fea8..8abc2ef7e91 160000 --- a/microservices/services/hazelcast +++ b/microservices/services/hazelcast @@ -1 +1 @@ -Subproject commit 1dc8d74fea842123af714cd097403242ddc61d83 +Subproject commit 8abc2ef7e91c90bee920b129f67d540c61217f0a diff --git a/microservices/services/map b/microservices/services/map new file mode 160000 index 00000000000..473ec437082 --- /dev/null +++ b/microservices/services/map @@ -0,0 +1 @@ +Subproject commit 473ec437082e661f51132a9254877b6bb27def84 diff --git a/microservices/services/mapreduce-query b/microservices/services/mapreduce-query new file mode 160000 index 00000000000..22af8f93c88 --- /dev/null +++ b/microservices/services/mapreduce-query @@ -0,0 +1 @@ +Subproject commit 22af8f93c887db097c09078a477323be7877b184 diff --git a/microservices/services/modification b/microservices/services/modification new file mode 160000 index 00000000000..768007f3195 --- /dev/null +++ b/microservices/services/modification @@ -0,0 +1 @@ +Subproject commit 768007f3195f688149666ba8a2a10b3d56d30fbf diff --git a/microservices/services/pom.xml b/microservices/services/pom.xml index 63443ae7071..af7613a61dd 100644 --- a/microservices/services/pom.xml +++ b/microservices/services/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.microservice datawave-microservice-build-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-microservice-service-build-parent pom @@ -66,6 +66,17 @@ dictionary + + submodule-service-file-provider + + + file-provider/pom.xml + + + + file-provider + + submodule-service-hazelcast @@ -77,6 +88,50 @@ hazelcast + + submodule-service-mapreduce-query + + + mapreduce-query/pom.xml + + + + mapreduce-query + + + + submodule-service-modification + + + modification/pom.xml + + + + modification + + + + submodule-service-query + + + query/pom.xml + + + + query + + + + submodule-service-query-executor + + + query-executor/pom.xml + + + + query-executor + + submodule-service-query-metric diff --git a/microservices/services/query b/microservices/services/query new file mode 160000 index 00000000000..14c65febd3d --- /dev/null +++ b/microservices/services/query @@ -0,0 +1 @@ +Subproject commit 14c65febd3d10548b972f8d3d213f82f9fa86918 diff --git a/microservices/services/query-executor b/microservices/services/query-executor new file mode 160000 index 00000000000..4517c4c29ea --- /dev/null +++ b/microservices/services/query-executor @@ -0,0 +1 @@ +Subproject commit 4517c4c29ea3c79499a306d7986bb81a00f12517 diff --git a/microservices/services/query-metric b/microservices/services/query-metric index 01c174971f2..4bce0c89219 160000 --- a/microservices/services/query-metric +++ b/microservices/services/query-metric @@ -1 +1 @@ -Subproject commit 01c174971f26cfff4f5efb95516681edef8adc64 +Subproject commit 4bce0c89219d8bb7901f1e9c4993460a7bd50452 diff --git a/microservices/starters/audit b/microservices/starters/audit index 93756a38505..ef18c9e6521 160000 
--- a/microservices/starters/audit +++ b/microservices/starters/audit @@ -1 +1 @@ -Subproject commit 93756a385054b5d21836a812fb9c47f81b3aa44d +Subproject commit ef18c9e6521c36a8fa64cb40a266cf6e532b0e64 diff --git a/microservices/starters/cache b/microservices/starters/cache index 7c6516ef6c1..30196007910 160000 --- a/microservices/starters/cache +++ b/microservices/starters/cache @@ -1 +1 @@ -Subproject commit 7c6516ef6c18d69d537a3ff675d6e6843a5cf9bd +Subproject commit 3019600791021114e50b387cc312c97375b979ff diff --git a/microservices/starters/cached-results b/microservices/starters/cached-results new file mode 160000 index 00000000000..b22a3d6a17a --- /dev/null +++ b/microservices/starters/cached-results @@ -0,0 +1 @@ +Subproject commit b22a3d6a17a7f4eddfd5dc8e205e937b294e1c3c diff --git a/microservices/starters/datawave b/microservices/starters/datawave index a92951fd5fb..081e0a028a8 160000 --- a/microservices/starters/datawave +++ b/microservices/starters/datawave @@ -1 +1 @@ -Subproject commit a92951fd5fb691a9450722b6e2d4ff81a8fbc9c5 +Subproject commit 081e0a028a85ab562a1fa419a590310bed030a7b diff --git a/microservices/starters/metadata b/microservices/starters/metadata index ed6e3557d43..28337e6c230 160000 --- a/microservices/starters/metadata +++ b/microservices/starters/metadata @@ -1 +1 @@ -Subproject commit ed6e3557d43bfc6cd03989a040df3facce39695a +Subproject commit 28337e6c2306b44a888a4f6ba7825f268ab6ff18 diff --git a/microservices/starters/pom.xml b/microservices/starters/pom.xml index 7998871d120..e59a2a4e49f 100644 --- a/microservices/starters/pom.xml +++ b/microservices/starters/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.microservice datawave-microservice-build-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-microservice-starter-build-parent pom @@ -12,36 +12,47 @@ - submodule-starter-datawave + submodule-starter-audit - datawave/pom.xml + audit/pom.xml - datawave + audit - submodule-starter-audit + submodule-starter-cache - audit/pom.xml + cache/pom.xml - audit + cache - submodule-starter-cache + submodule-starter-cached-results - cache/pom.xml + cached-results/pom.xml - cache + cached-results + + + + submodule-starter-datawave + + + datawave/pom.xml + + + + datawave @@ -55,6 +66,17 @@ metadata + + submodule-starter-query + + + query/pom.xml + + + + query + + submodule-starter-query-metric diff --git a/microservices/starters/query b/microservices/starters/query new file mode 160000 index 00000000000..ae730eab061 --- /dev/null +++ b/microservices/starters/query @@ -0,0 +1 @@ +Subproject commit ae730eab0610c414bdfefbcaa0e072ec2751fa72 diff --git a/microservices/starters/query-metric b/microservices/starters/query-metric index 8155aa3e653..fad3ed7859f 160000 --- a/microservices/starters/query-metric +++ b/microservices/starters/query-metric @@ -1 +1 @@ -Subproject commit 8155aa3e6538882ec7f55e75b67ace55867cb28a +Subproject commit fad3ed7859fdc612a555ed498bd897aca168cfd9 diff --git a/pom.xml b/pom.xml index 7ac5086a37a..3d470b98d46 100644 --- a/pom.xml +++ b/pom.xml @@ -3,7 +3,7 @@ 4.0.0 gov.nsa.datawave datawave-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT pom DataWave DataWave is a Java-based ingest and query framework that leverages Apache Accumulo to provide fast, secure access to your data. 
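The YAML fragment that opens this hunk maps logical table names (error.*, querymetrics.*, and the facet tables) onto interpolated properties such as ${table.name.errors.shard}. Assuming these files are consumed as Spring Boot configuration for the microservices, which this diff suggests but does not show, relaxed binding would map a nested key like error.shard.table.name onto a properties class. The sketch below is illustrative only; the class and accessor names are hypothetical and not part of this change.

    // Hypothetical binding class for illustration; not part of this change.
    // Spring relaxed binding maps error.shard.table.name -> getShard().getTable().getName().
    import org.springframework.boot.context.properties.ConfigurationProperties;

    @ConfigurationProperties(prefix = "error")
    public class ErrorTableProperties {
        private final Shard shard = new Shard();
        public Shard getShard() { return shard; }

        public static class Shard {
            private final Table table = new Table();
            public Table getTable() { return table; }
        }

        public static class Table {
            // "${table.name.errors.shard}" is resolved against the environment before binding
            private String name;
            public String getName() { return name; }
            public void setName(String name) { this.name = name; }
        }
    }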
@@ -26,16 +26,23 @@ HEAD https://github.com/NationalSecurityAgency/datawave + + + github-datawave + GitHub Datawave Apache Maven Packages + https://maven.pkg.github.com/NationalSecurityAgency/datawave + + 11 11 UTF-8 1C - 2.1.1 + 2.1.2 1.4.1.Final 1.0.0.Final 3.20.2 - 1.8.2 + 1.11.3 3.1.0 1.9.4 1.4 @@ -43,83 +50,93 @@ 3.2.2 4.3 1.10 - 2.7 + 2.8.0 2.6 - 2.1.1 + 3.3 2.6 1.2 1.6 - 1.8 + 1.11.0 5.2.0 + 5.2.0 + 4.0.0 + 4.0.0 + 4.0.0 + 4.0.0 + 4.0.0 + 3.0.0 + 4.0.1 + 1.0.0 + 4.0.8 + 3.0.0 + 1.0.0 + 4.0.7 + 3.0.3 1.9.0 - 4.0.2 + 5.2.0 2.15.0 - 2.22.1 + 2.22.5 28.1 1.2.0 31.1-jre + 1.0.1 2.0.3 1.1.1 3.3.5 1.1.1 + 1.3 4.5.13 4.4.8 - 3.0.1 + 4.0.1 9.4.21.Final 2.10.0.pr1 + 1.9.13 + 2.3.3 3.24.0-GA 3.1.0 2.0.1.Final + 2.3.1 + 2.3.3 1.72 6.1.26 4.0.19.Final 0.11.2 + 20231013 1.19.0 4.13.2 - 5.5.2 - 5.5.2 + 5.10.2 + 2.7.2b2 2.20 2.20 2.17.2 7.5.0 2.5.2 1.6.0 - 3.0.0 - 3.0.1 - 3.0.0 - 3.0.0 - 3.0.0 - 2.0.0 - 3.0.0 - 3.0.0 - 2.0.0 - 3.0.0 - 2.0.1 1.2 2.23.0 - 8.0.16 + 8.0.28 4.1.42.Final 2.1 5.0.3.Final 2.0.9 - 3.7.1 + 3.16.3 1.6.2 1.7.36 3.1.5 5.2.2.RELEASE ${version.spring} 2.9.6 - 3.0.0-M6 + 3.5.2 0.17.0 3.1.1.Final 3.1.1.Final 2.3.5.Final 17.0.1.Final - 5.0.3 + 5.4.0 3.1.4 2.12.2 - 3.8.1 + 3.8.3 @@ -183,7 +200,7 @@ com.google.guava failureaccess - 1.0.1 + ${version.google-guava-failure} com.google.guava @@ -318,42 +335,47 @@ gov.nsa.datawave.microservice accumulo-api - ${version.microservice.accumulo-api} + ${version.datawave.accumulo-api} gov.nsa.datawave.microservice accumulo-utils - ${version.microservice.accumulo-utils} + ${version.datawave.accumulo-utils} gov.nsa.datawave.microservice audit-api - ${version.microservice.audit-api} + ${version.datawave.audit-api} gov.nsa.datawave.microservice authorization-api - ${version.microservice.authorization-api} + ${version.datawave.authorization-api} gov.nsa.datawave.microservice base-rest-responses - ${version.microservice.base-rest-responses} + ${version.datawave.base-rest-responses} gov.nsa.datawave.microservice common-utils - ${version.microservice.common-utils} + ${version.datawave.common-utils} gov.nsa.datawave.microservice dictionary-api - ${version.microservice.dictionary-api} + ${version.datawave.dictionary-api} + + + gov.nsa.datawave.microservice + mapreduce-query-api + ${version.datawave.mapreduce-query-api} gov.nsa.datawave.microservice metadata-utils - ${version.microservice.metadata-utils} + ${version.datawave.metadata-utils} log4j @@ -372,12 +394,23 @@ gov.nsa.datawave.microservice metrics-reporter - ${version.microservice.metrics-reporter} + ${version.datawave.metrics-reporter} + + + gov.nsa.datawave.microservice + query-api + ${version.datawave.query-api} + + + gov.nsa.datawave.microservice + query-api + ${version.datawave.query-api} + jboss gov.nsa.datawave.microservice query-metric-api - ${version.microservice.query-metric-api} + ${version.datawave.query-metric-api} gov.nsa.datawave @@ -412,7 +445,7 @@ gov.nsa.datawave.microservice type-utils - ${version.microservice.type-utils} + ${version.datawave.type-utils} log4j @@ -422,6 +455,10 @@ org.slf4j slf4j-log4j12 + + org.apache.hadoop + hadoop-common + @@ -593,8 +630,8 @@ org.apache.commons - commons-jexl - ${version.commons-jexl} + commons-jexl3 + ${version.commons-jexl3} org.apache.commons @@ -609,17 +646,33 @@ log4j * + + org.apache.zookeeper + zookeeper + org.apache.curator curator-framework ${version.curator} + + + org.apache.zookeeper + zookeeper + + org.apache.curator curator-recipes ${version.curator} + + + org.apache.zookeeper + zookeeper + + org.apache.deltaspike.core @@ 
-671,17 +724,6 @@ hadoop-client-runtime ${version.hadoop} - - org.apache.hadoop - hadoop-common - ${version.hadoop} - - - * - * - - - org.apache.hadoop hadoop-distcp @@ -803,6 +845,20 @@ org.apache.zookeeper zookeeper ${version.zookeeper} + + + org.slf4j + slf4j-log4j12 + + + log4j + log4j + + + ch.qos.logback + logback-classic + + org.eclipse.emf @@ -1049,6 +1105,13 @@ pom import + + org.junit + junit-bom + ${version.junit.bom} + pom + import + org.springframework spring-framework-bom @@ -1072,7 +1135,7 @@ gov.nsa.datawave.microservice base-rest-responses - ${version.microservice.base-rest-responses} + ${version.datawave.base-rest-responses} tests test-jar test @@ -1099,7 +1162,7 @@ org.apache.curator curator-test - ${version.curator} + ${version.curator.test} test true @@ -1133,12 +1196,6 @@ ${version.weld-test} test - - org.junit.jupiter - junit-jupiter-api - ${version.junit.jupiter} - test - org.mockito mockito-core @@ -1163,22 +1220,24 @@ ${version.powermock} test + + org.powermock + powermock-reflect + ${version.powermock} + test + - true false - datawave-github-mvn-repo - https://raw.githubusercontent.com/NationalSecurityAgency/datawave/mvn-repo + github-datawave + https://maven.pkg.github.com/NationalSecurityAgency/datawave @@ -1197,11 +1256,6 @@ - true @@ -1209,8 +1263,8 @@ false - datawave-github-mvn-repo - https://raw.githubusercontent.com/NationalSecurityAgency/datawave/mvn-repo + github-datawave + https://maven.pkg.github.com/NationalSecurityAgency/datawave @@ -1353,15 +1407,49 @@ 3.1.0 checkstyle.xml - false + true false + true + basedir=${datawave.root} + + ch.qos.reload4j + reload4j + 1.2.22 + + + ch.qos.reload4j + reload4j + 1.2.22 + com.puppycrawl.tools checkstyle 8.29 + + org.apache.maven.doxia + doxia-site-renderer + 1.4 + + + log4j + log4j + + + + + org.apache.maven.doxia + doxia-site-renderer + 1.4 + + + log4j + log4j + + + @@ -1412,6 +1500,30 @@ org.apache.maven.plugins maven-dependency-plugin 3.1.1 + + + ch.qos.reload4j + reload4j + 1.2.22 + + + org.apache.maven.doxia + doxia-site-renderer + 1.4 + + + log4j + log4j + + + + + + org.apache.maven.shared + maven-dependency-analyzer + 1.11.1 + + org.apache.maven.plugins @@ -1447,6 +1559,7 @@ log4j:log4j org.slf4j:slf4j-log4j12 + ch.qos.logback:logback-classic @@ -1470,9 +1583,14 @@ - org.apache.maven.surefire - surefire-junit47 - ${version.surefire.plugin} + org.junit.jupiter + junit-jupiter-engine + ${version.junit.bom} + + + org.junit.vintage + junit-vintage-engine + ${version.junit.bom} @@ -1519,6 +1637,24 @@ org.apache.maven.plugins maven-javadoc-plugin 3.1.1 + + + ch.qos.reload4j + reload4j + 1.2.22 + + + org.apache.maven.doxia + doxia-site-renderer + 1.7.4 + + + log4j + log4j + + + + org.apache.maven.plugins @@ -1536,6 +1672,7 @@ 2.7 UTF-8 + \ @@ -1556,9 +1693,14 @@ ${version.surefire.plugin} - org.apache.maven.surefire - surefire-junit47 - ${version.surefire.plugin} + org.junit.jupiter + junit-jupiter-engine + ${version.junit.bom} + + + org.junit.vintage + junit-vintage-engine + ${version.junit.bom} @@ -1570,7 +1712,7 @@ org.codehaus.mojo build-helper-maven-plugin - 3.0.0 + 3.3.0 org.codehaus.mojo @@ -1768,6 +1910,24 @@ org.apache.maven.plugins maven-javadoc-plugin + + + ch.qos.reload4j + reload4j + 1.2.22 + + + org.apache.maven.doxia + doxia-site-renderer + 1.7.4 + + + log4j + log4j + + + + @@ -1844,6 +2004,17 @@ + + quickstart + + + quickstart-docker + + + + contrib/datawave-quickstart/docker + + microservices @@ -1881,6 +2052,54 @@ + + compose + + compose + compose + + + + + + gov.nsa.datawave.plugins + 
read-properties + + + compose.properties + + + + + + + + + kubernetes + + false + + + kubernetes + kubernetes + + + + + + gov.nsa.datawave.plugins + read-properties + + + kubernetes.properties + kubernetes-passwords.properties + + + + + + + javadoc @@ -1923,9 +2142,9 @@ clover - com.cenqua.clover + com.atlassian.clover clover - RELEASE + [4.1.2,) diff --git a/properties/compose.properties b/properties/compose.properties new file mode 100644 index 00000000000..7971d5568b0 --- /dev/null +++ b/properties/compose.properties @@ -0,0 +1,336 @@ +CONFIGURATION=test +RCPT_TO=hadoop@localhost + +docker.image.prefix= + +# ingest properties +DATAWAVE_INGEST_HOME=/opt/datawave/contrib/datawave-quickstart/datawave-ingest + +WAREHOUSE_ACCUMULO_HOME=/opt/datawave/contrib/datawave-quickstart/accumulo +WAREHOUSE_HDFS_NAME_NODE=hdfs://localhost:9000 +WAREHOUSE_JOBTRACKER_NODE=localhost:8032 +WAREHOUSE_ZOOKEEPERS=localhost:2181 +WAREHOUSE_INSTANCE_NAME=my-instance-01 +#Sets variable sets the zookeeper location for the warehouse side +zookeeper.hosts=localhost:2181 + +INGEST_ACCUMULO_HOME=/opt/datawave/contrib/datawave-quickstart/accumulo +INGEST_HDFS_NAME_NODE=hdfs://localhost:9000 +INGEST_JOBTRACKER_NODE=localhost:8050 +INGEST_ZOOKEEPERS=localhost:2181 +INGEST_INSTANCE_NAME=my-instance-01 + +JOB_CACHE_REPLICATION=1 + +STAGING_HOSTS=localhost +DUMPER_HOSTS=localhost +INGEST_HOST=localhost +ROLLUP_HOST=localhost + +#extra mapreduce options (e.g. mapreduce.task.io.sort.mb and the like) +MAPRED_INGEST_OPTS=-useInlineCombiner -ingestMetricsDisabled + +#extra HADOOP_OPTS (java options) +HADOOP_INGEST_OPTS= + +#extra CHILD_OPTS (java options) +CHILD_INGEST_OPTS= + +BULK_CHILD_MAP_MAX_MEMORY_MB=2048 +LIVE_CHILD_MAP_MAX_MEMORY_MB=1024 +BULK_CHILD_REDUCE_MAX_MEMORY_MB=2048 +LIVE_CHILD_REDUCE_MAX_MEMORY_MB=1024 + +BULK_INGEST_DATA_TYPES=shardStats +LIVE_INGEST_DATA_TYPES=wikipedia,mycsv,myjson + +# Clear out these values if you do not want standard shard ingest. 
+DEFAULT_SHARD_HANDLER_CLASSES=datawave.ingest.mapreduce.handler.shard.AbstractColumnBasedHandler +ALL_HANDLER_CLASSES=datawave.ingest.mapreduce.handler.edge.ProtobufEdgeDataTypeHandler,datawave.ingest.mapreduce.handler.dateindex.DateIndexDataTypeHandler + +BULK_INGEST_REDUCERS=10 +LIVE_INGEST_REDUCERS=10 + +# Note the max blocks per job must be less than or equal to the number of mappers +INGEST_BULK_JOBS=1 +INGEST_BULK_MAPPERS=4 +INGEST_MAX_BULK_BLOCKS_PER_JOB=4 +INGEST_LIVE_JOBS=1 +INGEST_LIVE_MAPPERS=4 +INGEST_MAX_LIVE_BLOCKS_PER_JOB=4 + +INDEX_STATS_MAX_MAPPERS=7 + +NUM_MAP_LOADERS=1 + +USERNAME=root +PASSWORD=secret + +ZOOKEEPER_HOME=/opt/datawave/contrib/datawave-quickstart/zookeeper +HADOOP_HOME=/opt/datawave/contrib/datawave-quickstart/hadoop +MAPRED_HOME=/opt/datawave/contrib/datawave-quickstart/hadoop + +WAREHOUSE_HADOOP_CONF=/opt/datawave/contrib/datawave-quickstart/hadoop/etc/hadoop +INGEST_HADOOP_CONF=/opt/datawave/contrib/datawave-quickstart/hadoop/etc/hadoop + +HDFS_BASE_DIR=/datawave/ingest + +MONITOR_SERVER_HOST=localhost + +LOG_DIR=/opt/datawave/contrib/datawave-quickstart/datawave-ingest/logs +PDSH_LOG_DIR=${LOG_DIR}/pdsh_logs +FLAG_DIR=/opt/datawave/contrib/datawave-quickstart/data/datawave/flags +FLAG_MAKER_CONFIG=/opt/datawave/contrib/datawave-quickstart/datawave-ingest/config/flag-maker-live.xml +BIN_DIR_FOR_FLAGS=/opt/datawave/contrib/datawave-quickstart/datawave-ingest/bin + +PYTHON=/usr/bin/python + +# Setting discard interval to 0 in order to disable auto-ageoff @ ingest time +EVENT_DISCARD_INTERVAL=0 + +# Setting discard interval to 0 in order to disable auto-ageoff @ ingest time +EVENT_DISCARD_FUTURE_INTERVAL=0 + +DATAWAVE_CACHE_PORT=20444 + +EDGE_DEFINITION_FILE=config/edge-definitions.xml + +ERROR_TABLE=errors +ANALYTIC_MTX=analytic_metrics +LOADER_MTX=loader_metrics +INGEST_MTX=ingest_metrics +BULK_INGEST_METRIC_THRESHOLD=1500000 +LIVE_INGEST_METRIC_THRESHOLD=1500000 + +KEYSTORE=/opt/datawave/web-services/deploy/application/src/main/wildfly/overlay/standalone/configuration/certificates/testServer.p12 +KEYSTORE_TYPE=PKCS12 +KEYSTORE_PASSWORD=ChangeIt +TRUSTSTORE=/opt/datawave/web-services/deploy/application/src/main/wildfly/overlay/standalone/configuration/certificates/ca.jks + +FLAG_METRICS_DIR=/opt/datawave/contrib/datawave-quickstart/data/datawave/flagMetrics +TRUSTSTORE_PASSWORD=ChangeIt +TRUSTSTORE_TYPE=JKS + +cluster.name=quickstart +accumulo.instance.name=my-instance-01 +accumulo.user.name=root +accumulo.user.password=secret +cached.results.hdfs.uri=hdfs://localhost:9000 +cached.results.export.dir=/CachedResults + +lock.file.dir=/opt/datawave/contrib/datawave-quickstart/data/datawave/ingest-lock-files +JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk + +# query properties +server.keystore.password=secret +mysql.user.password=datawave +jboss.jmx.password=secret +hornetq.cluster.password=secret +hornetq.system.password=secret + +server.truststore.password=Changeit + +#Sets up the Atom Service +atom.wildfly.hostname=localhost +atom.wildfly.port.number=8443 +atom.connection.pool.name=WAREHOUSE + + +# other properties +rpm.file.owner=rpmowner +rpm.file.group=rpmowner +rpm.file.accumulo.owner=accumulo-owner +rpm.file.accumulo.group=accumulo-owner + +# Enable full table scans for the base event query? 
+#beq.fullTableScanEnabled=true + +event.query.data.decorators= \ + \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n + +lookup.uuid.uuidTypes= \ + \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n + +query.metrics.marking=(PUBLIC) +query.metrics.visibility=PUBLIC + +metrics.warehouse.namenode=localhost +metrics.warehouse.hadoop.path=/local/hadoop +metrics.reporter.class=datawave.metrics.NoOpMetricsReporterFactory + +metadatahelper.default.auths=PUBLIC + +security.npe.ou.entries=EXAMPLE_SERVER_OU1,EXAMPLE_SERVER_OU2 +security.subject.dn.pattern=(?:^|,)\\s*OU\\s*=\\s*My Department\\s*(?:,|$) + +datawave.docs.menu.extras=
  • Accumulo
  • + +type.metadata.hdfs.uri=hdfs://localhost:9000 +mapReduce.hdfs.uri=hdfs://localhost:9000 +bulkResults.hdfs.uri=hdfs://localhost:9000 +jboss.log.hdfs.uri=hdfs://localhost:9000 +jboss.managed.executor.service.default.max.threads=48 +mapReduce.job.tracker=localhost:8050 +bulkResults.job.tracker=localhost:8050 +ingest.data.types=wikipedia,mycsv,myjson,shardStats +PASSWORD_INGEST_ENV=/opt/datawave/contrib/datawave-quickstart/datawave-ingest/config/ingest-passwd.sh +hdfs.site.config.urls=file:///opt/datawave/contrib/datawave-quickstart/hadoop/etc/hadoop/core-site.xml,file:///opt/datawave/contrib/datawave-quickstart/hadoop/etc/hadoop/hdfs-site.xml +table.shard.numShardsPerDay=1 + +############################ +# +# Security Settings +# +############################ +# Whether or not to use the remote authorization service +security.use.remoteauthservice=true +# Whether or not to use the test authorization service that loads canned users +security.use.testauthservice=false + +# Configuration for the remote DatawaveUser service +# +# Find the host and port of the service using a SRV DNS lookup +security.remoteuserservice.srv.lookup.enabled=false +# The DNS servers to use for the SRV lookup +security.remoteuserservice.srv.lookup.servers=127.0.0.1 +# The port on which the DNS server that serves SRV records is listening +security.remoteuserservice.srv.lookup.port=8600 +# The scheme to use when connecting to the remote user service +security.remoteuserservice.scheme=https +# The host to connect to (or do a SRV lookup on) for the remote user service +security.remoteuserservice.host=authorization +# The port to connect to (unless a SRV lookup was performed) for the remote user service +security.remoteuserservice.port=8443 + +############################ +# +# Audit Settings +# +############################ +# Whether or not to use the remote audit service +auditing.use.remoteauditservice=true + +# Configuration for the remote audit service +# +# Find the host and port of the service using a SRV DNS lookup +auditing.remoteauditservice.srv.lookup.enabled=false +# The DNS servers to use for the SRV lookup +auditing.remoteauditservice.srv.lookup.servers=127.0.0.1 +# The port on which the DNS server that serves SRV records is listening +auditing.remoteauditservice.srv.lookup.port=8600 +# The scheme to use when connecting to the remote audit service +auditing.remoteauditservice.scheme=https +# The host to connect to (or do a SRV lookup on) for the remote audit service +auditing.remoteauditservice.host=audit +# The port to connect to (unless a SRV lookup was performed) for the remote audit service +auditing.remoteauditservice.port=8443 + +############################ +# +# Dictionary Settings +# +############################ + +# Configuration for the remote dictionary service +# +# The scheme to use when connecting to the remote dictionary service +dictionary.remoteservice.scheme=https +# The host to connect to (or do a SRV lookup on) for the remote dictionary service +dictionary.remoteservice.host=localhost +# The port to connect to (unless a SRV lookup was performed) for the remote dictionary service +dictionary.remoteservice.port=8643 + +############################ +# +# Configuration for the remote Accumulo service +# +############################ +# Find the host and port of the service using a SRV DNS lookup +accumulo.remoteservice.srv.lookup.enabled=false +# The DNS servers to use for the SRV lookup +accumulo.remoteservice.srv.lookup.servers=127.0.0.1 +# The port on which the DNS server that 
serves SRV records is listening +accumulo.remoteservice.srv.lookup.port=8600 +# The scheme to use when connecting to the remote user service +accumulo.remoteservice.scheme=https +# The host to connect to (or do a SRV lookup on) for the remote user service +accumulo.remoteservice.host=accumulo +# The port to connect to (unless a SRV lookup was performed) for the remote user service +accumulo.remoteservice.port=8443 + +############################ +# +# Configuration for the remote Query Metric service +# +############################ +# Whether or not to use the remote query metric service +querymetric.remoteservice.enabled=true + +# Find the host and port of the service using a SRV DNS lookup +querymetric.remoteservice.srv.lookup.enabled=false +# The DNS servers to use for the SRV lookup +querymetric.remoteservice.srv.lookup.servers=127.0.0.1 +# The port on which the DNS server that serves SRV records is listening +querymetric.remoteservice.srv.lookup.port=8600 +# The scheme to use when connecting to the remote query metric service +querymetric.remoteservice.scheme=https +# The host to connect to (or do a SRV lookup on) for the remote query metric service +querymetric.remoteservice.host=metrics +# The port to connect to (unless a SRV lookup was performed) for the remote query metric service +querymetric.remoteservice.port=8443 diff --git a/properties/default.properties b/properties/default.properties index 1a366ee60de..5f7cbbc4854 100644 --- a/properties/default.properties +++ b/properties/default.properties @@ -72,6 +72,46 @@ security.remoteuserservice.host=localhost # The port to connect to (unless a SRV lookup was performed) for the remote user service security.remoteuserservice.port=8643 +############################ +# +# Audit Settings +# +############################ +# Whether or not to use the remote audit service +auditing.use.remoteauditservice=false + +# Configuration for the remote audit service +# +# Find the host and port of the service using a SRV DNS lookup +auditing.remoteauditservice.srv.lookup.enabled=false +# The DNS servers to use for the SRV lookup +auditing.remoteauditservice.srv.lookup.servers=127.0.0.1 +# The port on which the DNS server that serves SRV records is listening +auditing.remoteauditservice.srv.lookup.port=8600 +# The scheme to use when connecting to the remote audit service +auditing.remoteauditservice.scheme=https +# The host to connect to (or do a SRV lookup on) for the remote audit service +auditing.remoteauditservice.host=audit +# The port to connect to (unless a SRV lookup was performed) for the remote audit service +auditing.remoteauditservice.port=8443 + +############################ +# +# Dictionary Settings +# +############################ + +# Configuration for the remote dictionary service +# +# The scheme to use when connecting to the remote dictionary service +dictionary.remoteservice.scheme=https +# The host to connect to (or do a SRV lookup on) for the remote dictionary service +dictionary.remoteservice.host=localhost +# The port to connect to (unless a SRV lookup was performed) for the remote dictionary service +dictionary.remoteservice.port=8843 +# Use the configured scheme/host/port for redirect calls (instead of those from the request) +dictionary.remoteservice.useConfiguredURIForRedirect=true + ############################ # # Configuration for the remote Accumulo service @@ -109,6 +149,8 @@ querymetric.remoteservice.host=localhost querymetric.remoteservice.port=9043 # Is the remote service enabled 
querymetric.remoteservice.enabled=false +# Use the configured scheme/host/port for redirect calls (instead of those from the request) +querymetric.remoteservice.useConfiguredURIForRedirect=true ############################ # @@ -301,10 +343,13 @@ hornetq.port= webapp.transport.guarantee=CONFIDENTIAL # Tell the login module to expect client cert, and not DN stuffed in a header. trusted.header.login=false +trusted.header.issuer-header-name=X-SSL-ClientCert-Issuer +trusted.header.subject-header-name=X-SSL-ClientCert-Subject # web service response namespaces datawave.webservice.namespace=http://webservice.datawave.nsa/v1 # Name of the Cluster cluster.name=DEV +lock.file.dir=/var/run/datawave ############################ # @@ -354,7 +399,7 @@ lookup.uuid.mappings= # Default uuidTypes lookup.uuid.uuidTypes= # Default lookup.uuid.beginDate -lookup.uuid.beginDate=20100101 +lookup.uuid.beginDate=19700101 ############################ # @@ -381,8 +426,6 @@ hierarchy.field.options= # BaseEventQuery (beq) thresholds beq.baseIteratorPriority=100 -beq.eventPerDayThreshold=40000 -beq.shardsPerDayThreshold=20 # max number of terms BEFORE all expansions (calculated based on how much the initial parser can handle before hitting a stack overflow: between 3500 and 3750) beq.initialMaxTermThreshold=2000 # max number of terms AFTER all expansions (calculated based on how much the initial parser can handle before hitting a stack overflow: between 3500 and 3750) @@ -408,6 +451,8 @@ beq.maxTermExpansionThreshold=2000 beq.fieldIndexRangeSplit=16 # The max number of sources that can be created across ivarators for one scan beq.maxIvaratorSources=20 +# The max wait time in ms for an ivarator source (default 30 minutes) +beq.maxIvaratorSourceWait=1800000 # The max number of files that one ivarator can open at one time beq.maxIvaratorOpenFiles=100 # The max number of evaluation pipelines. They are run in a pool of threads controlled by the tserver.datawave.evaluation.threads accumulo configuration property which defaults to 100 (IteratorThreadPoolManager). 
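Both compose.properties above and this default.properties hunk configure remote services the same way: fixed scheme/host/port values, plus an optional DNS SRV lookup (*.srv.lookup.enabled, *.srv.lookup.servers, *.srv.lookup.port) that resolves the host and port at runtime instead. As a minimal sketch of that discovery mechanism, not DataWave's actual lookup code, a JNDI DNS query for an SRV record looks like this; the record name and DNS server address are placeholders.

    import java.util.Hashtable;
    import javax.naming.directory.Attribute;
    import javax.naming.directory.InitialDirContext;

    public class SrvLookupSketch {
        public static void main(String[] args) throws Exception {
            Hashtable<String, String> env = new Hashtable<>();
            env.put("java.naming.factory.initial", "com.sun.jndi.dns.DnsContextFactory");
            // Mirrors *.srv.lookup.servers=127.0.0.1 and *.srv.lookup.port=8600 above
            env.put("java.naming.provider.url", "dns://127.0.0.1:8600");

            // Placeholder record name; deployments define their own SRV records
            Attribute srv = new InitialDirContext(env)
                            .getAttributes("_audit._tcp.example.com", new String[] {"SRV"})
                            .get("SRV");

            // An SRV record value is "priority weight port target"
            String[] fields = srv.get().toString().split(" ");
            System.out.println("resolved " + fields[3] + ":" + fields[2]);
        }
    }

If the lookup is disabled, the configured *.host and *.port values are used directly, which is what these defaults do.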
@@ -651,11 +696,3 @@ basemaps= {\ accessToken: 'your.mapbox.access.token' \ }) \ } - -########################## -# -# The response object factory class -# -########################## -response.object.factory.class=datawave.webservice.query.result.event.DefaultResponseObjectFactory - diff --git a/properties/dev.properties b/properties/dev.properties index 37f03d40ae7..ea526d63026 100644 --- a/properties/dev.properties +++ b/properties/dev.properties @@ -88,6 +88,9 @@ PYTHON=/usr/bin/python # Setting discard interval to 0 in order to disable auto-ageoff @ ingest time EVENT_DISCARD_INTERVAL=0 +# Setting discard interval to 0 in order to disable auto-ageoff @ ingest time +EVENT_DISCARD_FUTURE_INTERVAL=0 + DATAWAVE_CACHE_PORT=20444 EDGE_DEFINITION_FILE=config/edge-definitions.xml @@ -194,6 +197,7 @@ event.query.data.decorators= \ \n \ \n \ \n \ +\n \ \n \ \n \ \n \ @@ -205,34 +209,43 @@ event.query.data.decorators= \ \n \ \n \ \n \ +\n \ \n \ \n lookup.uuid.uuidTypes= \ \ \n \ -\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ \n \ \n \ \n \ -\n \ +\n \ \n \ \n \ \n \ \n \ -\n \ +\n \ \n \ \n \ \n \ -\n \ +\n \ \n \ \n \ \n \ -\n \ +\n \ \n \ \n \ \n \ -\n \ +\n \ \n query.metrics.marking=(PUBLIC) diff --git a/properties/kubernetes.properties b/properties/kubernetes.properties new file mode 100644 index 00000000000..71711b88633 --- /dev/null +++ b/properties/kubernetes.properties @@ -0,0 +1,259 @@ +CONFIGURATION=compose +RCPT_TO=hadoop@localhost + +docker.image.prefix=ghcr.io/nationalsecurityagency/ + +docker.image.accumulo.tag=2.1.3 + +# ingest properties +DATAWAVE_INGEST_HOME=/opt/datawave-ingest/current + +trusted.header.login=true + +hdfs.site.config.urls=file:///etc/hadoop/conf/core-site.xml,file:///etc/hadoop/conf/hdfs-site.xml + +WAREHOUSE_ACCUMULO_HOME=/opt/accumulo +WAREHOUSE_HDFS_NAME_NODE=hdfs://hdfs-nn:9000 +WAREHOUSE_JOBTRACKER_NODE=yarn-rm:8032 +WAREHOUSE_ZOOKEEPERS=zookeeper:2181 +WAREHOUSE_INSTANCE_NAME=dev +#Sets variable sets the zookeeper location for the warehouse side +zookeeper.hosts=zookeeper:2181 + +INGEST_ACCUMULO_HOME=/opt/accumulo +INGEST_HDFS_NAME_NODE=hdfs://hdfs-nn:9000 +INGEST_JOBTRACKER_NODE=yarn-rm:8032 +INGEST_ZOOKEEPERS=zookeeper:2181 +INGEST_INSTANCE_NAME=dev + +JOB_CACHE_REPLICATION=1 + +STAGING_HOSTS=`nodeattr -c staging` +DUMPER_HOSTS=ingestmaster +INGEST_HOST=ingestmaster +ROLLUP_HOST=ingestmaster + +#extra mapreduce options (e.g. mapreduce.task.io.sort.mb and the like) +MAPRED_INGEST_OPTS=-useInlineCombiner + +#extra HADOOP_OPTS (java options) +HADOOP_INGEST_OPTS= + +#extra CHILD_OPTS (java options) +CHILD_INGEST_OPTS= + +BULK_CHILD_MAP_MAX_MEMORY_MB=2048 +LIVE_CHILD_MAP_MAX_MEMORY_MB=1024 +BULK_CHILD_REDUCE_MAX_MEMORY_MB=2048 +LIVE_CHILD_REDUCE_MAX_MEMORY_MB=1024 + +BULK_INGEST_DATA_TYPES=shardStats +LIVE_INGEST_DATA_TYPES=wikipedia,mycsv,myjson + +# Clear out these values if you do not want standard shard ingest. 
+DEFAULT_SHARD_HANDLER_CLASSES=datawave.ingest.mapreduce.handler.shard.AbstractColumnBasedHandler +ALL_HANDLER_CLASSES=datawave.ingest.mapreduce.handler.edge.ProtobufEdgeDataTypeHandler,datawave.ingest.mapreduce.handler.dateindex.DateIndexDataTypeHandler + +BULK_INGEST_REDUCERS=10 +LIVE_INGEST_REDUCERS=10 + +# Note the max blocks per job must be less than or equal to the number of mappers +INGEST_BULK_JOBS=1 +INGEST_BULK_MAPPERS=4 +INGEST_MAX_BULK_BLOCKS_PER_JOB=4 +INGEST_LIVE_JOBS=1 +INGEST_LIVE_MAPPERS=4 +INGEST_MAX_LIVE_BLOCKS_PER_JOB=4 + +INDEX_STATS_MAX_MAPPERS=1 + +NUM_MAP_LOADERS=1 + +USERNAME=root +PASSWORD=root + +ZOOKEEPER_HOME=/usr/lib/zookeeper +HADOOP_HOME=/usr/local/hadoop +MAPRED_HOME=/usr/local/hadoop-mapreduce + +WAREHOUSE_HADOOP_CONF=/usr/local/hadoop/etc/hadoop/ +INGEST_HADOOP_CONF=/usr/local/hadoop/etc/hadoop/ + +HDFS_BASE_DIR=/data + +MONITOR_SERVER_HOST=monitor + +LOG_DIR=/srv/logs/ingest +PDSH_LOG_DIR=${LOG_DIR}/pdsh_logs +FLAG_DIR=/srv/data/datawave/flags +FLAG_MAKER_CONFIG=/opt/datawave-ingest/current/config/flag-maker-live.xml,/opt/datawave-ingest/current/config/flag-maker-bulk.xml +BIN_DIR_FOR_FLAGS=/opt/datawave-ingest/current/bin + +PYTHON=/usr/bin/python + +# Setting discard interval to 0 in order to disable auto-ageoff @ ingest time +EVENT_DISCARD_INTERVAL=0 + +DATAWAVE_CACHE_PORT=20444 + +EDGE_DEFINITION_FILE=config/edge-definitions.xml + +ERROR_TABLE=errors +ANALYTIC_MTX=analytic_metrics +LOADER_MTX=loader_metrics +INGEST_MTX=ingest_metrics +BULK_INGEST_METRIC_THRESHOLD=1500000 +LIVE_INGEST_METRIC_THRESHOLD=1500000 + +KEYSTORE=/data/certs/keystore.p12 +KEYSTORE_TYPE=PKCS12 +KEYSTORE_PASSWORD=changeme +TRUSTSTORE=/data/certs/truststore.jks + +FLAG_METRICS_DIR=/srv/data/datawave/flagMetrics +TRUSTSTORE_PASSWORD=changeme +TRUSTSTORE_TYPE=JKS + +cluster.name=WAREHOUSE +accumulo.instance.name=dev +accumulo.user.name=root +accumulo.user.password=root +cached.results.hdfs.uri=hdfs://hdfs-nn:9000/ +cached.results.export.dir=/CachedResults + +lock.file.dir=/var/run/datawave +JAVA_HOME=/usr/lib/jvm/java/ + +# query properties +server.keystore.password=changeme +mysql.user.password=datawave +jboss.jmx.password=blah +hornetq.cluster.password=blah +hornetq.system.password=blah + +server.truststore.password=changeme + +#Sets up the Atom Service +atom.wildfly.hostname=localhost +atom.wildfly.port.number=8443 +atom.connection.pool.name=WAREHOUSE + +PASSWORD_INGEST_ENV=/opt/datawave-ingest/ingest-passwd.sh + +security.use.testauthservice=false +security.testauthservice.context.entry=classpath*:datawave/security/TestDatawaveUserServiceConfiguration.xml + +security.testauthservice.users= \ +\n + +security.use.remoteauthservice=true +security.remoteuserservice.scheme=https +# The host to connect to (or do a SRV lookup on) for the remote user service +security.remoteuserservice.host=dwv-web-authorization +# The port to connect to (unless a SRV lookup was performed) for the remote user service +security.remoteuserservice.port=8443 + +webapp.transport.guarantee=NONE + +# other properties +rpm.file.owner=datawave +rpm.file.group=datawave +rpm.file.accumulo.owner=accumulo +rpm.file.accumulo.group=accumulo + +# Enable full table scans for the base event query? 
+#beq.fullTableScanEnabled=true + +event.query.data.decorators= \ + \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n + + +lookup.uuid.uuidTypes= \ + \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n \ +\n + + + +query.metrics.marking=(PUBLIC) +query.metrics.visibility=PUBLIC + +metrics.warehouse.namenode=localhost +metrics.warehouse.hadoop.path=/usr/local/hadoop +metrics.reporter.class=datawave.metrics.NoOpMetricsReporterFactory + +metadatahelper.default.auths=PUBLIC + +security.npe.ou.entries=EXAMPLE_SERVER_OU1,EXAMPLE_SERVER_OU2 +security.subject.dn.pattern=(?:^|,)\\s*OU\\s*=\\s*My Department\\s*(?:,|$) + +datawave.docs.menu.extras=
  • Accumulo
  • + diff --git a/warehouse/accumulo-extensions/pom.xml b/warehouse/accumulo-extensions/pom.xml index a4492b380c6..e1d89cc4670 100644 --- a/warehouse/accumulo-extensions/pom.xml +++ b/warehouse/accumulo-extensions/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave datawave-warehouse-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-accumulo-extensions ${project.artifactId} diff --git a/warehouse/accumulo-extensions/src/main/java/datawave/ingest/table/volumeChoosers/ShardedTableDateBasedTieredVolumeChooser.java b/warehouse/accumulo-extensions/src/main/java/datawave/ingest/table/volumeChoosers/ShardedTableDateBasedTieredVolumeChooser.java index e4e6ad3d27c..1538083213e 100644 --- a/warehouse/accumulo-extensions/src/main/java/datawave/ingest/table/volumeChoosers/ShardedTableDateBasedTieredVolumeChooser.java +++ b/warehouse/accumulo-extensions/src/main/java/datawave/ingest/table/volumeChoosers/ShardedTableDateBasedTieredVolumeChooser.java @@ -52,6 +52,7 @@ public class ShardedTableDateBasedTieredVolumeChooser extends RandomVolumeChoose private static final DateTimeFormatter FORMATTER = DateTimeFormatter.ofPattern(DATE_PATTERN); private static Pattern SHARD_PATTERN = Pattern.compile("\\d{8}_\\d+"); + private static Pattern SHARD_PATTERN_NO_SUFFIX = Pattern.compile("\\d{8}"); @Override public String choose(VolumeChooserEnvironment env, Set options) { @@ -75,7 +76,7 @@ public String choose(VolumeChooserEnvironment env, Set options) { } else { String endRowString = endRow.toString(); - if (SHARD_PATTERN.matcher(endRowString).matches()) { + if (SHARD_PATTERN.matcher(endRowString).matches() || SHARD_PATTERN_NO_SUFFIX.matcher(endRowString).matches()) { String date = endRowString.substring(0, 8); LocalDate rowDate = LocalDate.parse(date, FORMATTER); LocalDate today = LocalDate.now(); diff --git a/warehouse/accumulo-extensions/src/test/java/datawave/ingest/table/volumeChoosers/ShardedTableDateBasedTieredVolumeChooserTest.java b/warehouse/accumulo-extensions/src/test/java/datawave/ingest/table/volumeChoosers/ShardedTableDateBasedTieredVolumeChooserTest.java index 230b29895ba..1e6a58735f8 100644 --- a/warehouse/accumulo-extensions/src/test/java/datawave/ingest/table/volumeChoosers/ShardedTableDateBasedTieredVolumeChooserTest.java +++ b/warehouse/accumulo-extensions/src/test/java/datawave/ingest/table/volumeChoosers/ShardedTableDateBasedTieredVolumeChooserTest.java @@ -75,6 +75,24 @@ public void testAllValidInputOldData() { assertTrue(oldVolumes.contains(choice)); } + @Test + public void testAllValidInputNoSuffix() { + + String newVolumes = "newData1,newData2,newData3"; + String oldVolumes = "oldData1,oldData2"; + long daysBack = 125L; + + Map tiers = new HashMap<>(); + tiers.put(0L, newVolumes); + tiers.put(daysBack, oldVolumes); + + String shardId = "20000202"; + setupMock(tiers, shardId); + ShardedTableDateBasedTieredVolumeChooser chooser = new ShardedTableDateBasedTieredVolumeChooser(); + String choice = chooser.choose(env, options); + assertTrue(oldVolumes.contains(choice)); + } + @Test public void testAllValidInputNewData() { diff --git a/warehouse/age-off-utils/pom.xml b/warehouse/age-off-utils/pom.xml new file mode 100644 index 00000000000..38a8617dd33 --- /dev/null +++ b/warehouse/age-off-utils/pom.xml @@ -0,0 +1,70 @@ + + + 4.0.0 + + gov.nsa.datawave + datawave-warehouse-parent + 7.13.0-SNAPSHOT + + datawave-age-off-utils + ${project.artifactId} + + + + com.google.guava + guava + compile + + + + gov.nsa.datawave + datawave-age-off + ${project.version} + compile + + + xml-apis + xml-apis + + + + + 
org.apache.accumulo + accumulo-core + compile + + + + org.slf4j + slf4j-api + compile + + + + xerces + xercesImpl + ${version.xerces} + compile + + + + xml-apis + xml-apis + 1.4.01 + compile + + + + gov.nsa.datawave + datawave-age-off + ${project.version} + tests + test + + + junit + junit + test + + + diff --git a/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffCsvColumnInformation.java b/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffCsvColumnInformation.java new file mode 100644 index 00000000000..407d6baaf4e --- /dev/null +++ b/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffCsvColumnInformation.java @@ -0,0 +1,46 @@ +package datawave.age.off.util; + +import java.text.MessageFormat; +import java.util.Arrays; + +public class AgeOffCsvColumnInformation { + + int patternColumnNumber = -1; + int durationColumnNumber = -1; + int labelColumnNumber = -1; + int overrideColumnNumber = -1; + + // required + private static final String PATTERN_COLUMN_HEADER = "pattern"; + // required + private static final String DURATION_COLUMN_HEADER = "duration"; + // optional + private static final String LABEL_COLUMN_NUMBER = "label"; + // optional - conditionally override duration + private static final String DURATION_OVERRIDE_COLUMN_HEADER = "override"; + + public void parseHeader(String[] headerTokens) { + int columnNumber = 0; + for (String headerToken : headerTokens) { + switch (headerToken.trim().toLowerCase()) { + case DURATION_COLUMN_HEADER: + this.durationColumnNumber = columnNumber; + break; + case LABEL_COLUMN_NUMBER: + this.labelColumnNumber = columnNumber; + break; + case PATTERN_COLUMN_HEADER: + this.patternColumnNumber = columnNumber; + break; + case DURATION_OVERRIDE_COLUMN_HEADER: + this.overrideColumnNumber = columnNumber; + break; + } + columnNumber++; + } + if (this.durationColumnNumber == -1 || this.patternColumnNumber == -1) { + throw new IllegalStateException(MessageFormat.format("Unable to find {0} or {1} in {2}", DURATION_COLUMN_HEADER, PATTERN_COLUMN_HEADER, + Arrays.toString(headerTokens))); + } + } +} diff --git a/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffCsvToMatchPatternFormatter.java b/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffCsvToMatchPatternFormatter.java new file mode 100644 index 00000000000..bdaa4794dbd --- /dev/null +++ b/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffCsvToMatchPatternFormatter.java @@ -0,0 +1,204 @@ +package datawave.age.off.util; + +import java.io.IOException; +import java.io.Writer; +import java.util.Arrays; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; + +/** + * Reformats csv input into an age off match pattern. Expects a header to appear as the first line that's not a comment or whitespace-only. See + * ConfigurableAgeOffFilter. 
+ */ +public class AgeOffCsvToMatchPatternFormatter { + private static final Logger log = LoggerFactory.getLogger(AgeOffCsvToMatchPatternFormatter.class); + + private static final String COMMA = ","; + private static final char COLON = ':'; + private static final char EQUALS = '='; + private static final char NEW_LINE = '\n'; + private static final char SPACE = ' '; + private final AgeOffCsvToMatchPatternFormatterConfiguration configuration; + private AgeOffCsvColumnInformation columnInformation; + + public AgeOffCsvToMatchPatternFormatter(AgeOffCsvToMatchPatternFormatterConfiguration configuration) { + this.configuration = configuration; + } + + /** + * Reformats each input line and outputs to writer + * + * @param writer + * output writer + * @throws IOException + * i/o exception with writer + */ + @VisibleForTesting + void write(Writer writer) throws IOException { + while (configuration.getInputIterator().hasNext()) { + String inputLine = configuration.getInputIterator().next(); + reformat(writer, inputLine); + } + } + + private void reformat(Writer writer, String inputLine) throws IOException { + String trimmedLine = inputLine.trim(); + + if (isWhitespaceOnly(trimmedLine)) { + writer.write(inputLine + "\n"); + } else if (isComment(trimmedLine)) { + writer.write(createComment(trimmedLine)); + } else { + // Use -1 to prevent chopping of empty tokens + String[] tokens = inputLine.split(COMMA, -1); + + if (columnInformation == null) { + log.debug("Attempting to parse header: {}", inputLine); + initializeHeader(tokens); + } else { + writer.write(reformatLine(tokens)); + } + } + } + + private boolean isWhitespaceOnly(String trimmedLine) { + return trimmedLine.equals(""); + } + + private void initializeHeader(String[] tokens) { + columnInformation = new AgeOffCsvColumnInformation(); + columnInformation.parseHeader(tokens); + } + + private boolean isComment(String trimmedLine) { + return trimmedLine.startsWith("#"); + } + + private String createComment(String trimmedLine) { + return "\n"; + } + + private String reformatLine(String[] tokens) { + StringBuilder sb = new StringBuilder(); + + appendLabel(tokens, sb); + + appendLiteral(tokens, sb); + + appendEquivalenceSymbol(sb); + + appendValue(tokens, sb); + + sb.append(NEW_LINE); + + return sb.toString(); + } + + private void appendValue(String[] tokens, StringBuilder sb) { + String value = ""; + + // use override value if it exists for this line (it might be empty) + if (configuration.useOverrides()) { + if (tokens.length <= columnInformation.overrideColumnNumber) { + log.error("Unable to process override {}", Arrays.toString(tokens)); + throw new IllegalStateException("Unable to process override from " + Arrays.toString(tokens)); + } + value = tokens[columnInformation.overrideColumnNumber].trim(); + } + + // if overrides are disabled or override was missing + if (value.length() == 0) { + if (tokens.length <= columnInformation.durationColumnNumber) { + log.error("Unable to process duration {}", Arrays.toString(tokens)); + throw new IllegalStateException("Unable to process duration from " + Arrays.toString(tokens)); + } + value = tokens[columnInformation.durationColumnNumber].trim(); + } + + if (value.length() == 0) { + log.error("Unable to find non-empty override or duration {}", Arrays.toString(tokens)); + throw new IllegalStateException("Unable to find non-empty override or duration from tokens: " + Arrays.toString(tokens)); + } + sb.append(attemptValueMapping(value)); + } + + private String attemptValueMapping(String originalValue) { + if 
(null == configuration.getValueMapping()) { + return originalValue; + } + + String replacementValue = configuration.getValueMapping().get(originalValue); + if (null == replacementValue) { + return originalValue; + } + return replacementValue; + } + + private void appendLabel(String[] tokens, StringBuilder sb) { + if (configuration.shouldDisableLabel()) { + return; + } + + if (tokens.length <= columnInformation.labelColumnNumber) { + log.error("Unable to process label {}", Arrays.toString(tokens)); + throw new IllegalStateException("Unable to process label from " + Arrays.toString(tokens)); + } + + String label = ""; + + if (null != configuration.getStaticLabel()) { + label = configuration.getStaticLabel(); + } else if (columnInformation.labelColumnNumber != -1) { + label = tokens[columnInformation.labelColumnNumber].trim(); + } + + if (label.length() == 0) { + log.error("Unable to apply non-empty label {}", Arrays.toString(tokens)); + throw new IllegalStateException("Unable to apply non-empty label from " + Arrays.toString(tokens)); + } + sb.append(label).append(SPACE); + } + + private void appendLiteral(String[] tokens, StringBuilder sb) { + if (tokens.length <= columnInformation.patternColumnNumber) { + log.error("Unable to process literal {}", Arrays.toString(tokens)); + throw new IllegalStateException("Not enough tokens"); + } + + if (configuration.shouldQuoteLiteral()) { + sb.append(configuration.getQuoteCharacter()); + } + + String literal = tokens[columnInformation.patternColumnNumber].trim(); + if (literal.length() == 0) { + log.error("Unable to find non-empty literal {}", Arrays.toString(tokens)); + throw new IllegalStateException("Unable to find non-empty literal from tokens: " + Arrays.toString(tokens)); + } + + if (configuration.shouldUpperCaseLiterals()) { + literal = literal.toUpperCase(); + } else if (configuration.shouldLowerCaseLiterals()) { + literal = literal.toLowerCase(); + } + sb.append(literal); + + if (configuration.shouldQuoteLiteral()) { + sb.append(configuration.getQuoteCharacter()); + } + } + + private void appendEquivalenceSymbol(StringBuilder sb) { + if (configuration.shouldPadEquivalence()) { + sb.append(SPACE); + } + + sb.append(configuration.useColons() ? 
COLON : EQUALS); + + if (configuration.shouldPadEquivalence()) { + sb.append(SPACE); + } + } +} diff --git a/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffCsvToMatchPatternFormatterConfiguration.java b/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffCsvToMatchPatternFormatterConfiguration.java new file mode 100644 index 00000000000..e0e8c47f770 --- /dev/null +++ b/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffCsvToMatchPatternFormatterConfiguration.java @@ -0,0 +1,124 @@ +package datawave.age.off.util; + +import java.util.Iterator; +import java.util.Map; + +public class AgeOffCsvToMatchPatternFormatterConfiguration { + private String staticLabel; + private char quoteCharacter; + private boolean shouldQuoteLiteral; + private boolean useColons; // when false, use equals sign + private boolean shouldPadEquivalence; // when false, no spaces around equals sign + private boolean shouldLowerCaseLiterals; + private boolean shouldUpperCaseLiterals; + private Map valueMapping; + private boolean useOverrides; + private boolean disableLabel; + private Iterator input; + + private AgeOffCsvToMatchPatternFormatterConfiguration() {} + + public String getStaticLabel() { + return staticLabel; + } + + public char getQuoteCharacter() { + return quoteCharacter; + } + + public boolean shouldQuoteLiteral() { + return shouldQuoteLiteral; + } + + public boolean useColons() { + return useColons; + } + + public boolean useOverrides() { + return useOverrides; + } + + public boolean shouldPadEquivalence() { + return shouldPadEquivalence; + } + + public boolean shouldLowerCaseLiterals() { + return shouldLowerCaseLiterals; + } + + public boolean shouldUpperCaseLiterals() { + return shouldUpperCaseLiterals; + } + + public Map getValueMapping() { + return valueMapping; + } + + public boolean shouldDisableLabel() { + return disableLabel; + } + + public Iterator getInputIterator() { + return input; + } + + public static class Builder { + + final AgeOffCsvToMatchPatternFormatterConfiguration result = new AgeOffCsvToMatchPatternFormatterConfiguration(); + + public Builder useStaticLabel(String label) { + this.result.staticLabel = label; + return this; + } + + public Builder quoteLiterals(char quoteCharacter) { + this.result.shouldQuoteLiteral = true; + this.result.quoteCharacter = quoteCharacter; + return this; + } + + public Builder useColonForEquivalence() { + this.result.useColons = true; + return this; + } + + public Builder padEquivalencesWithSpace() { + this.result.shouldPadEquivalence = true; + return this; + } + + public Builder toUpperCaseLiterals() { + this.result.shouldUpperCaseLiterals = true; + return this; + } + + public Builder toLowerCaseLiterals() { + this.result.shouldLowerCaseLiterals = true; + return this; + } + + public Builder useAgeOffMapping(Map valueMapping) { + this.result.valueMapping = valueMapping; + return this; + } + + public AgeOffCsvToMatchPatternFormatterConfiguration build() { + return result; + } + + public Builder useOverrides() { + this.result.useOverrides = true; + return this; + } + + public Builder disableLabel() { + this.result.disableLabel = true; + return this; + } + + public Builder setInput(Iterator input) { + this.result.input = input; + return this; + } + } +} diff --git a/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffFileConfiguration.java b/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffFileConfiguration.java new file mode 100644 index 00000000000..10dabd2eeab --- /dev/null +++ 
b/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffFileConfiguration.java @@ -0,0 +1,51 @@ +package datawave.age.off.util; + +import java.util.ArrayList; +import java.util.List; + +public class AgeOffFileConfiguration { + private String parentFileName; + private final List ruleConfigurations = new ArrayList<>(); + private String indentation; + + private AgeOffFileConfiguration() {} + + public String getParentFileName() { + return parentFileName; + } + + public String getIndentation() { + return indentation; + } + + public List getRuleConfigurations() { + return ruleConfigurations; + } + + public static class Builder { + private final AgeOffFileConfiguration fileConfiguration = new AgeOffFileConfiguration(); + private final List ruleBuilders = new ArrayList<>(); + + public Builder withParentFile(String parentFileName) { + fileConfiguration.parentFileName = parentFileName; + return this; + } + + public Builder addNextRule(AgeOffRuleConfiguration.Builder ruleConfigurationBuilder) { + ruleBuilders.add(ruleConfigurationBuilder); + return this; + } + + public Builder withIndentation(String indentation) { + fileConfiguration.indentation = indentation; + return this; + } + + public AgeOffFileConfiguration build() { + for (AgeOffRuleConfiguration.Builder ruleConfigurationBuilder : ruleBuilders) { + fileConfiguration.ruleConfigurations.add(ruleConfigurationBuilder.build()); + } + return fileConfiguration; + } + } +} diff --git a/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffFileGenerator.java b/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffFileGenerator.java new file mode 100644 index 00000000000..b10723f6a5e --- /dev/null +++ b/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffFileGenerator.java @@ -0,0 +1,76 @@ +package datawave.age.off.util; + +import java.io.IOException; +import java.io.Writer; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Using an AgeOffFileConfiguration, writes an xml age off file containing an optional parent reference and a set of ordered rules. + */ +public class AgeOffFileGenerator { + private static final Logger log = LoggerFactory.getLogger(AgeOffFileGenerator.class); + + private final AgeOffFileConfiguration configuration; + private final String indent; + private Writer writer; + + public AgeOffFileGenerator(AgeOffFileConfiguration configuration) { + this.configuration = configuration; + this.indent = this.configuration.getIndentation(); + } + + /** + * Writes the file to the writer. 
+ * + * @throws IOException + * i/o exception with writer + */ + public void format(Writer writer) throws IOException { + this.writer = writer; + openConfigurationElement(); + writeParentElement(); + writeRules(); + closeConfiguration(); + } + + private void closeConfiguration() throws IOException { + this.writer.write("\n"); + } + + private void writeParentElement() throws IOException { + String parentFileName = this.configuration.getParentFileName(); + + if (null != parentFileName) { + log.debug("Writing parent file name: {}", parentFileName); + this.writer.write(this.indent + "" + parentFileName + "\n"); + } + } + + private void writeRules() throws IOException { + this.writer.write(this.indent + "\n"); + + for (AgeOffRuleConfiguration ruleConfiguration : this.configuration.getRuleConfigurations()) { + writeRule(ruleConfiguration); + } + + this.writer.write(this.indent + "\n"); + } + + private void writeRule(AgeOffRuleConfiguration ruleConfiguration) throws IOException { + log.debug("formatting ruleConfiguration {}", ruleConfiguration.getRuleLabel()); + + AgeOffRuleFormatter ruleFormatter = new AgeOffRuleFormatter(ruleConfiguration); + + // add two indentations: one for under the ageOffConfiguration element and another to go under the rules element + String ruleIndentation = this.configuration.getIndentation() + this.configuration.getIndentation(); + ruleFormatter.format(new IndentingDelegatingWriter(ruleIndentation, this.writer)); + + } + + private void openConfigurationElement() throws IOException { + this.writer.write("\n"); + this.writer.write("\n"); + } +} diff --git a/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffRuleConfiguration.java b/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffRuleConfiguration.java new file mode 100644 index 00000000000..6791a6b2b7d --- /dev/null +++ b/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffRuleConfiguration.java @@ -0,0 +1,111 @@ +package datawave.age.off.util; + +import java.util.ArrayList; + +import org.apache.xerces.dom.DocumentImpl; +import org.w3c.dom.Element; + +import datawave.iterators.filter.ageoff.FilterRule; + +public class AgeOffRuleConfiguration { + private static final String DEFAULT_INDENTATION = " "; + private AgeOffCsvToMatchPatternFormatterConfiguration patternConfiguration; + private String ruleLabel; + private Class filterClass; + private boolean shouldMerge; + private String indentation = DEFAULT_INDENTATION; + private String ttlDuration; + private String ttlUnits; + private ArrayList customElements; + + private AgeOffRuleConfiguration() {} + + public String getIndentation() { + return indentation; + } + + public ArrayList getCustomElements() { + return customElements; + } + + public boolean shouldMerge() { + return shouldMerge; + } + + public Class getFilterClass() { + return filterClass; + } + + public String getRuleLabel() { + return ruleLabel; + } + + public AgeOffCsvToMatchPatternFormatterConfiguration getPatternConfiguration() { + return patternConfiguration; + } + + public String getTtlUnits() { + return ttlUnits; + } + + public String getTtlDuration() { + return ttlDuration; + } + + public static class Builder { + private final AgeOffRuleConfiguration result = new AgeOffRuleConfiguration(); + private AgeOffCsvToMatchPatternFormatterConfiguration.Builder patternConfigurationBuilder; + + public Builder withPatternConfigurationBuilder(AgeOffCsvToMatchPatternFormatterConfiguration.Builder patternConfigurationBuilder) { + this.patternConfigurationBuilder = 
patternConfigurationBuilder; + return this; + } + + public Builder withRuleLabel(String ruleLabel) { + result.ruleLabel = ruleLabel; + return this; + } + + public Builder withFilterClass(Class filterClass) { + result.filterClass = filterClass; + return this; + } + + public Builder useMerge() { + result.shouldMerge = true; + return this; + } + + public Builder withIndentation(String indentation) { + result.indentation = indentation; + return this; + } + + public Builder withTtl(String duration, String units) { + result.ttlDuration = duration; + result.ttlUnits = units; + return this; + } + + public AgeOffRuleConfiguration build() { + if (this.patternConfigurationBuilder != null) { + result.patternConfiguration = this.patternConfigurationBuilder.build(); + } + return result; + } + + public Builder addSimpleElement(String elementName, String textContent) { + Element element = new DocumentImpl().createElement(elementName); + element.setTextContent(textContent); + return addCustomElement(element); + } + + public Builder addCustomElement(Element customElement) { + if (result.customElements == null) { + result.customElements = new ArrayList<>(); + } + result.customElements.add(customElement); + return this; + } + } +} diff --git a/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffRuleFormatter.java b/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffRuleFormatter.java new file mode 100644 index 00000000000..152f23e7bdc --- /dev/null +++ b/warehouse/age-off-utils/src/main/java/datawave/age/off/util/AgeOffRuleFormatter.java @@ -0,0 +1,118 @@ +package datawave.age.off.util; + +import java.io.IOException; +import java.io.StringWriter; +import java.io.Writer; + +import javax.xml.transform.OutputKeys; +import javax.xml.transform.Transformer; +import javax.xml.transform.TransformerConfigurationException; +import javax.xml.transform.TransformerException; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.dom.DOMSource; +import javax.xml.transform.stream.StreamResult; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; + +import datawave.ingest.util.cache.watch.AgeOffRuleLoader; + +/** + * Formats a rule + */ +public class AgeOffRuleFormatter { + private static final Logger log = LoggerFactory.getLogger(AgeOffRuleFormatter.class); + + private final AgeOffRuleConfiguration configuration; + private final String indent; + private static int index = 0; + + public AgeOffRuleFormatter(AgeOffRuleConfiguration configuration) { + this.configuration = configuration; + this.indent = this.configuration.getIndentation(); + } + + /** + * Outputs the configured rule to the writer. Will not close the writer. 
+ * + * @param writer + * output writer + * @throws IOException + * i/o exception with writer + */ + @VisibleForTesting + void format(Writer writer) throws IOException { + + AgeOffRuleLoader.RuleConfig ruleConfig = createRuleConfig(this.configuration); + + writer.write(transformToXmlString(ruleConfig)); + } + + private AgeOffRuleLoader.RuleConfig createRuleConfig(AgeOffRuleConfiguration configuration) throws IOException { + AgeOffRuleLoader.RuleConfig ruleConfig = new AgeOffRuleLoader.RuleConfig(this.configuration.getFilterClass().getName(), index++); + ruleConfig.label(configuration.getRuleLabel()); + ruleConfig.setIsMerge(this.configuration.shouldMerge()); + ruleConfig.ttlValue(this.configuration.getTtlDuration()); + ruleConfig.ttlUnits(this.configuration.getTtlUnits()); + ruleConfig.matchPattern(buildMatchPattern()); + ruleConfig.customElements(this.configuration.getCustomElements()); + return ruleConfig; + } + + private String transformToXmlString(AgeOffRuleLoader.RuleConfig ruleConfig) throws IOException { + try { + Transformer trans = initializeXmlTransformer(); + + Writer writer = new StringWriter(); + + StreamResult result = new StreamResult(writer); + DOMSource source = new DOMSource(new RuleConfigDocument(ruleConfig)); + trans.transform(source, result); + + return writer.toString(); + } catch (TransformerException e) { + throw new IOException("Failed to transform to XML", e); + } + } + + private Transformer initializeXmlTransformer() throws TransformerConfigurationException { + Transformer trans = TransformerFactory.newInstance().newTransformer(); + trans.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes"); + trans.setOutputProperty(OutputKeys.METHOD, "xml"); + trans.setOutputProperty(OutputKeys.INDENT, "yes"); + trans.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", calculateIndentAmount()); + return trans; + } + + private String calculateIndentAmount() { + int length = configuration.getIndentation().length(); + // add another four for every tab + length += (int) (4 * configuration.getIndentation().chars().filter(character -> character == '\t').count()); + return Integer.toString(length); + } + + private String buildMatchPattern() throws IOException { + if (configuration.getPatternConfiguration() == null) { + return ""; + } + + log.debug("Writing match pattern"); + + StringWriter writer = new StringWriter(); + AgeOffCsvToMatchPatternFormatter patternFormatter = new AgeOffCsvToMatchPatternFormatter(configuration.getPatternConfiguration()); + + // add two indentations: one for items under the rule element and another for items under the matchPattern element + String extraIndentation = this.indent + this.indent; + patternFormatter.write(new IndentingDelegatingWriter(extraIndentation, writer)); + + String result = writer.toString(); + + // final indentation to precede the closing of matchPattern + if (result.endsWith("\n")) { + return result + this.indent; + } + return result; + } +} diff --git a/warehouse/age-off-utils/src/main/java/datawave/age/off/util/IndentingDelegatingWriter.java b/warehouse/age-off-utils/src/main/java/datawave/age/off/util/IndentingDelegatingWriter.java new file mode 100644 index 00000000000..9af89959e21 --- /dev/null +++ b/warehouse/age-off-utils/src/main/java/datawave/age/off/util/IndentingDelegatingWriter.java @@ -0,0 +1,78 @@ +package datawave.age.off.util; + +import java.io.IOException; +import java.io.Writer; + +public class IndentingDelegatingWriter extends Writer { + private final Writer writer; + private final String indentation; 
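+    /** Set when the previous write ended with a newline, so the next write is prefixed with the indentation. */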
+    private boolean shouldIndentNextWrite;
+
+    public IndentingDelegatingWriter(String indentation, Writer writer) {
+        this.indentation = indentation;
+        this.writer = writer;
+        this.shouldIndentNextWrite = true;
+    }
+
+    @Override
+    public void write(String line) throws IOException {
+        if (this.shouldIndentNextWrite) {
+            this.writer.write(indentation);
+            this.shouldIndentNextWrite = false;
+        }
+
+        // use the literal (non-regex) replace so indentation containing regex or replacement metacharacters is handled safely
+        String indentedLine = line.replace("\n", "\n" + indentation);
+
+        // withhold indentation until later
+        if (indentedLine.endsWith("\n" + indentation)) {
+            indentedLine = indentedLine.substring(0, indentedLine.length() - indentation.length());
+            shouldIndentNextWrite = true;
+        }
+        this.writer.write(indentedLine);
+    }
+
+    @Override
+    public void flush() throws IOException {
+        this.writer.flush();
+    }
+
+    @Override
+    public void close() throws IOException {
+        this.writer.close();
+    }
+
+    @Override
+    public void write(char[] cbuf, int off, int len) throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void write(int c) throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void write(char[] cbuf) throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void write(String str, int off, int len) throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public Writer append(CharSequence csq) throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public Writer append(CharSequence csq, int start, int end) throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public Writer append(char c) throws IOException {
+        throw new UnsupportedOperationException();
+    }
+}
diff --git a/warehouse/age-off-utils/src/main/java/datawave/age/off/util/RuleConfigDocument.java b/warehouse/age-off-utils/src/main/java/datawave/age/off/util/RuleConfigDocument.java
new file mode 100644
index 00000000000..ad4eef2600c
--- /dev/null
+++ b/warehouse/age-off-utils/src/main/java/datawave/age/off/util/RuleConfigDocument.java
@@ -0,0 +1,110 @@
+package datawave.age.off.util;
+
+import javax.xml.transform.stream.StreamResult;
+
+import org.apache.xerces.dom.DocumentImpl;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.ProcessingInstruction;
+
+import datawave.ingest.util.cache.watch.AgeOffRuleLoader;
+
+/**
+ * Creates a transformable node from an AgeOffRuleLoader.RuleConfig
+ */
+public class RuleConfigDocument extends DocumentImpl {
+    private static final String FILTER_CLASS_ELEMENT_NAME = "filterClass";
+    private static final String MATCH_PATTERN_ELEMENT_NAME = "matchPattern";
+    private static final String TTL_ELEMENT_NAME = "ttl";
+    private static final String TTL_UNITS_ATTRIBUTE_NAME = "units";
+    private static final String RULE_ELEMENT_NAME = "rule";
+    private static final String LABEL_ATTRIBUTE_NAME = "label";
+    private static final String MODE_ATTRIBUTE_NAME = "mode";
+    private static final String MERGE_ATTRIBUTE_VALUE = "merge";
+    private static final char[] COMMENT_ESCAPE_CHARACTERS = new char[] {'<', '>'};
+
+    private final Element rule;
+    private final AgeOffRuleLoader.RuleConfig ruleConfig;
+
+    public RuleConfigDocument(AgeOffRuleLoader.RuleConfig ruleConfig) {
+        super();
+
+        this.ruleConfig = ruleConfig;
+
+        this.rule = createRuleElement();
+        super.appendChild(this.rule);
+
+        appendElementsToRule();
+    }
+
+    private Element createRuleElement() {
+        Element rule = this.createElement(RULE_ELEMENT_NAME);
+
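+        // label and mode="merge" are optional attributes, set on the rule element only when configured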
if (null != this.ruleConfig.label) { + rule.setAttribute(LABEL_ATTRIBUTE_NAME, this.ruleConfig.label); + } + + if (this.ruleConfig.isMerge) { + rule.setAttribute(MODE_ATTRIBUTE_NAME, MERGE_ATTRIBUTE_VALUE); + } + return rule; + } + + private void appendElementsToRule() { + appendFilterClassElement(); + appendTtlElement(); + appendMatchPatternElement(); + appendCustomElements(); + } + + private void appendFilterClassElement() { + Element filterClassElement = super.createElement(FILTER_CLASS_ELEMENT_NAME); + filterClassElement.setTextContent(this.ruleConfig.filterClassName); + rule.appendChild(filterClassElement); + } + + private void appendCustomElements() { + if (null != this.ruleConfig.customElements) { + for (Element customElement : this.ruleConfig.customElements) { + Node importedNode = super.importNode(customElement, true); + rule.appendChild(importedNode); + } + } + } + + private void appendMatchPatternElement() { + if (null != this.ruleConfig.matchPattern && !this.ruleConfig.matchPattern.isBlank()) { + disableCommentEscaping(rule); + + Element matchPatternElement = super.createElement(MATCH_PATTERN_ELEMENT_NAME); + matchPatternElement.setTextContent("\n" + this.ruleConfig.matchPattern); + rule.appendChild(matchPatternElement); + + enableCommentEscaping(rule); + } + } + + private void appendTtlElement() { + if (null != this.ruleConfig.ttlValue) { + Element ttlElement = super.createElement(TTL_ELEMENT_NAME); + ttlElement.setAttribute(TTL_UNITS_ATTRIBUTE_NAME, this.ruleConfig.ttlUnits); + ttlElement.setTextContent(this.ruleConfig.ttlValue); + rule.appendChild(ttlElement); + } + } + + private void enableCommentEscaping(Element rule) { + adjustEscaping(rule, StreamResult.PI_ENABLE_OUTPUT_ESCAPING); + } + + private void disableCommentEscaping(Element rule) { + adjustEscaping(rule, StreamResult.PI_DISABLE_OUTPUT_ESCAPING); + } + + private void adjustEscaping(Element rule, String piEnableOutputEscaping) { + for (char specialCharacter : COMMENT_ESCAPE_CHARACTERS) { + ProcessingInstruction escapeInstruction = super.createProcessingInstruction(piEnableOutputEscaping, String.valueOf(specialCharacter)); + rule.appendChild(escapeInstruction); + } + } +} diff --git a/warehouse/age-off-utils/src/test/java/datawave/age/off/util/AgeOffCsvToMatchPatternFormatterTest.java b/warehouse/age-off-utils/src/test/java/datawave/age/off/util/AgeOffCsvToMatchPatternFormatterTest.java new file mode 100644 index 00000000000..f3dbcd20c55 --- /dev/null +++ b/warehouse/age-off-utils/src/test/java/datawave/age/off/util/AgeOffCsvToMatchPatternFormatterTest.java @@ -0,0 +1,401 @@ +package datawave.age.off.util; + +import static org.junit.Assert.assertEquals; + +import java.io.File; +import java.io.IOException; +import java.io.StringWriter; +import java.net.URISyntaxException; +import java.nio.file.Files; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.function.Function; +import java.util.stream.Collectors; + +import org.junit.Test; + +import datawave.age.off.util.AgeOffCsvToMatchPatternFormatterConfiguration.Builder; + +public class AgeOffCsvToMatchPatternFormatterTest { + private static final String SHELF_LIFE_FILE_IN = "/filter/shelf-life.csv"; + private static final String SHELF_LIFE_FRIDGE_FILE_OUT = "/filter/shelf-life.refrigerator.matchPattern"; + private static final String SHELF_LIFE_FILE_STATIC_LABEL = "/filter/shelf-life-static-label.refrigerator.matchPattern"; + private static final String SHELF_LIFE_FREEZER_FILE_OUT = "/filter/shelf-life.freezer.matchPattern"; + 
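+    // headers name the CSV columns the formatter expects; the label column is optional and driven by the builder configuration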
+ private static final String HEADER_WITH_LABEL = "label,pattern,duration"; + private static final String HEADER_WITHOUT_LABEL = "pattern,duration"; + + // @formatter:off + private static final String INPUT_TEXT = + "bakingPowder, 365d\n" + + "driedBeans,548d\n" + + "bakingSoda,\t720d\n" + + " coffeeGround , 90d\n " + + "coffeeWholeBean ,183d\n" + + " coffeeInstant,730d\n" + + "twinkies," + Integer.MAX_VALUE + "d\n"; + // @formatter:on + + private static final String INPUT_TEXT_WITH_LABEL = HEADER_WITH_LABEL + "\n" + adjustEachLine(INPUT_TEXT, item -> "dryFood, " + item); + public static final String INPUT_TEXT_WITHOUT_LABEL = HEADER_WITHOUT_LABEL + "\n" + INPUT_TEXT; + + @Test + public void reformatsFile() throws IOException, URISyntaxException { + Builder builder = new Builder().useColonForEquivalence().quoteLiterals('"').padEquivalencesWithSpace(); + setShelfLifeInputFile(builder); + String expectedResult = readFileContents(SHELF_LIFE_FRIDGE_FILE_OUT); + assertEquals(expectedResult, reformat(builder)); + } + + @Test + public void appliesFreezerMapping() throws IOException, URISyntaxException { + Builder builder = new Builder().useColonForEquivalence().quoteLiterals('"').padEquivalencesWithSpace(); + builder.useAgeOffMapping(new FridgeToFreezerMapping()); + setShelfLifeInputFile(builder); + String expectedResult = readFileContents(SHELF_LIFE_FREEZER_FILE_OUT); + assertEquals(expectedResult, reformat(builder)); + } + + @Test + public void reformatsFileStaticLabel() throws IOException, URISyntaxException { + Builder builder = new Builder().padEquivalencesWithSpace().useStaticLabel("foodStorage"); + setShelfLifeInputFile(builder); + String expectedResult = readFileContents(SHELF_LIFE_FILE_STATIC_LABEL); + + assertEquals(expectedResult, reformat(builder)); + } + + @Test + public void outputStaticLabelAndEquals() throws IOException { + // @formatter:off + String expectedOutputText = + "dryFood bakingPowder=365d\n" + + "dryFood driedBeans=548d\n"+ + "dryFood bakingSoda=720d\n"+ + "dryFood coffeeGround=90d\n"+ + "dryFood coffeeWholeBean=183d\n"+ + "dryFood coffeeInstant=730d\n" + + "dryFood twinkies=2147483647d\n"; + // @formatter:on + + Builder builder = new Builder(); + builder.useStaticLabel("dryFood"); + assertEquals(expectedOutputText, reformat(builder, INPUT_TEXT_WITHOUT_LABEL)); + } + + @Test + public void outputQuotedLiteralAndColon() throws IOException { + // @formatter:off + String expectedOutputText = + "\"bakingPowder\" : 365d\n" + + "\"driedBeans\" : 548d\n" + + "\"bakingSoda\" : 720d\n" + + "\"coffeeGround\" : 90d\n" + + "\"coffeeWholeBean\" : 183d\n" + + "\"coffeeInstant\" : 730d\n" + + "\"twinkies\" : 2147483647d\n"; + // @formatter:on + + Builder builder = new Builder().quoteLiterals('"').padEquivalencesWithSpace().useColonForEquivalence(); + builder.disableLabel(); + assertEquals(expectedOutputText, reformat(builder, INPUT_TEXT_WITHOUT_LABEL)); + } + + @Test + public void propagatesEmptyLines() throws IOException { + // @formatter:off + String inputText = HEADER_WITH_LABEL + "\n" + + "dryFood,bakingPowder,365d\n" + + "\n" + + "\n" + + "dryFood,driedBeans,548d"; + + String expectedOutputText = + "dryFood bakingPowder=365d\n" + + "\n" + + "\n" + + "dryFood driedBeans=548d\n"; + // @formatter:on + + Builder builder = new Builder(); + assertEquals(expectedOutputText, reformat(builder, inputText)); + } + + @Test + public void propagatesCommentedLines() throws IOException { + // @formatter:off + String inputText = HEADER_WITH_LABEL + "\n" + + "dryFood,bakingPowder,365d\n" + + "\n" + 
+ "# Beans are Legumes\n" + + "dryFood,driedBeans,548d"; + + String expectedOutputText = + "dryFood bakingPowder=365d\n" + + "\n" + + "\n" + + "dryFood driedBeans=548d\n"; + // @formatter:on + + Builder builder = new Builder(); + + assertEquals(expectedOutputText, reformat(builder, inputText)); + } + + @Test + public void toUpperCaseLiterals() throws IOException { + // @formatter:off + String expectedOutputText = + "dryFood BAKINGPOWDER=365d\n" + + "dryFood DRIEDBEANS=548d\n"+ + "dryFood BAKINGSODA=720d\n"+ + "dryFood COFFEEGROUND=90d\n"+ + "dryFood COFFEEWHOLEBEAN=183d\n"+ + "dryFood COFFEEINSTANT=730d\n" + + "dryFood TWINKIES=" + Integer.MAX_VALUE + "d\n"; + // @formatter:on + + Builder builder = new Builder(); + builder.toUpperCaseLiterals(); + assertEquals(expectedOutputText, reformat(builder, INPUT_TEXT_WITH_LABEL)); + } + + @Test + public void toLowerCaseLiterals() throws IOException { + // @formatter:off + String expectedOutputText = + "dryFood bakingpowder=365d\n" + + "dryFood driedbeans=548d\n"+ + "dryFood bakingsoda=720d\n"+ + "dryFood coffeeground=90d\n"+ + "dryFood coffeewholebean=183d\n"+ + "dryFood coffeeinstant=730d\n" + + "dryFood twinkies=2147483647d\n"; + // @formatter:on + + Builder builder = new Builder(); + builder.toLowerCaseLiterals(); + assertEquals(expectedOutputText, reformat(builder, INPUT_TEXT_WITH_LABEL)); + } + + @Test + public void ignoresExtraColumns() throws IOException { + // @formatter:off + String expectedOutputText = + "dryFood bakingPowder=365d\n" + + "dryFood driedBeans=548d\n"+ + "dryFood bakingSoda=720d\n"+ + "dryFood coffeeGround=90d\n"+ + "dryFood coffeeWholeBean=183d\n"+ + "dryFood coffeeInstant=730d\n" + + "dryFood twinkies=2147483647d\n"; + // @formatter:on + + Builder builder = new Builder(); + + String inputWithExtraColumns = adjustEachLine(INPUT_TEXT_WITH_LABEL, item -> item + ",extra,stuff"); + assertEquals(expectedOutputText, reformat(builder, inputWithExtraColumns)); + } + + @Test + public void ignoresLabel() throws IOException { + // @formatter:off + String expectedOutputText = + "bakingPowder=365d\n" + + "driedBeans=548d\n"+ + "bakingSoda=720d\n"+ + "coffeeGround=90d\n"+ + "coffeeWholeBean=183d\n"+ + "coffeeInstant=730d\n" + + "twinkies=2147483647d\n"; + // @formatter:on + + Builder builder = new Builder(); + builder.disableLabel(); + + String inputWithExtraColumns = adjustEachLine(INPUT_TEXT_WITH_LABEL, item -> item + ",extra,stuff"); + assertEquals(expectedOutputText, reformat(builder, inputWithExtraColumns)); + } + + @Test + public void appliesOverrideWhenConfigured() throws IOException { + // @formatter:off + + // add an override for driedBeans from 548d to 365d + String input = prepareInputWithOverride(); + + String expectedOutputText = + "bakingPowder=365d\n" + + "driedBeans=365d\n"+ + "bakingSoda=720d\n"+ + "coffeeGround=90d\n"+ + "coffeeWholeBean=183d\n"+ + "coffeeInstant=730d\n" + + "twinkies=2147483647d\n"; + // @formatter:on + + Builder builder = new Builder(); + builder.useOverrides(); + builder.disableLabel(); + assertEquals(expectedOutputText, reformat(builder, input)); + } + + @Test + public void ignoresOverrideWhenNotConfigured() throws IOException { + // @formatter:off + + // add an override for driedBeans from 548d to 365d + String input = prepareInputWithOverride(); + + String expectedOutputText = + "bakingPowder=365d\n" + + "driedBeans=548d\n"+ + "bakingSoda=720d\n"+ + "coffeeGround=90d\n"+ + "coffeeWholeBean=183d\n"+ + "coffeeInstant=730d\n" + + "twinkies=2147483647d\n"; + // @formatter:on + + // deliberately not 
building to use overrides
+        assertEquals(expectedOutputText, reformat(new Builder().disableLabel(), input));
+    }
+
+    @Test(expected = IllegalStateException.class)
+    public void failsWithMissingPatternToken() throws IOException {
+        // @formatter:off
+        String input = HEADER_WITHOUT_LABEL + "\n" +
+                "bakingPowder, 365d\n" +
+                "driedBeans,548d\n" +
+                "\t720d\n" + // missing pattern
+                " coffeeGround , 90d\n " +
+                "coffeeWholeBean ,183d\n" +
+                " coffeeInstant,730d\n";
+        // @formatter:on
+
+        reformat(new Builder(), input);
+    }
+
+    @Test(expected = IllegalStateException.class)
+    public void failsWithEmptyPatternToken() throws IOException {
+        // @formatter:off
+        String input = HEADER_WITHOUT_LABEL + "\n" +
+                "bakingPowder, 365d\n" +
+                "driedBeans,548d\n" +
+                ",\t720d\n" + // empty pattern
+                " coffeeGround , 90d\n " +
+                "coffeeWholeBean ,183d\n" +
+                " coffeeInstant,730d\n";
+        // @formatter:on
+
+        reformat(new Builder(), input);
+    }
+
+    @Test(expected = IllegalStateException.class)
+    public void failsWithMissingLabelToken() throws IOException {
+        // @formatter:off
+        String input = HEADER_WITH_LABEL + "\n" +
+                "lbl,bakingPowder, 365d\n" +
+                "lbl,driedBeans,548d\n" +
+                "\t720d\n" + // missing label
+                " lbl, coffeeGround , 90d\n " +
+                "lbl,coffeeWholeBean ,183d\n" +
+                " lbl, coffeeInstant,730d\n";
+        // @formatter:on
+
+        reformat(new Builder(), input);
+    }
+
+    @Test(expected = IllegalStateException.class)
+    public void failsWithEmptyLabelToken() throws IOException {
+        // @formatter:off
+        String input = HEADER_WITH_LABEL + "\n" +
+                "lbl,bakingPowder, 365d\n" +
+                "lbl,driedBeans,548d\n" +
+                " , coffeeGround , 90d\n " + // empty label
+                "lbl,coffeeWholeBean ,183d\n" +
+                " lbl, coffeeInstant,730d\n";
+        // @formatter:on
+
+        reformat(new Builder(), input);
+    }
+
+    @Test(expected = IllegalStateException.class)
+    public void failsWithMissingDurationToken() throws IOException {
+        // @formatter:off
+        String input = HEADER_WITHOUT_LABEL + "\n" +
+                "bakingPowder, 365d\n" +
+                "driedBeans\n" + // missing duration
+                " coffeeGround , 90d\n " +
+                "coffeeWholeBean ,183d\n" +
+                " coffeeInstant,730d\n";
+        // @formatter:on
+
+        reformat(new Builder(), input);
+    }
+
+    @Test(expected = IllegalStateException.class)
+    public void failsWithMissingHeaderDurationToken() throws IOException {
+        // @formatter:off
+        String input = "bakingPowder, 365d\n";
+        // @formatter:on
+
+        reformat(new Builder(), input);
+    }
+
+    @Test(expected = IllegalStateException.class)
+    public void failsWithEmptyDurationToken() throws IOException {
+        // @formatter:off
+        String input = HEADER_WITHOUT_LABEL + "\n" +
+                "bakingPowder, 365d\n" +
+                "driedBeans,\n" + // empty duration
+                " coffeeGround , 90d\n " +
+                "coffeeWholeBean ,183d\n" +
+                " coffeeInstant,730d\n";
+        // @formatter:on
+
+        Builder builder = new Builder();
+
+        reformat(builder, input);
+    }
+
+    private String prepareInputWithOverride() {
+        // add an override for driedBeans from 548d to 365d
+        String inputWithOneOverride = adjustEachLine(INPUT_TEXT, line -> {
+            if (line.contains("driedBeans")) {
+                return line + ",365d";
+            }
+            return line + ",";
+        });
+
+        return HEADER_WITHOUT_LABEL + ",override" + "\n" + inputWithOneOverride;
+    }
+
+    private String readFileContents(String shelfLifeFileOut) throws IOException, URISyntaxException {
+        List<String> expectedFileContents = Files.readAllLines(new File(this.getClass().getResource(shelfLifeFileOut).toURI()).toPath());
+        return expectedFileContents.stream().collect(Collectors.joining("\n"));
+    }
+
+    static Builder setShelfLifeInputFile(Builder builder) throws
IOException, URISyntaxException { + File file = new File(AgeOffCsvToMatchPatternFormatterTest.class.getResource(SHELF_LIFE_FILE_IN).toURI()); + Iterator lineIterator = Files.lines(file.toPath()).iterator(); + return builder.setInput(lineIterator); + } + + private String reformat(Builder builder, String inputText) throws IOException { + builder.setInput(inputText.lines().iterator()); + return reformat(builder); + } + + private String reformat(Builder builder) throws IOException { + StringWriter out = new StringWriter(); + AgeOffCsvToMatchPatternFormatter generator = new AgeOffCsvToMatchPatternFormatter(builder.build()); + generator.write(out); + return out.toString(); + } + + private static String adjustEachLine(String input, Function lineFormatter) { + return Arrays.stream(input.split("\\n")).map(lineFormatter).collect(Collectors.joining("\n")) + "\n"; + } +} diff --git a/warehouse/age-off-utils/src/test/java/datawave/age/off/util/AgeOffFileGeneratorTest.java b/warehouse/age-off-utils/src/test/java/datawave/age/off/util/AgeOffFileGeneratorTest.java new file mode 100644 index 00000000000..11a1b38a7e6 --- /dev/null +++ b/warehouse/age-off-utils/src/test/java/datawave/age/off/util/AgeOffFileGeneratorTest.java @@ -0,0 +1,402 @@ +package datawave.age.off.util; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.StringWriter; +import java.io.Writer; +import java.lang.reflect.InvocationTargetException; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.util.List; +import java.util.stream.Collectors; + +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Value; +import org.apache.accumulo.core.iterators.IteratorEnvironment; +import org.apache.xerces.dom.DocumentImpl; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.w3c.dom.Element; +import org.w3c.dom.Node; + +import datawave.ingest.util.cache.watch.AgeOffRuleLoader; +import datawave.ingest.util.cache.watch.TestFilter; +import datawave.iterators.filter.ColumnVisibilityLabeledFilter; +import datawave.iterators.filter.ageoff.AppliedRule; +import datawave.iterators.filter.ageoff.ConfigurableIteratorEnvironment; +import datawave.iterators.filter.ageoff.FilterRule; + +public class AgeOffFileGeneratorTest { + private static final String PARENT_FILE_NAME = "alternate-root.xml"; + + // @formatter:off + private static final String EXPECTED_FILE_CONTENTS = + "\n" + + "\n" + + " " + PARENT_FILE_NAME + "\n" + + " \n" + + " \n" + + " datawave.iterators.filter.ColumnVisibilityLabeledFilter\n" + + " \n" + + " dryFood bakingPowder=365d\n" + + " dryFood driedBeans=548d\n" + + " dryFood bakingSoda=720d\n" + + " dryFood coffeeGround=90d\n" + + " dryFood coffeeWholeBean=183d\n" + + " dryFood coffeeInstant=730d\n" + + " dryFood twinkies=" + Integer.MAX_VALUE + "d\n" + + " \n" + + " \n" + + " \n" + + " datawave.iterators.filter.ageoff.DataTypeAgeOffFilter\n" + + " 720\n" + + " foo,bar\n" + + " 44\n" + + " \n" + + " \n" + + " datawave.ingest.util.cache.watch.TestFilter\n" + + " 10\n" + + " 1\n" + + " \n" + + " false\n" + + " \n" + + " \n" + + "\n"; + + private static final String OTHER_EXPECTED_FILE_CONTENTS = + "\n" + + "\n" + + " test-root-field.xml\n" + + " \n" + + " \n" + + " 
datawave.iterators.filter.ageoff.DataTypeAgeOffFilter\n" + + " true\n" + + " \n" + + " \n" + + " datawave.iterators.filter.ageoff.FieldAgeOffFilter\n" + + " 5\n" + + " true\n" + + " field_y,field_z\n" + + " 1\n" + + " 2\n" + + " \n" + + " \n" + + " datawave.iterators.filter.EdgeColumnQualifierTokenFilter\n" + + " \n" + + " \"egg\" : 10d\n" + + " \"chicken\" : 10d\n" + + " \"ham\" : 10d\n" + + " \"tunaSalad\" : 10d\n" + + " \"macaroni\" : 10d\n" + + " \n" + + " \n" + + " \"hotDogsOpened\" : 30d\n" + + " \"hotDogsUnopened\" : 60d\n" + + " \"luncheonOpened\" : 14d\n" + + " \"luncheonDeliSliced\" : 14d\n" + + " \"luncheonUnopened\" : 60d\n" + + " \"bacon\" : 30d\n" + + " \"rawChickenSausage\" : 7d\n" + + " \"rawTurkeySausage\" : 7d\n" + + " \"rawPorkSausage\" : 7d\n" + + " \"rawBeefSausage\" : 7d\n" + + " \"cookedChickenSausage\" : 30d\n" + + " \"cookedTurkeySausage\" : 30d\n" + + " \"cookedPorkSausage\" : 30d\n" + + " \"cookedBeefSausage\" : 30d\n" + + " \"frozenSausageAfterCooking\" : 10d\n" + + " \"hamburger\" : 7d\n" + + " \"groundBeef\" : 7d\n" + + " \"turkey\" : 7d\n" + + " \"groundChicken\" : 7d\n" + + " \"otherPoultry\" : 7d\n" + + " \"veal\" : 7d\n" + + " \"pork\" : 7d\n" + + " \"lamb\" : 7d\n" + + " \"mixturesOfOtherGroundMeats\" : 7d\n" + + " \"steaks\" : 14d\n" + + " \"chops\" : 14d\n" + + " \"roasts\" : 14d\n" + + " \"freshUncuredUncooked\" : 14d\n" + + " \"freshUncuredCooked\" : 10d\n" + + " \"curedUncooked\" : 30d\n" + + " \"unopenedCookedAndSealedAtPlant\" : 60d\n" + + " \"cookedStoreWrappedWhole\" : 30d\n" + + " \"cookedStoreWrappedCut\" : 14d\n" + + " \"cookedCountryHam\" : 30d\n" + + " \"cannedUnopenedLabeledKeepRefrigerated\" : 0s\n" + + " \"cannedOpenedShelfStable\" : 10d\n" + + " \"unopenedShelfStableCannedAtRoomTemperature\" : 1826d\n" + + " \"prosciutto\" : 0s\n" + + " \"parma\" : 0s\n" + + " \"sarrano\" : 0s\n" + + " \"dryItalian\" : 0s\n" + + " \"spanishType\" : 0s\n" + + " \"cut\" : 0s\n" + + " \"wholeChicken\" : 7d\n" + + " \"wholeTurkey\" : 7d\n" + + " \"chickenPieces\" : 7d\n" + + " \"turkeyPieces\" : 7d\n" + + " \"bluefish\" : 7d\n" + + " \"catfish\" : 7d\n" + + " \"mackerel\" : 7d\n" + + " \"mullet\" : 7d\n" + + " \"salmon\" : 7d\n" + + " \"tuna\" : 7d\n" + + " \"cod\" : 7d\n" + + " \"flounder\" : 7d\n" + + " \"haddock\" : 7d\n" + + " \"halibut\" : 7d\n" + + " \"sole\" : 7d\n" + + " \"pollock\" : 7d\n" + + " \"oceanPerch\" : 7d\n" + + " \"rockfish\" : 7d\n" + + " \"seaTrout\" : 7d\n" + + " \"freshCrabMeat\" : 10d\n" + + " \"freshLobster\" : 10d\n" + + " \"liveCrab\" : 3d\n" + + " \"liveLobster\" : 3d\n" + + " \"scallops\" : 42d\n" + + " \"shrimp\" : 14d\n" + + " \"crayfish\" : 14d\n" + + " \"shuckedClams\" : 42d\n" + + " \"mussels\" : 42d\n" + + " \"oysters\" : 42d\n" + + " \"squid\" : 7d\n" + + " \n" + + " \n" + + " \"rawEggInShell\" : 365d\n" + + " \"rawEggWhitesAndYolks\" : 10d\n" + + " \"eggFrozenInShell\" : 0s\n" + + " \"eggInBrokenShell\" : 0s\n" + + " \"hardCookedEggs\" : 30d\n" + + " \"unopenedEggSubstitutes\" : 30d\n" + + " \"openedEggSubstitutes\" : 7d\n" + + " \"unfrozenUnopenedEggSubstitutes\" : 30d\n" + + " \"unfrozenOpenedEggSubstitutes\" : 10d\n" + + " \"eggCasseroles\" : 10d\n" + + " \"commercialEggNog\" : 14d\n" + + " \"homemadeEggNog\" : 10d\n" + + " \"bakedPumpkinPies\" : 10d\n" + + " \"bakedPecanPies\" : 10d\n" + + " \"bakedCustardPies\" : 10d\n" + + " \"bakedChiffonPies\" : 10d\n" + + " \"quicheWithFilling\" : 14d\n" + + " \"vegetableOrMeatAdded\" : 10d\n" + + " \"cookedMeat\" : 10d\n" + + " \"cookedPoultry\" : 10d\n" + + " 
\"chickenNuggets\" : 10d\n" + + " \"chickenPatties\" : 10d\n" + + " \"pizza\" : 10d\n" + + " \n" + + " \n" + + " \n" + + "\n"; + // @formatter:on + + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + + @Test + public void generateFileContentsWithMultipleRules() throws IOException { + AgeOffFileConfiguration.Builder builder = createBuilderForMultipleRules(); + assertEquals(EXPECTED_FILE_CONTENTS, generateFileContentsInMemory(builder)); + } + + @Test + public void writeMultipleRulesToLocalFile() throws IOException { + File temporaryFile = writeToFile(createBuilderForMultipleRules()); + String actualResult = Files.readAllLines(temporaryFile.toPath()).stream().collect(Collectors.joining("\n")) + "\n"; + assertEquals(EXPECTED_FILE_CONTENTS, actualResult); + } + + private AgeOffFileConfiguration.Builder createBuilderForMultipleRules() { + AgeOffRuleConfiguration.Builder colVisFilterRule = defineColVisFilterRule(); + AgeOffRuleConfiguration.Builder testFilterRule = defineTestFilterRule(); + AgeOffRuleConfiguration.Builder dataTypeRule = defineDataTypeRule(); + + AgeOffFileConfiguration.Builder builder = new AgeOffFileConfiguration.Builder(); + builder.withParentFile(PARENT_FILE_NAME); + builder.addNextRule(colVisFilterRule); + builder.addNextRule(dataTypeRule); + builder.addNextRule(testFilterRule); + builder.withIndentation(" "); + return builder; + } + + @Test + public void createAnotherFileWithMultipleRules() throws IOException, URISyntaxException { + AgeOffRuleConfiguration.Builder dataTypeIndexTableRule = defineDataTypeRuleIndexTable(); + AgeOffRuleConfiguration.Builder fieldRule = defineFieldAgeOffRule(); + + AgeOffFileConfiguration.Builder builder = new AgeOffFileConfiguration.Builder(); + builder.withParentFile("test-root-field.xml"); + builder.addNextRule(dataTypeIndexTableRule); + builder.addNextRule(fieldRule); + builder.addNextRule(defineColQualifierRule()); + builder.withIndentation(" "); + assertEquals(OTHER_EXPECTED_FILE_CONTENTS, generateFileContentsInMemory(builder)); + } + + @Test + public void fileLoadable() throws IOException, InvocationTargetException, NoSuchMethodException { + AgeOffRuleLoader.AgeOffFileLoaderDependencyProvider provider = new TestProvider(); + + List rules = loadAgeOffFilterFile(provider, EXPECTED_FILE_CONTENTS); + assertEquals(3, rules.size()); + rulesRunWithoutErrors(rules); + + List otherRules = loadAgeOffFilterFile(provider, OTHER_EXPECTED_FILE_CONTENTS); + // inherited a rule from parent + assertEquals(4, otherRules.size()); + rulesRunWithoutErrors(rules); + } + + private void rulesRunWithoutErrors(List rules) { + Key key = new Key(); + key.setTimestamp(System.currentTimeMillis() + 1000000L); + for (FilterRule filterRule : rules) { + boolean result = ((AppliedRule) filterRule).accept(key, new Value()); + if (filterRule.getClass() == TestFilter.class) { + assertFalse(filterRule.toString(), result); + } else { + assertTrue(filterRule.toString(), result); + } + } + } + + private List loadAgeOffFilterFile(AgeOffRuleLoader.AgeOffFileLoaderDependencyProvider provider, String fileContents) + throws IOException, InvocationTargetException, NoSuchMethodException { + return new AgeOffRuleLoader(provider).load(toInputStream(fileContents)); + } + + private ByteArrayInputStream toInputStream(String fileContents) { + return new ByteArrayInputStream(fileContents.getBytes(StandardCharsets.UTF_8)); + } + + private AgeOffRuleConfiguration.Builder defineTestFilterRule() { + AgeOffRuleConfiguration.Builder builder = new 
AgeOffRuleConfiguration.Builder(); + builder.withFilterClass(datawave.ingest.util.cache.watch.TestFilter.class); + + String duration = "10"; + String units = "ms"; + builder.withTtl(duration, units); + + builder.addSimpleElement("matchPattern", "1"); + + Element ttlElement = new DocumentImpl().createElement("myTagName"); + ttlElement.setAttribute("ttl", "1234"); + builder.addCustomElement(ttlElement); + + builder.addSimpleElement("filtersWater", Boolean.FALSE.toString()); + + return builder; + } + + private AgeOffRuleConfiguration.Builder defineColVisFilterRule() { + AgeOffCsvToMatchPatternFormatterConfiguration.Builder patternBuilder = new AgeOffCsvToMatchPatternFormatterConfiguration.Builder(); + patternBuilder.useStaticLabel("dryFood"); + patternBuilder.setInput(AgeOffCsvToMatchPatternFormatterTest.INPUT_TEXT_WITHOUT_LABEL.lines().iterator()); + + AgeOffRuleConfiguration.Builder builder = new AgeOffRuleConfiguration.Builder(); + builder.withPatternConfigurationBuilder(patternBuilder); + builder.withRuleLabel("labeledPatternsFormat"); + builder.withFilterClass(ColumnVisibilityLabeledFilter.class); + builder.useMerge(); + return builder; + } + + private AgeOffRuleConfiguration.Builder defineColQualifierRule() throws IOException, URISyntaxException { + AgeOffCsvToMatchPatternFormatterConfiguration.Builder patternBuilder = new AgeOffCsvToMatchPatternFormatterConfiguration.Builder(); + patternBuilder.padEquivalencesWithSpace(); + patternBuilder.useColonForEquivalence(); + patternBuilder.quoteLiterals('"'); + AgeOffCsvToMatchPatternFormatterTest.setShelfLifeInputFile(patternBuilder); + patternBuilder.useAgeOffMapping(new FridgeToFreezerMapping()); + + AgeOffRuleConfiguration.Builder builder = new AgeOffRuleConfiguration.Builder(); + patternBuilder.disableLabel(); + builder.withPatternConfigurationBuilder(patternBuilder); + builder.withRuleLabel("edge"); + builder.withFilterClass(datawave.iterators.filter.EdgeColumnQualifierTokenFilter.class); + return builder; + } + + private AgeOffRuleConfiguration.Builder defineDataTypeRule() { + AgeOffRuleConfiguration.Builder builder = new AgeOffRuleConfiguration.Builder(); + builder.withIndentation("\t"); + builder.withFilterClass(datawave.iterators.filter.ageoff.DataTypeAgeOffFilter.class); + + String duration = "720"; + String units = "d"; + builder.withTtl(duration, units); + + builder.addSimpleElement("datatypes", "foo,bar"); + builder.addSimpleElement("bar.ttl", Integer.toString(44)); + + return builder; + } + + private AgeOffRuleConfiguration.Builder defineDataTypeRuleIndexTable() { + AgeOffRuleConfiguration.Builder builder = new AgeOffRuleConfiguration.Builder(); + builder.withIndentation(" "); + builder.useMerge(); + builder.withFilterClass(datawave.iterators.filter.ageoff.DataTypeAgeOffFilter.class); + + builder.addSimpleElement("isindextable", Boolean.TRUE.toString()); + return builder; + } + + private AgeOffRuleConfiguration.Builder defineFieldAgeOffRule() { + AgeOffRuleConfiguration.Builder builder = new AgeOffRuleConfiguration.Builder(); + builder.withIndentation(" "); + builder.withFilterClass(datawave.iterators.filter.ageoff.FieldAgeOffFilter.class); + + String duration = "5"; + String units = "s"; + builder.withTtl(duration, units); + + builder.addSimpleElement("isindextable", Boolean.TRUE.toString()); + builder.addSimpleElement("fields", "field_y,field_z"); + builder.addSimpleElement("field_y.ttl", Integer.toString(1)); + builder.addSimpleElement("field_z.ttl", Integer.toString(2)); + + return builder; + } + + private String 
generateFileContentsInMemory(AgeOffFileConfiguration.Builder builder) throws IOException { + StringWriter writer = new StringWriter(); + AgeOffFileGenerator generator = new AgeOffFileGenerator(builder.build()); + generator.format(writer); + return writer.toString(); + } + + private File writeToFile(AgeOffFileConfiguration.Builder builder) throws IOException { + File temporaryFile = temporaryFolder.newFile(); + Writer writer = Files.newBufferedWriter(temporaryFile.toPath()); + AgeOffFileGenerator generator = new AgeOffFileGenerator(builder.build()); + generator.format(writer); + writer.close(); + return temporaryFile; + } + + private class TestProvider implements AgeOffRuleLoader.AgeOffFileLoaderDependencyProvider { + @Override + public IteratorEnvironment getIterEnv() { + return new ConfigurableIteratorEnvironment(); + } + + @Override + public InputStream getParentStream(Node parent) { + return this.getClass().getResourceAsStream("/filter/" + parent.getTextContent()); + } + } +} diff --git a/warehouse/age-off-utils/src/test/java/datawave/age/off/util/AgeOffRuleFormatterTest.java b/warehouse/age-off-utils/src/test/java/datawave/age/off/util/AgeOffRuleFormatterTest.java new file mode 100644 index 00000000000..fe0fa744e67 --- /dev/null +++ b/warehouse/age-off-utils/src/test/java/datawave/age/off/util/AgeOffRuleFormatterTest.java @@ -0,0 +1,196 @@ +package datawave.age.off.util; + +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.io.StringWriter; + +import org.apache.xerces.dom.DocumentImpl; +import org.junit.Test; +import org.w3c.dom.Element; + +public class AgeOffRuleFormatterTest { + /** + * + * 720 foo,bar 44 + * + * @throws IOException + */ + @Test + public void createRuleFromCsv() throws IOException { + // @formatter:off + String expectedOutputText = + "\n" + + " datawave.ingest.util.cache.watch.TestTrieFilter\n" + + " \n" + + " dryFood bakingPowder=365d\n" + + " dryFood driedBeans=548d\n" + + " dryFood bakingSoda=720d\n" + + " dryFood coffeeGround=90d\n" + + " dryFood coffeeWholeBean=183d\n" + + " dryFood coffeeInstant=730d\n" + + " dryFood twinkies=" + Integer.MAX_VALUE + "d\n" + + " \n" + + "\n"; + // @formatter:on + + AgeOffCsvToMatchPatternFormatterConfiguration.Builder patternBuilder = new AgeOffCsvToMatchPatternFormatterConfiguration.Builder(); + patternBuilder.useStaticLabel("dryFood"); + patternBuilder.setInput(AgeOffCsvToMatchPatternFormatterTest.INPUT_TEXT_WITHOUT_LABEL.lines().iterator()); + + AgeOffRuleConfiguration.Builder builder = new AgeOffRuleConfiguration.Builder(); + builder.withPatternConfigurationBuilder(patternBuilder); + builder.withRuleLabel("test"); + builder.withFilterClass(datawave.ingest.util.cache.watch.TestTrieFilter.class); + builder.useMerge(); + + assertEquals(expectedOutputText, generateRule(builder)); + } + + @Test + public void createRuleWithoutMatchPatternFormatter() throws IOException { + // @formatter:off + String expectedOutputText = + "\n" + + " datawave.ingest.util.cache.watch.TestFilter\n" + + " 10\n" + + " 1\n" + + " \n" + + " false\n" + + "\n"; + // @formatter:on + + AgeOffRuleConfiguration.Builder builder = new AgeOffRuleConfiguration.Builder(); + builder.withFilterClass(datawave.ingest.util.cache.watch.TestFilter.class); + + String duration = "10"; + String units = "ms"; + builder.withTtl(duration, units); + + builder.addSimpleElement("matchPattern", "1"); + + Element element = new DocumentImpl().createElement("myTagName"); + element.setAttribute("ttl", "1234"); + builder.addCustomElement(element); + 
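+        // custom elements such as this one are deep-imported into the generated rule element in the order they are added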
+ builder.addSimpleElement("filtersWater", Boolean.FALSE.toString()); + + assertEquals(expectedOutputText, generateRule(builder)); + } + + @Test + public void createRuleFromDataTypeAgeOffFilterJavaDoc() throws IOException { + // @formatter:off + String expectedOutputText = + "\n" + + " datawave.iterators.filter.ageoff.DataTypeAgeOffFilter\n" + + " 720\n" + + " foo,bar\n" + + " 44\n" + + "\n"; + // @formatter:on + + AgeOffRuleConfiguration.Builder builder = new AgeOffRuleConfiguration.Builder(); + builder.withIndentation("\t"); + builder.withFilterClass(datawave.iterators.filter.ageoff.DataTypeAgeOffFilter.class); + + String duration = "720"; + String units = "d"; + builder.withTtl(duration, units); + + builder.addSimpleElement("datatypes", "foo,bar"); + builder.addSimpleElement("bar.ttl", "44"); + + assertEquals(expectedOutputText, generateRule(builder)); + } + + @Test + public void createRuleForDataTypeIndexTable() throws IOException { + // @formatter:off + String expectedOutputText = + "\n" + + " datawave.iterators.filter.ageoff.DataTypeAgeOffFilter\n" + + " true\n" + + "\n"; + // @formatter:on + + AgeOffRuleConfiguration.Builder builder = new AgeOffRuleConfiguration.Builder(); + builder.withIndentation(" "); + builder.useMerge(); + builder.withFilterClass(datawave.iterators.filter.ageoff.DataTypeAgeOffFilter.class); + + builder.addSimpleElement("isindextable", Boolean.TRUE.toString()); + + assertEquals(expectedOutputText, generateRule(builder)); + } + + @Test + public void createFieldAgeOffRule() throws IOException { + // @formatter:off + String fieldAgeOffRule = + "\n" + + " datawave.iterators.filter.ageoff.FieldAgeOffFilter\n" + + " 5\n" + + " true\n" + + " field_y,field_z\n" + + " 1\n" + + " 2\n" + + "\n"; + // @formatter:on + + AgeOffRuleConfiguration.Builder builder = new AgeOffRuleConfiguration.Builder(); + builder.withIndentation(" "); + builder.withFilterClass(datawave.iterators.filter.ageoff.FieldAgeOffFilter.class); + + String duration = "5"; + String units = "s"; + builder.withTtl(duration, units); + + builder.addSimpleElement("isindextable", Boolean.TRUE.toString()); + + builder.addSimpleElement("fields", "field_y,field_z"); + + builder.addSimpleElement("field_y.ttl", "1"); + builder.addSimpleElement("field_z.ttl", "2"); + + assertEquals(fieldAgeOffRule, generateRule(builder)); + } + + @Test + public void createFieldAgeOffRuleTtlAlternative() throws IOException { + // @formatter:off + String fieldAgeOffRule = + "\n" + + " datawave.iterators.filter.ageoff.FieldAgeOffFilter\n" + + " s\n" + + " 5\n" + + " *\n" + + " field_y,field_z\n" + + " 1\n" + + " 2\n" + + "\n"; + // @formatter:on + + AgeOffRuleConfiguration.Builder builder = new AgeOffRuleConfiguration.Builder(); + builder.withIndentation(" "); + builder.withFilterClass(datawave.iterators.filter.ageoff.FieldAgeOffFilter.class); + + builder.addSimpleElement("ttlUnits", "s"); + + builder.addSimpleElement("ttlValue", "5"); + + builder.addSimpleElement("matchPattern", "*"); + builder.addSimpleElement("fields", "field_y,field_z"); + builder.addSimpleElement("field_y.ttl", "1"); + builder.addSimpleElement("field_z.ttl", "2"); + + assertEquals(fieldAgeOffRule, generateRule(builder)); + } + + private String generateRule(AgeOffRuleConfiguration.Builder builder) throws IOException { + StringWriter out = new StringWriter(); + AgeOffRuleFormatter generator = new AgeOffRuleFormatter(builder.build()); + generator.format(out); + return out.toString(); + } +} diff --git 
a/warehouse/age-off-utils/src/test/java/datawave/age/off/util/FridgeToFreezerMapping.java b/warehouse/age-off-utils/src/test/java/datawave/age/off/util/FridgeToFreezerMapping.java new file mode 100644 index 00000000000..83fbe0c82da --- /dev/null +++ b/warehouse/age-off-utils/src/test/java/datawave/age/off/util/FridgeToFreezerMapping.java @@ -0,0 +1,34 @@ +package datawave.age.off.util; + +import java.util.HashMap; + +public class FridgeToFreezerMapping extends HashMap { + private static final long serialVersionUID = 332498820763181265L; + + public FridgeToFreezerMapping() { + super(); + this.put("1h", "0s"); + this.put("1d", "3d"); + this.put("2d", "7d"); + this.put("3d", "7d"); + this.put("4d", "10d"); + this.put("5d", "14d"); + this.put("7d", "30d"); + this.put("10d", "42d"); + this.put("35d", "365d"); + this.put("14d", "60d"); + this.put("90d", "365d"); + this.put("270d", "1095d"); + this.put("730d", "1826d"); + } + + @Override + public String get(Object key) { + String result = super.get(key); + // If uncertain, discard + if (null == result) { + return "0s"; + } + return result; + } +} diff --git a/warehouse/age-off-utils/src/test/java/datawave/age/off/util/IndentingDelegatingWriterTest.java b/warehouse/age-off-utils/src/test/java/datawave/age/off/util/IndentingDelegatingWriterTest.java new file mode 100644 index 00000000000..56bb1622f75 --- /dev/null +++ b/warehouse/age-off-utils/src/test/java/datawave/age/off/util/IndentingDelegatingWriterTest.java @@ -0,0 +1,97 @@ +package datawave.age.off.util; + +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.io.StringWriter; +import java.io.Writer; + +import org.junit.Test; + +public class IndentingDelegatingWriterTest { + @Test + public void addsIndentation() throws IOException { + StringWriter originalWriter = new StringWriter(); + Writer writer = new IndentingDelegatingWriter(" ", originalWriter); + writer.write("\n"); + writer.write("a\n"); + writer.write("b\n"); + writer.close(); + + // @formatter:off + String expectedText = + " \n" + + " a\n" + + " b\n"; + // @formatter:on + + assertEquals(expectedText, originalWriter.toString()); + } + + @Test + public void nestedIndentation() throws IOException { + Writer writer = new StringWriter(); + // outer element + Writer outerLayer = new IndentingDelegatingWriter("--", writer); + // nested element + Writer innerLayer = new IndentingDelegatingWriter(" ", outerLayer); + + outerLayer.write("outer\n"); + + innerLayer.write("\n"); + innerLayer.write("a\n"); + innerLayer.write("b\n"); + + outerLayer.write("outer\n"); + + innerLayer.close(); + // @formatter:off + String expectedText = + "--outer\n" + + "-- \n" + + "-- a\n" + + "-- b\n" + + "--outer\n"; + + // @formatter:on + assertEquals(expectedText, writer.toString()); + } + + @Test + public void multiNestedIndentation() throws IOException { + Writer writer = new StringWriter(); + // outer element + Writer outerLayer = new IndentingDelegatingWriter("--", writer); + // nested element + Writer middleLayer = new IndentingDelegatingWriter("++", outerLayer); + // further nested element + Writer innerLayer = new IndentingDelegatingWriter(" ", middleLayer); + + outerLayer.write("outer\n"); + + middleLayer.write("\n"); + middleLayer.write("a\n"); + innerLayer.write("1\n"); + innerLayer.write("2\n"); + innerLayer.write("3\n"); + + middleLayer.write("b\n"); + + outerLayer.write("outer\n"); + + innerLayer.close(); + // @formatter:off + String expectedText = + "--outer\n" + + "--++\n" + + "--++a\n" + + "--++ 1\n" + + "--++ 
2\n" + + "--++ 3\n" + + "--++b\n" + + "--outer\n"; + + // @formatter:on + assertEquals(expectedText, writer.toString()); + } +} diff --git a/warehouse/age-off-utils/src/test/java/datawave/age/off/util/RuleConfigDocumentTest.java b/warehouse/age-off-utils/src/test/java/datawave/age/off/util/RuleConfigDocumentTest.java new file mode 100644 index 00000000000..9d2559ead50 --- /dev/null +++ b/warehouse/age-off-utils/src/test/java/datawave/age/off/util/RuleConfigDocumentTest.java @@ -0,0 +1,164 @@ +package datawave.age.off.util; + +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.io.StringWriter; +import java.io.Writer; +import java.util.Arrays; +import java.util.List; + +import javax.xml.transform.OutputKeys; +import javax.xml.transform.Transformer; +import javax.xml.transform.TransformerConfigurationException; +import javax.xml.transform.TransformerException; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.dom.DOMSource; +import javax.xml.transform.stream.StreamResult; + +import org.apache.xerces.dom.DocumentImpl; +import org.junit.Test; +import org.w3c.dom.Element; + +import datawave.ingest.util.cache.watch.AgeOffRuleLoader; + +public class RuleConfigDocumentTest { + + @Test + public void includesFilterClass() throws IOException { + String actual = transformToXmlString(new AgeOffRuleLoader.RuleConfig("myclass", 1)); + + // @formatter:off + String expected = "\n" + + " myclass\n" + + "\n"; + // @formatter:on + assertEquals(actual, expected, actual); + } + + @Test + public void includesTtl() throws IOException { + AgeOffRuleLoader.RuleConfig ruleConfig = new AgeOffRuleLoader.RuleConfig("myclass", 1); + ruleConfig.ttlUnits("h"); + ruleConfig.ttlValue("2468"); + String actual = transformToXmlString(ruleConfig); + // @formatter:off + String expected = "\n" + + " myclass\n" + + " 2468\n" + + "\n"; + // @formatter:on + assertEquals(actual, expected, actual); + } + + @Test + public void includesMatchPattern() throws IOException { + AgeOffRuleLoader.RuleConfig ruleConfig = new AgeOffRuleLoader.RuleConfig("myclass", 1); + ruleConfig.matchPattern("1234\n"); + String actual = transformToXmlString(ruleConfig); + + // @formatter:off + String expected = "\n" + + " myclass\n" + + " \n1234\n\n" + + "\n"; + // @formatter:on + assertEquals(actual, expected, actual); + } + + @Test + public void includesMerge() throws IOException { + AgeOffRuleLoader.RuleConfig ruleConfig = new AgeOffRuleLoader.RuleConfig("myclass", 1); + ruleConfig.setIsMerge(true); + String actual = transformToXmlString(ruleConfig); + + // @formatter:off + String expected = "\n" + + " myclass\n" + + "\n"; + // @formatter:on + assertEquals(actual, expected, actual); + } + + @Test + public void includesCustomElements() throws IOException { + AgeOffRuleLoader.RuleConfig ruleConfig = new AgeOffRuleLoader.RuleConfig("myclass", 1); + List elements = Arrays.asList(new DocumentImpl().createElement("a"), new DocumentImpl().createElement("b")); + ruleConfig.customElements(elements); + String actual = transformToXmlString(ruleConfig); + + // @formatter:off + String expected = "\n" + + " myclass\n" + + " \n" + + " \n" + + "\n"; + // @formatter:on + assertEquals(actual, expected, actual); + } + + @Test + public void includesLabel() throws IOException { + AgeOffRuleLoader.RuleConfig ruleConfig = new AgeOffRuleLoader.RuleConfig("myclass", 1); + ruleConfig.label("tag"); + String actual = transformToXmlString(ruleConfig); + + // @formatter:off + String expected = "\n" + + " myclass\n" + + "\n"; 
+    @Test
+    public void includesAll() throws IOException {
+        AgeOffRuleLoader.RuleConfig ruleConfig = new AgeOffRuleLoader.RuleConfig("myclass", 1);
+        ruleConfig.ttlUnits("h");
+        ruleConfig.ttlValue("2468");
+        ruleConfig.matchPattern("1234\n");
+        ruleConfig.setIsMerge(true);
+        List<Element> elements = Arrays.asList(new DocumentImpl().createElement("a"), new DocumentImpl().createElement("b"));
+        ruleConfig.customElements(elements);
+        ruleConfig.label("tag");
+        String actual = transformToXmlString(ruleConfig);
+
+        // @formatter:off
+        String expected = "<rule label=\"tag\" mode=\"merge\">\n" +
+                "  <filterClass>myclass</filterClass>\n" +
+                "  <ttl units=\"h\">2468</ttl>\n" +
+                "  <matchPattern>\n" +
+                "1234\n" +
+                "</matchPattern>\n" +
+                "  <a/>\n" +
+                "  <b/>\n" +
+                "</rule>\n";
+        // @formatter:on
+        assertEquals(actual, expected, actual);
+    }
+
+    private String transformToXmlString(AgeOffRuleLoader.RuleConfig ruleConfig) throws IOException {
+        try {
+            Transformer transformer = initializeXmlTransformer();
+
+            Writer writer = new StringWriter();
+
+            StreamResult result = new StreamResult(writer);
+            DOMSource source = new DOMSource(new RuleConfigDocument(ruleConfig));
+            transformer.transform(source, result);
+
+            return writer.toString();
+        } catch (TransformerException e) {
+            throw new IOException("Failed to transform to XML", e);
+        }
+    }
+
+    private static Transformer initializeXmlTransformer() throws TransformerConfigurationException {
+        Transformer trans = TransformerFactory.newInstance().newTransformer();
+        trans.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
+        trans.setOutputProperty(OutputKeys.METHOD, "xml");
+        trans.setOutputProperty(OutputKeys.INDENT, "yes");
+        trans.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2");
+        return trans;
+    }
+}
diff --git a/warehouse/age-off-utils/src/test/resources/filter/shelf-life-static-label.refrigerator.matchPattern b/warehouse/age-off-utils/src/test/resources/filter/shelf-life-static-label.refrigerator.matchPattern
new file mode 100644
index 00000000000..48e8177a730
--- /dev/null
+++ b/warehouse/age-off-utils/src/test/resources/filter/shelf-life-static-label.refrigerator.matchPattern
@@ -0,0 +1,106 @@
+foodStorage egg = 4d
+foodStorage chicken = 4d
+foodStorage ham = 4d
+foodStorage tunaSalad = 4d
+foodStorage macaroni = 4d
+
+
+foodStorage hotDogsOpened = 7d
+foodStorage hotDogsUnopened = 14d
+foodStorage luncheonOpened = 5d
+foodStorage luncheonDeliSliced = 5d
+foodStorage luncheonUnopened = 14d
+foodStorage bacon = 7d
+foodStorage rawChickenSausage = 2d
+foodStorage rawTurkeySausage = 2d
+foodStorage rawPorkSausage = 2d
+foodStorage rawBeefSausage = 2d
+foodStorage cookedChickenSausage = 7d
+foodStorage cookedTurkeySausage = 7d
+foodStorage cookedPorkSausage = 7d
+foodStorage cookedBeefSausage = 7d
+foodStorage frozenSausageAfterCooking = 4d
+foodStorage hamburger = 2d
+foodStorage groundBeef = 2d
+foodStorage turkey = 2d
+foodStorage groundChicken = 2d
+foodStorage otherPoultry = 2d
+foodStorage veal = 2d
+foodStorage pork = 2d
+foodStorage lamb = 2d
+foodStorage mixturesOfOtherGroundMeats = 2d
+foodStorage steaks = 5d
+foodStorage chops = 5d
+foodStorage roasts = 5d
+foodStorage freshUncuredUncooked = 5d
+foodStorage freshUncuredCooked = 4d
+foodStorage curedUncooked = 7d
+foodStorage unopenedCookedAndSealedAtPlant = 14d
+foodStorage cookedStoreWrappedWhole = 7d
+foodStorage cookedStoreWrappedCut = 5d
+foodStorage cookedCountryHam = 7d
+foodStorage cannedUnopenedLabeledKeepRefrigerated = 9m
+foodStorage cannedOpenedShelfStable = 4d
+foodStorage unopenedShelfStableCannedAtRoomTemperature = 730d
+foodStorage prosciutto = 3m +foodStorage parma = 3m +foodStorage sarrano = 3m +foodStorage dryItalian = 3m +foodStorage spanishType = 3m +foodStorage cut = 3m +foodStorage wholeChicken = 2d +foodStorage wholeTurkey = 2d +foodStorage chickenPieces = 2d +foodStorage turkeyPieces = 2d +foodStorage bluefish = 3d +foodStorage catfish = 3d +foodStorage mackerel = 3d +foodStorage mullet = 3d +foodStorage salmon = 3d +foodStorage tuna = 3d +foodStorage cod = 3d +foodStorage flounder = 3d +foodStorage haddock = 3d +foodStorage halibut = 3d +foodStorage sole = 3d +foodStorage pollock = 3d +foodStorage oceanPerch = 3d +foodStorage rockfish = 3d +foodStorage seaTrout = 3d +foodStorage freshCrabMeat = 4d +foodStorage freshLobster = 4d +foodStorage liveCrab = 1d +foodStorage liveLobster = 1d +foodStorage scallops = 10d +foodStorage shrimp = 5d +foodStorage crayfish = 5d +foodStorage shuckedClams = 10d +foodStorage mussels = 10d +foodStorage oysters = 10d +foodStorage squid = 3d + + +foodStorage rawEggInShell = 35d +foodStorage rawEggWhitesAndYolks = 4d +foodStorage eggFrozenInShell = 1h +foodStorage eggInBrokenShell = 0s +foodStorage hardCookedEggs = 7d +foodStorage unopenedEggSubstitutes = 7d +foodStorage openedEggSubstitutes = 3d +foodStorage unfrozenUnopenedEggSubstitutes = 7d +foodStorage unfrozenOpenedEggSubstitutes = 4d +foodStorage eggCasseroles = 4d +foodStorage commercialEggNog = 5d +foodStorage homemadeEggNog = 4d +foodStorage bakedPumpkinPies = 4d +foodStorage bakedPecanPies = 4d +foodStorage bakedCustardPies = 4d +foodStorage bakedChiffonPies = 4d +foodStorage quicheWithFilling = 5d +foodStorage vegetableOrMeatAdded = 4d +foodStorage cookedMeat = 4d +foodStorage cookedPoultry = 4d +foodStorage chickenNuggets = 4d +foodStorage chickenPatties = 4d +foodStorage pizza = 4d + diff --git a/warehouse/age-off-utils/src/test/resources/filter/shelf-life.csv b/warehouse/age-off-utils/src/test/resources/filter/shelf-life.csv new file mode 100644 index 00000000000..76319940573 --- /dev/null +++ b/warehouse/age-off-utils/src/test/resources/filter/shelf-life.csv @@ -0,0 +1,106 @@ +label,pattern,duration +salad,egg,4d +salad,chicken,4d +salad,ham,4d +salad,tunaSalad,4d +salad,macaroni,4d + +# Meats +hotDogs,hotDogsOpened,7d +hotDogs,hotDogsUnopened,14d +luncheonMeat,luncheonOpened,5d +luncheonMeat,luncheonDeliSliced,5d +luncheonMeat,luncheonUnopened,14d +baconAndSausage,bacon,7d +baconAndSausage,rawChickenSausage,2d +baconAndSausage,rawTurkeySausage,2d +baconAndSausage,rawPorkSausage,2d +baconAndSausage,rawBeefSausage,2d +baconAndSausage,cookedChickenSausage,7d +baconAndSausage,cookedTurkeySausage,7d +baconAndSausage,cookedPorkSausage,7d +baconAndSausage,cookedBeefSausage,7d +baconAndSausage,frozenSausageAfterCooking,4d +groundMeat,hamburger,2d +groundMeat,groundBeef,2d +groundMeat,turkey,2d +groundMeat,groundChicken,2d +groundMeat,otherPoultry,2d +groundMeat,veal,2d +groundMeat,pork,2d +groundMeat,lamb,2d +groundMeat,mixturesOfOtherGroundMeats,2d +freshCutsRedMeatNotHam,steaks,5d +freshCutsRedMeatNotHam,chops,5d +freshCutsRedMeatNotHam,roasts,5d +ham,freshUncuredUncooked,5d +ham,freshUncuredCooked,4d +ham,curedUncooked,7d +ham,unopenedCookedAndSealedAtPlant,14d +ham,cookedStoreWrappedWhole,7d +ham,cookedStoreWrappedCut,5d +ham,cookedCountryHam,7d +ham,cannedUnopenedLabeledKeepRefrigerated,9m +ham,cannedOpenedShelfStable,4d +ham,unopenedShelfStableCannedAtRoomTemperature,730d +ham,prosciutto,3m +ham,parma,3m +ham,sarrano,3m +ham,dryItalian,3m +ham,spanishType,3m +ham,cut,3m +freshPoultry,wholeChicken,2d 
+freshPoultry,wholeTurkey,2d +freshPoultry,chickenPieces,2d +freshPoultry,turkeyPieces,2d +finFish,bluefish,3d +finFish,catfish,3d +finFish,mackerel,3d +finFish,mullet,3d +finFish,salmon,3d +finFish,tuna,3d +finFish,cod,3d +finFish,flounder,3d +finFish,haddock,3d +finFish,halibut,3d +finFish,sole,3d +finFish,pollock,3d +finFish,oceanPerch,3d +finFish,rockfish,3d +finFish,seaTrout,3d +shellFish,freshCrabMeat,4d +shellFish,freshLobster,4d +shellFish,liveCrab,1d +shellFish,liveLobster,1d +shellFish,scallops,10d +shellFish,shrimp,5d +shellFish,crayfish,5d +shellFish,shuckedClams,10d +shellFish,mussels,10d +shellFish,oysters,10d +shellFish,squid,3d + +# Other +eggs,rawEggInShell,35d +eggs,rawEggWhitesAndYolks,4d +eggs,eggFrozenInShell,1h +eggs,eggInBrokenShell,0s +eggs,hardCookedEggs,7d +eggs,unopenedEggSubstitutes,7d +eggs,openedEggSubstitutes,3d +eggs,unfrozenUnopenedEggSubstitutes,7d +eggs,unfrozenOpenedEggSubstitutes,4d +eggs,eggCasseroles,4d +eggs,commercialEggNog,5d +eggs,homemadeEggNog,4d +eggs,bakedPumpkinPies,4d +eggs,bakedPecanPies,4d +eggs,bakedCustardPies,4d +eggs,bakedChiffonPies,4d +eggs,quicheWithFilling,5d +soupsAndStews,vegetableOrMeatAdded,4d +leftovers,cookedMeat,4d +leftovers,cookedPoultry,4d +leftovers,chickenNuggets,4d +leftovers,chickenPatties,4d +leftovers,pizza,4d \ No newline at end of file diff --git a/warehouse/age-off-utils/src/test/resources/filter/shelf-life.freezer.matchPattern b/warehouse/age-off-utils/src/test/resources/filter/shelf-life.freezer.matchPattern new file mode 100644 index 00000000000..981b5baa328 --- /dev/null +++ b/warehouse/age-off-utils/src/test/resources/filter/shelf-life.freezer.matchPattern @@ -0,0 +1,106 @@ +salad "egg" : 10d +salad "chicken" : 10d +salad "ham" : 10d +salad "tunaSalad" : 10d +salad "macaroni" : 10d + + +hotDogs "hotDogsOpened" : 30d +hotDogs "hotDogsUnopened" : 60d +luncheonMeat "luncheonOpened" : 14d +luncheonMeat "luncheonDeliSliced" : 14d +luncheonMeat "luncheonUnopened" : 60d +baconAndSausage "bacon" : 30d +baconAndSausage "rawChickenSausage" : 7d +baconAndSausage "rawTurkeySausage" : 7d +baconAndSausage "rawPorkSausage" : 7d +baconAndSausage "rawBeefSausage" : 7d +baconAndSausage "cookedChickenSausage" : 30d +baconAndSausage "cookedTurkeySausage" : 30d +baconAndSausage "cookedPorkSausage" : 30d +baconAndSausage "cookedBeefSausage" : 30d +baconAndSausage "frozenSausageAfterCooking" : 10d +groundMeat "hamburger" : 7d +groundMeat "groundBeef" : 7d +groundMeat "turkey" : 7d +groundMeat "groundChicken" : 7d +groundMeat "otherPoultry" : 7d +groundMeat "veal" : 7d +groundMeat "pork" : 7d +groundMeat "lamb" : 7d +groundMeat "mixturesOfOtherGroundMeats" : 7d +freshCutsRedMeatNotHam "steaks" : 14d +freshCutsRedMeatNotHam "chops" : 14d +freshCutsRedMeatNotHam "roasts" : 14d +ham "freshUncuredUncooked" : 14d +ham "freshUncuredCooked" : 10d +ham "curedUncooked" : 30d +ham "unopenedCookedAndSealedAtPlant" : 60d +ham "cookedStoreWrappedWhole" : 30d +ham "cookedStoreWrappedCut" : 14d +ham "cookedCountryHam" : 30d +ham "cannedUnopenedLabeledKeepRefrigerated" : 0s +ham "cannedOpenedShelfStable" : 10d +ham "unopenedShelfStableCannedAtRoomTemperature" : 1826d +ham "prosciutto" : 0s +ham "parma" : 0s +ham "sarrano" : 0s +ham "dryItalian" : 0s +ham "spanishType" : 0s +ham "cut" : 0s +freshPoultry "wholeChicken" : 7d +freshPoultry "wholeTurkey" : 7d +freshPoultry "chickenPieces" : 7d +freshPoultry "turkeyPieces" : 7d +finFish "bluefish" : 7d +finFish "catfish" : 7d +finFish "mackerel" : 7d +finFish "mullet" : 7d +finFish "salmon" : 7d 
+finFish "tuna" : 7d +finFish "cod" : 7d +finFish "flounder" : 7d +finFish "haddock" : 7d +finFish "halibut" : 7d +finFish "sole" : 7d +finFish "pollock" : 7d +finFish "oceanPerch" : 7d +finFish "rockfish" : 7d +finFish "seaTrout" : 7d +shellFish "freshCrabMeat" : 10d +shellFish "freshLobster" : 10d +shellFish "liveCrab" : 3d +shellFish "liveLobster" : 3d +shellFish "scallops" : 42d +shellFish "shrimp" : 14d +shellFish "crayfish" : 14d +shellFish "shuckedClams" : 42d +shellFish "mussels" : 42d +shellFish "oysters" : 42d +shellFish "squid" : 7d + + +eggs "rawEggInShell" : 365d +eggs "rawEggWhitesAndYolks" : 10d +eggs "eggFrozenInShell" : 0s +eggs "eggInBrokenShell" : 0s +eggs "hardCookedEggs" : 30d +eggs "unopenedEggSubstitutes" : 30d +eggs "openedEggSubstitutes" : 7d +eggs "unfrozenUnopenedEggSubstitutes" : 30d +eggs "unfrozenOpenedEggSubstitutes" : 10d +eggs "eggCasseroles" : 10d +eggs "commercialEggNog" : 14d +eggs "homemadeEggNog" : 10d +eggs "bakedPumpkinPies" : 10d +eggs "bakedPecanPies" : 10d +eggs "bakedCustardPies" : 10d +eggs "bakedChiffonPies" : 10d +eggs "quicheWithFilling" : 14d +soupsAndStews "vegetableOrMeatAdded" : 10d +leftovers "cookedMeat" : 10d +leftovers "cookedPoultry" : 10d +leftovers "chickenNuggets" : 10d +leftovers "chickenPatties" : 10d +leftovers "pizza" : 10d + diff --git a/warehouse/age-off-utils/src/test/resources/filter/shelf-life.refrigerator.matchPattern b/warehouse/age-off-utils/src/test/resources/filter/shelf-life.refrigerator.matchPattern new file mode 100644 index 00000000000..415abe223ba --- /dev/null +++ b/warehouse/age-off-utils/src/test/resources/filter/shelf-life.refrigerator.matchPattern @@ -0,0 +1,106 @@ +salad "egg" : 4d +salad "chicken" : 4d +salad "ham" : 4d +salad "tunaSalad" : 4d +salad "macaroni" : 4d + + +hotDogs "hotDogsOpened" : 7d +hotDogs "hotDogsUnopened" : 14d +luncheonMeat "luncheonOpened" : 5d +luncheonMeat "luncheonDeliSliced" : 5d +luncheonMeat "luncheonUnopened" : 14d +baconAndSausage "bacon" : 7d +baconAndSausage "rawChickenSausage" : 2d +baconAndSausage "rawTurkeySausage" : 2d +baconAndSausage "rawPorkSausage" : 2d +baconAndSausage "rawBeefSausage" : 2d +baconAndSausage "cookedChickenSausage" : 7d +baconAndSausage "cookedTurkeySausage" : 7d +baconAndSausage "cookedPorkSausage" : 7d +baconAndSausage "cookedBeefSausage" : 7d +baconAndSausage "frozenSausageAfterCooking" : 4d +groundMeat "hamburger" : 2d +groundMeat "groundBeef" : 2d +groundMeat "turkey" : 2d +groundMeat "groundChicken" : 2d +groundMeat "otherPoultry" : 2d +groundMeat "veal" : 2d +groundMeat "pork" : 2d +groundMeat "lamb" : 2d +groundMeat "mixturesOfOtherGroundMeats" : 2d +freshCutsRedMeatNotHam "steaks" : 5d +freshCutsRedMeatNotHam "chops" : 5d +freshCutsRedMeatNotHam "roasts" : 5d +ham "freshUncuredUncooked" : 5d +ham "freshUncuredCooked" : 4d +ham "curedUncooked" : 7d +ham "unopenedCookedAndSealedAtPlant" : 14d +ham "cookedStoreWrappedWhole" : 7d +ham "cookedStoreWrappedCut" : 5d +ham "cookedCountryHam" : 7d +ham "cannedUnopenedLabeledKeepRefrigerated" : 9m +ham "cannedOpenedShelfStable" : 4d +ham "unopenedShelfStableCannedAtRoomTemperature" : 730d +ham "prosciutto" : 3m +ham "parma" : 3m +ham "sarrano" : 3m +ham "dryItalian" : 3m +ham "spanishType" : 3m +ham "cut" : 3m +freshPoultry "wholeChicken" : 2d +freshPoultry "wholeTurkey" : 2d +freshPoultry "chickenPieces" : 2d +freshPoultry "turkeyPieces" : 2d +finFish "bluefish" : 3d +finFish "catfish" : 3d +finFish "mackerel" : 3d +finFish "mullet" : 3d +finFish "salmon" : 3d +finFish "tuna" : 3d +finFish "cod" : 3d 
+finFish "flounder" : 3d +finFish "haddock" : 3d +finFish "halibut" : 3d +finFish "sole" : 3d +finFish "pollock" : 3d +finFish "oceanPerch" : 3d +finFish "rockfish" : 3d +finFish "seaTrout" : 3d +shellFish "freshCrabMeat" : 4d +shellFish "freshLobster" : 4d +shellFish "liveCrab" : 1d +shellFish "liveLobster" : 1d +shellFish "scallops" : 10d +shellFish "shrimp" : 5d +shellFish "crayfish" : 5d +shellFish "shuckedClams" : 10d +shellFish "mussels" : 10d +shellFish "oysters" : 10d +shellFish "squid" : 3d + + +eggs "rawEggInShell" : 35d +eggs "rawEggWhitesAndYolks" : 4d +eggs "eggFrozenInShell" : 1h +eggs "eggInBrokenShell" : 0s +eggs "hardCookedEggs" : 7d +eggs "unopenedEggSubstitutes" : 7d +eggs "openedEggSubstitutes" : 3d +eggs "unfrozenUnopenedEggSubstitutes" : 7d +eggs "unfrozenOpenedEggSubstitutes" : 4d +eggs "eggCasseroles" : 4d +eggs "commercialEggNog" : 5d +eggs "homemadeEggNog" : 4d +eggs "bakedPumpkinPies" : 4d +eggs "bakedPecanPies" : 4d +eggs "bakedCustardPies" : 4d +eggs "bakedChiffonPies" : 4d +eggs "quicheWithFilling" : 5d +soupsAndStews "vegetableOrMeatAdded" : 4d +leftovers "cookedMeat" : 4d +leftovers "cookedPoultry" : 4d +leftovers "chickenNuggets" : 4d +leftovers "chickenPatties" : 4d +leftovers "pizza" : 4d + diff --git a/warehouse/age-off/pom.xml b/warehouse/age-off/pom.xml new file mode 100644 index 00000000000..64b5d313f9a --- /dev/null +++ b/warehouse/age-off/pom.xml @@ -0,0 +1,93 @@ + + + 4.0.0 + + gov.nsa.datawave + datawave-warehouse-parent + 7.13.0-SNAPSHOT + + datawave-age-off + ${project.artifactId} + + + + com.google.guava + guava + compile + + + + gov.nsa.datawave + datawave-core + compile + + + xml-apis + xml-apis + + + + + + gov.nsa.datawave.microservice + common-utils + compile + + + + org.apache.accumulo + accumulo-core + compile + + + + org.apache.hadoop + hadoop-client-api + compile + + + + org.apache.logging.log4j + log4j-1.2-api + compile + + + junit + junit + test + + + + org.hamcrest + hamcrest-core + ${version.hamcrest} + test + + + org.mockito + mockito-core + test + + + + org.slf4j + slf4j-api + test + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + + diff --git a/warehouse/core/src/main/java/datawave/ingest/util/cache/watch/FileRuleWatcher.java b/warehouse/age-off/src/main/java/datawave/ingest/util/cache/watch/AgeOffRuleLoader.java similarity index 72% rename from warehouse/core/src/main/java/datawave/ingest/util/cache/watch/FileRuleWatcher.java rename to warehouse/age-off/src/main/java/datawave/ingest/util/cache/watch/AgeOffRuleLoader.java index e786683a80b..6d233fd91eb 100644 --- a/warehouse/core/src/main/java/datawave/ingest/util/cache/watch/FileRuleWatcher.java +++ b/warehouse/age-off/src/main/java/datawave/ingest/util/cache/watch/AgeOffRuleLoader.java @@ -2,22 +2,20 @@ import java.io.IOException; import java.io.InputStream; +import java.lang.reflect.InvocationTargetException; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Map.Entry; import java.util.stream.Collectors; import java.util.stream.IntStream; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; import org.apache.accumulo.core.iterators.IteratorEnvironment; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; import org.apache.log4j.Logger; import 
org.w3c.dom.Document; @@ -25,119 +23,66 @@ import org.w3c.dom.NamedNodeMap; import org.w3c.dom.Node; import org.w3c.dom.NodeList; +import org.xml.sax.SAXException; import datawave.iterators.filter.AgeOffConfigParams; import datawave.iterators.filter.ageoff.FilterOptions; import datawave.iterators.filter.ageoff.FilterRule; -/** - * - */ -public class FileRuleWatcher extends FileSystemWatcher> { +public class AgeOffRuleLoader { + private static final Logger log = Logger.getLogger(AgeOffRuleLoader.class); + private final AgeOffFileLoaderDependencyProvider loaderConfig; - private static final Logger log = Logger.getLogger(FileRuleWatcher.class); - - private final IteratorEnvironment iterEnv; - - /** - * @param fs - * @param filePath - * @param configuredDiff - * @throws IOException - */ - public FileRuleWatcher(FileSystem fs, Path filePath, long configuredDiff) throws IOException { - this(fs, filePath, configuredDiff, null); - } - - /** - * @param fs - * @param filePath - * @param configuredDiff - * @param iterEnv - * @throws IOException - */ - public FileRuleWatcher(FileSystem fs, Path filePath, long configuredDiff, IteratorEnvironment iterEnv) throws IOException { - super(fs, filePath, configuredDiff); - this.iterEnv = iterEnv; - } - - /** - * @param filePath - * @param configuredDiff - * @throws IOException - */ - public FileRuleWatcher(Path filePath, long configuredDiff) throws IOException { - this(filePath, configuredDiff, null); + public AgeOffRuleLoader(AgeOffFileLoaderDependencyProvider loaderConfig) { + this.loaderConfig = loaderConfig; } - /** - * @param filePath - * @param configuredDiff - * @param iterEnv - * @throws IOException - */ - public FileRuleWatcher(Path filePath, long configuredDiff, IteratorEnvironment iterEnv) throws IOException { - super(filePath.getFileSystem(new Configuration()), filePath, configuredDiff); - this.iterEnv = iterEnv; - } - - /* - * (non-Javadoc) - * - * @see datawave.ingest.util.cache.watch.FileSystemWatcher#loadContents(java.io.InputStream) - */ - @Override - protected Collection loadContents(InputStream in) throws IOException { - - try { - List mergedRuleConfigs = loadRuleConfigs(in); - List filterRules = new ArrayList<>(); - /** - * This has been changed to support extended options. - */ - for (RuleConfig ruleConfig : mergedRuleConfigs) { + public List load(InputStream in) throws IOException { + List mergedRuleConfigs = loadRuleConfigs(in); + List filterRules = new ArrayList<>(); + /** + * This has been changed to support extended options. 
+ */ + for (RuleConfig ruleConfig : mergedRuleConfigs) { + try { + if (ruleConfig.filterClassName == null) { + throw new IllegalArgumentException("The filter class must not be null"); + } - try { - FilterRule filter = (FilterRule) Class.forName(ruleConfig.filterClassName).getDeclaredConstructor().newInstance(); + FilterRule filter = (FilterRule) Class.forName(ruleConfig.filterClassName).getDeclaredConstructor().newInstance(); - FilterOptions option = new FilterOptions(); + FilterOptions option = new FilterOptions(); - if (ruleConfig.ttlValue != null) { - option.setTTL(Long.parseLong(ruleConfig.ttlValue)); - } - if (ruleConfig.ttlUnits != null) { - option.setTTLUnits(ruleConfig.ttlUnits); - } - option.setOption(AgeOffConfigParams.MATCHPATTERN, ruleConfig.matchPattern); + if (ruleConfig.ttlValue != null) { + option.setTTL(Long.parseLong(ruleConfig.ttlValue)); + } + if (ruleConfig.ttlUnits != null) { + option.setTTLUnits(ruleConfig.ttlUnits); + } + option.setOption(AgeOffConfigParams.MATCHPATTERN, ruleConfig.matchPattern); - StringBuilder extOptions = new StringBuilder(); + StringBuilder extOptions = new StringBuilder(); - for (Entry myOption : ruleConfig.extendedOptions.entrySet()) { - option.setOption(myOption.getKey(), myOption.getValue()); - extOptions.append(myOption.getKey()).append(","); - } + for (Map.Entry myOption : ruleConfig.extendedOptions.entrySet()) { + option.setOption(myOption.getKey(), myOption.getValue()); + extOptions.append(myOption.getKey()).append(","); + } - int extOptionLen = extOptions.length(); + int extOptionLen = extOptions.length(); - option.setOption(AgeOffConfigParams.EXTENDED_OPTIONS, extOptions.toString().substring(0, extOptionLen - 1)); + option.setOption(AgeOffConfigParams.EXTENDED_OPTIONS, extOptions.substring(0, extOptionLen - 1)); - filter.init(option, iterEnv); + filter.init(option, loaderConfig.getIterEnv()); - filterRules.add(filter); + filterRules.add(filter); - } catch (InstantiationException | ClassNotFoundException | IllegalAccessException e) { - log.error(e); - throw new IOException(e); - } + } catch (IllegalArgumentException | InstantiationException | ClassNotFoundException | IllegalAccessException | InvocationTargetException + | NoSuchMethodException e) { + log.trace("An error occurred while loading age-off rules, the exception will be rethrown", e); + throw new IOException(e); } - return filterRules; - } catch (Exception ex) { - log.error("uh oh: " + ex); - throw new IOException(ex); - } finally { - IOUtils.closeStream(in); } - + return filterRules; } protected List loadRuleConfigs(InputStream in) throws IOException { @@ -196,8 +141,8 @@ protected List loadRuleConfigs(InputStream in) throws IOException { ruleConfigs.addAll(childRules); // @formatter:on - } catch (Exception ex) { - log.error("uh oh: " + ex); + } catch (ParserConfigurationException | SAXException ex) { + log.trace("An error occurred while loading age-off rules, the exception will be rethrown", ex); throw new IOException(ex); } finally { IOUtils.closeStream(in); @@ -317,17 +262,9 @@ private RuleConfig getRuleConfigForNode(NodeList rules, int index) { // Return the RuleConfigs found within the configuration file referenced in the provided Node's text private Collection loadParentRuleConfigs(Node parent) throws IOException { Collection rules = new ArrayList<>(); - String parentPathStr = parent.getTextContent(); + InputStream parentRuleInputStream = this.loaderConfig.getParentStream(parent); - if (null == parentPathStr || parentPathStr.isEmpty()) { - throw new 
IllegalArgumentException("Invalid parent config path, none specified!"); - } - // loading parent relative to dir that child is in. - Path parentPath = new Path(this.filePath.getParent(), parentPathStr); - if (!fs.exists(parentPath)) { - throw new IllegalArgumentException("Invalid parent config path specified, " + parentPathStr + " does not exist!"); - } - rules.addAll(loadRuleConfigs(fs.open(parentPath))); + rules.addAll(loadRuleConfigs(parentRuleInputStream)); return rules; } @@ -383,16 +320,18 @@ private boolean isMergeRule(Node nodeItem) { /** * Temporary holding class for rule configs to allow merges of rules; + * */ - private static class RuleConfig { - String ttlValue = null; - String ttlUnits = null; - String matchPattern = ""; - String filterClassName = null; - String label; - boolean isMerge = false; - Map extendedOptions = new HashMap<>(); - int priority = -1; + public static class RuleConfig { + private int priority; + public String filterClassName; + public String ttlValue = null; + public String ttlUnits = null; + public String matchPattern = null; + public String label; + public boolean isMerge = false; + public Map extendedOptions = new HashMap<>(); + public List customElements = new ArrayList<>(); public RuleConfig(String filterClassName, int priority) { this.filterClassName = filterClassName; @@ -419,6 +358,11 @@ public RuleConfig label(String label) { return this; } + public RuleConfig customElements(List customElements) { + this.customElements = customElements; + return this; + } + public String getLabel() { return label; } @@ -433,4 +377,10 @@ public RuleConfig extendedOptions(Map extendedOptions) { return this; } } + + public interface AgeOffFileLoaderDependencyProvider { + IteratorEnvironment getIterEnv(); + + InputStream getParentStream(Node parent) throws IOException; + } } diff --git a/warehouse/age-off/src/main/java/datawave/ingest/util/cache/watch/FileLoaderDependencyProvider.java b/warehouse/age-off/src/main/java/datawave/ingest/util/cache/watch/FileLoaderDependencyProvider.java new file mode 100644 index 00000000000..acf58f389dc --- /dev/null +++ b/warehouse/age-off/src/main/java/datawave/ingest/util/cache/watch/FileLoaderDependencyProvider.java @@ -0,0 +1,42 @@ +package datawave.ingest.util.cache.watch; + +import java.io.IOException; +import java.io.InputStream; + +import org.apache.accumulo.core.iterators.IteratorEnvironment; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.w3c.dom.Node; + +class FileLoaderDependencyProvider implements AgeOffRuleLoader.AgeOffFileLoaderDependencyProvider { + private final FileSystem fs; + private final Path filePath; + private final IteratorEnvironment iterEnv; + + FileLoaderDependencyProvider(FileSystem fs, Path filePath, IteratorEnvironment iterEnv) { + this.fs = fs; + this.filePath = filePath; + this.iterEnv = iterEnv; + } + + @Override + public IteratorEnvironment getIterEnv() { + return iterEnv; + } + + @Override + public InputStream getParentStream(Node parent) throws IOException { + + String parentPathStr = parent.getTextContent(); + + if (null == parentPathStr || parentPathStr.isEmpty()) { + throw new IllegalArgumentException("Invalid parent config path, none specified!"); + } + // loading parent relative to dir that child is in. 
+ Path parentPath = new Path(filePath.getParent(), parentPathStr); + if (!fs.exists(parentPath)) { + throw new IllegalArgumentException("Invalid parent config path specified, " + parentPathStr + " does not exist!"); + } + return fs.open(parentPath); + } +} diff --git a/warehouse/age-off/src/main/java/datawave/ingest/util/cache/watch/FileRuleCacheLoader.java b/warehouse/age-off/src/main/java/datawave/ingest/util/cache/watch/FileRuleCacheLoader.java new file mode 100644 index 00000000000..11de4a5622b --- /dev/null +++ b/warehouse/age-off/src/main/java/datawave/ingest/util/cache/watch/FileRuleCacheLoader.java @@ -0,0 +1,48 @@ +package datawave.ingest.util.cache.watch; + +import java.io.IOException; + +import org.apache.hadoop.fs.Path; + +import com.google.common.cache.CacheLoader; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; + +/** + * Cache loader implementation for loading {@link FileRuleCacheValue} referencing {@link Path} keys. + */ +public class FileRuleCacheLoader extends CacheLoader { + private final static int CONFIGURED_DIFF = 1; + + /** + * Reloads a new {@link FileRuleCacheValue} if the cached value has changes, otherwise returns the @param oldValue. + * + * @param key + * the key to reload for + * @param oldValue + * the existing value + * @return a new value if there are changes, otherwise @param oldValue is returned + * @throws IOException + * if any errors occur when loading a new instance of the cache value + */ + @Override + public ListenableFuture reload(String key, FileRuleCacheValue oldValue) throws IOException { + // checks here are performed on the caller thread + FileRuleCacheValue resultValue = oldValue.hasChanges() ? load(key) : oldValue; + return Futures.immediateFuture(resultValue); + } + + /** + * Loads a new rule cache value instance + * + * @param key + * the non-null key whose value should be loaded + * @return a new rule cache value instance + * @throws IOException + * if any errors occur when loading a new instance of the cache value + */ + @Override + public FileRuleCacheValue load(String key) throws IOException { + return FileRuleCacheValue.newCacheValue(key, CONFIGURED_DIFF); + } +} diff --git a/warehouse/age-off/src/main/java/datawave/ingest/util/cache/watch/FileRuleCacheValue.java b/warehouse/age-off/src/main/java/datawave/ingest/util/cache/watch/FileRuleCacheValue.java new file mode 100644 index 00000000000..b38b33fbd2c --- /dev/null +++ b/warehouse/age-off/src/main/java/datawave/ingest/util/cache/watch/FileRuleCacheValue.java @@ -0,0 +1,126 @@ +package datawave.ingest.util.cache.watch; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Collection; + +import org.apache.accumulo.core.iterators.IteratorEnvironment; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.log4j.Logger; + +import com.google.common.annotations.VisibleForTesting; + +import datawave.iterators.filter.ageoff.AppliedRule; +import datawave.iterators.filter.ageoff.FilterRule; + +/** + * Rule cache value implementation for use with age-off rule loading. The implementation is thread-safe and supports concurrent access for all methods. 
+ */
+public class FileRuleCacheValue {
+    private final static Logger log = Logger.getLogger(FileRuleCacheValue.class);
+
+    private final Path filePath;
+    private final long configuredDiff;
+    private final FileSystem fs;
+
+    private volatile FileRuleReference ruleRef;
+
+    FileRuleCacheValue(FileSystem fs, Path filePath, long configuredDiff) {
+        this.filePath = filePath;
+        this.configuredDiff = configuredDiff;
+        this.fs = fs;
+    }
+
+    /**
+     * Creates a new instance of this class for the specified {@code filePath}. Actual evaluation of the {@code filePath} is deferred until calls to
+     * {@link #newRulesetView(long, IteratorEnvironment)}
+     *
+     * @param filePath
+     *            the file path to prepare a cached representation on
+     * @param configuredDiff
+     *            the threshold time (in milliseconds) for when timestamp differences are considered changes
+     * @return a new cache value instance
+     * @throws IOException
+     *             if the cache value instance cannot be created
+     */
+    public static FileRuleCacheValue newCacheValue(String filePath, long configuredDiff) throws IOException {
+        Path filePathObj = new Path(filePath);
+        FileSystem fs = filePathObj.getFileSystem(new Configuration());
+        return new FileRuleCacheValue(fs, filePathObj, configuredDiff);
+    }
+
+    /**
+     * Gets the file path of this instance.
+     *
+     * @return path for the instance
+     */
+    public Path getFilePath() {
+        return filePath;
+    }
+
+    /**
+     * Check if the cached representation has changes. Changes are determined by checking the baseline modification time when the cached representation was
+     * discovered against the current modification time of the file.
+     *
+     * @return true if there are changes, otherwise false
+     */
+    public boolean hasChanges() {
+        if (ruleRef == null) {
+            return true;
+        }
+        long currentTime;
+        try {
+            currentTime = fs.getFileStatus(filePath).getModificationTime();
+        } catch (IOException e) {
+            log.debug("Error getting file status for: " + filePath, e);
+            return true;
+        }
+        long previousTime = ruleRef.getTimestamp();
+        boolean changed = (currentTime - previousTime) > configuredDiff;
+        if (log.isTraceEnabled()) {
+            log.trace("Changes result: " + changed + ", current time: " + currentTime);
+        }
+        return changed;
+    }
+
+    /**
+     * Creates a new ruleset view of the file. The initial call to the method will lazily create the base rules and return a view of the baseline rules. The
+     * next calls will create new view copies derived from the baseline rules.
+ * + * @param scanStart + * the start of a scan operation to use for the ruleset + * @param iterEnv + * the iterator environment for the scan + * @return a deep copy of the cached {@link AppliedRule} baseline rules + * @throws IOException + * if there are errors during the cache value creation, on initial call + */ + public Collection newRulesetView(long scanStart, IteratorEnvironment iterEnv) throws IOException { + // rule initialization/copies are performed on the calling thread + // the base iterator rules will use an iterator environment from the caller (and keep in the AppliedRule) + // the deep copy always creates new views of the rules with the caller's iterator environment + if (ruleRef == null) { + long ts = fs.getFileStatus(filePath).getModificationTime(); + Collection rulesBase = loadFilterRules(iterEnv); + ruleRef = new FileRuleReference(ts, rulesBase); + } + return ruleRef.deepCopy(scanStart, iterEnv); + } + + @VisibleForTesting + Collection loadFilterRules(IteratorEnvironment iterEnv) throws IOException { + AgeOffRuleLoader ruleLoader = new AgeOffRuleLoader(new FileLoaderDependencyProvider(fs, filePath, iterEnv)); + Collection rulesBase; + try (InputStream in = fs.open(filePath)) { + rulesBase = ruleLoader.load(in); + } + return rulesBase; + } + + @VisibleForTesting + FileRuleReference getRuleRef() { + return ruleRef; + } +} diff --git a/warehouse/age-off/src/main/java/datawave/ingest/util/cache/watch/FileRuleReference.java b/warehouse/age-off/src/main/java/datawave/ingest/util/cache/watch/FileRuleReference.java new file mode 100644 index 00000000000..e11cdee6ed1 --- /dev/null +++ b/warehouse/age-off/src/main/java/datawave/ingest/util/cache/watch/FileRuleReference.java @@ -0,0 +1,27 @@ +package datawave.ingest.util.cache.watch; + +import java.util.Collection; +import java.util.stream.Collectors; + +import org.apache.accumulo.core.iterators.IteratorEnvironment; + +import datawave.iterators.filter.ageoff.AppliedRule; +import datawave.iterators.filter.ageoff.FilterRule; + +class FileRuleReference { + private final long ts; + private final Collection rulesBase; + + FileRuleReference(long ts, Collection rulesBase) { + this.ts = ts; + this.rulesBase = rulesBase; + } + + public long getTimestamp() { + return ts; + } + + public Collection deepCopy(long scanStart, IteratorEnvironment iterEnv) { + return rulesBase.stream().map(rule -> (AppliedRule) rule.deepCopy(scanStart, iterEnv)).collect(Collectors.toList()); + } +} diff --git a/warehouse/age-off/src/main/java/datawave/ingest/util/cache/watch/FileRuleWatcher.java b/warehouse/age-off/src/main/java/datawave/ingest/util/cache/watch/FileRuleWatcher.java new file mode 100644 index 00000000000..fd2397debd3 --- /dev/null +++ b/warehouse/age-off/src/main/java/datawave/ingest/util/cache/watch/FileRuleWatcher.java @@ -0,0 +1,97 @@ +package datawave.ingest.util.cache.watch; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Collection; + +import org.apache.accumulo.core.iterators.IteratorEnvironment; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IOUtils; +import org.apache.log4j.Logger; + +import datawave.iterators.filter.ageoff.FilterRule; + +/** + * File Rule Watch + */ +public class FileRuleWatcher extends FileSystemWatcher> { + + private static final Logger log = Logger.getLogger(FileRuleWatcher.class); + + private final IteratorEnvironment iterEnv; + + /** + * @param fs + * file system + * @param filePath + 
* path to the file + * @param configuredDiff + * configured diff + * @throws IOException + * if there is a problem reading the file + */ + public FileRuleWatcher(FileSystem fs, Path filePath, long configuredDiff) throws IOException { + this(fs, filePath, configuredDiff, null); + } + + /** + * @param fs + * file system + * @param filePath + * path to the file + * @param configuredDiff + * configured diff + * @param iterEnv + * iterator environment + * @throws IOException + * if there is a problem reading the file + */ + public FileRuleWatcher(FileSystem fs, Path filePath, long configuredDiff, IteratorEnvironment iterEnv) throws IOException { + super(fs, filePath, configuredDiff); + this.iterEnv = iterEnv; + } + + /** + * @param filePath + * path to the file + * @param configuredDiff + * configured diff + * @throws IOException + * if there is an error reading the file + */ + public FileRuleWatcher(Path filePath, long configuredDiff) throws IOException { + this(filePath, configuredDiff, null); + } + + /** + * @param filePath + * the path to the file + * @param configuredDiff + * configured diff + * @param iterEnv + * iterator environment + * @throws IOException + * if there is an error reading the file + */ + public FileRuleWatcher(Path filePath, long configuredDiff, IteratorEnvironment iterEnv) throws IOException { + super(filePath.getFileSystem(new Configuration()), filePath, configuredDiff); + this.iterEnv = iterEnv; + } + + /* + * (non-Javadoc) + * + * @see datawave.ingest.util.cache.watch.FileSystemWatcher#loadContents(java.io.InputStream) + */ + @Override + protected Collection loadContents(InputStream in) throws IOException { + try { + AgeOffRuleLoader ruleLoader = new AgeOffRuleLoader(new FileLoaderDependencyProvider(fs, filePath, iterEnv)); + return ruleLoader.load(in); + } finally { + IOUtils.closeStream(in); + } + } +} diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/AgeOffConfigParams.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/AgeOffConfigParams.java similarity index 100% rename from warehouse/core/src/main/java/datawave/iterators/filter/AgeOffConfigParams.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/AgeOffConfigParams.java diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/AgeOffFilterBase.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/AgeOffFilterBase.java similarity index 100% rename from warehouse/core/src/main/java/datawave/iterators/filter/AgeOffFilterBase.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/AgeOffFilterBase.java diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/AgeOffTtlUnits.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/AgeOffTtlUnits.java similarity index 100% rename from warehouse/core/src/main/java/datawave/iterators/filter/AgeOffTtlUnits.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/AgeOffTtlUnits.java diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/ColumnFamilyRegexFilter.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/ColumnFamilyRegexFilter.java similarity index 100% rename from warehouse/core/src/main/java/datawave/iterators/filter/ColumnFamilyRegexFilter.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/ColumnFamilyRegexFilter.java diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/ColumnQualifierRegexFilter.java 
b/warehouse/age-off/src/main/java/datawave/iterators/filter/ColumnQualifierRegexFilter.java similarity index 100% rename from warehouse/core/src/main/java/datawave/iterators/filter/ColumnQualifierRegexFilter.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/ColumnQualifierRegexFilter.java diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/ColumnVisibilityAndFilter.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/ColumnVisibilityAndFilter.java similarity index 97% rename from warehouse/core/src/main/java/datawave/iterators/filter/ColumnVisibilityAndFilter.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/ColumnVisibilityAndFilter.java index 8b210cccb28..86cf76b2ba3 100644 --- a/warehouse/core/src/main/java/datawave/iterators/filter/ColumnVisibilityAndFilter.java +++ b/warehouse/age-off/src/main/java/datawave/iterators/filter/ColumnVisibilityAndFilter.java @@ -23,7 +23,7 @@ public class ColumnVisibilityAndFilter extends TokenFilterBase { @Override public boolean hasToken(Key k, Value v, byte[][] testTokens) { - boolean found[] = new boolean[testTokens.length]; + boolean[] found = new boolean[testTokens.length]; int numFound = 0; byte[] cv = k.getColumnVisibilityData().getBackingArray(); diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/ColumnVisibilityOrFilter.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/ColumnVisibilityOrFilter.java similarity index 100% rename from warehouse/core/src/main/java/datawave/iterators/filter/ColumnVisibilityOrFilter.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/ColumnVisibilityOrFilter.java diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/ColumnVisibilityRegexFilter.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/ColumnVisibilityRegexFilter.java similarity index 100% rename from warehouse/core/src/main/java/datawave/iterators/filter/ColumnVisibilityRegexFilter.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/ColumnVisibilityRegexFilter.java diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/ColumnVisibilityTokenizingFilter.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/ColumnVisibilityTokenizingFilter.java similarity index 100% rename from warehouse/core/src/main/java/datawave/iterators/filter/ColumnVisibilityTokenizingFilter.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/ColumnVisibilityTokenizingFilter.java diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/ConfigurableAgeOffFilter.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/ConfigurableAgeOffFilter.java similarity index 86% rename from warehouse/core/src/main/java/datawave/iterators/filter/ConfigurableAgeOffFilter.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/ConfigurableAgeOffFilter.java index 675b3cf6083..21f1f692bb0 100644 --- a/warehouse/core/src/main/java/datawave/iterators/filter/ConfigurableAgeOffFilter.java +++ b/warehouse/age-off/src/main/java/datawave/iterators/filter/ConfigurableAgeOffFilter.java @@ -1,21 +1,20 @@ package datawave.iterators.filter; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Date; import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.TreeMap; import java.util.concurrent.ExecutionException; -import 
java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.client.PluginEnvironment;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.Filter;
@@ -23,22 +22,21 @@
 import org.apache.accumulo.core.iterators.IteratorUtil;
 import org.apache.accumulo.core.iterators.OptionDescriber;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-import org.apache.accumulo.core.util.threads.ThreadPools;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.log4j.Logger;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.LoadingCache;
 import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
-import datawave.ingest.util.cache.ReloadableCacheBuilder;
-import datawave.ingest.util.cache.watch.FileRuleWatcher;
+import datawave.ingest.util.cache.watch.FileRuleCacheLoader;
+import datawave.ingest.util.cache.watch.FileRuleCacheValue;
 import datawave.iterators.filter.ageoff.AgeOffPeriod;
 import datawave.iterators.filter.ageoff.AppliedRule;
 import datawave.iterators.filter.ageoff.FilterRule;
+import datawave.util.CompositeTimestamp;
 
 /**
  * This class provides a subclass of the {@code org.apache.accumulo.core.iterators.Filter} class and implements the {@code Option Describer} interface. It
@@ -52,15 +50,17 @@
  * {@code AgeOffConfigParams.FILTER_CONFIG} {@code AgeOffConfigParams.TTL_SHORT_CIRCUIT} can be optionally used to short circuit invoking the filters and will
  * allow all records younger than that interval to be passed through. The units definition is used for both {@code AgeOffConfigParams.TTL} and
  * {@code AgeOffConfigParams.TTL_SHORT_CIRCUIT}.
- *
+ * <p>
 *
+ * <p>
 * The filtering rules are stored in a configuration file, which may be stored in the local file system, or in HDFS. If it is stored in the local filesystem,
 * then it must be available on all of the tablet servers' filesystems. The configuration file should be specified as a full URL such as
 * {@code file:///opt/accumulo/config/configFilter.xml} or {@code hdfs://config/filters/configFilter.xml}.
+ * <p>
 *
+ * <p>
 * The TTL Units may be the following values:
+ * <p>
 * <ul>
 * <li>{@code ms} - milliseconds
 * <li>{@code s} - seconds
@@ -71,8 +71,7 @@
 *
 * Sample Configuration File:
- *
- * <pre>
+ * <pre>
 *
 * <ageoffConfiguration>
      @@ -107,9 +106,10 @@ public class ConfigurableAgeOffFilter extends Filter implements OptionDescriber
       
           private static final Logger log = Logger.getLogger(ConfigurableAgeOffFilter.class);
       
      -    private static final ScheduledThreadPoolExecutor SIMPLE_TIMER = ThreadPools.getServerThreadPools().createScheduledExecutorService(1,
      -                    ConfigurableAgeOffFilter.class.getSimpleName() + "-ruleCache-refresh", false);
      +    private static final ThreadFactory TIMER_THREAD_FACTORY = new ThreadFactoryBuilder()
      +                    .setNameFormat(ConfigurableAgeOffFilter.class.getSimpleName() + "-ruleCache-refresh-%d").build();
       
      +    private static final ScheduledExecutorService SIMPLE_TIMER = Executors.newSingleThreadScheduledExecutor(TIMER_THREAD_FACTORY);
           public static final String UPDATE_INTERVAL_MS_PROP = "tserver.datawave.ageoff.cache.update.interval.ms";
           protected static final long DEFAULT_UPDATE_INTERVAL_MS = 5;
           protected static long UPDATE_INTERVAL_MS = DEFAULT_UPDATE_INTERVAL_MS;
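The hunk above swaps Accumulo's internal ThreadPools utility for a plain JDK single-thread scheduler named via Guava's ThreadFactoryBuilder. A minimal, self-contained sketch of the same construction (the daemon flag is an assumption of this sketch, not something the diff shows):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

class RuleCacheRefreshTimerSketch {
    // Single-threaded scheduler whose threads carry a recognizable name, mirroring
    // the TIMER_THREAD_FACTORY/SIMPLE_TIMER pair above.
    static ScheduledExecutorService newTimer(String owner) {
        ThreadFactory factory = new ThreadFactoryBuilder().setNameFormat(owner + "-ruleCache-refresh-%d")
                        .setDaemon(true) // assumption: daemon threads so the timer cannot pin the JVM on shutdown
                        .build();
        return Executors.newSingleThreadScheduledExecutor(factory);
    }

    public static void main(String[] args) {
        ScheduledExecutorService timer = newTimer("ConfigurableAgeOffFilter");
        timer.scheduleWithFixedDelay(() -> System.out.println("refresh interval check"), 1, 1, TimeUnit.SECONDS);
    }
}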
      @@ -118,11 +118,7 @@ public class ConfigurableAgeOffFilter extends Filter implements OptionDescriber
           protected static final long DEFAULT_EXPIRATION_INTERVAL_MS = 60 * 60 * 1000L; // default 1 hour
           protected static long EXPIRATION_INTERVAL_MS = DEFAULT_EXPIRATION_INTERVAL_MS;
       
      -    /**
      -     * Changed filter list to use FilterRule
      -     */
      -
-    protected static LoadingCache<FileRuleWatcher,Collection<FilterRule>> ruleCache = null;
+    protected static volatile LoadingCache<String,FileRuleCacheValue> ruleCache = null;
       
    protected Collection<AppliedRule> filterList;
       
      @@ -134,10 +130,12 @@ public class ConfigurableAgeOffFilter extends Filter implements OptionDescriber
       
           protected String filename;
       
      -    protected static FileSystem fs = null;
      -
           protected IteratorEnvironment myEnv;
       
      +    protected PluginEnvironment pluginEnv;
      +
      +    protected FileRuleCacheValue cacheValue;
      +
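The new pluginEnv and cacheValue fields tie the filter to the FileRuleCacheValue machinery added earlier in this diff. A hedged sketch of using that class directly (the config URL is a placeholder, and Collection<AppliedRule> is assumed from the javadoc since this rendering dropped the generic parameters):

import java.io.IOException;
import java.util.Collection;

import org.apache.accumulo.core.iterators.IteratorEnvironment;

import datawave.ingest.util.cache.watch.FileRuleCacheValue;
import datawave.iterators.filter.ageoff.AppliedRule;

class RuleCacheValueSketch {
    // The first call loads the rule file and caches the baseline; later calls hand
    // back deep copies anchored at the caller's scan start time.
    static Collection<AppliedRule> rulesFor(String configUrl, long scanStart, IteratorEnvironment env) throws IOException {
        FileRuleCacheValue value = FileRuleCacheValue.newCacheValue(configUrl, 1);
        return value.newRulesetView(scanStart, env);
    }
}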
           // Adding the ability to disable the filter checks in the case of a system-initialized major compaction for example.
    // The thought is that we force compactions where we want the data to be aged off.
           // The system-initialized compactions are on data just imported in which case they are not expected to remove much.
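The comments above describe skipping age-off for system-initiated compactions; a hedged sketch of the kind of check involved (Accumulo 2.1's IteratorEnvironment exposes the scope and compaction kind; the real filter gates this behind configuration options):

import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;

class CompactionGateSketch {
    // True for a major compaction that is neither full nor user-requested,
    // i.e. the case the comments above want to leave untouched.
    static boolean isSystemPartialMajc(IteratorEnvironment env) {
        if (env == null || env.getIteratorScope() != IteratorScope.majc) {
            return false;
        }
        return !env.isFullMajorCompaction() || !env.isUserCompaction();
    }
}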
      @@ -171,7 +169,7 @@ public boolean accept(Key k, Value v) {
                   return true;
       
               // short circuit check
      -        long timeStamp = k.getTimestamp();
      +        long timeStamp = CompositeTimestamp.getAgeOffDate(k.getTimestamp());
               if (timeStamp > this.shortCircuitDateMillis)
                   return true;
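This is one of several hunks in the diff that route timestamps through CompositeTimestamp.getAgeOffDate before comparing against a cutoff. A minimal sketch of the pattern, assuming only that a non-composite timestamp passes through unchanged:

import org.apache.accumulo.core.data.Key;

import datawave.util.CompositeTimestamp;

class AgeOffDateSketch {
    // Mirrors the short-circuit test above: a key survives when its age-off date is
    // newer than the cutoff.
    static boolean survives(Key k, long cutoffMillis) {
        long ageOffDate = CompositeTimestamp.getAgeOffDate(k.getTimestamp());
        return ageOffDate > cutoffMillis;
    }
}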
       
      @@ -200,11 +198,17 @@ public boolean accept(Key k, Value v) {
    public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
       
               myEnv = env;
      +        pluginEnv = env == null ? null : env.getPluginEnv();
               return ((ConfigurableAgeOffFilter) super.deepCopy(env)).initialize(this);
           }
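deepCopy now captures the PluginEnvironment alongside the IteratorEnvironment; further down, getLongProperty reads tserver properties through it rather than through AccumuloConfiguration. A sketch of that lookup (PluginEnvironment.getConfiguration().get(String) is the Accumulo 2.1 API the diff switches to):

import org.apache.accumulo.core.client.PluginEnvironment;

class PropertyLookupSketch {
    // Reads a long-valued property with a fallback; mirrors getLongProperty below.
    static long readLong(PluginEnvironment pluginEnv, String prop, long fallback) {
        if (pluginEnv != null && pluginEnv.getConfiguration() != null) {
            String value = pluginEnv.getConfiguration().get(prop);
            if (value != null) {
                return Long.parseLong(value);
            }
        }
        return fallback;
    }
}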
       
           /**
     * Initialize the object via some other configurable age off filter.
      +     *
      +     * @param other
      +     *            another filter to base this one off
      +     *
      +     * @return the configurable age off filter
            */
           protected ConfigurableAgeOffFilter initialize(ConfigurableAgeOffFilter other) {
       
      @@ -227,11 +231,20 @@ protected ConfigurableAgeOffFilter initialize(ConfigurableAgeOffFilter other) {
            * Initialize this object with a set of string parameters representing the configuration options for this iterator.
            *
            * @param ttl
      +     *            time to live
            * @param ttlUnits
      +     *            time to live units
      +     *
      +     * @param ttlShortCircuitStr
      +     *            time to live short circuit string
            * @param scanStart
      +     *            scan start time
            * @param fileName
      +     *            file name
            * @throws IOException
      +     *             if error reading the file
            * @throws IllegalArgumentException
      +     *             if illegal arguments passed
            */
           protected void initialize(final String ttl, final String ttlUnits, final String ttlShortCircuitStr, final long scanStart, final String fileName)
                           throws IllegalArgumentException, IOException {
      @@ -294,9 +307,13 @@ protected void initialize(final String ttl, final String ttlUnits, final String
           /**
            * Used to initialize the default parameters used by this implementation of {@code Filter}, as well as the sub-filters specified in the configuration file.
            *
      +     * @param source
+     *            the source key/value iterator
            * @param options
            *            {@code Map} object contain the configuration parameters for this {@code Filter} implementation. The parameters required are
            *            specified in the {@code AgeOffConfigParams.TTL}, {@code AgeOffConfigParams.TTL_UNITS}, and {@code AgeOffConfigParams.FILTER_CONFIG}.
      +     * @param env
      +     *            the iterator environment
            * @see org.apache.accumulo.core.iterators.Filter#init(SortedKeyValueIterator, Map, IteratorEnvironment)
            */
           @Override
      @@ -304,6 +321,7 @@ public void init(SortedKeyValueIterator source, Map op
               super.init(source, options, env);
       
               myEnv = env;
      +        pluginEnv = env == null ? null : env.getPluginEnv();
               disabled = shouldDisableForNonFullCompaction(options, env) || shouldDisableForNonUserCompaction(options, env);
       
               Preconditions.checkNotNull(options, "Configuration filename and " + "the default ttl must be set for the ConfigurableAgeOffFilter");
      @@ -311,7 +329,6 @@ public void init(SortedKeyValueIterator source, Map op
               long sessionScanStart = options.containsKey(AgeOffConfigParams.SCAN_START_TIMESTAMP)
                               ? Long.parseLong(options.get(AgeOffConfigParams.SCAN_START_TIMESTAMP))
                               : System.currentTimeMillis();
      -
               initialize(options.get(AgeOffConfigParams.TTL), options.get(AgeOffConfigParams.TTL_UNITS), options.get(AgeOffConfigParams.TTL_SHORT_CIRCUIT),
                               sessionScanStart, options.get(AgeOffConfigParams.FILTER_CONFIG));
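For context, a hedged sketch of how these options reach init(): attaching the filter to a table through Accumulo's IteratorSetting API (the priority, iterator name, TTL values, and config URL are placeholders; the option keys are the same AgeOffConfigParams constants used above):

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.IteratorSetting;

import datawave.iterators.filter.AgeOffConfigParams;
import datawave.iterators.filter.ConfigurableAgeOffFilter;

class AttachAgeOffFilterSketch {
    static void attach(AccumuloClient client, String table) throws Exception {
        IteratorSetting setting = new IteratorSetting(100, "ageoff", ConfigurableAgeOffFilter.class);
        setting.addOption(AgeOffConfigParams.TTL, "365");
        setting.addOption(AgeOffConfigParams.TTL_UNITS, "d");
        setting.addOption(AgeOffConfigParams.FILTER_CONFIG, "hdfs:///config/filters/configFilter.xml"); // placeholder
        client.tableOperations().attachIterator(table, setting);
    }
}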
       
      @@ -326,7 +343,9 @@ public void init(SortedKeyValueIterator source, Map op
            * 
    * * @param options + * map of options * @param env + * the iterator environment * @return true only if we should disable filtering */ private boolean shouldDisableForNonFullCompaction(Map options, IteratorEnvironment env) { @@ -366,7 +385,9 @@ private boolean shouldDisableForNonFullCompaction(Map options, It * * * @param options + * map of options * @param env + * the iterator environment * @return true only if we should disable filtering */ private boolean shouldDisableForNonUserCompaction(Map options, IteratorEnvironment env) { @@ -398,22 +419,24 @@ private boolean shouldDisableForNonUserCompaction(Map options, It } /** - * This method instantiates the the necessary implementations of the {@code Filter} interface, as they are defined in the configuration file specified by + * This method instantiates the necessary implementations of the {@code Filter} interface, as they are defined in the configuration file specified by * {@code this.filename}. * * @throws IllegalArgumentException * if there is an error in the configuration file * @throws IOException + * if there is an error reading the configuration file */ private void initFilterRules() throws IllegalArgumentException, IOException { - // filename if (null == ruleCache) { synchronized (ConfigurableAgeOffFilter.class) { if (null == ruleCache) { UPDATE_INTERVAL_MS = getLongProperty(UPDATE_INTERVAL_MS_PROP, DEFAULT_UPDATE_INTERVAL_MS); // 5 ms EXPIRATION_INTERVAL_MS = getLongProperty(EXPIRATION_INTERVAL_MS_PROP, DEFAULT_EXPIRATION_INTERVAL_MS); // 1 hour + log.debug("Configured refresh interval (ms): " + UPDATE_INTERVAL_MS); + log.debug("Configured expiration interval (ms): " + EXPIRATION_INTERVAL_MS); ruleCache = CacheBuilder.newBuilder().refreshAfterWrite(UPDATE_INTERVAL_MS, TimeUnit.MILLISECONDS) - .expireAfterAccess(EXPIRATION_INTERVAL_MS, TimeUnit.MILLISECONDS).build(new ReloadableCacheBuilder()); + .expireAfterAccess(EXPIRATION_INTERVAL_MS, TimeUnit.MILLISECONDS).build(new FileRuleCacheLoader()); // this will schedule a check to see if the update or expiration intervals have changed // if so the ruleCache will be rebuilt with these new intervals SIMPLE_TIMER.scheduleWithFixedDelay(() -> { @@ -426,7 +449,7 @@ private void initFilterRules() throws IllegalArgumentException, IOException { log.info("Changing " + EXPIRATION_INTERVAL_MS_PROP + " to " + expiration); EXPIRATION_INTERVAL_MS = expiration; ruleCache = CacheBuilder.newBuilder().refreshAfterWrite(UPDATE_INTERVAL_MS, TimeUnit.MILLISECONDS) - .expireAfterAccess(EXPIRATION_INTERVAL_MS, TimeUnit.MILLISECONDS).build(new ReloadableCacheBuilder()); + .expireAfterAccess(EXPIRATION_INTERVAL_MS, TimeUnit.MILLISECONDS).build(new FileRuleCacheLoader()); } } catch (Throwable t) { log.error(t, t); @@ -436,60 +459,29 @@ private void initFilterRules() throws IllegalArgumentException, IOException { } } - Path filePath = new Path(filename); - if (null == fs) { - synchronized (ConfigurableAgeOffFilter.class) { - if (null == fs) { - if (log.isTraceEnabled()) { - log.trace("Setting FileSystem reference"); - } - fs = filePath.getFileSystem(new Configuration()); - } - } - } else { - if (log.isTraceEnabled()) { - log.trace("Reusing file system reference."); - } + try { + cacheValue = ruleCache.get(filename); + } catch (ExecutionException e) { + throw new IOException(e); } - FileRuleWatcher watcherKey = new FileRuleWatcher(fs, filePath, 1, myEnv); - - copyRules(watcherKey); + // the rule cache value will be static + // initial load of the baseline rules will occur here, that call may take time + // 
after initial load, each subsequent initialize performs a deep copy (init) against the AppliedRule
+        // the internal deep copy operations (i.e. calls after initial load) are anticipated to be quick
+        filterList = cacheValue.newRulesetView(scanStart, myEnv);
     }
 
     private long getLongProperty(final String prop, final long defaultValue) {
-        if (this.myEnv != null && this.myEnv.getConfig() != null) {
-            AccumuloConfiguration conf = this.myEnv.getConfig();
-            Map<String,String> properties = new TreeMap<>();
-            conf.getProperties(properties, p -> Objects.equals(prop, p));
-            if (properties.containsKey(prop)) {
-                return Long.parseLong(properties.get(prop));
+        if (pluginEnv != null && pluginEnv.getConfiguration() != null) {
+            String propValue = pluginEnv.getConfiguration().get(prop);
+            if (propValue != null) {
+                return Long.parseLong(propValue);
             }
         }
         return defaultValue;
     }
 
-    protected void copyRules(FileRuleWatcher watcherKey) throws IOException {
-        filterList = new ArrayList<>();
-        try {
-            // rule cache is lazily loaded, so the act of getting the key will populate it with the key
-            // and trigger a bunch of loading logic which will ultimately call
-            // FileRuleWatcher.loadContents() which will return the rules
-            Collection<FilterRule> rules = ruleCache.get(watcherKey);
-
-            if (rules != null) {
-                for (FilterRule rule : rules) {
-                    // NOTE: this propagates the anchor time (scanStart) to all of the applied rules
-                    // This is used to calculate the AgeOffPeriod for all of the rules
-                    filterList.add((AppliedRule) rule.deepCopy(this.scanStart, myEnv));
-                }
-            }
-
-        } catch (ExecutionException e) {
-            throw new IOException(e);
-        }
-    }
-
     /**
      * This method is used by accumulo and its command line shell to prompt the user for the configuration options for this {@code Filter}.
     *
@@ -562,6 +554,11 @@ private boolean validatePropertyIsBoolean(Map options, String pro
         return true;
     }
 
+    @VisibleForTesting
+    FileRuleCacheValue getFileRuleCacheValue() {
+        return cacheValue;
+    }
+
     /**
      * Clear the file watcher cache.
*/ @@ -572,7 +569,7 @@ public static void clearCache() { } } - public static LoadingCache> getCache() { + public static LoadingCache getCache() { return ruleCache; } diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/DateInColQualAgeOffFilter.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/DateInColQualAgeOffFilter.java similarity index 100% rename from warehouse/core/src/main/java/datawave/iterators/filter/DateInColQualAgeOffFilter.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/DateInColQualAgeOffFilter.java diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/EdgeColumnQualifierTokenFilter.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/EdgeColumnQualifierTokenFilter.java similarity index 100% rename from warehouse/core/src/main/java/datawave/iterators/filter/EdgeColumnQualifierTokenFilter.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/EdgeColumnQualifierTokenFilter.java diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/RegexFilterBase.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/RegexFilterBase.java similarity index 97% rename from warehouse/core/src/main/java/datawave/iterators/filter/RegexFilterBase.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/RegexFilterBase.java index 52e18edfb82..d19a9e5f670 100644 --- a/warehouse/core/src/main/java/datawave/iterators/filter/RegexFilterBase.java +++ b/warehouse/age-off/src/main/java/datawave/iterators/filter/RegexFilterBase.java @@ -13,6 +13,7 @@ import datawave.iterators.filter.ageoff.AgeOffPeriod; import datawave.iterators.filter.ageoff.AppliedRule; import datawave.iterators.filter.ageoff.FilterOptions; +import datawave.util.CompositeTimestamp; /** * This class provides an abstract base class to be extended to filter based on matching a REGEX to the {@code String} object that represents some portion of a @@ -69,7 +70,7 @@ public boolean accept(AgeOffPeriod period, Key k, Value v) { String keyField = getKeyField(k, v); Matcher matcher = pattern.matcher(keyField); if (matcher.find()) { - long timeStamp = k.getTimestamp(); + long timeStamp = CompositeTimestamp.getAgeOffDate(k.getTimestamp()); dtFlag = timeStamp > period.getCutOffMilliseconds(); if (log.isTraceEnabled()) { log.trace("timeStamp = " + timeStamp); @@ -109,6 +110,7 @@ public void init(FilterOptions options) { * @param options * {@code Map} object containing the TTL, TTL_UNITS, and MATCHPATTERN for the filter rule. 
* @param iterEnv + * iterator environment * @see datawave.iterators.filter.AgeOffConfigParams */ @Override diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/TokenFilterBase.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/TokenFilterBase.java similarity index 97% rename from warehouse/core/src/main/java/datawave/iterators/filter/TokenFilterBase.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/TokenFilterBase.java index ddb99fda6b0..b6a59a034da 100644 --- a/warehouse/core/src/main/java/datawave/iterators/filter/TokenFilterBase.java +++ b/warehouse/age-off/src/main/java/datawave/iterators/filter/TokenFilterBase.java @@ -10,6 +10,7 @@ import datawave.iterators.filter.ageoff.AgeOffPeriod; import datawave.iterators.filter.ageoff.AppliedRule; import datawave.iterators.filter.ageoff.FilterOptions; +import datawave.util.CompositeTimestamp; import datawave.util.StringUtils; /** @@ -69,7 +70,7 @@ public boolean accept(AgeOffPeriod period, Key k, Value v) { dtFlag = true; } else { if (hasToken(k, v, patternBytes)) { - long timeStamp = k.getTimestamp(); + long timeStamp = CompositeTimestamp.getAgeOffDate(k.getTimestamp()); dtFlag = timeStamp > period.getCutOffMilliseconds(); if (log.isTraceEnabled()) { log.trace("timeStamp = " + timeStamp); @@ -105,11 +106,12 @@ public void init(FilterOptions options) { } /** - * Required by the {@code FilterRule} interface. Used to initialize the the {@code FilterRule} implementation + * Required by the {@code FilterRule} interface. Used to initialize the {@code FilterRule} implementation * * @param options * {@code Map} object containing the TTL, TTL_UNITS, and MATCHPATTERN for the filter rule. * @param iterEnv + * iterator environment * @see datawave.iterators.filter.AgeOffConfigParams */ @Override diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/TokenSpecParser.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/TokenSpecParser.java similarity index 94% rename from warehouse/core/src/main/java/datawave/iterators/filter/TokenSpecParser.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/TokenSpecParser.java index 603ae37cf8a..a46c0893109 100644 --- a/warehouse/core/src/main/java/datawave/iterators/filter/TokenSpecParser.java +++ b/warehouse/age-off/src/main/java/datawave/iterators/filter/TokenSpecParser.java @@ -9,12 +9,24 @@ public abstract class TokenSpecParser { /** - * Add a new token with its TTL to the the structure. + * Add a new token with its TTL to the structure. + * + * @param token + * byte array token + * @param ttl + * time to live + * + * @return a TokenSpecParser child object */ public abstract B addToken(byte[] token, long ttl); /** * Parse additional token configurations from a string. + * + * @param configuration + * figuration string for the parser + * + * @return TokenSpecParser child object */ public B parse(String configuration) { ParserState parser = new ParserState(configuration); @@ -129,6 +141,8 @@ private List tokenize(String input) { /** * Return the next token without advancing. + * + * @return the next token */ protected ParseToken peek() { if (nextTokenPos >= parseTokens.size()) { @@ -139,6 +153,11 @@ protected ParseToken peek() { /** * Consume the next token, assuming it's of the specified type, and return its content. 
+ * + * @param type + * the parse token type + * + * @return the content of the next token */ protected String expect(ParseTokenType type) { ParseToken next = peek(); @@ -153,6 +172,9 @@ protected String expect(ParseTokenType type) { /** * Parse the entire input and add it to the TtlTrieBuilder. + * + * @param builder + * token spec parser */ protected void parseTo(TokenSpecParser builder) { ParseToken initialToken; @@ -175,6 +197,8 @@ protected void parseTo(TokenSpecParser builder) { /** * Read a string literal. + * + * @return the parsed string literal */ protected String parseStrliteral() { ParseToken token = peek(); diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/TokenTtlTrie.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/TokenTtlTrie.java similarity index 94% rename from warehouse/core/src/main/java/datawave/iterators/filter/TokenTtlTrie.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/TokenTtlTrie.java index 691e85040fd..86cca0b429a 100644 --- a/warehouse/core/src/main/java/datawave/iterators/filter/TokenTtlTrie.java +++ b/warehouse/age-off/src/main/java/datawave/iterators/filter/TokenTtlTrie.java @@ -38,6 +38,11 @@ public int size() { /** * Scan the specified string for tokens, returning the ttl of the best priority token found, or null if no tokens were found. + * + * @param rawString + * the raw string + * + * @return the ttl for the best priority token found */ public Long scan(byte[] rawString) { int bestPriority = Integer.MAX_VALUE; @@ -88,11 +93,11 @@ public static class Builder extends TokenSpecParser { private final List stateTtlList = new ArrayList<>(); private final List statePriorityList = new ArrayList<>(); private final Set delimiters = new HashSet<>(); - private boolean isMerge; + private final boolean isMerge; public enum MERGE_MODE { ON, OFF - }; + } Builder() { this(MERGE_MODE.OFF); @@ -111,6 +116,11 @@ public int size() { /** * Set the delimiter set. + * + * @param delimiters + * the delimiter set + * + * @return builder to set the delimiters */ public Builder setDelimiters(byte[] delimiters) { this.delimiters.clear(); @@ -122,6 +132,13 @@ public Builder setDelimiters(byte[] delimiters) { /** * Add a token to the TtlTrie under construction, along with the TTL value the specified token should be associated with. 
+ * + * @param token + * the token to add + * @param ttl + * time to live + * + * @return builder to add provided token */ @Override public Builder addToken(byte[] token, long ttl) { diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/TokenizingFilterBase.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/TokenizingFilterBase.java similarity index 95% rename from warehouse/core/src/main/java/datawave/iterators/filter/TokenizingFilterBase.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/TokenizingFilterBase.java index b3d4cb45b2d..edb4e219f1d 100644 --- a/warehouse/core/src/main/java/datawave/iterators/filter/TokenizingFilterBase.java +++ b/warehouse/age-off/src/main/java/datawave/iterators/filter/TokenizingFilterBase.java @@ -10,6 +10,7 @@ import datawave.iterators.filter.ageoff.AgeOffPeriod; import datawave.iterators.filter.ageoff.AppliedRule; import datawave.iterators.filter.ageoff.FilterOptions; +import datawave.util.CompositeTimestamp; /** * TokenizingAgeoffFilter cuts a field into tokens (splitting at a specified set of delimiters), and makes ageoff decisions based on whether or not any of the @@ -60,6 +61,11 @@ public abstract class TokenizingFilterBase extends AppliedRule { /** * Return a list of delimiters for scans. While the default is to pull this information out of the {@code <delimiters>} tag in the configuration, * subclasses may wish to override this to provide fixed delimiter sets. + * + * @param options + * filter options + * + * @return list of delimiters for scans */ public byte[] getDelimiters(FilterOptions options) { String delimiters = options.getOption(DELIMITERS_TAG); @@ -114,7 +120,7 @@ public boolean accept(AgeOffPeriod period, Key k, Value V) { cutoffTimestamp -= calculatedTTL; } ruleApplied = true; - return k.getTimestamp() > cutoffTimestamp; + return CompositeTimestamp.getAgeOffDate(k.getTimestamp()) > cutoffTimestamp; } @Override diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/ageoff/AgeOffPeriod.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/AgeOffPeriod.java similarity index 96% rename from warehouse/core/src/main/java/datawave/iterators/filter/ageoff/AgeOffPeriod.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/AgeOffPeriod.java index 788d70e65b1..aa020d7522f 100644 --- a/warehouse/core/src/main/java/datawave/iterators/filter/ageoff/AgeOffPeriod.java +++ b/warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/AgeOffPeriod.java @@ -12,9 +12,9 @@ */ public class AgeOffPeriod { - private long cutOffMillis; - private long ttl; - private String ttlUnits; + private final long cutOffMillis; + private final long ttl; + private final String ttlUnits; private static final Logger log = Logger.getLogger(AgeOffPeriod.class); diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/ageoff/AppliedRule.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/AppliedRule.java similarity index 88% rename from warehouse/core/src/main/java/datawave/iterators/filter/ageoff/AppliedRule.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/AppliedRule.java index 9784525cd57..d123f7d5104 100644 --- a/warehouse/core/src/main/java/datawave/iterators/filter/ageoff/AppliedRule.java +++ b/warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/AppliedRule.java @@ -59,7 +59,14 @@ public void init(FilterOptions options, IteratorEnvironment iterEnv) { this.iterEnv = iterEnv; } - /** Perform 
initialization in support of a deepCopy, copying any expensive state from the parent. */ + /** + * Perform initialization in support of a deepCopy, copying any expensive state from the parent. + * + * @param newOptions + * filter options + * @param parentCopy + * applied rule of the parent copy + */ protected void deepCopyInit(FilterOptions newOptions, AppliedRule parentCopy) { init(newOptions, iterEnv); } @@ -105,7 +112,9 @@ public FilterRule deepCopy(AgeOffPeriod period, IteratorEnvironment iterEnv) { newFilter.deepCopyInit(currentOptions, this); // for some reason this needs to come after deep copy init newFilter.ageOffPeriod = new AgeOffPeriod(period.getCutOffMilliseconds()); - log.trace("Age off is " + newFilter.ageOffPeriod.getCutOffMilliseconds()); + if (log.isTraceEnabled()) { + log.trace("Age off is " + newFilter.ageOffPeriod.getCutOffMilliseconds()); + } return newFilter; } catch (InstantiationException | IllegalAccessException | NoSuchMethodException | InvocationTargetException e) { log.error(e); @@ -115,7 +124,10 @@ public FilterRule deepCopy(AgeOffPeriod period, IteratorEnvironment iterEnv) { /** * @param scanStart - * @return + * scan start time + * @param iterEnv + * iterator environment + * @return new filter rule */ public FilterRule deepCopy(long scanStart, IteratorEnvironment iterEnv) { AppliedRule newFilter; @@ -127,7 +139,9 @@ public FilterRule deepCopy(long scanStart, IteratorEnvironment iterEnv) { newFilter.deepCopyInit(newOptions, this); // for some reason this needs to come after deep copy init newFilter.ageOffPeriod = new AgeOffPeriod(scanStart, currentOptions.ttl, currentOptions.ttlUnits); - log.trace("Age off is " + newFilter.ageOffPeriod.getCutOffMilliseconds()); + if (log.isTraceEnabled()) { + log.trace("Age off is " + newFilter.ageOffPeriod.getCutOffMilliseconds()); + } return newFilter; } catch (InstantiationException | IllegalAccessException e) { log.error(e); @@ -136,7 +150,7 @@ public FilterRule deepCopy(long scanStart, IteratorEnvironment iterEnv) { /** - * @return + * @return the age off period */ protected AgeOffPeriod getPeriod() { return ageOffPeriod; diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/ageoff/DataTypeAgeOffFilter.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/DataTypeAgeOffFilter.java similarity index 97% rename from warehouse/core/src/main/java/datawave/iterators/filter/ageoff/DataTypeAgeOffFilter.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/DataTypeAgeOffFilter.java index 8acf643d18b..a9d7ee7f54b 100644 --- a/warehouse/core/src/main/java/datawave/iterators/filter/ageoff/DataTypeAgeOffFilter.java +++ b/warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/DataTypeAgeOffFilter.java @@ -16,6 +16,7 @@ import com.google.common.collect.Sets; import datawave.iterators.filter.AgeOffConfigParams; +import datawave.util.CompositeTimestamp; /** * Data type age off filter.
Traverses through indexed tables @@ -209,11 +210,11 @@ public boolean accept(AgeOffPeriod period, Key k, Value v) { if (dataTypeCutoff == null) { if (defaultCutoffTime >= 0) { ruleApplied = true; - accept = k.getTimestamp() > defaultCutoffTime; + accept = CompositeTimestamp.getAgeOffDate(k.getTimestamp()) > defaultCutoffTime; } } else { ruleApplied = true; - accept = k.getTimestamp() > dataTypeCutoff; + accept = CompositeTimestamp.getAgeOffDate(k.getTimestamp()) > dataTypeCutoff; } // after age-off is applied check, if we are accepting this KeyValue and this is a Scan on a dataType which only accepts on timestamp // only continue to accept the KeyValue if the timestamp for the dataType matches what is configured @@ -241,6 +242,7 @@ public void init(FilterOptions options) { * @param options * {@code Map} object containing the TTL, TTL_UNITS, and MATCHPATTERN for the filter rule. * @param iterEnv + * iterator environment * @see datawave.iterators.filter.AgeOffConfigParams */ public void init(FilterOptions options, IteratorEnvironment iterEnv) { diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/ageoff/FieldAgeOffFilter.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/FieldAgeOffFilter.java similarity index 86% rename from warehouse/core/src/main/java/datawave/iterators/filter/ageoff/FieldAgeOffFilter.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/FieldAgeOffFilter.java index 91d7130a749..53d230868e8 100644 --- a/warehouse/core/src/main/java/datawave/iterators/filter/ageoff/FieldAgeOffFilter.java +++ b/warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/FieldAgeOffFilter.java @@ -17,24 +17,21 @@ import datawave.iterators.filter.AgeOffConfigParams; import datawave.iterators.filter.ColumnVisibilityOrFilter; +import datawave.util.CompositeTimestamp; /** - * Data type age off filter. Traverses through indexed tables - * - * - * and non-indexed tables. Example follows. Note that - * - * any data type TTL will follow the same units specified in ttl units + * Field age off filter. Traverses through indexed tables and non-indexed tables. Example follows. Note that any field TTL will follow the same units specified + * in ttl units * *
      * {@code
      *
      * 
      *     
    - *         datawave.iterators.filter.ageoff.DataTypeAgeOffFilter
    + *         datawave.iterators.filter.ageoff.FieldAgeOffFilter
      *         720
    - *         foo,bar
    - *         44
    + *         fieldA,fieldB
    + *         44
      *     
      * 
      * }
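The configuration example above is consumed through FilterOptions at runtime. A minimal sketch of that wiring, assuming the option names exercised by the FieldAgeOffFilterTest changes later in this diff (the field name and TTL values are illustrative only):

    FieldAgeOffFilter ageOffFilter = new FieldAgeOffFilter();
    FilterOptions filterOptions = new FilterOptions();
    filterOptions.setTTL(5L);                           // default TTL for fields without an override
    filterOptions.setTTLUnits(AgeOffTtlUnits.SECONDS);
    filterOptions.setOption("fields", "field_y");       // fields participating in field-level age-off
    filterOptions.setOption("field_y.ttl", "2");        // per-field TTL override, same units as above
    ageOffFilter.init(filterOptions, iterEnv);          // iterEnv: an IteratorEnvironment, as in the tests

    // accept() keeps a key only while its (composite) timestamp is newer than the computed cutoff
    Key key = new Key("1234", "field_y", "field_y\u0000value", "MY_VIS", System.currentTimeMillis());
    boolean keep = ageOffFilter.accept(filterOptions.getAgeOffPeriod(System.currentTimeMillis()), key, new Value());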
    @@ -47,7 +44,7 @@ protected enum FieldExclusionType {
         }
     
         public static final String OPTION_PREFIX = "field.";
    -    private ColumnVisibilityOrFilter cvOrFilter = new ColumnVisibilityOrFilter();
    +    private final ColumnVisibilityOrFilter cvOrFilter = new ColumnVisibilityOrFilter();
         /**
          * Null byte
          */
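OPTION_PREFIX anchors the per-field option scheme; the test fixtures later in this diff (see verifyNumericFieldInOptions in FileRuleCacheValueTest) carry both the prefixed and the legacy unprefixed forms. A hedged illustration of the key shapes involved, with values taken from those tests:

    FilterOptions options = new FilterOptions();
    options.setOption("fields", "first,last");        // fields covered by the rule
    options.setOption("field.middle.ttl", "1234");    // prefixed form: OPTION_PREFIX + field + ".ttl"
    options.setOption("field.middle.ttlUnits", "m");  // per-field units override
    options.setOption("last.ttl", "2468");            // legacy unprefixed form, kept for backwards compatibility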
    @@ -87,7 +84,7 @@ protected enum FieldExclusionType {
         private static final Logger log = Logger.getLogger(FieldAgeOffFilter.class);
     
         /**
    -     * Determine whether or not the rules are applied
    +     * Determine whether the rules are applied
          */
         protected boolean ruleApplied = false;
     
    @@ -107,9 +104,9 @@ protected enum FieldExclusionType {
         protected Set fieldExcludeOptions = null;
     
         /**
    -     * Required by the {@code FilterRule} interface. This method returns a {@code boolean} value indicating whether or not to allow the {@code (Key, Value)}
    -     * pair through the rule. A value of {@code true} indicates that he pair should be passed onward through the {@code Iterator} stack, and {@code false}
    -     * indicates that the {@code (Key, Value)} pair should not be passed on.
    +     * Required by the {@code FilterRule} interface. This method returns a {@code boolean} value indicating whether to allow the {@code (Key, Value)} pair
    +     * through the rule. A value of {@code true} indicates that the pair should be passed onward through the {@code Iterator} stack, and {@code false} indicates
    +     * that the {@code (Key, Value)} pair should not be passed on.
          *
          * 

    * If the value provided in the parameter {@code k} does not match the REGEX pattern specified in this filter's configuration options, then a value of @@ -119,17 +116,12 @@ protected enum FieldExclusionType { * {@code Key} object containing the row, column family, and column qualifier. * @param v * {@code Value} object containing the value corresponding to the {@code Key: k} - * @return {@code boolean} value indicating whether or not to allow the {@code Key, Value} through the {@code Filter}. + * @return {@code boolean} value indicating whether to allow the {@code Key, Value} through the {@code Filter}. */ @Override public boolean accept(AgeOffPeriod period, Key k, Value v) { ruleApplied = false; - // if accepted by ColumnVisibilityOrFilter logic, pass the K/V up the iterator stack - // otherwise evaluate based on field - if (cvOrFilter.hasToken(k, v, this.cvOrFilter.getPatternBytes()) == false) { - return true; - } // get the column qualifier, so that we can use it throughout final byte[] cq = k.getColumnQualifierData().getBackingArray(); @@ -137,9 +129,7 @@ public boolean accept(AgeOffPeriod period, Key k, Value v) { ByteSequence field = null; FieldExclusionType candidateExclusionType = null; - /** - * Supports the shard and index table. There should not be a failure, however if either one is used on the incorrect table - */ + // Supports the shard and index table. There should not be a failure, however if either one is used on the incorrect table if (isIndextable) { field = k.getColumnFamilyData(); @@ -209,13 +199,15 @@ public boolean accept(AgeOffPeriod period, Key k, Value v) { return true; } - Long dataTypeCutoff = (fieldTimes.containsKey(field)) ? fieldTimes.get(field) : null; - if (dataTypeCutoff != null) { - ruleApplied = true; - return k.getTimestamp() > dataTypeCutoff; + Long dataTypeCutoff = fieldTimes.get(field); + + // evaluating field first then the ColumnVisibilityOrFilter based on performance test results + if (dataTypeCutoff == null || !cvOrFilter.hasToken(k, v, this.cvOrFilter.getPatternBytes())) { + return true; } - return true; + ruleApplied = true; + return CompositeTimestamp.getAgeOffDate(k.getTimestamp()) > dataTypeCutoff; } /** @@ -235,6 +227,8 @@ public void init(FilterOptions options) { * @param options * {@code Map} object containing the TTL, TTL_UNITS, and MATCHPATTERN for the filter rule. * @param iterEnv + * iterator environment + * * @see datawave.iterators.filter.AgeOffConfigParams */ public void init(FilterOptions options, IteratorEnvironment iterEnv) { @@ -276,7 +270,7 @@ protected void init(FilterOptions options, final long startScan, IteratorEnviron isIndextable = Boolean.parseBoolean(iterEnv.getConfig().get("table.custom." 
+ AgeOffConfigParams.IS_INDEX_TABLE)); } } else { // legacy - isIndextable = Boolean.valueOf(options.getOption(AgeOffConfigParams.IS_INDEX_TABLE)); + isIndextable = Boolean.parseBoolean(options.getOption(AgeOffConfigParams.IS_INDEX_TABLE)); } fieldExcludeOptions = new HashSet<>(); diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/ageoff/FilterOptions.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/FilterOptions.java similarity index 100% rename from warehouse/core/src/main/java/datawave/iterators/filter/ageoff/FilterOptions.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/FilterOptions.java diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/ageoff/FilterRule.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/FilterRule.java similarity index 75% rename from warehouse/core/src/main/java/datawave/iterators/filter/ageoff/FilterRule.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/FilterRule.java index e98790fc336..2916d833daa 100644 --- a/warehouse/core/src/main/java/datawave/iterators/filter/ageoff/FilterRule.java +++ b/warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/FilterRule.java @@ -10,7 +10,7 @@ */ public interface FilterRule { /** - * Used to initialize the the {@code FilterRule} implementation + * Used to initialize the {@code FilterRule} implementation * * @param options * {@code Map} object @@ -18,17 +18,21 @@ public interface FilterRule { void init(FilterOptions options); /** - * Used to initialize the the {@code FilterRule} implementation + * Used to initialize the {@code FilterRule} implementation * * @param options * {@code Map} object * @param iterEnv + * iterator environment */ void init(FilterOptions options, IteratorEnvironment iterEnv); /** * Used to test a {@code Key/Value} pair, and returns {@code true} if it is accepted * + * @param iter + * key/value iterator + * * @return {@code boolean} value. */ boolean accept(SortedKeyValueIterator iter); @@ -37,7 +41,10 @@ public interface FilterRule { /** * @param scanStart - * @return + * index to start scan + * @param iterEnv + * the iterator environment + * @return a copy */ FilterRule deepCopy(long scanStart, IteratorEnvironment iterEnv); diff --git a/warehouse/core/src/main/java/datawave/iterators/filter/ageoff/MaximumAgeOffFilter.java b/warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/MaximumAgeOffFilter.java similarity index 95% rename from warehouse/core/src/main/java/datawave/iterators/filter/ageoff/MaximumAgeOffFilter.java rename to warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/MaximumAgeOffFilter.java index b4fe4b7c9ed..643bb59accc 100644 --- a/warehouse/core/src/main/java/datawave/iterators/filter/ageoff/MaximumAgeOffFilter.java +++ b/warehouse/age-off/src/main/java/datawave/iterators/filter/ageoff/MaximumAgeOffFilter.java @@ -6,6 +6,7 @@ import org.apache.log4j.Logger; import datawave.iterators.filter.AgeOffConfigParams; +import datawave.util.CompositeTimestamp; /** * Data type age off filter. 
Traverses through indexed tables @@ -59,7 +60,7 @@ public boolean accept(AgeOffPeriod period, Key k, Value v) { // this rule determines whether to accept / deny (ageoff) a K/V // based solely on whether a timestamp is before (older than) the cutoff for aging off - if (k.getTimestamp() > period.getCutOffMilliseconds()) { + if (CompositeTimestamp.getAgeOffDate(k.getTimestamp()) > period.getCutOffMilliseconds()) { return true; } else { return false; @@ -83,6 +84,8 @@ public void init(FilterOptions options) { * @param options * {@code Map} object containing the TTL, TTL_UNITS, and MATCHPATTERN for the filter rule. * @param iterEnv + * iterator environment + * * @see datawave.iterators.filter.AgeOffConfigParams */ public void init(FilterOptions options, IteratorEnvironment iterEnv) { diff --git a/warehouse/core/src/test/java/datawave/ingest/util/cache/watch/DifferentClassesMergeTest.java b/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/DifferentClassesMergeTest.java similarity index 95% rename from warehouse/core/src/test/java/datawave/ingest/util/cache/watch/DifferentClassesMergeTest.java rename to warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/DifferentClassesMergeTest.java index df9166e8a39..aacbe3de00e 100644 --- a/warehouse/core/src/test/java/datawave/ingest/util/cache/watch/DifferentClassesMergeTest.java +++ b/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/DifferentClassesMergeTest.java @@ -24,13 +24,12 @@ * Tests to verify capability of merging configs for rules that use different filters with overlapping matchPattern formats */ public class DifferentClassesMergeTest { - private static final String ROOT_FILTER_CONFIGURATION_FILE = "/alternate-root.xml"; - private static final String CHILD_FILTER_CONFIGURATION_FILE = "/alternate-child.xml"; + private static final String ROOT_FILTER_CONFIGURATION_FILE = "/filter/alternate-root.xml"; + private static final String CHILD_FILTER_CONFIGURATION_FILE = "/filter/alternate-child.xml"; private static final long VERY_OLD_TIMESTAMP = -1000L * 60 * 60 * 24 * 365 * 1000; // 1,000 years in the past private static final long TIMESTAMP_IN_FUTURE = 1000L * 60 * 60 * 24 * 365 * 1000; // 1,000 years in the future public static final long DAYS_AGO = -1000L * 60 * 60 * 24; - private FileRuleWatcher watcher; private ColumnVisibilityLabeledFilter parentFilter; // childFilter inherits matchPattern contents from parentFilter private EdgeColumnQualifierTokenFilter childFilter; @@ -40,12 +39,11 @@ public void before() throws IOException { // create childFilter Path childPath = new Path(this.getClass().getResource(CHILD_FILTER_CONFIGURATION_FILE).toString()); FileSystem fs = childPath.getFileSystem(new Configuration()); - watcher = new FileRuleWatcher(fs, childPath, 1); - childFilter = (EdgeColumnQualifierTokenFilter) loadRulesFromFile(watcher, fs, childPath); + childFilter = (EdgeColumnQualifierTokenFilter) loadRulesFromFile(fs, childPath); // create parentFilter Path rootPath = new Path(this.getClass().getResource(ROOT_FILTER_CONFIGURATION_FILE).toString()); - parentFilter = (ColumnVisibilityLabeledFilter) loadRulesFromFile(watcher, fs, rootPath); + parentFilter = (ColumnVisibilityLabeledFilter) loadRulesFromFile(fs, rootPath); } @Test @@ -208,8 +206,9 @@ private void assertParentRejects(String colVis, long timestamp) { assertFalse(parentFilter.accept(key, new Value())); } - private static AppliedRule loadRulesFromFile(FileRuleWatcher watcher, FileSystem fs, Path filePath) throws IOException { - Collection rules 
= watcher.loadContents(fs.open(filePath)); + private static AppliedRule loadRulesFromFile(FileSystem fs, Path filePath) throws IOException { + FileRuleCacheValue ruleValue = new FileRuleCacheValue(fs, filePath, 1); + Collection rules = ruleValue.loadFilterRules(null); // should only have the single rule assertEquals(1, rules.size()); return (AppliedRule) rules.iterator().next(); diff --git a/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/FileRuleCacheLoaderTest.java b/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/FileRuleCacheLoaderTest.java new file mode 100644 index 00000000000..d2506f755e9 --- /dev/null +++ b/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/FileRuleCacheLoaderTest.java @@ -0,0 +1,44 @@ +package datawave.ingest.util.cache.watch; + +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertSame; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import org.junit.Test; + +import com.google.common.util.concurrent.ListenableFuture; + +public class FileRuleCacheLoaderTest { + @Test + public void testReloadReturnsNewInstanceWhenChanged() throws Exception { + String path = "file:/path/to/file"; + FileRuleCacheLoader loader = new FileRuleCacheLoader(); + FileRuleCacheValue val = mock(FileRuleCacheValue.class); + when(val.hasChanges()).thenReturn(true); + ListenableFuture reloadedVal = loader.reload(path, val); + + assertNotSame(val, reloadedVal.get()); + } + + @Test + public void testReloadReturnsSameInstanceWhenNotChanged() throws Exception { + String path = "file:/path/to/file"; + FileRuleCacheLoader loader = new FileRuleCacheLoader(); + FileRuleCacheValue val = mock(FileRuleCacheValue.class); + when(val.hasChanges()).thenReturn(false); + ListenableFuture reloadedVal = loader.reload(path, val); + + assertSame(val, reloadedVal.get()); + } + + @Test + public void testLoadWillCreateNewInstance() throws Exception { + String path = "file:/path/to/file"; + FileRuleCacheLoader loader = new FileRuleCacheLoader(); + FileRuleCacheValue loadedVal = loader.load(path); + + assertEquals(path, loadedVal.getFilePath().toString()); + } +} diff --git a/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/FileRuleCacheValueTest.java b/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/FileRuleCacheValueTest.java new file mode 100644 index 00000000000..ef6660ed9e7 --- /dev/null +++ b/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/FileRuleCacheValueTest.java @@ -0,0 +1,167 @@ +package datawave.ingest.util.cache.watch; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +import datawave.iterators.filter.AgeOffConfigParams; +import datawave.iterators.filter.ageoff.AppliedRule; +import datawave.iterators.filter.ageoff.FilterRule; + +public class FileRuleCacheValueTest { + // Derived from original FileRuleWatcherTest unit tests + + private static final String FILTER_CONFIGURATION_FILE = "/filter/test-filter-rules.xml"; + private static final String FILE_WITH_MISSING_FILTER_CLASS = "/filter/missing-filter-class.xml"; + private static final String DEFAULT_UNITS = 
"d"; + private FileRuleCacheValue ruleValue; + private FileSystem fs; + private Path filePath; + private Collection rules; + private Map rulesByMatchPattern; + + @Before + public void before() throws IOException { + rulesByMatchPattern = new HashMap<>(); + filePath = new Path(this.getClass().getResource(FILTER_CONFIGURATION_FILE).toString()); + fs = filePath.getFileSystem(new Configuration()); + ruleValue = new FileRuleCacheValue(fs, filePath, 1); + rules = ruleValue.loadFilterRules(null); + Assert.assertEquals(5, rules.size()); + for (FilterRule rule : rules) { + Assert.assertEquals(TestFilter.class, rule.getClass()); + TestFilter testFilter = (TestFilter) rule; + String matchPattern = testFilter.options.getOption(AgeOffConfigParams.MATCHPATTERN); + rulesByMatchPattern.put(matchPattern, testFilter); + } + } + + @Test + public void verifyNoBleedOverOfTTlValue() { + Assert.assertEquals(10, rulesByMatchPattern.get("1").options.getTTL()); + Assert.assertEquals(-1, rulesByMatchPattern.get("A").options.getTTL()); + Assert.assertEquals(50, rulesByMatchPattern.get("B").options.getTTL()); + Assert.assertEquals(-1, rulesByMatchPattern.get("C").options.getTTL()); + Assert.assertEquals(10, rulesByMatchPattern.get("D").options.getTTL()); + } + + @Test + public void verifyNoBleedOverOfTTlUnits() { + Assert.assertEquals("ms", rulesByMatchPattern.get("1").options.getTTLUnits()); + Assert.assertEquals(DEFAULT_UNITS, rulesByMatchPattern.get("A").options.getTTLUnits()); + Assert.assertEquals("d", rulesByMatchPattern.get("B").options.getTTLUnits()); + Assert.assertEquals(DEFAULT_UNITS, rulesByMatchPattern.get("C").options.getTTLUnits()); + Assert.assertEquals("ms", rulesByMatchPattern.get("D").options.getTTLUnits()); + } + + @Test + public void verifyNoBleedOverOfExtendedOptions() { + Assert.assertEquals("false", rulesByMatchPattern.get("1").options.getOption("filtersWater")); + Assert.assertNull(rulesByMatchPattern.get("A").options.getOption("filtersWater")); + Assert.assertEquals("true", rulesByMatchPattern.get("B").options.getOption("filtersWater")); + Assert.assertNull(rulesByMatchPattern.get("C").options.getOption("filtersWater")); + Assert.assertEquals("false", rulesByMatchPattern.get("D").options.getOption("filtersWater")); + + Assert.assertEquals("1234", rulesByMatchPattern.get("1").options.getOption("myTagName.ttl")); + Assert.assertNull(rulesByMatchPattern.get("A").options.getOption("myTagName.ttl")); + Assert.assertNull(rulesByMatchPattern.get("B").options.getOption("myTagName.ttl")); + Assert.assertNull(rulesByMatchPattern.get("C").options.getOption("myTagName.ttl")); + Assert.assertNull(rulesByMatchPattern.get("D").options.getOption("myTagName.ttl")); + } + + @Test + public void verifyDeepCopyWillSeeDifferentRules() throws IOException { + Collection v1 = ruleValue.newRulesetView(0, null); + FileRuleReference r1 = ruleValue.getRuleRef(); + Collection v2 = ruleValue.newRulesetView(0, null); + FileRuleReference r2 = ruleValue.getRuleRef(); + + // check to ensure the applied rules returned are different objects + // but the underlying rule reference is not changing + Assert.assertNotSame(v1, v2); + Assert.assertSame(r1, r2); + } + + @Test(expected = IOException.class) + public void verifyFilterClass() throws IOException { + Path fileWithMissingClassname = new Path(this.getClass().getResource(FILE_WITH_MISSING_FILTER_CLASS).toString()); + FileRuleCacheValue exValue = new FileRuleCacheValue(fs, fileWithMissingClassname, 1); + rules = exValue.loadFilterRules(null); + } + + @Test + public void 
verifyNumericFieldInOptions() { + // backwards compatibility + Assert.assertEquals("2468", rulesByMatchPattern.get("D").options.getOption("last.ttl")); + Assert.assertEquals(10, rulesByMatchPattern.get("D").options.getTTL()); + Assert.assertEquals("ms", rulesByMatchPattern.get("D").options.getTTLUnits()); + // revised options + Assert.assertEquals("first,last", rulesByMatchPattern.get("D").options.getOption("fields")); + Assert.assertEquals("1234", rulesByMatchPattern.get("D").options.getOption("field.middle.ttl")); + Assert.assertEquals("m", rulesByMatchPattern.get("D").options.getOption("field.middle.ttlUnits")); + Assert.assertEquals("10", rulesByMatchPattern.get("D").options.getOption("field.suffix.ttl")); + Assert.assertEquals("d", rulesByMatchPattern.get("D").options.getOption("field.suffix.ttlUnits")); + Assert.assertEquals("77", rulesByMatchPattern.get("D").options.getOption("datatype.012345.ttl")); + Assert.assertEquals("ms", rulesByMatchPattern.get("D").options.getOption("datatype.012345.ttlUnits")); + } + + @Test + public void verifyHasChangesIfNotInitializedReturnsChanges() { + FileSystem fs = Mockito.mock(FileSystem.class); + Path path = new Path("hdfs://path/to/file"); + FileRuleCacheValue val = new FileRuleCacheValue(fs, path, 1L); + Assert.assertTrue(val.hasChanges()); + } + + @Test + public void verifyHasChangesIfThrowsReturnsChanges() throws IOException { + FileSystem fsSpy = Mockito.spy(fs); + Mockito.when(fsSpy.getFileStatus(filePath)).thenThrow(new IllegalStateException("Unable to fetch status")); + FileRuleCacheValue val = new FileRuleCacheValue(fs, filePath, 1L); + Assert.assertTrue(val.hasChanges()); + } + + @Test + public void verifyHasChangesWhenChanges() throws IOException { + long timestampBaseline = 0L; + long configuredDiff = 1L; + FileSystem fsSpy = Mockito.spy(fs); + FileStatus statusBase = Mockito.mock(FileStatus.class); + FileStatus statusUnchanged = Mockito.mock(FileStatus.class); + FileStatus statusChanged = Mockito.mock(FileStatus.class); + Mockito.when(statusBase.getModificationTime()).thenReturn(timestampBaseline); + Mockito.when(statusUnchanged.getModificationTime()).thenReturn(timestampBaseline + configuredDiff); + Mockito.when(statusChanged.getModificationTime()).thenReturn(timestampBaseline + configuredDiff + 1); + Mockito.when(fsSpy.getFileStatus(filePath)).thenReturn(statusBase); + + FileRuleCacheValue val = new FileRuleCacheValue(fsSpy, filePath, configuredDiff); + val.newRulesetView(0L, null); + + Assert.assertNotNull(val.getRuleRef()); + + // assert no changes after loading + Assert.assertFalse("Expected no changes initial evaluation", val.hasChanges()); + + // reset status to be unchanged (more than baseline) + Mockito.when(fsSpy.getFileStatus(filePath)).thenReturn(statusUnchanged); + + // assert no changes after loading + Assert.assertFalse("Expected no changes at threshold evaluation", val.hasChanges()); + + // reset status to be changed + Mockito.when(fsSpy.getFileStatus(filePath)).thenReturn(statusChanged); + + // assert changes now detected when (modificationTime - baseline) > configuredDiff + Assert.assertTrue("Expected evaluation has changes", val.hasChanges()); + } +} diff --git a/warehouse/core/src/test/java/datawave/ingest/util/cache/watch/FileRuleDataTypeMergeTest.java b/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/FileRuleDataTypeMergeTest.java similarity index 81% rename from warehouse/core/src/test/java/datawave/ingest/util/cache/watch/FileRuleDataTypeMergeTest.java rename to 
warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/FileRuleDataTypeMergeTest.java index 242f2ad35f4..03845eaa3aa 100644 --- a/warehouse/core/src/test/java/datawave/ingest/util/cache/watch/FileRuleDataTypeMergeTest.java +++ b/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/FileRuleDataTypeMergeTest.java @@ -1,8 +1,8 @@ package datawave.ingest.util.cache.watch; import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import java.io.IOException; import java.util.Collection; @@ -19,10 +19,9 @@ * Tests to verify capability of merging configs that use filters that inherit from {@code FieldAgeOffFilter} */ public class FileRuleDataTypeMergeTest { - private static final String ROOT_FILTER_CONFIGURATION_FILE = "/test-root-data-type.xml"; - private static final String CHILD_FILTER_CONFIGURATION_FILE = "/test-customized-data-type.xml"; + private static final String ROOT_FILTER_CONFIGURATION_FILE = "/filter/test-root-data-type.xml"; + private static final String CHILD_FILTER_CONFIGURATION_FILE = "/filter/test-customized-data-type.xml"; - private FileRuleWatcher watcher; private TestDataTypeFilter parentFilter; // this one inherits defaults from parentFilter private TestDataTypeFilter childFilter; @@ -32,9 +31,8 @@ public void before() throws IOException { Path childPath = new Path(this.getClass().getResource(CHILD_FILTER_CONFIGURATION_FILE).toString()); Path rootPath = new Path(this.getClass().getResource(ROOT_FILTER_CONFIGURATION_FILE).toString()); FileSystem fs = childPath.getFileSystem(new Configuration()); - watcher = new FileRuleWatcher(fs, childPath, 1); - parentFilter = (TestDataTypeFilter) loadRulesFromFile(watcher, fs, rootPath); - childFilter = (TestDataTypeFilter) loadRulesFromFile(watcher, fs, childPath); + parentFilter = (TestDataTypeFilter) loadRulesFromFile(fs, rootPath); + childFilter = (TestDataTypeFilter) loadRulesFromFile(fs, childPath); } @Test @@ -66,8 +64,9 @@ public void verifyOverridenValues() throws IOException { assertThat(childFilter.options.getOption("zip.ttl"), is("123")); } - private static FilterRule loadRulesFromFile(FileRuleWatcher watcher, FileSystem fs, Path filePath) throws IOException { - Collection rules = watcher.loadContents(fs.open(filePath)); + private static FilterRule loadRulesFromFile(FileSystem fs, Path filePath) throws IOException { + FileRuleCacheValue ruleValue = new FileRuleCacheValue(fs, filePath, 1); + Collection rules = ruleValue.loadFilterRules(null); // should only have the single rule assertThat(rules.size(), is(1)); for (FilterRule rule : rules) { diff --git a/warehouse/core/src/test/java/datawave/ingest/util/cache/watch/FileRuleFieldMergeTest.java b/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/FileRuleFieldMergeTest.java similarity index 78% rename from warehouse/core/src/test/java/datawave/ingest/util/cache/watch/FileRuleFieldMergeTest.java rename to warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/FileRuleFieldMergeTest.java index 808e9b0778a..41a331f550b 100644 --- a/warehouse/core/src/test/java/datawave/ingest/util/cache/watch/FileRuleFieldMergeTest.java +++ b/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/FileRuleFieldMergeTest.java @@ -1,8 +1,8 @@ package datawave.ingest.util.cache.watch; import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; import static 
org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import java.io.IOException; import java.util.Collection; @@ -21,10 +21,9 @@ * Tests to verify capability of merging configs that use filters that inherit from {@code FieldAgeOffFilter} */ public class FileRuleFieldMergeTest { - private static final String ROOT_FILTER_CONFIGURATION_FILE = "/test-root-field.xml"; - private static final String CHILD_FILTER_CONFIGURATION_FILE = "/test-customized-field.xml"; + static final String ROOT_FILTER_CONFIGURATION_FILE = "/filter/test-root-field.xml"; + private static final String CHILD_FILTER_CONFIGURATION_FILE = "/filter/test-customized-field.xml"; - private FileRuleWatcher watcher; private TestFieldFilter parentFilter; // this one inherits defaults from parentFilter private TestFieldFilter childFilter; @@ -34,9 +33,8 @@ public void before() throws IOException { Path childPath = new Path(this.getClass().getResource(CHILD_FILTER_CONFIGURATION_FILE).toString()); Path rootPath = new Path(this.getClass().getResource(ROOT_FILTER_CONFIGURATION_FILE).toString()); FileSystem fs = childPath.getFileSystem(new Configuration()); - watcher = new FileRuleWatcher(fs, childPath, 1); - parentFilter = (TestFieldFilter) loadRulesFromFile(watcher, fs, rootPath); - childFilter = (TestFieldFilter) loadRulesFromFile(watcher, fs, childPath); + parentFilter = (TestFieldFilter) loadRulesFromFile(fs, rootPath); + childFilter = (TestFieldFilter) loadRulesFromFile(fs, childPath); } @Test @@ -62,8 +60,9 @@ private Boolean isIndexTable(TestFieldFilter filter) { return Boolean.valueOf(filter.options.getOption("isindextable", "false")); } - private static FilterRule loadRulesFromFile(FileRuleWatcher watcher, FileSystem fs, Path filePath) throws IOException { - Collection rules = watcher.loadContents(fs.open(filePath)); + private static FilterRule loadRulesFromFile(FileSystem fs, Path filePath) throws IOException { + FileRuleCacheValue cacheValue = new FileRuleCacheValue(fs, filePath, 1); + Collection rules = cacheValue.loadFilterRules(null); // should only have the single rule assertThat(rules.size(), is(1)); for (FilterRule rule : rules) { diff --git a/warehouse/core/src/test/java/datawave/ingest/util/cache/watch/FileRuleLoadContentsMergeFiltersTest.java b/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/FileRuleLoadContentsMergeFiltersTest.java similarity index 90% rename from warehouse/core/src/test/java/datawave/ingest/util/cache/watch/FileRuleLoadContentsMergeFiltersTest.java rename to warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/FileRuleLoadContentsMergeFiltersTest.java index 6e30690159e..7fd7a325c70 100644 --- a/warehouse/core/src/test/java/datawave/ingest/util/cache/watch/FileRuleLoadContentsMergeFiltersTest.java +++ b/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/FileRuleLoadContentsMergeFiltersTest.java @@ -2,8 +2,8 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import java.io.IOException; @@ -30,12 +30,11 @@ * Tests to verify capability of merging configs that use filters that inherit from {@code TokenizingFilterBase} */ public class FileRuleLoadContentsMergeFiltersTest { - private static final String ROOT_FILTER_CONFIGURATION_FILE = "/test-root-rules.xml"; - private static final String 
CHILD_FILTER_CONFIGURATION_FILE = "/test-customized-rules.xml"; + private static final String ROOT_FILTER_CONFIGURATION_FILE = "/filter/test-root-rules.xml"; + private static final String CHILD_FILTER_CONFIGURATION_FILE = "/filter/test-customized-rules.xml"; private static final long MILLIS_IN_DAY = 24 * 60 * 60 * 1000; private static final int MILLIS_IN_ONE_SEC = 60 * 1000; - private FileRuleWatcher watcher; private TestTrieFilter parentFilter; // this one inherits defaults from parentFilter private TestTrieFilter childFilter; @@ -50,9 +49,8 @@ public void before() throws IOException { Path childPath = new Path(this.getClass().getResource(CHILD_FILTER_CONFIGURATION_FILE).toString()); Path rootPath = new Path(this.getClass().getResource(ROOT_FILTER_CONFIGURATION_FILE).toString()); FileSystem fs = childPath.getFileSystem(new Configuration()); - watcher = new FileRuleWatcher(fs, childPath, 1); - parentFilter = (TestTrieFilter) loadRulesFromFile(watcher, fs, rootPath, 3); - childFilter = (TestTrieFilter) loadRulesFromFile(watcher, fs, childPath, 4); + parentFilter = (TestTrieFilter) loadRulesFromFile(fs, rootPath, 3); + childFilter = (TestTrieFilter) loadRulesFromFile(fs, childPath, 4); anchorTime = System.currentTimeMillis(); // use this to configure the age off evaluation, all units expected to be in days filterOptions = new FilterOptions(); @@ -99,9 +97,11 @@ public void testNewConfigMaintainsOrder() throws Exception { Path rootPath = new Path(this.getClass().getResource(ROOT_FILTER_CONFIGURATION_FILE).toString()); Path childPath = new Path(this.getClass().getResource(CHILD_FILTER_CONFIGURATION_FILE).toString()); FileSystem fs = childPath.getFileSystem(new Configuration()); + FileRuleCacheValue parentValue = new FileRuleCacheValue(fs, rootPath, 1); + FileRuleCacheValue childValue = new FileRuleCacheValue(fs, childPath, 1); - List parentRules = (List) watcher.loadContents(fs.open(rootPath)); - List childRules = (List) watcher.loadContents(fs.open(childPath)); + List parentRules = (List) parentValue.loadFilterRules(null); + List childRules = (List) childValue.loadFilterRules(null); // should have one extra rule in child assertThat(childRules.size(), is(equalTo(parentRules.size() + 1))); @@ -186,8 +186,9 @@ public void testTtl() { assertTrue(filter.isFilterRuleApplied()); } - private static FilterRule loadRulesFromFile(FileRuleWatcher watcher, FileSystem fs, Path filePath, int expectedNumRules) throws IOException { - Collection rules = watcher.loadContents(fs.open(filePath)); + private static FilterRule loadRulesFromFile(FileSystem fs, Path filePath, int expectedNumRules) throws IOException { + FileRuleCacheValue cacheValue = new FileRuleCacheValue(fs, filePath, 1); + Collection rules = cacheValue.loadFilterRules(null); assertThat(rules.size(), is(expectedNumRules)); // only return the TestTrieFilter for this test Optional first = rules.stream().filter(r -> r instanceof TestTrieFilter).findFirst(); diff --git a/warehouse/core/src/test/java/datawave/ingest/util/cache/watch/FileRuleWatcherTest.java b/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/FileRuleWatcherTest.java similarity index 97% rename from warehouse/core/src/test/java/datawave/ingest/util/cache/watch/FileRuleWatcherTest.java rename to warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/FileRuleWatcherTest.java index 07ffe11b8cb..5ef17e897ae 100644 --- a/warehouse/core/src/test/java/datawave/ingest/util/cache/watch/FileRuleWatcherTest.java +++ 
b/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/FileRuleWatcherTest.java @@ -16,8 +16,8 @@ import datawave.iterators.filter.ageoff.FilterRule; public class FileRuleWatcherTest { - private static final String FILTER_CONFIGURATION_FILE = "/test-filter-rules.xml"; - private static final String FILE_WITH_MISSING_FILTER_CLASS = "/missing-filter-class.xml"; + private static final String FILTER_CONFIGURATION_FILE = "/filter/test-filter-rules.xml"; + private static final String FILE_WITH_MISSING_FILTER_CLASS = "/filter/missing-filter-class.xml"; private static final String DEFAULT_UNITS = "d"; private FileRuleWatcher watcher; private FileSystem fs; diff --git a/warehouse/core/src/test/java/datawave/ingest/util/cache/watch/TestDataTypeFilter.java b/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/TestDataTypeFilter.java similarity index 100% rename from warehouse/core/src/test/java/datawave/ingest/util/cache/watch/TestDataTypeFilter.java rename to warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/TestDataTypeFilter.java diff --git a/warehouse/core/src/test/java/datawave/ingest/util/cache/watch/TestFieldFilter.java b/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/TestFieldFilter.java similarity index 100% rename from warehouse/core/src/test/java/datawave/ingest/util/cache/watch/TestFieldFilter.java rename to warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/TestFieldFilter.java diff --git a/warehouse/core/src/test/java/datawave/ingest/util/cache/watch/TestFilter.java b/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/TestFilter.java similarity index 100% rename from warehouse/core/src/test/java/datawave/ingest/util/cache/watch/TestFilter.java rename to warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/TestFilter.java diff --git a/warehouse/core/src/test/java/datawave/ingest/util/cache/watch/TestTrieFilter.java b/warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/TestTrieFilter.java similarity index 100% rename from warehouse/core/src/test/java/datawave/ingest/util/cache/watch/TestTrieFilter.java rename to warehouse/age-off/src/test/java/datawave/ingest/util/cache/watch/TestTrieFilter.java diff --git a/warehouse/core/src/test/java/datawave/iterators/filter/ColumnVisibilityLabeledFilter.java b/warehouse/age-off/src/test/java/datawave/iterators/filter/ColumnVisibilityLabeledFilter.java similarity index 96% rename from warehouse/core/src/test/java/datawave/iterators/filter/ColumnVisibilityLabeledFilter.java rename to warehouse/age-off/src/test/java/datawave/iterators/filter/ColumnVisibilityLabeledFilter.java index c1bbff96cb4..214381672ce 100644 --- a/warehouse/core/src/test/java/datawave/iterators/filter/ColumnVisibilityLabeledFilter.java +++ b/warehouse/age-off/src/test/java/datawave/iterators/filter/ColumnVisibilityLabeledFilter.java @@ -11,6 +11,7 @@ import datawave.iterators.filter.ageoff.AgeOffPeriod; import datawave.iterators.filter.ageoff.AppliedRule; import datawave.iterators.filter.ageoff.FilterOptions; +import datawave.util.CompositeTimestamp; import datawave.util.StringUtils; /** @@ -47,7 +48,7 @@ public void init(FilterOptions options, IteratorEnvironment iterEnv) { if (options.getOption(AgeOffConfigParams.MATCHPATTERN) != null) { String[] lines = StringUtils.split(options.getOption(AgeOffConfigParams.MATCHPATTERN), '\n'); - patternToTtl = new HashMap(lines.length); + patternToTtl = new HashMap<>(lines.length); for (String line : lines) { 
populateMapWithTimeToLiveValue(patternToTtl, line); } @@ -108,7 +109,7 @@ public boolean accept(AgeOffPeriod ageOffPeriod, Key k, Value V) { cutOff -= timeToLive; } this.filterRuleApplied = true; - return k.getTimestamp() > cutOff; + return CompositeTimestamp.getAgeOffDate(k.getTimestamp()) > cutOff; } } return true; diff --git a/warehouse/core/src/test/java/datawave/iterators/filter/ConfigurableAgeOffFilterTest.java b/warehouse/age-off/src/test/java/datawave/iterators/filter/ConfigurableAgeOffFilterTest.java similarity index 84% rename from warehouse/core/src/test/java/datawave/iterators/filter/ConfigurableAgeOffFilterTest.java rename to warehouse/age-off/src/test/java/datawave/iterators/filter/ConfigurableAgeOffFilterTest.java index fad857d5e55..9290f93f0ac 100644 --- a/warehouse/core/src/test/java/datawave/iterators/filter/ConfigurableAgeOffFilterTest.java +++ b/warehouse/age-off/src/test/java/datawave/iterators/filter/ConfigurableAgeOffFilterTest.java @@ -1,10 +1,9 @@ package datawave.iterators.filter; -import static org.easymock.EasyMock.expect; -import static org.easymock.EasyMock.replay; import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertSame; import java.io.IOException; import java.net.URL; @@ -15,7 +14,6 @@ import java.util.List; import java.util.Map; -import org.apache.accumulo.core.client.PluginEnvironment; import org.apache.accumulo.core.conf.AccumuloConfiguration; import org.apache.accumulo.core.conf.DefaultConfiguration; import org.apache.accumulo.core.data.Key; @@ -23,47 +21,36 @@ import org.apache.accumulo.core.iterators.IteratorEnvironment; import org.apache.accumulo.core.iterators.IteratorUtil; import org.apache.accumulo.core.iterators.SortedKeyValueIterator; -import org.apache.accumulo.core.util.ConfigurationImpl; -import org.easymock.EasyMockRunner; -import org.easymock.EasyMockSupport; -import org.easymock.Mock; -import org.junit.Before; import org.junit.Test; -import org.junit.runner.RunWith; +import datawave.ingest.util.cache.watch.FileRuleCacheValue; import datawave.iterators.filter.ageoff.AppliedRule; +import datawave.iterators.filter.ageoff.ConfigurableIteratorEnvironment; import datawave.iterators.filter.ageoff.FilterOptions; +import datawave.query.iterator.SortedListKeyValueIterator; +import datawave.util.CompositeTimestamp; -@RunWith(EasyMockRunner.class) -public class ConfigurableAgeOffFilterTest extends EasyMockSupport { +public class ConfigurableAgeOffFilterTest { private static long MILLIS_IN_DAY = 1000L * 60 * 60 * 24L; // reused in tests but contents never accessed private static Value VALUE = new Value(); - @Mock - private IteratorEnvironment env; - @Mock - private PluginEnvironment pluginEnv; - @Mock - private SortedKeyValueIterator source; - private AccumuloConfiguration conf = DefaultConfiguration.getInstance(); - @Before - public void setUp() throws Exception { - expect(pluginEnv.getConfiguration()).andReturn(new ConfigurationImpl(conf)).anyTimes(); - - expect(env.getConfig()).andReturn(conf).anyTimes(); - expect(env.getPluginEnv()).andReturn(pluginEnv).anyTimes(); + private SortedKeyValueIterator source = new SortedListKeyValueIterator(Map. 
of().entrySet().iterator()); - // These two are only for the disabled test - expect(env.getIteratorScope()).andReturn(IteratorUtil.IteratorScope.majc).anyTimes(); - expect(env.isFullMajorCompaction()).andReturn(false).anyTimes(); - expect(env.isUserCompaction()).andReturn(false).anyTimes(); + private ConfigurableIteratorEnvironment env = new ConfigurableIteratorEnvironment(conf, IteratorUtil.IteratorScope.majc) { + @Override + public boolean isFullMajorCompaction() { + return false; + } - replay(env, pluginEnv); - } + @Override + public boolean isUserCompaction() { + return false; + } + }; @Test public void testAcceptKeyValue_OnlyUserMajc() throws Exception { @@ -108,7 +95,7 @@ public void testAcceptKeyValue_OnlyTtlNoInnerFilters() throws Exception { public void testAcceptKeyValue_WithFile() throws Exception { ConfigurableAgeOffFilter filter = new ConfigurableAgeOffFilter(); Map options = getOptionsMap(30, AgeOffTtlUnits.DAYS); - options.put(AgeOffConfigParams.FILTER_CONFIG, pathFromClassloader("/test-root-rules.xml")); + options.put(AgeOffConfigParams.FILTER_CONFIG, pathFromClassloader("/filter/test-root-rules.xml")); filter.init(source, options, env); // the file uses TestFilter which always returns false for accept and filter applied @@ -117,6 +104,27 @@ public void testAcceptKeyValue_WithFile() throws Exception { assertThat(filter.accept(getKey(daysAgo(123)), VALUE), is(false)); } + @Test + public void testInit_WillCachePreviousValue() throws Exception { + Map options = getOptionsMap(30, AgeOffTtlUnits.DAYS); + ConfigurableAgeOffFilter filter1 = new ConfigurableAgeOffFilter(); + options.put(AgeOffConfigParams.FILTER_CONFIG, pathFromClassloader("/filter/test-root-rules.xml")); + filter1.init(source, options, env); + + ConfigurableAgeOffFilter filter2 = new ConfigurableAgeOffFilter(); + filter2.init(source, options, env); + + FileRuleCacheValue cacheValue1 = filter1.getFileRuleCacheValue(); + FileRuleCacheValue cacheValue2 = filter2.getFileRuleCacheValue(); + + assertNotNull(cacheValue1); + assertNotNull(cacheValue2); + + // tests that both cache values are identical showing that the cache retrieval + // used by the init sees the same value + assertSame(cacheValue1, cacheValue2); + } + @Test public void testAcceptKeyValue_TtlSet() throws Exception { ConfigurableAgeOffFilter filter = new ConfigurableAgeOffFilter(); @@ -131,6 +139,9 @@ public void testAcceptKeyValue_TtlSet() throws Exception { // copy cofigs to actual filter we are testing filter.initialize(wrapper); + long tomorrow = System.currentTimeMillis() + CompositeTimestamp.MILLIS_PER_DAY; + long compositeTS = CompositeTimestamp.getCompositeTimeStamp(daysAgo(365), tomorrow); + // brand new key should be good assertThat(filter.accept(new Key(), VALUE), is(true)); // first five will hit the ttl short circuit @@ -148,6 +159,8 @@ public void testAcceptKeyValue_TtlSet() throws Exception { assertThat(filter.accept(getKey("foo", daysAgo(8)), VALUE), is(true)); // this is really old and matches so should not be accepted assertThat(filter.accept(getKey("foo", daysAgo(365)), VALUE), is(false)); + // this is really old and matches, but has a future age off date, so should be accepted + assertThat(filter.accept(getKey("foo", compositeTS), VALUE), is(true)); } @@ -160,7 +173,7 @@ public void testAcceptKeyValue_MultipleFilters() throws Exception { rules.addAll(singleColumnFamilyMatcher("bar", options)); // for holding the filters FilterWrapper wrapper = getWrappedFilterWithRules(rules, source, options, env); - // copy cofigs to actual filter we 
are testing + // copy configs to actual filter we are testing filter.initialize(wrapper); // created two rules diff --git a/warehouse/core/src/test/java/datawave/iterators/filter/TokenTtlTrieTest.java b/warehouse/age-off/src/test/java/datawave/iterators/filter/TokenTtlTrieTest.java similarity index 95% rename from warehouse/core/src/test/java/datawave/iterators/filter/TokenTtlTrieTest.java rename to warehouse/age-off/src/test/java/datawave/iterators/filter/TokenTtlTrieTest.java index 1f3febfdcf0..08fa6dfde33 100644 --- a/warehouse/core/src/test/java/datawave/iterators/filter/TokenTtlTrieTest.java +++ b/warehouse/age-off/src/test/java/datawave/iterators/filter/TokenTtlTrieTest.java @@ -107,7 +107,7 @@ public void fuzzTtlTrie() { log.info(String.format("Built trie in %d ns/entry", duration / BENCHMARK_SIZE)); for (long entry : trieEntries) { - Assert.assertEquals((Long) entry, trie.scan(String.format("%06x00", entry).getBytes())); + assertEquals((Long) entry, trie.scan(String.format("%06x00", entry).getBytes())); Assert.assertNull(trie.scan(String.format("%06x00", entry - 1).getBytes())); } } @@ -118,10 +118,10 @@ public void testParser() { .parse("" + "\"foo\":2ms\n" + "\"b\\u0061r\":3d,\n" + "\"b\\x61z\":4m,\n\n").build(); Assert.assertNull(trie.scan("foobar,barbaz;bazfoo".getBytes())); - Assert.assertEquals((Long) 2L, trie.scan("foobar,foo;barfoo".getBytes())); - Assert.assertEquals((Long) 2L, trie.scan("bar;foo".getBytes())); - Assert.assertEquals((long) AgeOffPeriod.getTtlUnitsFactor("d") * 3L, (long) trie.scan("bar,baz,foobar".getBytes())); - Assert.assertEquals((Long) (AgeOffPeriod.getTtlUnitsFactor("m") * 4L), trie.scan("buffer,baz".getBytes())); + assertEquals((Long) 2L, trie.scan("foobar,foo;barfoo".getBytes())); + assertEquals((Long) 2L, trie.scan("bar;foo".getBytes())); + Assert.assertEquals(AgeOffPeriod.getTtlUnitsFactor("d") * 3L, (long) trie.scan("bar,baz,foobar".getBytes())); + assertEquals((Long) (AgeOffPeriod.getTtlUnitsFactor("m") * 4L), trie.scan("buffer,baz".getBytes())); Assert.assertNull(trie.scan("b;ba,banana,bread,apple,pie".getBytes())); } diff --git a/warehouse/age-off/src/test/java/datawave/iterators/filter/ageoff/ConfigurableIteratorEnvironment.java b/warehouse/age-off/src/test/java/datawave/iterators/filter/ageoff/ConfigurableIteratorEnvironment.java new file mode 100644 index 00000000000..ac1841d9ea7 --- /dev/null +++ b/warehouse/age-off/src/test/java/datawave/iterators/filter/ageoff/ConfigurableIteratorEnvironment.java @@ -0,0 +1,123 @@ +package datawave.iterators.filter.ageoff; + +import java.io.IOException; + +import org.apache.accumulo.core.client.PluginEnvironment; +import org.apache.accumulo.core.client.SampleNotPresentException; +import org.apache.accumulo.core.client.TableNotFoundException; +import org.apache.accumulo.core.client.sample.SamplerConfiguration; +import org.apache.accumulo.core.conf.AccumuloConfiguration; +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.TableId; +import org.apache.accumulo.core.data.Value; +import org.apache.accumulo.core.iterators.IteratorEnvironment; +import org.apache.accumulo.core.iterators.IteratorUtil; +import org.apache.accumulo.core.iterators.SortedKeyValueIterator; +import org.apache.accumulo.core.security.Authorizations; +import org.apache.accumulo.core.util.ConfigurationImpl; + +public class ConfigurableIteratorEnvironment implements IteratorEnvironment { + + private IteratorUtil.IteratorScope scope; + private AccumuloConfiguration conf; + + public ConfigurableIteratorEnvironment() 
{ + scope = null; + conf = null; + } + + public ConfigurableIteratorEnvironment(AccumuloConfiguration conf, IteratorUtil.IteratorScope scope) { + this.conf = conf; + this.scope = scope; + + } + + public void setConf(AccumuloConfiguration conf) { + this.conf = conf; + } + + public void setScope(IteratorUtil.IteratorScope scope) { + this.scope = scope; + } + + @Override + public SortedKeyValueIterator reserveMapFileReader(String s) throws IOException { + return null; + } + + @Override + public AccumuloConfiguration getConfig() { + return conf; + } + + @Override + public IteratorUtil.IteratorScope getIteratorScope() { + return scope; + } + + @Override + public boolean isFullMajorCompaction() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isUserCompaction() { + throw new UnsupportedOperationException(); + } + + @Override + public void registerSideChannel(SortedKeyValueIterator sortedKeyValueIterator) { + throw new UnsupportedOperationException(); + } + + @Override + public Authorizations getAuthorizations() { + throw new UnsupportedOperationException(); + } + + @Override + public IteratorEnvironment cloneWithSamplingEnabled() { + throw new SampleNotPresentException(); + } + + @Override + public boolean isSamplingEnabled() { + return false; + } + + @Override + public SamplerConfiguration getSamplerConfiguration() { + return null; + } + + @Override + public PluginEnvironment getPluginEnv() { + return new PluginEnvironment() { + + @Override + public Configuration getConfiguration() { + return null; + } + + @Override + public Configuration getConfiguration(TableId tableId) { + return new ConfigurationImpl(conf); + } + + @Override + public String getTableName(TableId tableId) throws TableNotFoundException { + return null; + } + + @Override + public T instantiate(String s, Class aClass) throws Exception { + return null; + } + + @Override + public T instantiate(TableId tableId, String s, Class aClass) throws Exception { + return null; + } + }; + } +} diff --git a/warehouse/core/src/test/java/datawave/iterators/filter/ageoff/FieldAgeOffFilterTest.java b/warehouse/age-off/src/test/java/datawave/iterators/filter/ageoff/FieldAgeOffFilterTest.java similarity index 92% rename from warehouse/core/src/test/java/datawave/iterators/filter/ageoff/FieldAgeOffFilterTest.java rename to warehouse/age-off/src/test/java/datawave/iterators/filter/ageoff/FieldAgeOffFilterTest.java index 0e4f361952c..36571bdaf04 100644 --- a/warehouse/core/src/test/java/datawave/iterators/filter/ageoff/FieldAgeOffFilterTest.java +++ b/warehouse/age-off/src/test/java/datawave/iterators/filter/ageoff/FieldAgeOffFilterTest.java @@ -1,28 +1,20 @@ package datawave.iterators.filter.ageoff; -import java.io.IOException; import java.util.HashMap; import java.util.Iterator; import java.util.Map; -import org.apache.accumulo.core.client.SampleNotPresentException; -import org.apache.accumulo.core.client.sample.SamplerConfiguration; import org.apache.accumulo.core.conf.AccumuloConfiguration; import org.apache.accumulo.core.conf.DefaultConfiguration; import org.apache.accumulo.core.conf.Property; import org.apache.accumulo.core.data.Key; import org.apache.accumulo.core.data.Value; -import org.apache.accumulo.core.iterators.IteratorEnvironment; -import org.apache.accumulo.core.iterators.IteratorUtil; -import org.apache.accumulo.core.iterators.SortedKeyValueIterator; -import org.apache.accumulo.core.security.Authorizations; import org.junit.Assert; import org.junit.Test; -import com.google.common.base.Predicate; - 
import datawave.iterators.filter.AgeOffConfigParams; import datawave.iterators.filter.AgeOffTtlUnits; +import datawave.util.CompositeTimestamp; public class FieldAgeOffFilterTest { private static final String VISIBILITY_PATTERN = "MY_VIS"; @@ -31,67 +23,6 @@ public class FieldAgeOffFilterTest { private ConfigurableIteratorEnvironment iterEnv = new ConfigurableIteratorEnvironment(); - private class ConfigurableIteratorEnvironment implements IteratorEnvironment { - - private IteratorUtil.IteratorScope scope; - private AccumuloConfiguration conf; - - public ConfigurableIteratorEnvironment() { - scope = null; - conf = null; - } - - public void setConf(AccumuloConfiguration conf) { - this.conf = conf; - } - - @Override - public SortedKeyValueIterator reserveMapFileReader(String s) throws IOException { - return null; - } - - @Override - public AccumuloConfiguration getConfig() { - return conf; - } - - @Override - public IteratorUtil.IteratorScope getIteratorScope() { - return scope; - } - - @Override - public boolean isFullMajorCompaction() { - throw new UnsupportedOperationException(); - } - - @Override - public void registerSideChannel(SortedKeyValueIterator sortedKeyValueIterator) { - throw new UnsupportedOperationException(); - } - - @Override - public Authorizations getAuthorizations() { - throw new UnsupportedOperationException(); - } - - @Override - public IteratorEnvironment cloneWithSamplingEnabled() { - throw new SampleNotPresentException(); - } - - @Override - public boolean isSamplingEnabled() { - return false; - } - - @Override - public SamplerConfiguration getSamplerConfiguration() { - return null; - } - - } - public class EditableAccumuloConfiguration extends AccumuloConfiguration { private Map map = new HashMap<>(); @@ -154,6 +85,7 @@ public void testIndexTrueUsesDefaultWhenFieldLacksTtl() { Key key = new Key("1234", "field_z\\x00my-uuid", "field_z\u0000value", VISIBILITY_PATTERN, tenSecondsAgo); Assert.assertFalse(ageOffFilter.accept(filterOptions.getAgeOffPeriod(System.currentTimeMillis()), key, new Value())); Assert.assertTrue(ageOffFilter.isFilterRuleApplied()); + key = new Key("1234", "field_y", "field_y\u0000value", VISIBILITY_PATTERN, tenSecondsAgo); Assert.assertFalse(ageOffFilter.accept(filterOptions.getAgeOffPeriod(System.currentTimeMillis()), key, new Value())); Assert.assertTrue(ageOffFilter.isFilterRuleApplied()); @@ -306,6 +238,39 @@ public void testIgnoresDocument() { Assert.assertFalse(ageOffFilter.isFilterRuleApplied()); } + @Test + public void testCompositeTimestamp() { + EditableAccumuloConfiguration conf = new EditableAccumuloConfiguration(DefaultConfiguration.getInstance()); + conf.put("table.custom.isindextable", "true"); + iterEnv.setConf(conf); + + long tenSecondsAgo = System.currentTimeMillis() - (10L * ONE_SEC); + long tomorrow = System.currentTimeMillis() + CompositeTimestamp.MILLIS_PER_DAY; + + long compositeTS = CompositeTimestamp.getCompositeTimeStamp(tenSecondsAgo, tomorrow); + + FieldAgeOffFilter ageOffFilter = new FieldAgeOffFilter(); + FilterOptions filterOptions = createFilterOptionsWithPattern(); + // set the default to 5 seconds + filterOptions.setTTL(5L); + filterOptions.setTTLUnits(AgeOffTtlUnits.SECONDS); + // set up ttls for field_y and field_z only, deliberately exclude the ttl for field_y + filterOptions.setOption("fields", "field_y"); + filterOptions.setOption("field_y.ttl", "2"); // 2 seconds + ageOffFilter.init(filterOptions, iterEnv); + + // age off date allows this to accept + Key key = new Key("1234", "field_y", 
"field_y\u0000value", VISIBILITY_PATTERN, compositeTS); + Assert.assertTrue(ageOffFilter.accept(filterOptions.getAgeOffPeriod(System.currentTimeMillis()), key, new Value())); + Assert.assertTrue(ageOffFilter.isFilterRuleApplied()); + + // vanilla date does not + key = new Key("1234", "field_y", "field_y\u0000value", VISIBILITY_PATTERN, tenSecondsAgo); + Assert.assertFalse(ageOffFilter.accept(filterOptions.getAgeOffPeriod(System.currentTimeMillis()), key, new Value())); + Assert.assertTrue(ageOffFilter.isFilterRuleApplied()); + + } + @Test public void testKeepsMatchBeforeTtl() { long oneSecondAgo = System.currentTimeMillis() - (1 * ONE_SEC); diff --git a/warehouse/core/src/test/resources/alternate-child.xml b/warehouse/age-off/src/test/resources/filter/alternate-child.xml similarity index 100% rename from warehouse/core/src/test/resources/alternate-child.xml rename to warehouse/age-off/src/test/resources/filter/alternate-child.xml diff --git a/warehouse/core/src/test/resources/alternate-root.xml b/warehouse/age-off/src/test/resources/filter/alternate-root.xml similarity index 100% rename from warehouse/core/src/test/resources/alternate-root.xml rename to warehouse/age-off/src/test/resources/filter/alternate-root.xml diff --git a/warehouse/core/src/test/resources/missing-filter-class.xml b/warehouse/age-off/src/test/resources/filter/missing-filter-class.xml similarity index 100% rename from warehouse/core/src/test/resources/missing-filter-class.xml rename to warehouse/age-off/src/test/resources/filter/missing-filter-class.xml diff --git a/warehouse/core/src/test/resources/test-customized-data-type.xml b/warehouse/age-off/src/test/resources/filter/test-customized-data-type.xml similarity index 100% rename from warehouse/core/src/test/resources/test-customized-data-type.xml rename to warehouse/age-off/src/test/resources/filter/test-customized-data-type.xml diff --git a/warehouse/core/src/test/resources/test-customized-field.xml b/warehouse/age-off/src/test/resources/filter/test-customized-field.xml similarity index 100% rename from warehouse/core/src/test/resources/test-customized-field.xml rename to warehouse/age-off/src/test/resources/filter/test-customized-field.xml diff --git a/warehouse/core/src/test/resources/test-customized-rules.xml b/warehouse/age-off/src/test/resources/filter/test-customized-rules.xml similarity index 100% rename from warehouse/core/src/test/resources/test-customized-rules.xml rename to warehouse/age-off/src/test/resources/filter/test-customized-rules.xml diff --git a/warehouse/core/src/test/resources/test-filter-rules.xml b/warehouse/age-off/src/test/resources/filter/test-filter-rules.xml similarity index 100% rename from warehouse/core/src/test/resources/test-filter-rules.xml rename to warehouse/age-off/src/test/resources/filter/test-filter-rules.xml diff --git a/warehouse/core/src/test/resources/test-root-data-type.xml b/warehouse/age-off/src/test/resources/filter/test-root-data-type.xml similarity index 100% rename from warehouse/core/src/test/resources/test-root-data-type.xml rename to warehouse/age-off/src/test/resources/filter/test-root-data-type.xml diff --git a/warehouse/core/src/test/resources/test-root-field.xml b/warehouse/age-off/src/test/resources/filter/test-root-field.xml similarity index 100% rename from warehouse/core/src/test/resources/test-root-field.xml rename to warehouse/age-off/src/test/resources/filter/test-root-field.xml diff --git a/warehouse/core/src/test/resources/test-root-rules.xml 
b/warehouse/age-off/src/test/resources/filter/test-root-rules.xml similarity index 100% rename from warehouse/core/src/test/resources/test-root-rules.xml rename to warehouse/age-off/src/test/resources/filter/test-root-rules.xml diff --git a/warehouse/assemble/datawave/pom.xml b/warehouse/assemble/datawave/pom.xml index ff4993f6295..960c15ee07e 100644 --- a/warehouse/assemble/datawave/pom.xml +++ b/warehouse/assemble/datawave/pom.xml @@ -4,10 +4,10 @@ gov.nsa.datawave assemble-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT assemble-datawave - pom + jar ${project.artifactId} @@ -82,6 +82,11 @@ gov.nsa.datawave datawave-ingest-scripts + + gov.nsa.datawave + datawave-ingest-ssdeep + ${project.version} + gov.nsa.datawave datawave-ingest-wikipedia @@ -267,6 +272,19 @@ + + maven-clean-plugin + 2.4.1 + + + auto-clean + + clean + + initialize + + + maven-dependency-plugin @@ -283,7 +301,7 @@ datawave-ingest-scripts - META-INF/**,bin/ingest/ingest-versions.sh,bin/ingest/ingest-env.sh,bin/system/install.sh + META-INF/**,bin/ingest/ingest-versions.sh,bin/ingest/ingest-env.sh,bin/system/install.sh,bin/util/file_locker.sh ${project.build.directory}/archive @@ -317,7 +335,7 @@ datawave-ingest-scripts - bin/ingest/ingest-versions.sh,bin/ingest/ingest-env.sh,bin/ingest/tables-env.sh,bin/system/install.sh + bin/ingest/ingest-versions.sh,bin/ingest/ingest-env.sh,bin/ingest/tables-env.sh,bin/system/install.sh,bin/util/file_locker.sh ${project.build.directory}/archive-tmp @@ -403,6 +421,38 @@ + + copy-rpm + + copy-resources + + verify + + ${project.build.directory}/docker + + + ${project.build.directory}/rpm/datawave-dw-${build.env}/RPMS/noarch/ + false + + + + + + copy-dockerfile + + copy-resources + + verify + + ${project.build.directory}/docker + + + ${project.basedir}/src/main/docker + true + + + + copy-additional-env @@ -423,6 +473,48 @@ + + docker + + + docker-release + + + + + + com.spotify + docker-maven-plugin + 1.1.1 + + ${docker.image.prefix}datawave/ingest-${build.env} + true + + latest + ${project.version} + + ${project.build.directory}/docker + + + / + ${project.build.directory} + rpm/RPMS/noarch/${project.build.finalName}-${build.env}.rpm + + + + + + tag-image + + build + + install + + + + + + - - - - + + myjson.data.category.date + PREMIERED + Known date field to be used, if found, for the shard row id. 
Otherwise, current date will be used + myjson.data.category.date.formats diff --git a/warehouse/ingest-configuration/src/main/resources/config/shard-ingest-config.xml b/warehouse/ingest-configuration/src/main/resources/config/shard-ingest-config.xml index 05001c62094..86150eb31d4 100644 --- a/warehouse/ingest-configuration/src/main/resources/config/shard-ingest-config.xml +++ b/warehouse/ingest-configuration/src/main/resources/config/shard-ingest-config.xml @@ -70,6 +70,15 @@ datawave.ingest.table.config.ShardTableConfigHelper + + ${table.name.shardIndex}.disable.versioning.iterator + true + + + ${table.name.shardReverseIndex}.disable.versioning.iterator + true + + markings.setup.iterator.enabled false diff --git a/warehouse/ingest-core/pom.xml b/warehouse/ingest-core/pom.xml index b04b804446a..90fb606c9be 100644 --- a/warehouse/ingest-core/pom.xml +++ b/warehouse/ingest-core/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave datawave-warehouse-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ingest-core jar @@ -17,7 +17,7 @@ com.sun.xml.bind jaxb-impl - 2.3.3 + ${version.jaxb-impl} com.timgroup @@ -38,14 +38,14 @@ ${project.version} - gov.nsa.datawave.webservices - datawave-ws-common-util + gov.nsa.datawave.core + datawave-core-common-util ${project.version} jakarta.xml.bind jakarta.xml.bind-api - 2.3.3 + ${version.jakarta} jline @@ -62,7 +62,7 @@ org.apache.commons - commons-jexl + commons-jexl3 org.apache.hadoop @@ -75,6 +75,16 @@ org.apache.hadoop hadoop-mapreduce-client-common + + + org.eclipse.jetty + * + + + org.eclipse.jetty.websocket + * + + org.apache.hadoop diff --git a/warehouse/ingest-core/src/main/java/datawave/IdentityDataType.java b/warehouse/ingest-core/src/main/java/datawave/IdentityDataType.java index 757d6bedef1..aa51f4acd2d 100644 --- a/warehouse/ingest-core/src/main/java/datawave/IdentityDataType.java +++ b/warehouse/ingest-core/src/main/java/datawave/IdentityDataType.java @@ -25,6 +25,11 @@ public String normalizeRegex(String in) { throw new UnsupportedOperationException(); } + // @Override + public boolean normalizedRegexIsLossy(String in) { + throw new UnsupportedOperationException(); + } + @Override public Collection expand(String in) { throw new UnsupportedOperationException(); diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/TableCreator.java b/warehouse/ingest-core/src/main/java/datawave/ingest/TableCreator.java index eaf6e41bc2c..131b994feac 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/TableCreator.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/TableCreator.java @@ -7,15 +7,15 @@ public class TableCreator { - private static Configuration config = new Configuration(); + private static final Configuration config = new Configuration(); - private static Logger log = Logger.getLogger(TableCreator.class); + private static final Logger log = Logger.getLogger(TableCreator.class); public static void main(String[] args) { Configuration conf = OptionsParser.parseArguments(args, config); try { TableConfigurationUtil tableConfigUtil = new TableConfigurationUtil(conf); - tableConfigUtil.registerTableNamesFromConfigFiles(conf); + TableConfigurationUtil.registerTableNamesFromConfigFiles(conf); tableConfigUtil.configureTables(conf); } catch (Exception e) { log.error("Unable to create tables", e); diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/config/BaseHdfsFileCacheUtil.java b/warehouse/ingest-core/src/main/java/datawave/ingest/config/BaseHdfsFileCacheUtil.java index 781542ad057..57f7d5f8991 100644 --- 
a/warehouse/ingest-core/src/main/java/datawave/ingest/config/BaseHdfsFileCacheUtil.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/config/BaseHdfsFileCacheUtil.java @@ -18,7 +18,9 @@ public abstract class BaseHdfsFileCacheUtil { protected Path cacheFilePath; protected final Configuration conf; protected AccumuloHelper accumuloHelper; + protected String delimiter = "\t"; + private static final int MAX_RETRIES = 3; protected short cacheReplicas = 3; private static final Logger log = Logger.getLogger(BaseHdfsFileCacheUtil.class); @@ -40,14 +42,23 @@ public void setDelimiter(String delimiter) { } public void read() throws IOException { - log.info("Reading cache at " + this.cacheFilePath); - try (BufferedReader in = new BufferedReader(new InputStreamReader(FileSystem.get(this.cacheFilePath.toUri(), conf).open(this.cacheFilePath)))) { - readCache(in); - } catch (IOException ex) { - if (shouldRefreshCache(this.conf)) { - update(); - } else { - throw new IOException("Unable to read cache file at " + this.cacheFilePath, ex); + + int attempts = 0; + boolean retry = true; + while (retry && attempts <= MAX_RETRIES) { + attempts++; + + log.info("Reading cache at " + this.cacheFilePath); + try (BufferedReader in = new BufferedReader(new InputStreamReader(FileSystem.get(this.cacheFilePath.toUri(), conf).open(this.cacheFilePath)))) { + readCache(in); + retry = false; + } catch (IOException ex) { + if (shouldRefreshCache(this.conf)) { + update(); + } else if (attempts == MAX_RETRIES) { + throw new IOException("Unable to read cache file at " + this.cacheFilePath, ex); + } + } } diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/config/RawRecordContainerImpl.java b/warehouse/ingest-core/src/main/java/datawave/ingest/config/RawRecordContainerImpl.java index 8e06d2c614e..96dea3dc999 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/config/RawRecordContainerImpl.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/config/RawRecordContainerImpl.java @@ -37,6 +37,7 @@ import datawave.ingest.data.config.ingest.IgnorableErrorHelperInterface; import datawave.ingest.protobuf.RawRecordContainer.Data; import datawave.marking.MarkingFunctions; +import datawave.util.CompositeTimestamp; public class RawRecordContainerImpl implements Writable, Configurable, RawRecordContainer { @@ -61,7 +62,10 @@ public class RawRecordContainerImpl implements Writable, Configurable, RawRecord private Multimap fatalErrors = HashMultimap.create(); private Multimap ignorableErrorHelpers = HashMultimap.create(); - private long eventDate = Long.MIN_VALUE; + /** + * This is the composite date for this event + */ + private long timestamp = CompositeTimestamp.INVALID_TIMESTAMP; private Type dataType = null; private UID uid = null; private UIDBuilder uidBuilder; @@ -179,13 +183,13 @@ public void setDataType(Type dataType) { } @Override - public long getDate() { - return this.eventDate; + public long getTimestamp() { + return this.timestamp; } @Override - public void setDate(long date) { - this.eventDate = date; + public void setTimestamp(long date) { + this.timestamp = date; } @Override @@ -421,7 +425,7 @@ public boolean equals(Object other) { } RawRecordContainerImpl e = (RawRecordContainerImpl) other; EqualsBuilder equals = new EqualsBuilder(); - equals.append(this.eventDate, e.eventDate); + equals.append(this.timestamp, e.timestamp); equals.append(this.dataType, e.dataType); equals.append(this.uid, e.uid); equals.append(this.errors, e.errors); @@ -439,7 +443,7 @@ public boolean equals(Object 
other) { @Override public int hashCode() { - int result = (int) (eventDate ^ (eventDate >>> 32)); + int result = (int) (timestamp ^ (timestamp >>> 32)); result = 31 * result + (dataType != null ? dataType.hashCode() : 0); result = 31 * result + (uid != null ? uid.hashCode() : 0); result = 31 * result + (errors != null ? errors.hashCode() : 0); @@ -465,7 +469,7 @@ public int hashCode() { protected RawRecordContainerImpl copyInto(RawRecordContainerImpl rrci) { copyConfiguration(rrci); - rrci.eventDate = this.eventDate; + rrci.timestamp = this.timestamp; rrci.dataType = this.dataType; rrci.uid = this.uid; rrci.errors = new ConcurrentSkipListSet<>(this.errors); @@ -645,7 +649,7 @@ public void reloadConfiguration() { @Override public void write(DataOutput out) throws IOException { Data.Builder builder = Data.newBuilder(); - builder.setDate(this.eventDate); + builder.setDate(this.timestamp); if (null != this.dataType) builder.setDataType(this.dataType.typeName()); if (null != this.uid) @@ -679,7 +683,7 @@ public void readFields(DataInput in) throws IOException { in.readFully(buf); Data data = Data.parseFrom(buf); - this.eventDate = data.getDate(); + this.timestamp = data.getDate(); if (data.hasDataType()) try { this.dataType = TypeRegistry.getType(data.getDataType()); @@ -715,7 +719,7 @@ public void readFields(DataInput in) throws IOException { * Resets state for re-use. */ public void clear() { - eventDate = Long.MIN_VALUE; + timestamp = CompositeTimestamp.INVALID_TIMESTAMP; dataType = null; uid = null; errors.clear(); @@ -740,7 +744,7 @@ public void clear() { @Override public String toString() { ToStringBuilder buf = new ToStringBuilder(this); - buf.append("eventDate", this.eventDate); + buf.append("timestamp", this.timestamp); buf.append("dataType", dataType.typeName()); buf.append("uid", String.valueOf(this.uid)); buf.append("errors", errors); diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/data/RawRecordContainer.java b/warehouse/ingest-core/src/main/java/datawave/ingest/data/RawRecordContainer.java index dde4200bc6f..189820a4853 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/data/RawRecordContainer.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/data/RawRecordContainer.java @@ -9,6 +9,7 @@ import org.apache.accumulo.core.security.ColumnVisibility; import datawave.data.hash.UID; +import datawave.util.CompositeTimestamp; /** * Generic container used to hold raw source data. It is used in various parts of the ingest framework and is typically persisted as an "event" within DW's @@ -45,7 +46,43 @@ public interface RawRecordContainer { * * @return the date for this raw record */ - long getDate(); + default long getDate() { + return CompositeTimestamp.getEventDate(getTimestamp()); + } + + /** + * Gets the ageoff date associated with the record + * + * @return the ageoff date for this raw record + */ + default long getAgeOffDate() { + return CompositeTimestamp.getAgeOffDate(getTimestamp()); + } + + /** + * Get the composite timestamp associated with this record (@see CompositeTimestamp) + * + * @return the composite timestamp + */ + long getTimestamp(); + + /** + * Determine if the timestamp has been set. This avoids having to compare the timestamp with an arbitrary value + */ + default boolean isTimestampSet() { + return getTimestamp() != CompositeTimestamp.INVALID_TIMESTAMP; + } + + /** + * This is synonymous with setTimestamp(date, date) + * + * @param timestamp + * primary date to be associated with the record, a.k.a.
the "event date" + */ + @Deprecated + default void setDate(long timestamp) { + setTimestamp(timestamp); + } /** * In the DW data model, this date is often referred to as "event date" and represents the primary date value for the record. At ingest time, it is @@ -56,11 +93,29 @@ public interface RawRecordContainer { * Thus, this date is typically leveraged by DW's query api as the basis for 'begin' / 'end' date ranges for user queries. However, DW also has the ability * to leverage other dates within your records at query time, if needed. So, for date filtering concerns, you're not necessarily stuck with your choice of * 'event' date in this regard + * <p>
    * - * @param date + * <p>
    + * This date is treated as a composite timestamp which includes the age off date as well. If the ageoff date is identical to the event date (which is + * usually the case), then the event date and the timestamp will be exactly the same value. See CompositeTimestamp for more information. The getDate() + * method will only return the event date portion of this date and the getAgeOffDate() will return the ageoff portion. This method should eventually be + * deprecated and setTimestamp should be used instead. + * <p>
    + * + * @param timestamp * primary date to be associated with the record, a.k.a. the "event date" */ - void setDate(long date); + void setTimestamp(long timestamp); + + /** + * Equivalent to setTimestamp(CompositeTimestamp.getCompositeTimestamp(eventDate, ageOffDate)); + * + * @param eventDate + * @param ageOffDate + */ + default void setTimestamp(long eventDate, long ageOffDate) { + setTimestamp(CompositeTimestamp.getCompositeTimeStamp(eventDate, ageOffDate)); + } Collection getErrors(); diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/data/TypeRegistry.java b/warehouse/ingest-core/src/main/java/datawave/ingest/data/TypeRegistry.java index 6cb450fe66a..98537561123 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/data/TypeRegistry.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/data/TypeRegistry.java @@ -17,6 +17,7 @@ import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; +import datawave.core.common.logging.ThreadConfigurableLogger; import datawave.ingest.data.config.ConfigurationHelper; import datawave.ingest.data.config.DataTypeOverrideHelper; import datawave.ingest.data.config.filter.KeyValueFilter; @@ -24,7 +25,6 @@ import datawave.ingest.mapreduce.handler.DataTypeHandler; import datawave.marking.MarkingFunctions; import datawave.util.StringUtils; -import datawave.webservice.common.logging.ThreadConfigurableLogger; public class TypeRegistry extends HashMap { diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/XMLFieldConfigHelper.java b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/XMLFieldConfigHelper.java index 1c5b53299dc..6d09758cc4e 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/XMLFieldConfigHelper.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/XMLFieldConfigHelper.java @@ -3,9 +3,11 @@ import java.io.IOException; import java.io.InputStream; import java.net.URI; -import java.util.Collection; +import java.util.HashMap; import java.util.HashSet; +import java.util.Map; import java.util.Set; +import java.util.TreeMap; import java.util.regex.Matcher; import javax.xml.parsers.ParserConfigurationException; @@ -38,19 +40,16 @@ public final class XMLFieldConfigHelper implements FieldConfigHelper { private boolean noMatchReverseTokenized = false; private String noMatchFieldType = null; - private final Set knownFields = new HashSet<>(); + private final Map knownFields = new HashMap<>(); + private TreeMap patterns = new TreeMap<>(new BaseIngestHelper.MatcherComparator()); - private final Set storedFields = new HashSet<>(); - private final Set indexedFields = new HashSet<>(); - private final Set reverseIndexedFields = new HashSet<>(); - private final Set tokenizedFields = new HashSet<>(); - private final Set reverseTokenizedFields = new HashSet<>(); - - private final Set storedFieldPatterns = new HashSet<>(); - private final Set indexedFieldPatterns = new HashSet<>(); - private final Set reverseIndexedFieldPatterns = new HashSet<>(); - private final Set tokenizedFieldPatterns = new HashSet<>(); - private final Set reverseTokenizedFieldPatterns = new HashSet<>(); + public static class FieldInfo { + boolean stored; + boolean indexed; + boolean reverseIndexed; + boolean tokenized; + boolean reverseTokenized; + } /** * Attempt to load the field config fieldHelper from the specified file, which is expected to be found on the classpath. 
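// Illustrative sketch, not part of this changeset: how the CompositeTimestamp helpers
// referenced above compose and decompose the single long that now serves as a record's
// timestamp. Only members that appear in this PR (getCompositeTimeStamp, getEventDate,
// getAgeOffDate, MILLIS_PER_DAY) are used; the wrapper class and main method are
// hypothetical scaffolding.
import datawave.util.CompositeTimestamp;

class CompositeTimestampSketch {
    public static void main(String[] args) {
        long eventDate = System.currentTimeMillis();
        // age this record off one day after its event date
        long ageOffDate = eventDate + CompositeTimestamp.MILLIS_PER_DAY;
        long composite = CompositeTimestamp.getCompositeTimeStamp(eventDate, ageOffDate);

        // both dates round-trip out of the composite value
        System.out.println(CompositeTimestamp.getEventDate(composite) == eventDate); // true
        System.out.println(CompositeTimestamp.getAgeOffDate(composite) == ageOffDate); // true

        // per the javadoc above, when the age-off date equals the event date the composite
        // timestamp is exactly the event date, so pre-existing plain timestamps remain valid
    }
}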
@@ -100,10 +99,9 @@ private static InputStream getAsStream(String fieldConfigPath) { } public String toString() { - return "[FieldConfigHelper: " + knownFields.size() + " known fields, " + storedFields.size() + " stored fields, " + indexedFields.size() - + " indexed fields, " + reverseIndexedFields.size() + " reverse indexed fields, " + tokenizedFields.size() + " tokenized fields, " - + reverseTokenizedFields.size() + " reverse tokenized fields; " + "nomatch, indexed:" + noMatchIndexed + " reverseIndexed:" - + noMatchReverseIndexed + " tokenized:" + noMatchTokenized + " reverseTokenized:" + noMatchReverseTokenized + "]"; + return "[FieldConfigHelper: " + knownFields.size() + " known fields, " + patterns.size() + " of those are patterns, " + "nomatch, indexed:" + + noMatchIndexed + " reverseIndexed:" + noMatchReverseIndexed + " tokenized:" + noMatchTokenized + " reverseTokenized:" + + noMatchReverseTokenized + "]"; } @@ -115,9 +113,14 @@ public XMLFieldConfigHelper(InputStream in, BaseIngestHelper helper) throws Pars log.info("Loaded FieldConfigHelper: " + this); } - public boolean addKnownField(String fieldName) { + public boolean addKnownField(String fieldName, FieldInfo info) { // must track the fields we've seen so we can properly apply default rules. - return knownFields.add(fieldName); + return (knownFields.put(fieldName, info) == null); + } + + public boolean addKnownFieldPattern(String fieldName, FieldInfo info, Matcher pattern) { + patterns.put(pattern, fieldName); + return addKnownField(fieldName, info); } public void setNoMatchFieldType(String fieldType) { @@ -126,33 +129,27 @@ public void setNoMatchFieldType(String fieldType) { @Override public boolean isStoredField(String fieldName) { - if (knownFields.contains(fieldName)) { - return this.storedFields.contains(fieldName); + if (knownFields.containsKey(fieldName)) { + return this.knownFields.get(fieldName).stored; } - if (findMatchingPattern(fieldName, this.storedFieldPatterns)) { - return true; + String pattern = findMatchingPattern(fieldName); + if (pattern != null) { + return this.knownFields.get(pattern).stored; } return isNoMatchStored(); } - public void addStoredField(String fieldName) { - this.storedFields.add(fieldName); - } - - public void addStoredFieldPattern(String pattern) { - this.storedFieldPatterns.add(BaseIngestHelper.compileFieldNamePattern(pattern)); - } - @Override public boolean isIndexedField(String fieldName) { - if (knownFields.contains(fieldName)) { - return this.indexedFields.contains(fieldName); + if (knownFields.containsKey(fieldName)) { + return this.knownFields.get(fieldName).indexed; } - if (findMatchingPattern(fieldName, this.indexedFieldPatterns)) { - return true; + String pattern = findMatchingPattern(fieldName); + if (pattern != null) { + return this.knownFields.get(pattern).indexed; } return isNoMatchIndexed(); @@ -163,77 +160,48 @@ public boolean isIndexOnlyField(String fieldName) { return isIndexedField(fieldName) && !isStoredField(fieldName); } - public void addIndexedField(String fieldName) { - this.indexedFields.add(fieldName); - } - - public void addIndexedFieldPattern(String pattern) { - this.indexedFieldPatterns.add(BaseIngestHelper.compileFieldNamePattern(pattern)); - } - @Override public boolean isReverseIndexedField(String fieldName) { - if (knownFields.contains(fieldName)) { - return this.reverseIndexedFields.contains(fieldName); + if (knownFields.containsKey(fieldName)) { + return this.knownFields.get(fieldName).reverseIndexed; } - if (findMatchingPattern(fieldName, 
this.reverseIndexedFieldPatterns)) { - return true; + String pattern = findMatchingPattern(fieldName); + if (pattern != null) { + return this.knownFields.get(pattern).reverseIndexed; } return isNoMatchReverseIndexed(); } - public void addReverseIndexedField(String fieldName) { - this.reverseIndexedFields.add(fieldName); - } - - public void addReverseIndexedFieldPattern(String pattern) { - this.reverseIndexedFieldPatterns.add(BaseIngestHelper.compileFieldNamePattern(pattern)); - } - @Override public boolean isTokenizedField(String fieldName) { - if (knownFields.contains(fieldName)) { - return this.tokenizedFields.contains(fieldName); + if (knownFields.containsKey(fieldName)) { + return this.knownFields.get(fieldName).tokenized; } - if (findMatchingPattern(fieldName, this.tokenizedFieldPatterns)) { - return true; + String pattern = findMatchingPattern(fieldName); + if (pattern != null) { + return this.knownFields.get(pattern).tokenized; } return isNoMatchTokenized(); } - public void addTokenizedField(String fieldName) { - this.tokenizedFields.add(fieldName); - } - - public void addTokenizedFieldPattern(String pattern) { - this.tokenizedFieldPatterns.add(BaseIngestHelper.compileFieldNamePattern(pattern)); - } - @Override public boolean isReverseTokenizedField(String fieldName) { - if (knownFields.contains(fieldName)) { - return this.reverseTokenizedFields.contains(fieldName); + if (knownFields.containsKey(fieldName)) { + return this.knownFields.get(fieldName).reverseTokenized; } - if (findMatchingPattern(fieldName, this.reverseTokenizedFieldPatterns)) { - return true; + String pattern = findMatchingPattern(fieldName); + if (pattern != null) { + return this.knownFields.get(pattern).reverseTokenized; } return isNoMatchReverseTokenized(); } - public void addReverseTokenizedField(String fieldName) { - this.reverseTokenizedFields.add(fieldName); - } - - public void addReverseTokenizedFieldPattern(String pattern) { - this.reverseTokenizedFieldPatterns.add(BaseIngestHelper.compileFieldNamePattern(pattern)); - } - public boolean isNoMatchStored() { return noMatchStored; } @@ -279,17 +247,11 @@ public void setNoMatchReverseTokenized(boolean noMatchReverseTokenized) { * * @param fieldName * the field name - * @param patterns - * the patterns to check * @return whether any patterns were found or not */ - private boolean findMatchingPattern(String fieldName, Collection patterns) { - for (Matcher m : patterns) { - if (m.reset(fieldName).matches()) { - return true; - } - } - return false; + private String findMatchingPattern(String fieldName) { + Matcher bestMatch = BaseIngestHelper.getBestMatch(patterns.keySet(), fieldName); + return (bestMatch == null ? 
null : patterns.get(bestMatch)); } static final class FieldConfigHandler extends DefaultHandler { @@ -437,11 +399,12 @@ void startField(String uri, String localName, String qName, Attributes attribute final int sz = attributes.getLength(); String name = null; - boolean stored = this.defaultStored; - boolean indexed = this.defaultIndexed; - boolean reverseIndexed = this.defaultReverseIndexed; - boolean tokenized = this.defaultTokenized; - boolean reverseTokenized = this.defaultReverseTokenized; + FieldInfo fieldInfo = new FieldInfo(); + fieldInfo.stored = this.defaultStored; + fieldInfo.indexed = this.defaultIndexed; + fieldInfo.reverseIndexed = this.defaultReverseIndexed; + fieldInfo.tokenized = this.defaultTokenized; + fieldInfo.reverseTokenized = this.defaultReverseTokenized; String fieldType = this.defaultFieldType; for (int i = 0; i < sz; i++) { @@ -449,15 +412,15 @@ void startField(String uri, String localName, String qName, Attributes attribute final String lv = attributes.getValue(i); if (STORED.equals(qn)) { - stored = Boolean.parseBoolean(lv); + fieldInfo.stored = Boolean.parseBoolean(lv); } else if (INDEXED.equals(qn)) { - indexed = Boolean.parseBoolean(lv); + fieldInfo.indexed = Boolean.parseBoolean(lv); } else if (REVERSE_INDEXED.equals(qn)) { - reverseIndexed = Boolean.parseBoolean(lv); + fieldInfo.reverseIndexed = Boolean.parseBoolean(lv); } else if (TOKENIZED.equals(qn)) { - tokenized = Boolean.parseBoolean(lv); + fieldInfo.tokenized = Boolean.parseBoolean(lv); } else if (REVERSE_TOKENIZED.equals(qn)) { - reverseTokenized = Boolean.parseBoolean(lv); + fieldInfo.reverseTokenized = Boolean.parseBoolean(lv); } else if ("name".equals(qn)) { name = lv; } else if (INDEX_TYPE.equals(qn)) { @@ -469,31 +432,10 @@ void startField(String uri, String localName, String qName, Attributes attribute if (name == null) { throw new IllegalArgumentException("No field called 'name' specified"); - } else if (!this.fieldHelper.addKnownField(name)) { + } else if (!this.fieldHelper.addKnownField(name, fieldInfo)) { throw new IllegalArgumentException( "Field " + name + " was already seen, check configuration file for duplicate entries (among fieldPattern, field tags)"); } - - if (stored) { - this.fieldHelper.addStoredField(name); - } - - if (indexed) { - this.fieldHelper.addIndexedField(name); - } - - if (reverseIndexed) { - this.fieldHelper.addReverseIndexedField(name); - } - - if (tokenized) { - this.fieldHelper.addTokenizedField(name); - } - - if (reverseTokenized) { - this.fieldHelper.addReverseTokenizedField(name); - } - if (fieldType != null) { if (this.ingestHelper != null) { this.ingestHelper.updateDatawaveTypes(name, fieldType); @@ -512,11 +454,12 @@ void startFieldPattern(String uri, String localName, String qName, Attributes at final int sz = attributes.getLength(); String pattern = null; - boolean stored = this.defaultStored; - boolean indexed = this.defaultIndexed; - boolean reverseIndexed = this.defaultReverseIndexed; - boolean tokenized = this.defaultTokenized; - boolean reverseTokenized = this.defaultReverseTokenized; + FieldInfo fieldInfo = new FieldInfo(); + fieldInfo.stored = this.defaultStored; + fieldInfo.indexed = this.defaultIndexed; + fieldInfo.reverseIndexed = this.defaultReverseIndexed; + fieldInfo.tokenized = this.defaultTokenized; + fieldInfo.reverseTokenized = this.defaultReverseTokenized; String fieldType = this.defaultFieldType; for (int i = 0; i < sz; i++) { @@ -524,15 +467,15 @@ void startFieldPattern(String uri, String localName, String qName, Attributes at final 
String lv = attributes.getValue(i); if (STORED.equals(qn)) { - stored = Boolean.parseBoolean(lv); + fieldInfo.stored = Boolean.parseBoolean(lv); } else if (INDEXED.equals(qn)) { - indexed = Boolean.parseBoolean(lv); + fieldInfo.indexed = Boolean.parseBoolean(lv); } else if (REVERSE_INDEXED.equals(qn)) { - reverseIndexed = Boolean.parseBoolean(lv); + fieldInfo.reverseIndexed = Boolean.parseBoolean(lv); } else if (TOKENIZED.equals(qn)) { - tokenized = Boolean.parseBoolean(lv); + fieldInfo.tokenized = Boolean.parseBoolean(lv); } else if (REVERSE_TOKENIZED.equals(qn)) { - reverseTokenized = Boolean.parseBoolean(lv); + fieldInfo.reverseTokenized = Boolean.parseBoolean(lv); } else if ("pattern".equals(qn)) { pattern = lv; } else if (INDEX_TYPE.equals(qn)) { @@ -544,31 +487,11 @@ void startFieldPattern(String uri, String localName, String qName, Attributes at if (pattern == null) { throw new IllegalArgumentException("No field called 'name' specified"); - } else if (!this.fieldHelper.addKnownField(pattern)) { + } else if (!this.fieldHelper.addKnownFieldPattern(pattern, fieldInfo, BaseIngestHelper.compileFieldNamePattern(pattern))) { throw new IllegalArgumentException( "Field pattern " + pattern + " is already known, check configuration file for duplicates (among fieldPattern, field tag)"); } - if (stored) { - this.fieldHelper.addStoredFieldPattern(pattern); - } - - if (indexed) { - this.fieldHelper.addIndexedFieldPattern(pattern); - } - - if (reverseIndexed) { - this.fieldHelper.addReverseIndexedFieldPattern(pattern); - } - - if (tokenized) { - this.fieldHelper.addTokenizedFieldPattern(pattern); - } - - if (reverseTokenized) { - this.fieldHelper.addReverseTokenizedFieldPattern(pattern); - } - if (fieldType != null) { if (this.ingestHelper != null) { this.ingestHelper.updateDatawaveTypes(pattern, fieldType); diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/BaseIngestHelper.java b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/BaseIngestHelper.java index f8c86c13e2b..c3d28d3a2d8 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/BaseIngestHelper.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/BaseIngestHelper.java @@ -1,8 +1,8 @@ package datawave.ingest.data.config.ingest; -import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -19,12 +19,13 @@ import com.google.common.base.Splitter; import com.google.common.collect.HashMultimap; -import com.google.common.collect.LinkedListMultimap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Multimap; import com.google.common.collect.Sets; +import com.google.common.collect.TreeMultimap; +import datawave.core.common.logging.ThreadConfigurableLogger; import datawave.data.normalizer.NormalizationException; import datawave.data.type.NoOpType; import datawave.data.type.OneToManyNormalizerType; @@ -40,13 +41,13 @@ import datawave.ingest.data.config.NormalizedFieldAndValue; import datawave.ingest.data.config.XMLFieldConfigHelper; import datawave.util.StringUtils; -import datawave.webservice.common.logging.ThreadConfigurableLogger; /** * Specialization of the Helper type that validates the configuration for Ingest purposes. 
These helper classes also have the logic to parse the field names and * field values from the datatypes that they represent. */ public abstract class BaseIngestHelper extends AbstractIngestHelper implements CompositeIngest, VirtualIngest { + /** * Configuration parameter to specify that data should be marked for delete on ingest. */ @@ -142,7 +143,7 @@ public abstract class BaseIngestHelper extends AbstractIngestHelper implements C private Multimap> typeFieldMap = null; private Multimap> typePatternMap = null; - private Multimap> typeCompiledPatternMap = null; + private TreeMultimap> typeCompiledPatternMap = null; protected Set indexOnlyFields = Sets.newHashSet(); protected Set indexedFields = Sets.newHashSet(); @@ -187,6 +188,27 @@ public enum FailurePolicy { protected FieldConfigHelper fieldConfigHelper = null; + /** + * This comparator is used to create a deterministic ordering of regular expressions + */ + public static class MatcherComparator implements Comparator { + + @Override + public int compare(Matcher o1, Matcher o2) { + String o1str = o1.pattern().pattern(); + int o1len = o1str.length(); + String o2str = o2.pattern().pattern(); + int o2len = o2str.length(); + if (o1len > o2len) { + return -1; + } else if (o1len == o2len) { + return o2str.compareTo(o1str); + } else { + return 1; + } + } + } + @Override public void setup(Configuration config) { super.setup(config); @@ -586,7 +608,8 @@ public boolean isDataTypeField(String fieldName) { } private void compilePatterns() { - Multimap> patterns = LinkedListMultimap.create(); + TreeMultimap> patterns = TreeMultimap.create(new MatcherComparator(), + (o1, o2) -> o1.toString().compareTo(o2.toString())); if (typePatternMap != null) { for (String pattern : typePatternMap.keySet()) { patterns.putAll(compileFieldNamePattern(pattern), typePatternMap.get(pattern)); @@ -611,43 +634,25 @@ public List> getDataTypes(String fieldName) { compilePatterns(); } - List patternLengths = new ArrayList<>(); - int bestMatch = 0; - Collection> bestMatchTypes = null; - - for (Matcher patternMatcher : typeCompiledPatternMap.keySet()) { - Collection> patternTypes = typeCompiledPatternMap.get(patternMatcher); - - if (patternMatcher.reset(fieldName).matches()) { - - if (useMostPreciseFieldTypeRegex) { - int patternLength = patternMatcher.pattern().toString().length(); - if (patternLengths.contains(patternLength)) { - log.warn("Multiple regular expression patterns with the same length exist for matching field " + fieldName - + ". Only the last one read will be used.
Please verify your configurations."); - } - patternLengths.add(patternLength); - - if (patternLength >= bestMatch) { - bestMatch = patternLength; - bestMatchTypes = patternTypes; - } - - } else { - types.addAll(patternTypes); - typeFieldMap.putAll(fieldName, patternTypes + if (useMostPreciseFieldTypeRegex) { + Matcher bestMatch = getBestMatch(typeCompiledPatternMap.keySet(), fieldName); + if (null != bestMatch) { + Collection> bestMatchTypes = typeCompiledPatternMap.get(bestMatch); + types.addAll(bestMatchTypes); + typeFieldMap.putAll(fieldName, bestMatchTypes); + } + } else { + for (Matcher patternMatcher : typeCompiledPatternMap.keySet()) { + if (patternMatcher.reset(fieldName).matches()) { + Collection> matchTypes = typeCompiledPatternMap.get(patternMatcher); + types.addAll(matchTypes); + typeFieldMap.putAll(fieldName, matchTypes); } } } - if (null != bestMatchTypes) { - types.addAll(bestMatchTypes); - typeFieldMap.putAll(fieldName, bestMatchTypes); - } - } // if no types were defined or matched via regex, use the default - if (types.isEmpty()) { types.addAll(typeFieldMap.get(null)); } @@ -655,6 +660,25 @@ public List> getDataTypes(String fieldName) { return types; } + public static Matcher getBestMatch(Set patterns, String fieldName) { + Matcher bestMatch = null; + for (Matcher patternMatcher : patterns) { + if (bestMatch != null && patternMatcher.pattern().pattern().length() < bestMatch.pattern().pattern().length()) { + break; + } + if (patternMatcher.reset(fieldName).matches()) { + if (bestMatch != null) { + log.warn("Multiple regular expression patterns with the same length exist for matching field " + fieldName + + ". The pattern that sorts lexicographically last will be used. Please verify your configurations."); + break; + } else { + bestMatch = patternMatcher; + } + } + } + return bestMatch; + } + /** * This is a helper routine that will return a normalized field value using the configured normalizer * @@ -962,8 +986,12 @@ protected void applyNormalizationAndAddToResults(Multimap + * A marker interface that indicates that an {@link IngestHelperInterface} must apply a filter to the event fields. + * <p>
    * * @see datawave.ingest.data.config.ingest.IngestFieldFilter */ diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/input/reader/AbstractEventRecordReader.java b/warehouse/ingest-core/src/main/java/datawave/ingest/input/reader/AbstractEventRecordReader.java index 52e0c50e864..20083dae89e 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/input/reader/AbstractEventRecordReader.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/input/reader/AbstractEventRecordReader.java @@ -35,6 +35,7 @@ import datawave.ingest.data.config.MarkingsHelper; import datawave.policy.IngestPolicyEnforcer; import datawave.policy.Policy; +import datawave.util.CompositeTimestamp; public abstract class AbstractEventRecordReader extends RecordReader implements EventRecordReader { @@ -246,7 +247,7 @@ protected void extractEventDate(final String fieldName, final String fieldValue) } } } - if (event.getDate() == Long.MIN_VALUE) { + if (!event.isTimestampSet()) { List patterns = new ArrayList<>(formatters.size()); for (SimpleDateFormat formatter : formatters) { patterns.add(formatter.toPattern()); diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/input/reader/event/EventErrorSummary.java b/warehouse/ingest-core/src/main/java/datawave/ingest/input/reader/event/EventErrorSummary.java index 848d6859e0a..49ca15bf671 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/input/reader/event/EventErrorSummary.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/input/reader/event/EventErrorSummary.java @@ -14,9 +14,11 @@ import org.apache.accumulo.core.data.Key; import org.apache.accumulo.core.data.KeyValue; import org.apache.accumulo.core.data.Value; -import org.apache.commons.jexl2.JexlContext; -import org.apache.commons.jexl2.JexlEngine; -import org.apache.commons.jexl2.Script; +import org.apache.commons.jexl3.JexlBuilder; +import org.apache.commons.jexl3.JexlContext; +import org.apache.commons.jexl3.JexlEngine; +import org.apache.commons.jexl3.JexlScript; +import org.apache.commons.jexl3.internal.Engine; import org.apache.commons.lang.builder.ToStringBuilder; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.TaskInputOutputContext; @@ -301,10 +303,10 @@ public boolean matches(String jobName, String dataType, String uid, Set } if (matches && jexlQuery != null) { // Get a JexlEngine initialized with the correct JexlArithmetic for this Document - JexlEngine engine = new JexlEngine(); + JexlEngine engine = new Engine(new JexlBuilder().strict(false)); // Evaluate the JexlContext against the Script - Script script = engine.createScript(jexlQuery); + JexlScript script = engine.createScript(jexlQuery); Object o = script.execute(this); diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/DataTypeDiscardFutureIntervalPredicate.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/DataTypeDiscardFutureIntervalPredicate.java new file mode 100644 index 00000000000..71d6bcb7fe0 --- /dev/null +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/DataTypeDiscardFutureIntervalPredicate.java @@ -0,0 +1,59 @@ +package datawave.ingest.mapreduce; + +import org.apache.hadoop.conf.Configuration; +import org.apache.log4j.Logger; + +import datawave.ingest.data.RawRecordContainer; +import datawave.ingest.metric.IngestInput; +import datawave.ingest.time.Now; + +public class DataTypeDiscardFutureIntervalPredicate implements RawRecordPredicate { + + private static final Logger log = 
Logger.getLogger(DataTypeDiscardFutureIntervalPredicate.class); + + /** + * number which will be used to evaluate whether or not an Event should be processed. If the Event.getEventDate() is less than (now + interval) then it will + * be processed. + */ + public static final String DISCARD_FUTURE_INTERVAL = "event.discard.future.interval"; + + private static final Now now = Now.getInstance(); + + private long discardFutureInterval = 0L; + + @Override + public void setConfiguration(String type, Configuration conf) { + long defaultInterval = conf.getLong(DISCARD_FUTURE_INTERVAL, 0l); + this.discardFutureInterval = conf.getLong(type + "." + DISCARD_FUTURE_INTERVAL, defaultInterval); + log.info("Setting up type: " + type + " with future interval " + this.discardFutureInterval); + } + + @Override + public boolean shouldProcess(RawRecordContainer record) { + // Determine whether the event date is greater than the interval. Excluding fatal error events. + if (discardFutureInterval != 0L && (record.getDate() > (now.get() + discardFutureInterval))) { + if (log.isInfoEnabled()) + log.info("Event with time " + record.getDate() + " newer than specified interval of " + (now.get() + discardFutureInterval) + ", skipping..."); + return false; + } + return true; + } + + @Override + public String getCounterName() { + return IngestInput.FUTURE_EVENT.name(); + } + + @Override + public int hashCode() { + return (int) discardFutureInterval; + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof DataTypeDiscardFutureIntervalPredicate) { + return discardFutureInterval == (((DataTypeDiscardFutureIntervalPredicate) obj).discardFutureInterval); + } + return false; + } +} diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/DataTypeDiscardIntervalPredicate.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/DataTypeDiscardIntervalPredicate.java new file mode 100644 index 00000000000..69a0857cc26 --- /dev/null +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/DataTypeDiscardIntervalPredicate.java @@ -0,0 +1,59 @@ +package datawave.ingest.mapreduce; + +import org.apache.hadoop.conf.Configuration; +import org.apache.log4j.Logger; + +import datawave.ingest.data.RawRecordContainer; +import datawave.ingest.metric.IngestInput; +import datawave.ingest.time.Now; + +public class DataTypeDiscardIntervalPredicate implements RawRecordPredicate { + + private static final Logger log = Logger.getLogger(DataTypeDiscardIntervalPredicate.class); + + /** + * number which will be used to evaluate whether or not an Event should be processed. If the Event.getEventDate() is greater than (now - interval) then it + * will be processed. + */ + public static final String DISCARD_INTERVAL = "event.discard.interval"; + + private static final Now now = Now.getInstance(); + + private long discardInterval = 0L; + + @Override + public void setConfiguration(String type, Configuration conf) { + long defaultInterval = conf.getLong(DISCARD_INTERVAL, 0l); + this.discardInterval = conf.getLong(type + "." + DISCARD_INTERVAL, defaultInterval); + log.info("Setting up type: " + type + " with interval " + this.discardInterval); + } + + @Override + public boolean shouldProcess(RawRecordContainer record) { + // Determine whether the event date is greater than the interval. Excluding fatal error events. 
+ if (discardInterval != 0L && (record.getDate() < (now.get() - discardInterval))) { + if (log.isInfoEnabled()) + log.info("Event with time " + record.getDate() + " older than specified interval of " + (now.get() - discardInterval) + ", skipping..."); + return false; + } + return true; + } + + @Override + public String getCounterName() { + return IngestInput.OLD_EVENT.name(); + } + + @Override + public int hashCode() { + return (int) discardInterval; + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof DataTypeDiscardIntervalPredicate) { + return discardInterval == (((DataTypeDiscardIntervalPredicate) obj).discardInterval); + } + return false; + } +} diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/EventMapper.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/EventMapper.java index 86e600060f8..edce34ef644 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/EventMapper.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/EventMapper.java @@ -5,6 +5,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.HashSet; @@ -16,6 +17,7 @@ import java.util.Stack; import java.util.TreeMap; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import org.apache.accumulo.core.data.Mutation; import org.apache.accumulo.core.data.Value; @@ -100,11 +102,11 @@ public class EventMapper extends StatsDE private static final Logger log = Logger.getLogger(EventMapper.class); - /** - * number which will be used to evaluate whether or not an Event should be processed. If the Event.getEventDate() is greater than (now - interval) then it - * will be processed. - */ - public static final String DISCARD_INTERVAL = "event.discard.interval"; + // for backward compatibility, these DISCARD constants are maintained here as well + public static final String DISCARD_INTERVAL = DataTypeDiscardIntervalPredicate.DISCARD_INTERVAL; + public static final String DISCARD_FUTURE_INTERVAL = DataTypeDiscardFutureIntervalPredicate.DISCARD_FUTURE_INTERVAL; + + public static final String RECORD_PREDICATES = "event.predicates"; public static final String CONTEXT_WRITER_CLASS = "ingest.event.mapper.context.writer.class"; public static final String CONTEXT_WRITER_OUTPUT_TABLE_COUNTERS = "ingest.event.mapper.context.writer.output.table.counters"; @@ -132,14 +134,16 @@ public class EventMapper extends StatsDE protected Map>> typeMap = new HashMap<>(); - /** - * might as well cache the discard interval - */ - protected Map dataTypeDiscardIntervalCache = new HashMap<>(); + // Predicates are used to filter out events if needed. If predicates exist + // for a datatype, then the predicates need to all pass (return true) + // in order to ingest the record. The event is otherwise dropped and a counter is + // incremented for the predicate class.
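// Illustrative sketch, not part of this changeset: a minimal custom RawRecordPredicate,
// assuming only the interface and configuration keys introduced in this PR. The class
// name and the "<type>.min.event.date" property are hypothetical.
package datawave.ingest.mapreduce;

import org.apache.hadoop.conf.Configuration;

import datawave.ingest.data.RawRecordContainer;

public class MinEventDatePredicate implements RawRecordPredicate {

    private long minEventDate = Long.MIN_VALUE;

    @Override
    public void setConfiguration(String type, Configuration conf) {
        // hypothetical per-type property: oldest acceptable event date, in epoch millis
        this.minEventDate = conf.getLong(type + ".min.event.date", Long.MIN_VALUE);
    }

    @Override
    public boolean shouldProcess(RawRecordContainer record) {
        // drop any record whose event date portion precedes the configured cutoff; drops
        // are counted under the default counter name, the simple class name
        return record.getDate() >= minEventDate;
    }
}
// To apply it to a single datatype, list the class under that type's predicate key,
// e.g. mytype.event.predicates=datawave.ingest.mapreduce.MinEventDatePredicate, using
// the RECORD_PREDICATES ("event.predicates") configuration added to EventMapper here.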
+ protected Map> predicateMap = new HashMap<>(); - private FileSplit split = null; + // base set of predicates + private Collection predicates = Collections.emptyList(); - private long interval = 0l; + private FileSplit split = null; private static Now now = Now.getInstance(); @@ -187,7 +191,11 @@ public void setup(Context context) throws IOException, InterruptedException { // Initialize the Type Registry TypeRegistry.getInstance(context.getConfiguration()); - interval = context.getConfiguration().getLong(DISCARD_INTERVAL, 0l); + // load the predicates applied to all types + predicates = new HashSet<>(context.getConfiguration().getTrimmedStringCollection(RECORD_PREDICATES)); + // always add the discard interval predicates + predicates.add(DataTypeDiscardIntervalPredicate.class.getName()); + predicates.add(DataTypeDiscardFutureIntervalPredicate.class.getName()); // default to true, but it can be disabled createSequenceFileName = context.getConfiguration().getBoolean(LOAD_SEQUENCE_FILE_NAME, true); @@ -286,7 +294,7 @@ public void setup(Context context) throws IOException, InterruptedException { } /** - * Get the data type handlers for a given type name. This will also fill the dataTypeDiscardIntervalCache and the validators as a side effect. + * Get the data type handlers for a given type name. This will also fill the predicate map and the validators as a side effect. * * @param typeStr * name of the type @@ -300,11 +308,7 @@ private List> loadDataType(String typeStr, Context context) typeMap.put(typeStr, new ArrayList<>()); - long myInterval = context.getConfiguration().getLong(typeStr + "." + DISCARD_INTERVAL, interval); - - dataTypeDiscardIntervalCache.put(typeStr, myInterval); - - log.info("Setting up type: " + typeStr + " with interval " + myInterval); + predicateMap.put(typeStr, getPredicates(typeStr, context, predicates)); if (!TypeRegistry.getTypeNames().contains(typeStr)) { log.warn("Attempted to load configuration for a type that does not exist in the registry: " + typeStr); @@ -364,6 +368,35 @@ private List> loadDataType(String typeStr, Context context) return typeMap.get(typeStr); } + private Set getPredicates(final String type, final Context context, final Collection basePredicates) { + Collection predicateClasses = new HashSet<>(context.getConfiguration().getTrimmedStringCollection(type + "." 
+ RECORD_PREDICATES)); + predicateClasses.addAll(basePredicates); + if (!predicateClasses.isEmpty()) { + return predicateClasses.stream().map(s -> { + try { + return Class.forName(s); + } catch (ClassNotFoundException e) { + throw new IllegalArgumentException("Cannot load predicate for type " + type + ": " + s, e); + } + }).filter(c -> { + if (!RawRecordPredicate.class.isAssignableFrom(c)) { + throw new IllegalArgumentException("Predicate " + c.getName() + " for type " + type + " is not a RawRecordPredicate."); + } + return true; + }).map(c -> { + try { + RawRecordPredicate predicate = (RawRecordPredicate) c.getDeclaredConstructor().newInstance(); + predicate.setConfiguration(type, context.getConfiguration()); + return predicate; + } catch (Exception e) { + throw new IllegalArgumentException("Predicate " + c.getName() + " for type " + type + " could not be constructed.", e); + } + }).collect(Collectors.toSet()); + } else { + return Collections.EMPTY_SET; + } + } + private List getDataTypeFilterClassNames() { SortedMap priorityToFilters = new TreeMap<>(); @@ -394,7 +427,7 @@ public void map(K1 key, V1 value, Context context) throws IOException, Interrupt eventMapperTimer.start(); } - // ensure this datatype's handlers etc are loaded such that the dataTypeDiscardIntervalCache and validators are filled as well + // ensure this datatype's handlers etc are loaded such that the predicates and validators are filled as well List> typeHandlers = loadDataType(value.getDataType().typeName(), context); // This is a little bit fragile, but there is no other way @@ -402,8 +435,6 @@ public void map(K1 key, V1 value, Context context) throws IOException, Interrupt // using this to set some counters that collect stats. MultiTableRangePartitioner.setContext(context); - Long myInterval = dataTypeDiscardIntervalCache.get(value.getDataType().typeName()); - // setup the configuration on the event // this is automatically done by the sequence reader.... // value.setConf(context.getConfiguration()); @@ -463,12 +494,17 @@ public void map(K1 key, V1 value, Context context) throws IOException, Interrupt value.setAuxProperty(ErrorDataTypeHandler.PROCESSED_COUNT, "1"); } - // Determine whether the event date is greater than the interval. Excluding fatal error events. 
- if (!value.fatalError() && null != myInterval && 0L != myInterval && (value.getDate() < (now.get() - myInterval))) { - if (log.isInfoEnabled()) - log.info("Event with time " + value.getDate() + " older than specified interval of " + (now.get() - myInterval) + ", skipping..."); - getCounter(context, IngestInput.OLD_EVENT).increment(1); - return; + if (!value.fatalError()) { + // Determine whether the event should be filtered for any other reason + Set predicates = predicateMap.get(value.getDataType().typeName()); + if (null != predicates && !predicates.isEmpty()) { + for (RawRecordPredicate predicate : predicates) { + if (!predicate.test(value)) { + getCounter(context, IngestInput.FILTER.name(), predicate.getCounterName()).increment(1); + return; + } + } + } } // Add the list of handlers with the ALL specified handlers diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/RawRecordPredicate.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/RawRecordPredicate.java new file mode 100644 index 00000000000..a2246ca5338 --- /dev/null +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/RawRecordPredicate.java @@ -0,0 +1,54 @@ +package datawave.ingest.mapreduce; + +import java.util.function.Predicate; + +import org.apache.hadoop.conf.Configuration; + +import datawave.ingest.data.RawRecordContainer; + +/** + * An implementation of this interface can be used with the EventMapper to filter out events that should not be processed. It is expected that there is an empty + * constructor. The setConfiguration(Configuration) method will be called prior to any filtering such that the class can configure itself appropriately. + */ +public interface RawRecordPredicate extends Predicate { + + /** + * This is the main method used to filter out records that should not be processed. + * + * @param record + * The raw record container under review + * @return true if the event is ok to ingest + */ + boolean shouldProcess(RawRecordContainer record); + + /** + * This method will be called after configuration with the map-reduce configuration. + * + * @param type + * The datatype for which this predicate is being constructed + * @param conf + * The hadoop configuration object + */ + default void setConfiguration(String type, Configuration conf) {} + + /** + * The counter name used for records that are dropped. Uses the simple name of the class implementation by default. + * + * @return The counter name + */ + default String getCounterName() { + return this.getClass().getSimpleName(); + } + + /** + * The implementation of the java util Predicate method. This method should not be overridden. + * + * @param record + * The raw record container under review + * @return true if the record should be ingested + */ + @Override + default boolean test(RawRecordContainer record) { + return shouldProcess(record); + } +} diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/atom/AtomDataTypeHandler.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/atom/AtomDataTypeHandler.java index b0f22fc8539..f48dfa0a9ec 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/atom/AtomDataTypeHandler.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/atom/AtomDataTypeHandler.java @@ -180,20 +180,20 @@ public long process(KEYIN key, RawRecordContainer event, Multimap @@ -49,7 +49,7 @@ * This class creates the following Mutations or Key/Values:
[The hunk bodies here were garbled during extraction. What survives: a javadoc HTML table describing cached tables (columns: Table Name, Connection Pool, Authorizations, Reload Interval (ms), Max Rows, Last Refresh, Refreshing Now), with row cells built by appending cache.getTableName() and cache.getConnectionPoolName(), plus whitespace-only javadoc cleanup around the sentence "The table with the name specified by {@link #DATEINDEX_TNAME} will be the date index table."]
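For reviewers who want to see the new extension point in use, the following is a minimal sketch of a RawRecordPredicate implementation. It is illustrative only: the class name MaxAgeRecordPredicate, the property name "<datatype>.max.age.ms", and the idea of recreating the removed discard-interval check as a predicate are assumptions of this sketch, not code in this patch.

package datawave.ingest.mapreduce;

import org.apache.hadoop.conf.Configuration;

import datawave.ingest.data.RawRecordContainer;

/**
 * Hypothetical predicate that drops events whose event date is older than a
 * configured age, mirroring the discard-interval check removed from EventMapper.
 */
public class MaxAgeRecordPredicate implements RawRecordPredicate {

    private long maxAgeMs = Long.MAX_VALUE;

    @Override
    public void setConfiguration(String type, Configuration conf) {
        // "<datatype>.max.age.ms" is an assumed property name for this sketch
        maxAgeMs = conf.getLong(type + ".max.age.ms", Long.MAX_VALUE);
    }

    @Override
    public boolean shouldProcess(RawRecordContainer record) {
        // getDate() is the same event date the old interval check in map() compared against
        return System.currentTimeMillis() - record.getDate() <= maxAgeMs;
    }

    @Override
    public String getCounterName() {
        return "MaxAgeExceeded";
    }
}

Once such a class is listed among a datatype's configured predicate classes, EventMapper constructs it reflectively via its no-argument constructor, calls setConfiguration(type, conf), and, for any record where test(...) returns false, increments a counter under IngestInput.FILTER keyed by getCounterName() and skips the record.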
    * * * @param diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/ProtobufEdgeDataTypeHandler.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/ProtobufEdgeDataTypeHandler.java index bbc04965e2d..090e43f407e 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/ProtobufEdgeDataTypeHandler.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/ProtobufEdgeDataTypeHandler.java @@ -23,7 +23,7 @@ import org.apache.accumulo.core.data.Key; import org.apache.accumulo.core.data.Value; import org.apache.accumulo.core.security.ColumnVisibility; -import org.apache.commons.jexl2.Script; +import org.apache.commons.jexl3.JexlScript; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.StatusReporter; @@ -135,7 +135,7 @@ public class ProtobufEdgeDataTypeHandler implements Exten private EdgePreconditionJexlEvaluation edgePreconditionEvaluation; private EdgePreconditionCacheHelper edgePreconditionCacheHelper; private EdgePreconditionArithmetic arithmetic = new EdgePreconditionArithmetic(); - private Map scriptCache; + private Map scriptCache; protected String edgeTableName = null; protected String metadataTableName = null; @@ -574,8 +574,10 @@ public long process(KEYIN key, RawRecordContainer event, Multimap mSink = null; for (EdgeDefinition edgeDef : edgeDefs) { - arithmetic.clearMatchingGroups(); + arithmetic.clear(); Map> matchingGroups = new HashMap<>(); + Map> excludedGroups = new HashMap<>(); + String jexlPreconditions = null; // don't bother evaluating preconditions if we know this event doesn't have the necessary fields for this edge @@ -612,10 +614,28 @@ public long process(KEYIN key, RawRecordContainer event, Multimap> eventMetadataRegistr // add to the eventMetadataRegistry map Key baseKey = createMetadataEdgeKey(edgeValue, edgeValue.getSource(), edgeValue.getSource().getIndexedFieldValue(), edgeValue.getSink(), edgeValue.getSink().getIndexedFieldValue(), this.getVisibility(edgeValue)); - Key fwdMetaKey = EdgeKey.getMetadataKey(baseKey); Key revMetaKey = EdgeKey.getMetadataKey(EdgeKey.swapSourceSink(EdgeKey.decode(baseKey)).encode()); @@ -1020,7 +1039,6 @@ protected String getEdgeDefGroup(String groupedFieldName) { protected long writeEdges(EdgeDataBundle value, TaskInputOutputContext context, ContextWriter contextWriter, boolean validActivtyDate, boolean sameActivityDate, long eventDate) throws IOException, InterruptedException { - long edgesCreated = 0; if (eventDate < newFormatStartDate) { edgesCreated += writeEdges(value, context, contextWriter, EdgeKey.DATE_TYPE.OLD_EVENT); @@ -1149,7 +1167,7 @@ private Key createMetadataEdgeKey(EdgeDataBundle edgeValue, VertexValue source, protected Key createEdgeKey(EdgeDataBundle edgeValue, VertexValue source, String sourceValue, VertexValue sink, String sinkValue, Text visibility, EdgeKey.DATE_TYPE date_type) { - return createEdgeKey(edgeValue, source, sourceValue, sink, sinkValue, visibility, edgeValue.getEventDate(), date_type); + return createEdgeKey(edgeValue, source, sourceValue, sink, sinkValue, visibility, edgeValue.getEvent().getTimestamp(), date_type); } private Key createEdgeKey(EdgeDataBundle edgeValue, VertexValue source, String sourceValue, VertexValue sink, String sinkValue, Text visibility, @@ -1173,7 +1191,7 @@ protected Key createStatsKey(STATS_TYPE statsType, EdgeDataBundle edgeValue, Ver 
builder.setSourceData(value).setStatsType(statsType).setType(edgeValue.getEdgeType()).setYyyymmdd(edgeValue.getYyyyMMdd(date_type)) .setSourceRelationship(vertex.getRelationshipType()).setSourceAttribute1(vertex.getCollectionType()) .setAttribute3(edgeValue.getEdgeAttribute3()).setAttribute2(edgeValue.getEdgeAttribute2()).setColvis(visibility) - .setTimestamp(edgeValue.getEventDate()).setDateType(date_type); + .setTimestamp(edgeValue.getEvent().getTimestamp()).setDateType(date_type); builder.setDeleted(edgeValue.isDeleting()); Key key = builder.build().encode(); boolean isNewKey = false; diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgeJexlEngine.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgeJexlEngine.java index 96606430783..86f81deddb6 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgeJexlEngine.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgeJexlEngine.java @@ -1,22 +1,20 @@ package datawave.ingest.mapreduce.handler.edge.evaluation; -import java.util.Map; +import org.apache.commons.jexl3.JexlBuilder; +import org.apache.commons.jexl3.JexlContext; +import org.apache.commons.jexl3.JexlOptions; +import org.apache.commons.jexl3.internal.Engine; +import org.apache.commons.jexl3.internal.Frame; +import org.apache.commons.jexl3.internal.Interpreter; -import org.apache.commons.jexl2.Interpreter; -import org.apache.commons.jexl2.JexlArithmetic; -import org.apache.commons.jexl2.JexlContext; -import org.apache.commons.jexl2.JexlEngine; -import org.apache.commons.jexl2.introspection.Uberspect; -import org.apache.commons.logging.Log; +public class EdgeJexlEngine extends Engine { -public class EdgeJexlEngine extends JexlEngine { - - public EdgeJexlEngine(Uberspect anUberspect, JexlArithmetic anArithmetic, Map theFunctions, Log log) { - super(anUberspect, anArithmetic, theFunctions, log); + public EdgeJexlEngine(JexlBuilder conf) { + super(conf); } @Override - protected Interpreter createInterpreter(JexlContext context, boolean strictFlag, boolean silentFlag) { - return new EdgeJexlInterpreter(this, context, strictFlag, silentFlag); + protected Interpreter createInterpreter(JexlContext context, Frame frame, JexlOptions opts) { + return new EdgeJexlInterpreter(this, opts, context, frame); } } diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgeJexlInterpreter.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgeJexlInterpreter.java index e63095dcd34..ce0259a070c 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgeJexlInterpreter.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgeJexlInterpreter.java @@ -1,14 +1,21 @@ package datawave.ingest.mapreduce.handler.edge.evaluation; -import org.apache.commons.jexl2.Interpreter; -import org.apache.commons.jexl2.JexlContext; -import org.apache.commons.jexl2.JexlException; -import org.apache.commons.jexl2.parser.ASTOrNode; +import org.apache.commons.jexl3.JexlContext; +import org.apache.commons.jexl3.JexlEngine; +import org.apache.commons.jexl3.JexlException; +import org.apache.commons.jexl3.JexlOperator; +import org.apache.commons.jexl3.JexlOptions; +import org.apache.commons.jexl3.internal.Frame; +import org.apache.commons.jexl3.internal.Interpreter; +import 
org.apache.commons.jexl3.parser.ASTERNode; +import org.apache.commons.jexl3.parser.ASTNENode; +import org.apache.commons.jexl3.parser.ASTNRNode; +import org.apache.commons.jexl3.parser.ASTOrNode; public class EdgeJexlInterpreter extends Interpreter { - public EdgeJexlInterpreter(EdgeJexlEngine edgeJexlEngine, JexlContext context, boolean strictFlag, boolean silentFlag) { - super(edgeJexlEngine, context, strictFlag, silentFlag); + public EdgeJexlInterpreter(EdgeJexlEngine engine, JexlOptions opts, JexlContext context, Frame eFrame) { + super(engine, opts, context, eFrame); } // we want to avoid short circuiting an OR so we generate all possible edges if they are group aware @@ -37,4 +44,49 @@ public Object visit(ASTOrNode node, Object data) { return (matchesL || matchesR); } + @Override + protected Object visit(ASTNENode node, Object data) { + Object left = node.jjtGetChild(0).jjtAccept(this, data); + Object right = node.jjtGetChild(1).jjtAccept(this, data); + + try { + if (this.arithmetic instanceof EdgePreconditionArithmetic) { + Object result = ((EdgePreconditionArithmetic) this.arithmetic).notEquals(left, right); + return result; + } else { + Object result = !this.arithmetic.equals(left, right); + return result; + } + + } catch (ArithmeticException xrt) { + throw new JexlException(this.findNullOperand(node, left, right), "!= error", xrt); + } + } + + @Override + protected Object visit(final ASTERNode node, final Object data) { + final Object left = node.jjtGetChild(0).jjtAccept(this, data); + final Object right = node.jjtGetChild(1).jjtAccept(this, data); + + return this.arithmetic.contains(left, right); + } + + @Override + protected Object visit(final ASTNRNode node, final Object data) { + final Object left = node.jjtGetChild(0).jjtAccept(this, data); + final Object right = node.jjtGetChild(1).jjtAccept(this, data); + try { + if (this.arithmetic instanceof EdgePreconditionArithmetic) { + Object result = ((EdgePreconditionArithmetic) this.arithmetic).notContains(left, right); + return result; + } else { + Object result = !this.arithmetic.contains(left, right); + return result; + } + + } catch (ArithmeticException xrt) { + throw new JexlException(this.findNullOperand(node, left, right), "!~ error", xrt); + } + } + } diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgePreconditionArithmetic.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgePreconditionArithmetic.java index 00a1b0d7871..29922d79f54 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgePreconditionArithmetic.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgePreconditionArithmetic.java @@ -7,7 +7,7 @@ import java.util.Map; import java.util.Set; -import org.apache.commons.jexl2.JexlArithmetic; +import org.apache.commons.jexl3.JexlArithmetic; import datawave.attribute.EventField; import datawave.attribute.EventFieldValueTuple; @@ -15,6 +15,9 @@ public class EdgePreconditionArithmetic extends JexlArithmetic { private Map> matchingGroups = new HashMap<>(); + private Map> excludedGroups = new HashMap<>(); + + private boolean negated = false; public EdgePreconditionArithmetic() { super(false); @@ -87,25 +90,52 @@ public boolean equals(final Object left, final Object right) { } @Override - public boolean matches(Object left, Object right) { - - if (left == null && right == null) { - // if both are null L == R - return true; - } - if 
(left == null || right == null) { - // we know both aren't null, therefore L != R - return false; - } - final String arg = left.toString(); + public Boolean contains(Object left, Object right) { boolean matches = false; - if (right instanceof java.util.regex.Pattern) { - matches = ((java.util.regex.Pattern) right).matcher(arg).matches(); - if (matches) { - addMatchingGroup(left); + + if (left instanceof Collection && !(right instanceof Collection)) { + Object newRight = EventFieldValueTuple.getValue(right); + + Iterator iter = ((Collection) left).iterator(); + while (iter.hasNext()) { + Object tuple = iter.next(); + Object newLeft = EventFieldValueTuple.getValue(tuple); + if (super.contains(newRight, newLeft)) { + addMatchingGroup(tuple); + matches = true; + } + } + + } else if (!(left instanceof Collection) && (right instanceof Collection)) { + throw new IllegalArgumentException("Please ensure regular expression preconditions are of the form 'FIELD =~ regex' and not reversed."); + + } else if ((left instanceof Collection) && (right instanceof Collection)) { + + Iterator iter = ((Collection) left).iterator(); + while (iter.hasNext()) { + Object lefttuple = iter.next(); + Iterator iter2 = ((Collection) right).iterator(); + while (iter2.hasNext()) { + Object righttuple = iter2.next(); + Object newLeft = EventFieldValueTuple.getValue(lefttuple); + Object newRight = EventFieldValueTuple.getValue(righttuple); + if (super.contains(newRight, newLeft) || newLeft.toString().contains(newRight.toString())) { + addMatchingGroup(righttuple); + addMatchingGroup(lefttuple); + matches = true; + } + } } } else { - matches = arg.matches(right.toString()); + Object newLeft = EventFieldValueTuple.getValue(left); + Object newRight = EventFieldValueTuple.getValue(right); + + if (super.contains(newLeft, newRight)) { + addMatchingGroup(newLeft); + addMatchingGroup(newRight); + matches = true; + + } } return matches; } @@ -309,12 +339,169 @@ private void addMatchingGroup(Object o) { } } + private void addExcludedGroup(Object o) { + if (o instanceof EventFieldValueTuple) { + String fieldName = EventFieldValueTuple.getFieldName(o); + String commonality = EventField.getGroup(fieldName); + String group = EventField.getSubgroup(fieldName); + Set groups = excludedGroups.get(commonality); + if (groups == null) { + groups = new HashSet<>(); + groups.add(group); + excludedGroups.put(commonality, groups); + } else + groups.add(group); + } + } + public Map> getMatchingGroups() { return matchingGroups; } + public Map> getExcludedGroups() { + return excludedGroups; + } + public void clearMatchingGroups() { matchingGroups = new HashMap<>(); } + public void clearExcludedGroups() { + excludedGroups = new HashMap<>(); + } + + public void clear() { + clearMatchingGroups(); + clearExcludedGroups(); + } + + public Object notEquals(Object left, Object right) { + boolean matches = false; + + if (left instanceof Collection && !(right instanceof Collection)) { + Object newRight = EventFieldValueTuple.getValue(right); + + Iterator iter = ((Collection) left).iterator(); + while (iter.hasNext()) { + Object tuple = iter.next(); + Object newLeft = EventFieldValueTuple.getValue(tuple); + if (!super.equals(newLeft, newRight)) { + addMatchingGroup(tuple); + matches = true; + } else { + addExcludedGroup(tuple); + } + } + + } else if (!(left instanceof Collection) && (right instanceof Collection)) { + Object newLeft = EventFieldValueTuple.getValue(left); + + Iterator iter = ((Collection) right).iterator(); + while (iter.hasNext()) { + Object tuple 
= iter.next(); + Object newRight = EventFieldValueTuple.getValue(tuple); + if (!super.equals(newLeft, newRight)) { + addMatchingGroup(tuple); + matches = true; + } else { + addExcludedGroup(tuple); + } + } + + } else if ((left instanceof Collection) && (right instanceof Collection)) { + + Iterator iter = ((Collection) right).iterator(); + while (iter.hasNext()) { + Object lefttuple = iter.next(); + Iterator iter2 = ((Collection) left).iterator(); + while (iter2.hasNext()) { + Object righttuple = iter2.next(); + Object newLeft = EventFieldValueTuple.getValue(lefttuple); + Object newRight = EventFieldValueTuple.getValue(righttuple); + if (!super.equals(newLeft, newRight)) { + addMatchingGroup(righttuple); + addMatchingGroup(lefttuple); + matches = true; + } else { + addExcludedGroup(righttuple); + addExcludedGroup(lefttuple); + } + } + } + } else { + Object newLeft = EventFieldValueTuple.getValue(left); + Object newRight = EventFieldValueTuple.getValue(right); + + if (!super.equals(newLeft, newRight)) { + addMatchingGroup(newLeft); + addMatchingGroup(newRight); + matches = true; + } else { + addExcludedGroup(newLeft); + addExcludedGroup(newRight); + } + } + + return matches; + } + + public Object notContains(Object left, Object right) { + boolean matches = false; + + if (left instanceof Collection && !(right instanceof Collection)) { + Object newRight = EventFieldValueTuple.getValue(right); + + Iterator iter = ((Collection) left).iterator(); + while (iter.hasNext()) { + Object tuple = iter.next(); + Object newLeft = EventFieldValueTuple.getValue(tuple); + if (!super.contains(newRight, newLeft)) { + addMatchingGroup(tuple); + matches = true; + } else { + addExcludedGroup(tuple); + } + } + + } else if (!(left instanceof Collection) && (right instanceof Collection)) { + + throw new IllegalArgumentException("Please ensure regular expression preconditions are of the form 'FIELD =~ regex' and not reversed."); + + } else if ((left instanceof Collection) && (right instanceof Collection)) { + + Iterator iter = ((Collection) left).iterator(); + while (iter.hasNext()) { + Object lefttuple = iter.next(); + Iterator iter2 = ((Collection) right).iterator(); + while (iter2.hasNext()) { + Object righttuple = iter2.next(); + Object newLeft = EventFieldValueTuple.getValue(lefttuple); + Object newRight = EventFieldValueTuple.getValue(righttuple); + if (!super.contains(newRight, newLeft) && !newLeft.toString().contains(newRight.toString())) { + addMatchingGroup(righttuple); + addMatchingGroup(lefttuple); + matches = true; + } else { + addExcludedGroup(righttuple); + addExcludedGroup(lefttuple); + } + } + } + } else { + Object newLeft = EventFieldValueTuple.getValue(left); + Object newRight = EventFieldValueTuple.getValue(right); + + if (!super.contains(newRight, newLeft) && !newLeft.toString().contains(newRight.toString())) { + addMatchingGroup(newLeft); + addMatchingGroup(newRight); + matches = true; + } else { + addExcludedGroup(newLeft); + addExcludedGroup(newRight); + + } + } + return matches; + } + } diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgePreconditionCacheHelper.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgePreconditionCacheHelper.java index db8cba48755..f5168879f7c 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgePreconditionCacheHelper.java +++ 
b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgePreconditionCacheHelper.java @@ -4,8 +4,9 @@ import java.util.List; import java.util.Map; -import org.apache.commons.jexl2.JexlEngine; -import org.apache.commons.jexl2.Script; +import org.apache.commons.jexl3.JexlBuilder; +import org.apache.commons.jexl3.JexlEngine; +import org.apache.commons.jexl3.JexlScript; import datawave.ingest.mapreduce.handler.edge.define.EdgeDefinition; import datawave.ingest.mapreduce.handler.edge.define.EdgeDefinitionConfigurationHelper; @@ -22,14 +23,17 @@ public EdgePreconditionCacheHelper(EdgePreconditionArithmetic arithmetic) { } private void createEngine(EdgePreconditionArithmetic arithmetic) { - this.setEngine(new EdgeJexlEngine(null, arithmetic, null, null)); - this.getEngine().setDebug(false); // Turn off debugging to make things go faster - this.getEngine().setCache(50); // Set cache size lower than default value of 512 + // @formatter:off + this.setEngine(new EdgeJexlEngine(new JexlBuilder() + .arithmetic(arithmetic) + .debug(false) // Turn off debugging to make things go faster + .cache(50))); // Set cache size lower than default value of 512 + // @formatter:on } - public Map createScriptCacheFromEdges(Map edges) { + public Map createScriptCacheFromEdges(Map edges) { - Map scriptCache = new HashMap<>(); + Map scriptCache = new HashMap<>(); for (String dataTypeKey : edges.keySet()) { List edgeList = edges.get(dataTypeKey).getEdges(); @@ -43,7 +47,7 @@ public Map createScriptCacheFromEdges(Map private HashSet createFilterKeysFromEdgeDefinitions(List edges) { long start = System.currentTimeMillis(); - JexlEngine engine = new JexlEngine(); - Script script; + JexlEngine engine = new Engine(); + JexlScript script; HashSet filterFields = new HashSet<>(); for (EdgeDefinition edgeDef : edges) { if (edgeDef.hasJexlPrecondition()) { @@ -80,7 +81,7 @@ private HashSet createFilterKeysFromEdgeDefinitions(List return filterFields; } - private HashSet extractTermsFromJexlScript(Script script) { + private HashSet extractTermsFromJexlScript(JexlScript script) { HashSet terms = new HashSet<>(); Set> scriptVariables = script.getVariables(); diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgePreconditionJexlEvaluation.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgePreconditionJexlEvaluation.java index 706b1a20385..919423788db 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgePreconditionJexlEvaluation.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/evaluation/EdgePreconditionJexlEvaluation.java @@ -2,8 +2,8 @@ import java.util.Collection; -import org.apache.commons.jexl2.JexlArithmetic; -import org.apache.commons.jexl2.Script; +import org.apache.commons.jexl3.JexlArithmetic; +import org.apache.commons.jexl3.JexlScript; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -14,7 +14,7 @@ * This class operates against a {@code Multimap} normalizedFields during ingest. 
* */ -public class EdgePreconditionJexlEvaluation implements Predicate\n"; - private static final String JQUERY_INCLUDES = - "\n"; - private static final String MAP_INCLUDES = - "\n" + - ""; - // @formatter:on - - public QueryGeometryResponse() { - this(null, null); - } - - public QueryGeometryResponse(String queryId, String basemaps) { - this.queryId = queryId; - this.basemaps = basemaps; - } - - @XmlElement(name = "queryId", nillable = true) - protected String queryId = null; - - @XmlTransient - protected String basemaps = null; - - @XmlElementWrapper(name = "features") - @XmlElement(name = "feature") - protected List result = null; - - @Override - public String getTitle() { - if (queryId != null) - return TITLE + " - " + queryId; - return TITLE; - } - - @Override - public String getHeadContent() { - String basemapData = "\n"; - String featureData = "\n"; - return String.join("\n", featureData, JQUERY_INCLUDES, LEAFLET_INCLUDES, basemapData, MAP_INCLUDES); - } - - @Override - public String getPageHeader() { - return getTitle(); - } - - @Override - public String getMainContent() { - return "
    "; - } - - private String toGeoJsonFeatures() { - if (!this.result.isEmpty()) - return "[ " + this.result.stream().map(QueryGeometry::toGeoJsonFeature).collect(Collectors.joining(", ")) + " ]"; - else - return "undefined"; - } - - public String getQueryId() { - return queryId; - } - - public void setQueryId(String queryId) { - this.queryId = queryId; - } - - public List getResult() { - return result; - } - - public void setResult(List result) { - this.result = result; - } - - public String getBasemaps() { - return basemaps; - } - - public void setBasemaps(String basemaps) { - this.basemaps = basemaps; - } -} diff --git a/web-services/client/src/main/java/datawave/webservice/query/result/event/DefaultEvent.java b/web-services/client/src/main/java/datawave/webservice/query/result/event/DefaultEvent.java index a0419689d04..65fe18277d0 100644 --- a/web-services/client/src/main/java/datawave/webservice/query/result/event/DefaultEvent.java +++ b/web-services/client/src/main/java/datawave/webservice/query/result/event/DefaultEvent.java @@ -55,6 +55,15 @@ public String toString() { return getMarkings() + ": " + (this.fields != null ? this.fields.toString() : "fields are null"); } + @Override + public Map getMarkings() { + if (markings != null) { + return markings; + } else { + return super.getMarkings(); + } + } + public void setMarkings(Map markings) { if (null != markings) { this.markings = new HashMap<>(markings); diff --git a/web-services/client/src/main/java/datawave/webservice/query/result/event/DefaultField.java b/web-services/client/src/main/java/datawave/webservice/query/result/event/DefaultField.java index 704dcc2a6f8..e059c49e7e0 100644 --- a/web-services/client/src/main/java/datawave/webservice/query/result/event/DefaultField.java +++ b/web-services/client/src/main/java/datawave/webservice/query/result/event/DefaultField.java @@ -17,6 +17,9 @@ import org.apache.commons.lang.builder.EqualsBuilder; import org.apache.commons.lang.builder.HashCodeBuilder; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; + import datawave.data.type.Type; import datawave.webservice.query.util.TypedValue; import datawave.webservice.xml.util.StringMapAdapter; @@ -81,34 +84,63 @@ public Map getMarkings() { return markings; } + public void setColumnVisibility(String columnVisibility) { + this.columnVisibility = columnVisibility; + } + + public String getColumnVisibility() { + return columnVisibility; + } + + public void setTimestamp(Long timestamp) { + this.timestamp = timestamp; + } + public Long getTimestamp() { return timestamp; } - public String getValueString() { - if (value.getValue() instanceof Type) { - return ((Type) value.getValue()).getDelegate().toString(); - } else if (value.getValue() instanceof String) { - return (String) value.getValue(); - } else { - return value.getValue().toString(); - } + public void setName(String name) { + this.name = name; + } + + public String getName() { + return name; + } + + public void setTypedValue(TypedValue value) { + this.value = value; } public TypedValue getTypedValue() { return this.value; } - public Object getValueOfTypedValue() { - return (null == value) ? 
null : value.getValue(); + @JsonIgnore + public void setValue(Object value) { + if (value instanceof TypedValue) { + this.value = (TypedValue) value; + } else { + this.value = new TypedValue(value); + } } - public void setTimestamp(Long timestamp) { - this.timestamp = timestamp; + @JsonIgnore + @XmlTransient + public Object getValueOfTypedValue() { + return (null == value) ? null : value.getValue(); } - public void setValue(Object value) { - this.value = new TypedValue(value); + @JsonIgnore + @XmlTransient + public String getValueString() { + if (value.getValue() instanceof Type) { + return ((Type) value.getValue()).getDelegate().toString(); + } else if (value.getValue() instanceof String) { + return (String) value.getValue(); + } else { + return value.getValue().toString(); + } } @Override @@ -149,14 +181,6 @@ public boolean equals(Object o) { return false; } - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - @Override public Schema cachedSchema() { return SCHEMA; @@ -264,12 +288,4 @@ public int getFieldNumber(String name) { fieldMap.put("value", 5); } }; - - public String getColumnVisibility() { - return columnVisibility; - } - - public void setColumnVisibility(String columnVisibility) { - this.columnVisibility = columnVisibility; - } } diff --git a/web-services/client/src/main/java/datawave/webservice/query/result/event/EventBase.java b/web-services/client/src/main/java/datawave/webservice/query/result/event/EventBase.java index 00bb63dcb87..5314c8f1b0a 100644 --- a/web-services/client/src/main/java/datawave/webservice/query/result/event/EventBase.java +++ b/web-services/client/src/main/java/datawave/webservice/query/result/event/EventBase.java @@ -40,6 +40,8 @@ public abstract class EventBase> implements HasMarkings /** * Get the approximate size of this event in bytes. Used by the ObjectSizeOf mechanism in the webservice. Throws an exception if the local size was not set * to allow the ObjectSizeOf mechanism to do its thang. + * + * @return the size in bytes */ public abstract long sizeInBytes(); diff --git a/web-services/client/src/main/java/datawave/webservice/query/result/event/ResponseObjectFactory.java b/web-services/client/src/main/java/datawave/webservice/query/result/event/ResponseObjectFactory.java index f70105354fb..13af445d055 100644 --- a/web-services/client/src/main/java/datawave/webservice/query/result/event/ResponseObjectFactory.java +++ b/web-services/client/src/main/java/datawave/webservice/query/result/event/ResponseObjectFactory.java @@ -1,11 +1,11 @@ package datawave.webservice.query.result.event; +import datawave.microservice.query.Query; import datawave.user.AuthorizationsListBase; import datawave.webservice.dictionary.data.DataDictionaryBase; import datawave.webservice.dictionary.data.DescriptionBase; import datawave.webservice.dictionary.data.FieldsBase; import datawave.webservice.metadata.MetadataFieldBase; -import datawave.webservice.query.Query; import datawave.webservice.query.cachedresults.CacheableQueryRow; import datawave.webservice.query.result.EdgeQueryResponseBase; import datawave.webservice.query.result.edge.EdgeBase; @@ -38,7 +38,7 @@ public abstract class ResponseObjectFactory { * provided here, then a javax.ws.rs.ext.Provider must be created which implements {@code ContextResolver}. Therein a resolver for a * LookupResponse needs to include the provided implementation within a jaxb context to ensure appropriate serialization. 
* - * @return + * @return the keybase */ public abstract KeyBase getKey(); diff --git a/web-services/client/src/main/java/datawave/webservice/query/result/logic/QueryLogicDescription.java b/web-services/client/src/main/java/datawave/webservice/query/result/logic/QueryLogicDescription.java index 938a64a9fb9..e7fb218d0a6 100644 --- a/web-services/client/src/main/java/datawave/webservice/query/result/logic/QueryLogicDescription.java +++ b/web-services/client/src/main/java/datawave/webservice/query/result/logic/QueryLogicDescription.java @@ -10,8 +10,7 @@ import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlElementWrapper; - -import com.fasterxml.jackson.annotation.JsonProperty; +import javax.xml.bind.annotation.XmlElements; @XmlAccessorType(XmlAccessType.NONE) @XmlAccessorOrder(XmlAccessOrder.ALPHABETICAL) @@ -41,16 +40,14 @@ public QueryLogicDescription() {} @XmlElement(name = "Syntax") private List querySyntax = null; - @JsonProperty("SupportedParameters") - // work-around for bug in jackson-databind @XmlElementWrapper(name = "SupportedParameters") - @XmlElement(name = "Parameter") + // work-around for bug in jackson-databind + @XmlElements(@XmlElement(name = "Parameter", type = String.class)) private List supportedParams = null; - @JsonProperty("RequiredParameters") - // work-around for bug in jackson-databind @XmlElementWrapper(name = "RequiredParameters") - @XmlElement(name = "Parameter") + // work-around for bug in jackson-databind + @XmlElements(@XmlElement(name = "Parameter", type = String.class)) private List requiredParams = null; @XmlElementWrapper(name = "ExampleQueries") @@ -136,5 +133,4 @@ public List getExampleQueries() { public void setExampleQueries(List exampleQueries) { this.exampleQueries = exampleQueries; } - } diff --git a/web-services/client/src/main/java/datawave/webservice/result/QueryImplListResponse.java b/web-services/client/src/main/java/datawave/webservice/result/QueryImplListResponse.java index 743e1bd9247..007c8bf8e93 100644 --- a/web-services/client/src/main/java/datawave/webservice/result/QueryImplListResponse.java +++ b/web-services/client/src/main/java/datawave/webservice/result/QueryImplListResponse.java @@ -4,7 +4,6 @@ import java.util.ArrayList; import java.util.LinkedList; import java.util.List; -import java.util.Objects; import javax.xml.bind.annotation.XmlAccessOrder; import javax.xml.bind.annotation.XmlAccessType; @@ -14,7 +13,7 @@ import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlTransient; -import datawave.webservice.query.Query; +import datawave.microservice.query.Query; import datawave.webservice.query.exception.QueryExceptionType; import io.protostuff.Input; import io.protostuff.Message; diff --git a/web-services/client/src/main/java/datawave/webservice/result/ResponseMessages.java b/web-services/client/src/main/java/datawave/webservice/result/ResponseMessages.java index 00c2a2ba0fe..9036b931e6c 100644 --- a/web-services/client/src/main/java/datawave/webservice/result/ResponseMessages.java +++ b/web-services/client/src/main/java/datawave/webservice/result/ResponseMessages.java @@ -13,48 +13,75 @@ public interface VoidResponseOrBuilder extends com.google.protobuf.MessageLiteOr // required uint64 operation_time_ms = 1 [default = 0]; /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return if it has operation time in ms */ boolean hasOperationTimeMs(); /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * 
@return the operation time in ms */ long getOperationTimeMs(); // repeated string messages = 2; /** * repeated string messages = 2; + * + * @return list of messages */ java.util.List getMessagesList(); /** * repeated string messages = 2; + * + * @return the number of messages in the list */ int getMessagesCount(); /** * repeated string messages = 2; + * + * @param index + * the index + * + * @return the message at the provided index */ java.lang.String getMessages(int index); /** * repeated string messages = 2; + * + * @param index + * the index + * + * @return the message byte string at the provided index */ com.google.protobuf.ByteString getMessagesBytes(int index); // repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return list of exceptions */ java.util.List getExceptionsList(); /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * + * @return the exception at the provided index */ datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType getExceptions(int index); /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return the number of exceptions */ int getExceptionsCount(); } @@ -157,6 +184,8 @@ public com.google.protobuf.Parser getParserForType() { /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return if it has an operation time */ public boolean hasOperationTimeMs() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -164,6 +193,8 @@ public boolean hasOperationTimeMs() { /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return the operation time in ms */ public long getOperationTimeMs() { return operationTimeMs_; @@ -175,6 +206,8 @@ public long getOperationTimeMs() { /** * repeated string messages = 2; + * + * @return the list of messages */ public java.util.List getMessagesList() { return messages_; @@ -182,6 +215,8 @@ public java.util.List getMessagesList() { /** * repeated string messages = 2; + * + * @return the number of messages */ public int getMessagesCount() { return messages_.size(); @@ -189,6 +224,11 @@ public int getMessagesCount() { /** * repeated string messages = 2; + * + * @param index + * the index + * + * @return the message at the index provided */ public java.lang.String getMessages(int index) { return messages_.get(index); @@ -196,6 +236,11 @@ public java.lang.String getMessages(int index) { /** * repeated string messages = 2; + * + * @param index + * the index + * + * @return message byte string at the provided index */ public com.google.protobuf.ByteString getMessagesBytes(int index) { return messages_.getByteString(index); @@ -207,6 +252,8 @@ public com.google.protobuf.ByteString getMessagesBytes(int index) { /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return list of exceptions */ public java.util.List getExceptionsList() { return exceptions_; @@ -214,6 +261,8 @@ public java.util.Listrepeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return list of exceptions */ public java.util.List getExceptionsOrBuilderList() { return exceptions_; @@ -221,6 +270,8 @@ public java.util.Listrepeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return the number of exceptions in the list */ public int getExceptionsCount() { return exceptions_.size(); @@ 
-228,6 +279,11 @@ public int getExceptionsCount() { /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * + * @return the exception at the index */ public datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType getExceptions(int index) { return exceptions_.get(index); @@ -235,6 +291,11 @@ public datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * + * @return the exception at the index */ public datawave.webservice.query.exception.ExceptionMessages.QueryExceptionTypeOrBuilder getExceptionsOrBuilder(int index) { return exceptions_.get(index); @@ -497,6 +558,8 @@ public Builder mergeFrom(com.google.protobuf.CodedInputStream input, com.google. /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return if it has operation time in ms */ public boolean hasOperationTimeMs() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -504,6 +567,8 @@ public boolean hasOperationTimeMs() { /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return operation time in ms */ public long getOperationTimeMs() { return operationTimeMs_; @@ -511,6 +576,11 @@ public long getOperationTimeMs() { /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @param value + * the new operation time in ms + * + * @return builder to set operation time in ms */ public Builder setOperationTimeMs(long value) { bitField0_ |= 0x00000001; @@ -521,6 +591,8 @@ public Builder setOperationTimeMs(long value) { /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return builder to clear operation time */ public Builder clearOperationTimeMs() { bitField0_ = (bitField0_ & ~0x00000001); @@ -541,6 +613,8 @@ private void ensureMessagesIsMutable() { /** * repeated string messages = 2; + * + * @return the message list */ public java.util.List getMessagesList() { return java.util.Collections.unmodifiableList(messages_); @@ -548,6 +622,8 @@ public java.util.List getMessagesList() { /** * repeated string messages = 2; + * + * @return the number of messages in the list */ public int getMessagesCount() { return messages_.size(); @@ -555,6 +631,11 @@ public int getMessagesCount() { /** * repeated string messages = 2; + * + * @param index + * the index + * + * @return the message at the provided index */ public java.lang.String getMessages(int index) { return messages_.get(index); @@ -562,6 +643,11 @@ public java.lang.String getMessages(int index) { /** * repeated string messages = 2; + * + * @param index + * the index + * + * @return the message byte string at the provided index */ public com.google.protobuf.ByteString getMessagesBytes(int index) { return messages_.getByteString(index); @@ -569,6 +655,13 @@ public com.google.protobuf.ByteString getMessagesBytes(int index) { /** * repeated string messages = 2; + * + * @param index + * the index + * @param value + * the new message + * + * @return builder to set the message value at the provided index */ public Builder setMessages(int index, java.lang.String value) { if (value == null) { @@ -582,6 +675,11 @@ public Builder setMessages(int index, java.lang.String value) { /** * repeated string messages = 2; + * + * @param value + * a message to add to the list + * + * @return builder to add the provided message */ public Builder addMessages(java.lang.String value) { if (value == null) { @@ -595,6 +693,11 @@ public 
Builder addMessages(java.lang.String value) { /** * repeated string messages = 2; + * + * @param values + * iterable of messages to add + * + * @return builder to add all messages provided */ public Builder addAllMessages(java.lang.Iterable values) { ensureMessagesIsMutable(); @@ -605,6 +708,8 @@ public Builder addAllMessages(java.lang.Iterable values) { /** * repeated string messages = 2; + * + * @return builder to clear the message list */ public Builder clearMessages() { messages_ = com.google.protobuf.LazyStringArrayList.EMPTY; @@ -615,6 +720,11 @@ public Builder clearMessages() { /** * repeated string messages = 2; + * + * @param value + * new message byte string + * + * @return builder to add the provided message as byte string */ public Builder addMessagesBytes(com.google.protobuf.ByteString value) { if (value == null) { @@ -638,6 +748,8 @@ private void ensureExceptionsIsMutable() { /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return list of exceptions */ public java.util.List getExceptionsList() { return java.util.Collections.unmodifiableList(exceptions_); @@ -645,6 +757,8 @@ public java.util.Listrepeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return the number of exceptions in the list */ public int getExceptionsCount() { return exceptions_.size(); @@ -652,6 +766,11 @@ public int getExceptionsCount() { /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * + * @return the exception at the provided index */ public datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType getExceptions(int index) { return exceptions_.get(index); @@ -659,6 +778,13 @@ public datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * @param value + * the new exception type + * + * @return builder to set exception at provided index to the value */ public Builder setExceptions(int index, datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType value) { if (value == null) { @@ -672,6 +798,13 @@ public Builder setExceptions(int index, datawave.webservice.query.exception.Exce /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * @param builderForValue + * query exception type builder + * + * @return builder to set the exception at the index to the provided value */ public Builder setExceptions(int index, datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType.Builder builderForValue) { ensureExceptionsIsMutable(); @@ -682,6 +815,11 @@ public Builder setExceptions(int index, datawave.webservice.query.exception.Exce /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param value + * the exception type to add + * + * @return builder to add the provided value */ public Builder addExceptions(datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType value) { if (value == null) { @@ -695,6 +833,13 @@ public Builder addExceptions(datawave.webservice.query.exception.ExceptionMessag /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * @param value + * the exception to add + * + * @return builder to add the provided exception to the provided index */ public Builder 
addExceptions(int index, datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType value) { if (value == null) { @@ -708,6 +853,11 @@ public Builder addExceptions(int index, datawave.webservice.query.exception.Exce /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param builderForValue + * the builder with new value + * + * @return builder to add provided exception */ public Builder addExceptions(datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType.Builder builderForValue) { ensureExceptionsIsMutable(); @@ -718,6 +868,13 @@ public Builder addExceptions(datawave.webservice.query.exception.ExceptionMessag /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * @param builderForValue + * the builder to add a value + * + * @return builder to add exception at provided index */ public Builder addExceptions(int index, datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType.Builder builderForValue) { ensureExceptionsIsMutable(); @@ -728,6 +885,11 @@ public Builder addExceptions(int index, datawave.webservice.query.exception.Exce /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param values + * iterable of values to add to exceptions list + * + * @return builder to add provided values */ public Builder addAllExceptions(java.lang.Iterable values) { ensureExceptionsIsMutable(); @@ -738,6 +900,8 @@ public Builder addAllExceptions(java.lang.Iterablerepeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return builder to clear exceptions */ public Builder clearExceptions() { exceptions_ = java.util.Collections.emptyList(); @@ -748,6 +912,11 @@ public Builder clearExceptions() { /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * + * @return builder to remove exceptions at the provided index */ public Builder removeExceptions(int index) { ensureExceptionsIsMutable(); @@ -772,75 +941,115 @@ public interface QueryImplListResponseOrBuilder extends com.google.protobuf.Mess // required uint64 operation_time_ms = 1 [default = 0]; /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return if it has operation time */ boolean hasOperationTimeMs(); /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return the operation time in ms */ long getOperationTimeMs(); // repeated string messages = 2; /** * repeated string messages = 2; + * + * @return the messages list */ java.util.List getMessagesList(); /** * repeated string messages = 2; + * + * @return the number of messages in the list */ int getMessagesCount(); /** * repeated string messages = 2; + * + * @param index + * the index + * + * @return the message at the provided index */ java.lang.String getMessages(int index); /** * repeated string messages = 2; + * + * @param index + * the index + * + * @return the message byte string at the provided index */ com.google.protobuf.ByteString getMessagesBytes(int index); // repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return list of exceptions */ java.util.List getExceptionsList(); /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * + * @return the exception at the 
provided index */ datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType getExceptions(int index); /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return the number of exceptions in the list */ int getExceptionsCount(); // repeated .datawave.webservice.query.QueryImpl query = 4; /** * repeated .datawave.webservice.query.QueryImpl query = 4; + * + * @return the query list */ java.util.List getQueryList(); /** * repeated .datawave.webservice.query.QueryImpl query = 4; + * + * @param index + * the index + * + * @return the query message at the provided index */ datawave.webservice.query.QueryMessages.QueryImpl getQuery(int index); /** * repeated .datawave.webservice.query.QueryImpl query = 4; + * + * @return the number of queries in the list */ int getQueryCount(); // optional uint32 numResults = 5 [default = 0]; /** * optional uint32 numResults = 5 [default = 0]; + * + * @return if it has a number of results */ boolean hasNumResults(); /** * optional uint32 numResults = 5 [default = 0]; + * + * @return the number of results */ int getNumResults(); } @@ -959,6 +1168,8 @@ public com.google.protobuf.Parser getParserForType() { /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return if it has operation time */ public boolean hasOperationTimeMs() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -966,6 +1177,8 @@ public boolean hasOperationTimeMs() { /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return the operation time in ms */ public long getOperationTimeMs() { return operationTimeMs_; @@ -977,6 +1190,8 @@ public long getOperationTimeMs() { /** * repeated string messages = 2; + * + * @return the messages list */ public java.util.List getMessagesList() { return messages_; @@ -984,6 +1199,8 @@ public java.util.List getMessagesList() { /** * repeated string messages = 2; + * + * @return the number of messages in the list */ public int getMessagesCount() { return messages_.size(); @@ -991,6 +1208,11 @@ public int getMessagesCount() { /** * repeated string messages = 2; + * + * @param index + * the index + * + * @return the message at the provided index */ public java.lang.String getMessages(int index) { return messages_.get(index); @@ -998,6 +1220,11 @@ public java.lang.String getMessages(int index) { /** * repeated string messages = 2; + * + * @param index + * the index + * + * @return the message byte string at the provided index */ public com.google.protobuf.ByteString getMessagesBytes(int index) { return messages_.getByteString(index); @@ -1009,6 +1236,8 @@ public com.google.protobuf.ByteString getMessagesBytes(int index) { /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return the exception list */ public java.util.List getExceptionsList() { return exceptions_; @@ -1016,6 +1245,8 @@ public java.util.Listrepeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return the exception list */ public java.util.List getExceptionsOrBuilderList() { return exceptions_; @@ -1023,6 +1254,8 @@ public java.util.Listrepeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return the number of exceptions in the list */ public int getExceptionsCount() { return exceptions_.size(); @@ -1030,6 +1263,11 @@ public int getExceptionsCount() { /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * + * @return the exception at 
the provided index */ public datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType getExceptions(int index) { return exceptions_.get(index); @@ -1037,6 +1275,11 @@ public datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * + * @return exception at the provided index */ public datawave.webservice.query.exception.ExceptionMessages.QueryExceptionTypeOrBuilder getExceptionsOrBuilder(int index) { return exceptions_.get(index); @@ -1048,6 +1291,8 @@ public datawave.webservice.query.exception.ExceptionMessages.QueryExceptionTypeO /** * repeated .datawave.webservice.query.QueryImpl query = 4; + * + * @return the query list */ public java.util.List getQueryList() { return query_; @@ -1055,6 +1300,8 @@ public java.util.List getQuer /** * repeated .datawave.webservice.query.QueryImpl query = 4; + * + * @return the query list */ public java.util.List getQueryOrBuilderList() { return query_; @@ -1062,6 +1309,8 @@ public java.util.Listrepeated .datawave.webservice.query.QueryImpl query = 4; + * + * @return the number of queries in the list */ public int getQueryCount() { return query_.size(); @@ -1069,6 +1318,11 @@ public int getQueryCount() { /** * repeated .datawave.webservice.query.QueryImpl query = 4; + * + * @param index + * the index + * + * @return the query at the provided index */ public datawave.webservice.query.QueryMessages.QueryImpl getQuery(int index) { return query_.get(index); @@ -1076,6 +1330,11 @@ public datawave.webservice.query.QueryMessages.QueryImpl getQuery(int index) { /** * repeated .datawave.webservice.query.QueryImpl query = 4; + * + * @param index + * the index + * + * @return the query at the provided index */ public datawave.webservice.query.QueryMessages.QueryImplOrBuilder getQueryOrBuilder(int index) { return query_.get(index); @@ -1087,6 +1346,8 @@ public datawave.webservice.query.QueryMessages.QueryImplOrBuilder getQueryOrBuil /** * optional uint32 numResults = 5 [default = 0]; + * + * @return if there are a number of results */ public boolean hasNumResults() { return ((bitField0_ & 0x00000002) == 0x00000002); @@ -1094,6 +1355,8 @@ public boolean hasNumResults() { /** * optional uint32 numResults = 5 [default = 0]; + * + * @return the number of results */ public int getNumResults() { return numResults_; @@ -1410,6 +1673,8 @@ public Builder mergeFrom(com.google.protobuf.CodedInputStream input, com.google. 
/** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return if it has operation time */ public boolean hasOperationTimeMs() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -1417,6 +1682,8 @@ public boolean hasOperationTimeMs() { /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return the operation time in ms */ public long getOperationTimeMs() { return operationTimeMs_; @@ -1424,6 +1691,11 @@ public long getOperationTimeMs() { /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @param value + * new operation time in ms + * + * @return builder to set operation time to provided value */ public Builder setOperationTimeMs(long value) { bitField0_ |= 0x00000001; @@ -1434,6 +1706,8 @@ public Builder setOperationTimeMs(long value) { /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return builder to clear operation time */ public Builder clearOperationTimeMs() { bitField0_ = (bitField0_ & ~0x00000001); @@ -1454,6 +1728,8 @@ private void ensureMessagesIsMutable() { /** * repeated string messages = 2; + * + * @return the messages list */ public java.util.List getMessagesList() { return java.util.Collections.unmodifiableList(messages_); @@ -1461,6 +1737,8 @@ public java.util.List getMessagesList() { /** * repeated string messages = 2; + * + * @return the number of messages in the list */ public int getMessagesCount() { return messages_.size(); @@ -1468,6 +1746,11 @@ public int getMessagesCount() { /** * repeated string messages = 2; + * + * @param index + * the index + * + * @return the message at the provided index */ public java.lang.String getMessages(int index) { return messages_.get(index); @@ -1475,6 +1758,11 @@ public java.lang.String getMessages(int index) { /** * repeated string messages = 2; + * + * @param index + * the index + * + * @return the message byte string at the provided index */ public com.google.protobuf.ByteString getMessagesBytes(int index) { return messages_.getByteString(index); @@ -1482,6 +1770,13 @@ public com.google.protobuf.ByteString getMessagesBytes(int index) { /** * repeated string messages = 2; + * + * @param index + * the index + * @param value + * the new message + * + * @return builder to set the message at the provided index to the provided value */ public Builder setMessages(int index, java.lang.String value) { if (value == null) { @@ -1495,6 +1790,11 @@ public Builder setMessages(int index, java.lang.String value) { /** * repeated string messages = 2; + * + * @param value + * the new message + * + * @return builder to add the message */ public Builder addMessages(java.lang.String value) { if (value == null) { @@ -1508,6 +1808,11 @@ public Builder addMessages(java.lang.String value) { /** * repeated string messages = 2; + * + * @param values + * iterable of messages to add to the list + * + * @return builder to add all messages */ public Builder addAllMessages(java.lang.Iterable values) { ensureMessagesIsMutable(); @@ -1518,6 +1823,8 @@ public Builder addAllMessages(java.lang.Iterable values) { /** * repeated string messages = 2; + * + * @return builder to clear messages */ public Builder clearMessages() { messages_ = com.google.protobuf.LazyStringArrayList.EMPTY; @@ -1528,6 +1835,11 @@ public Builder clearMessages() { /** * repeated string messages = 2; + * + * @param value + * message byte string + * + * @return builder to add message byte string */ public Builder addMessagesBytes(com.google.protobuf.ByteString value) { if (value == null) { @@ -1551,6 +1863,8 @@ private void 
ensureExceptionsIsMutable() { /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return list of exceptions */ public java.util.List getExceptionsList() { return java.util.Collections.unmodifiableList(exceptions_); @@ -1558,6 +1872,8 @@ public java.util.Listrepeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return the size of the exceptions list */ public int getExceptionsCount() { return exceptions_.size(); @@ -1565,6 +1881,11 @@ public int getExceptionsCount() { /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * + * @return the exception at the provided index */ public datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType getExceptions(int index) { return exceptions_.get(index); @@ -1572,6 +1893,13 @@ public datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * @param value + * the exception to add + * + * @return builder to set the exception at the provided index */ public Builder setExceptions(int index, datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType value) { if (value == null) { @@ -1585,6 +1913,13 @@ public Builder setExceptions(int index, datawave.webservice.query.exception.Exce /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * @param builderForValue + * builder with the value for the exception to add + * + * @return builder to set exception to provided builder */ public Builder setExceptions(int index, datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType.Builder builderForValue) { ensureExceptionsIsMutable(); @@ -1595,6 +1930,11 @@ public Builder setExceptions(int index, datawave.webservice.query.exception.Exce /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param value + * exception to add + * + * @return builder to add exceptions provided */ public Builder addExceptions(datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType value) { if (value == null) { @@ -1608,6 +1948,13 @@ public Builder addExceptions(datawave.webservice.query.exception.ExceptionMessag /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * @param value + * the exception to add + * + * @return builder to add exception to provided index */ public Builder addExceptions(int index, datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType value) { if (value == null) { @@ -1621,6 +1968,11 @@ public Builder addExceptions(int index, datawave.webservice.query.exception.Exce /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param builderForValue + * builder for the exception + * + * @return builder to add exception provided */ public Builder addExceptions(datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType.Builder builderForValue) { ensureExceptionsIsMutable(); @@ -1631,6 +1983,13 @@ public Builder addExceptions(datawave.webservice.query.exception.ExceptionMessag /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * @param builderForValue + * builder to create exception + * + * @return builder to 
add exception from builder to provided index */ public Builder addExceptions(int index, datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType.Builder builderForValue) { ensureExceptionsIsMutable(); @@ -1641,6 +2000,11 @@ public Builder addExceptions(int index, datawave.webservice.query.exception.Exce /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param values + * iterable of exceptions to add + * + * @return builder to add provided exceptions */ public Builder addAllExceptions(java.lang.Iterable values) { ensureExceptionsIsMutable(); @@ -1651,6 +2015,8 @@ public Builder addAllExceptions(java.lang.Iterablerepeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return builder to clear exceptions */ public Builder clearExceptions() { exceptions_ = java.util.Collections.emptyList(); @@ -1661,6 +2027,11 @@ public Builder clearExceptions() { /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * + * @return builder to remove the exception at the provided index */ public Builder removeExceptions(int index) { ensureExceptionsIsMutable(); @@ -1681,6 +2052,8 @@ private void ensureQueryIsMutable() { /** * repeated .datawave.webservice.query.QueryImpl query = 4; + * + * @return the query list */ public java.util.List getQueryList() { return java.util.Collections.unmodifiableList(query_); @@ -1688,6 +2061,8 @@ public java.util.List getQuer /** * repeated .datawave.webservice.query.QueryImpl query = 4; + * + * @return the number of queries in the list */ public int getQueryCount() { return query_.size(); @@ -1695,6 +2070,11 @@ public int getQueryCount() { /** * repeated .datawave.webservice.query.QueryImpl query = 4; + * + * @param index + * the index + * + * @return the query at the provided index */ public datawave.webservice.query.QueryMessages.QueryImpl getQuery(int index) { return query_.get(index); @@ -1702,6 +2082,13 @@ public datawave.webservice.query.QueryMessages.QueryImpl getQuery(int index) { /** * repeated .datawave.webservice.query.QueryImpl query = 4; + * + * @param index + * the index + * @param value + * the query to set + * + * @return builder to set the query at the provided index to the provided value */ public Builder setQuery(int index, datawave.webservice.query.QueryMessages.QueryImpl value) { if (value == null) { @@ -1715,6 +2102,13 @@ public Builder setQuery(int index, datawave.webservice.query.QueryMessages.Query /** * repeated .datawave.webservice.query.QueryImpl query = 4; + * + * @param index + * the index + * @param builderForValue + * the builder for the value + * + * @return builder to set query at the provided index */ public Builder setQuery(int index, datawave.webservice.query.QueryMessages.QueryImpl.Builder builderForValue) { ensureQueryIsMutable(); @@ -1725,6 +2119,11 @@ public Builder setQuery(int index, datawave.webservice.query.QueryMessages.Query /** * repeated .datawave.webservice.query.QueryImpl query = 4; + * + * @param value + * the query to add + * + * @return builder to add the query value */ public Builder addQuery(datawave.webservice.query.QueryMessages.QueryImpl value) { if (value == null) { @@ -1738,6 +2137,13 @@ public Builder addQuery(datawave.webservice.query.QueryMessages.QueryImpl value) /** * repeated .datawave.webservice.query.QueryImpl query = 4; + * + * @param index + * the index + * @param value + * the query to add + * + * @return builder to add query to provided
index */ public Builder addQuery(int index, datawave.webservice.query.QueryMessages.QueryImpl value) { if (value == null) { @@ -1751,6 +2157,11 @@ public Builder addQuery(int index, datawave.webservice.query.QueryMessages.Query /** * repeated .datawave.webservice.query.QueryImpl query = 4; + * + * @param builderForValue + * builder with the query to add + * + * @return builder to add query */ public Builder addQuery(datawave.webservice.query.QueryMessages.QueryImpl.Builder builderForValue) { ensureQueryIsMutable(); @@ -1761,6 +2172,13 @@ public Builder addQuery(datawave.webservice.query.QueryMessages.QueryImpl.Builde /** * repeated .datawave.webservice.query.QueryImpl query = 4; + * + * @param index + * the index + * @param builderForValue + * query to add + * + * @return builder to add query to provided index */ public Builder addQuery(int index, datawave.webservice.query.QueryMessages.QueryImpl.Builder builderForValue) { ensureQueryIsMutable(); @@ -1771,6 +2189,11 @@ public Builder addQuery(int index, datawave.webservice.query.QueryMessages.Query /** * repeated .datawave.webservice.query.QueryImpl query = 4; + * + * @param values + * iterable of queries to add + * + * @return builder to add all provided queries */ public Builder addAllQuery(java.lang.Iterable values) { ensureQueryIsMutable(); @@ -1781,6 +2204,8 @@ public Builder addAllQuery(java.lang.Iterablerepeated .datawave.webservice.query.QueryImpl query = 4; + * + * @return builder to clear the query list */ public Builder clearQuery() { query_ = java.util.Collections.emptyList(); @@ -1791,6 +2216,11 @@ public Builder clearQuery() { /** * repeated .datawave.webservice.query.QueryImpl query = 4; + * + * @param index + * the index + * + * @return builder to remove the query at the provided index */ public Builder removeQuery(int index) { ensureQueryIsMutable(); @@ -1804,6 +2234,8 @@ public Builder removeQuery(int index) { /** * optional uint32 numResults = 5 [default = 0]; + * + * @return if it has a number of results */ public boolean hasNumResults() { return ((bitField0_ & 0x00000010) == 0x00000010); @@ -1811,6 +2243,8 @@ public boolean hasNumResults() { /** * optional uint32 numResults = 5 [default = 0]; + * + * @return the number of results */ public int getNumResults() { return numResults_; @@ -1818,6 +2252,11 @@ public int getNumResults() { /** * optional uint32 numResults = 5 [default = 0]; + * + * @param value + * new number of results + * + * @return builder to set the number of results */ public Builder setNumResults(int value) { bitField0_ |= 0x00000010; @@ -1828,6 +2267,8 @@ public Builder setNumResults(int value) { /** * optional uint32 numResults = 5 [default = 0]; + * + * @return builder to clear the number of results */ public Builder clearNumResults() { bitField0_ = (bitField0_ & ~0x00000010); @@ -1852,157 +2293,225 @@ public interface GenericResponseOrBuilder extends com.google.protobuf.MessageLit // required uint64 operation_time_ms = 1 [default = 0]; /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return if it has operation time */ boolean hasOperationTimeMs(); /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return the operation time in ms */ long getOperationTimeMs(); // repeated string messages = 2; + /** * repeated string messages = 2; + * + * @return list of messages */ java.util.List getMessagesList(); /** * repeated string messages = 2; + * + * @return the number of messages */ int getMessagesCount(); /** * repeated string messages = 2; + * + * @param index + * the 
index + * + * @return the message at the provided index */ java.lang.String getMessages(int index); /** * repeated string messages = 2; + * + * @param index + * the index + * + * @return the byte string for the message at the provided index */ com.google.protobuf.ByteString getMessagesBytes(int index); // repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return the list of exceptions */ java.util.List getExceptionsList(); /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * + * @return the exception at the provided index */ datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType getExceptions(int index); /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return the number of exceptions */ int getExceptionsCount(); // required string result_class_name = 4; /** * required string result_class_name = 4; + * + * @return if it has a result class name */ boolean hasResultClassName(); /** * required string result_class_name = 4; + * + * @return the result class name */ java.lang.String getResultClassName(); /** * required string result_class_name = 4; + * + * @return the result class name byte string */ com.google.protobuf.ByteString getResultClassNameBytes(); // optional string result_as_string = 5; /** * optional string result_as_string = 5; + * + * @return if it has result as a string */ boolean hasResultAsString(); /** * optional string result_as_string = 5; + * + * @return the results as a string */ java.lang.String getResultAsString(); /** * optional string result_as_string = 5; + * + * @return the result as a byte string */ com.google.protobuf.ByteString getResultAsStringBytes(); // optional bool result_as_boolean = 6; /** * optional bool result_as_boolean = 6; + * + * @return if it has the result as boolean */ boolean hasResultAsBoolean(); /** * optional bool result_as_boolean = 6; + * + * @return the result as a boolean */ boolean getResultAsBoolean(); // optional sint32 result_as_int = 7; /** * optional sint32 result_as_int = 7; + * + * @return if it has result as integer */ boolean hasResultAsInt(); /** * optional sint32 result_as_int = 7; + * + * @return the result as an integer */ int getResultAsInt(); // optional sint64 result_as_long = 8; /** * optional sint64 result_as_long = 8; + * + * @return if it has result as a long */ boolean hasResultAsLong(); /** * optional sint64 result_as_long = 8; + * + * @return the result as a long */ long getResultAsLong(); // optional float result_as_float = 9; /** * optional float result_as_float = 9; + * + * @return if it has the result as a float */ boolean hasResultAsFloat(); /** * optional float result_as_float = 9; + * + * @return the result as a float */ float getResultAsFloat(); // optional double result_as_double = 10; /** * optional double result_as_double = 10; + * + * @return if it has the result as a double */ boolean hasResultAsDouble(); /** * optional double result_as_double = 10; + * + * @return the result as a double */ double getResultAsDouble(); // optional bytes result_as_bytes = 11; /** * optional bytes result_as_bytes = 11; + * + * @return if it has result as bytes */ boolean hasResultAsBytes(); /** * optional bytes result_as_bytes = 11; + * + * @return the result as bytes */ com.google.protobuf.ByteString getResultAsBytes(); // optional 
.datawave.webservice.results.cached.result.Description result_as_description = 12; /** * optional .datawave.webservice.results.cached.result.Description result_as_description = 12; + * + * @return if the result is a description */ boolean hasResultAsDescription(); /** * optional .datawave.webservice.results.cached.result.Description result_as_description = 12; + * + * @return the result as a description */ datawave.webservice.results.cached.result.CachedresultMessages.Description getResultAsDescription(); } @@ -2159,6 +2668,8 @@ public com.google.protobuf.Parser getParserForType() { /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return if it has an operation time */ public boolean hasOperationTimeMs() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -2166,6 +2677,8 @@ public boolean hasOperationTimeMs() { /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return the operation time in ms */ public long getOperationTimeMs() { return operationTimeMs_; @@ -2177,6 +2690,8 @@ public long getOperationTimeMs() { /** * repeated string messages = 2; + * + * @return the message list */ public java.util.List getMessagesList() { return messages_; @@ -2184,6 +2699,8 @@ public java.util.List getMessagesList() { /** * repeated string messages = 2; + * + * @return the number of messages in the list */ public int getMessagesCount() { return messages_.size(); @@ -2191,6 +2708,11 @@ public int getMessagesCount() { /** * repeated string messages = 2; + * + * @param index + * the index + * + * @return the message at the provided index */ public java.lang.String getMessages(int index) { return messages_.get(index); @@ -2198,6 +2720,11 @@ public java.lang.String getMessages(int index) { /** * repeated string messages = 2; + * + * @param index + * the index + * + * @return message as byte string at the provided index */ public com.google.protobuf.ByteString getMessagesBytes(int index) { return messages_.getByteString(index); @@ -2209,6 +2736,8 @@ public com.google.protobuf.ByteString getMessagesBytes(int index) { /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return the exceptions list */ public java.util.List getExceptionsList() { return exceptions_; @@ -2216,6 +2745,8 @@ public java.util.Listrepeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return the exceptions list */ public java.util.List getExceptionsOrBuilderList() { return exceptions_; @@ -2223,6 +2754,8 @@ public java.util.Listrepeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return the number of exceptions in the list */ public int getExceptionsCount() { return exceptions_.size(); @@ -2230,6 +2763,11 @@ public int getExceptionsCount() { /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * + * @return the exception at the provided index */ public datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType getExceptions(int index) { return exceptions_.get(index); @@ -2237,6 +2775,11 @@ public datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * + * @return the exception at the index provided */ public datawave.webservice.query.exception.ExceptionMessages.QueryExceptionTypeOrBuilder getExceptionsOrBuilder(int index) { return exceptions_.get(index); @@ -2248,6
+2791,8 @@ public datawave.webservice.query.exception.ExceptionMessages.QueryExceptionTypeO /** * required string result_class_name = 4; + * + * @return if it has a result class name */ public boolean hasResultClassName() { return ((bitField0_ & 0x00000002) == 0x00000002); @@ -2255,6 +2800,8 @@ public boolean hasResultClassName() { /** * required string result_class_name = 4; + * + * @return the result class name */ public java.lang.String getResultClassName() { java.lang.Object ref = resultClassName_; @@ -2272,6 +2819,8 @@ public java.lang.String getResultClassName() { /** * required string result_class_name = 4; + * + * @return the result class name byte string */ public com.google.protobuf.ByteString getResultClassNameBytes() { java.lang.Object ref = resultClassName_; @@ -2290,6 +2839,8 @@ public com.google.protobuf.ByteString getResultClassNameBytes() { /** * optional string result_as_string = 5; + * + * @return if it has results as a string */ public boolean hasResultAsString() { return ((bitField0_ & 0x00000004) == 0x00000004); @@ -2297,6 +2848,8 @@ public boolean hasResultAsString() { /** * optional string result_as_string = 5; + * + * @return the results as a string */ public java.lang.String getResultAsString() { java.lang.Object ref = resultAsString_; @@ -2314,6 +2867,8 @@ public java.lang.String getResultAsString() { /** * optional string result_as_string = 5; + * + * @return the result as a byte string */ public com.google.protobuf.ByteString getResultAsStringBytes() { java.lang.Object ref = resultAsString_; @@ -2332,6 +2887,8 @@ public com.google.protobuf.ByteString getResultAsStringBytes() { /** * optional bool result_as_boolean = 6; + * + * @return if it has result as a boolean */ public boolean hasResultAsBoolean() { return ((bitField0_ & 0x00000008) == 0x00000008); @@ -2339,6 +2896,8 @@ public boolean hasResultAsBoolean() { /** * optional bool result_as_boolean = 6; + * + * @return the result as a boolean */ public boolean getResultAsBoolean() { return resultAsBoolean_; @@ -2350,6 +2909,8 @@ public boolean getResultAsBoolean() { /** * optional sint32 result_as_int = 7; + * + * @return if it has result as an integer */ public boolean hasResultAsInt() { return ((bitField0_ & 0x00000010) == 0x00000010); @@ -2357,6 +2918,8 @@ public boolean hasResultAsInt() { /** * optional sint32 result_as_int = 7; + * + * @return result as an integer */ public int getResultAsInt() { return resultAsInt_; @@ -2368,6 +2931,8 @@ public int getResultAsInt() { /** * optional sint64 result_as_long = 8; + * + * @return if it has result as a long */ public boolean hasResultAsLong() { return ((bitField0_ & 0x00000020) == 0x00000020); @@ -2375,6 +2940,8 @@ public boolean hasResultAsLong() { /** * optional sint64 result_as_long = 8; + * + * @return the result as a long */ public long getResultAsLong() { return resultAsLong_; @@ -2386,6 +2953,8 @@ public long getResultAsLong() { /** * optional float result_as_float = 9; + * + * @return if it has result as a float */ public boolean hasResultAsFloat() { return ((bitField0_ & 0x00000040) == 0x00000040); @@ -2393,6 +2962,8 @@ public boolean hasResultAsFloat() { /** * optional float result_as_float = 9; + * + * @return the result as a float */ public float getResultAsFloat() { return resultAsFloat_; @@ -2404,6 +2975,8 @@ public float getResultAsFloat() { /** * optional double result_as_double = 10; + * + * @return if it has a result as a double */ public boolean hasResultAsDouble() { return ((bitField0_ & 0x00000080) == 0x00000080); @@ -2411,6 +2984,8 @@ 
public boolean hasResultAsDouble() { /** * optional double result_as_double = 10; + * + * @return the result as a double */ public double getResultAsDouble() { return resultAsDouble_; @@ -2422,6 +2997,8 @@ public double getResultAsDouble() { /** * optional bytes result_as_bytes = 11; + * + * @return if it has result as a byte string */ public boolean hasResultAsBytes() { return ((bitField0_ & 0x00000100) == 0x00000100); @@ -2429,6 +3006,8 @@ public boolean hasResultAsBytes() { /** * optional bytes result_as_bytes = 11; + * + * @return the result as a byte string */ public com.google.protobuf.ByteString getResultAsBytes() { return resultAsBytes_; @@ -2440,6 +3019,8 @@ public com.google.protobuf.ByteString getResultAsBytes() { /** * optional .datawave.webservice.results.cached.result.Description result_as_description = 12; + * + * @return if it has the result as a description */ public boolean hasResultAsDescription() { return ((bitField0_ & 0x00000200) == 0x00000200); @@ -2447,6 +3028,8 @@ public boolean hasResultAsDescription() { /** * optional .datawave.webservice.results.cached.result.Description result_as_description = 12; + * + * @return the result as a description */ public datawave.webservice.results.cached.result.CachedresultMessages.Description getResultAsDescription() { return resultAsDescription_; @@ -2877,6 +3460,8 @@ public Builder mergeFrom(com.google.protobuf.CodedInputStream input, com.google. /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return if it has operation time */ public boolean hasOperationTimeMs() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -2884,6 +3469,8 @@ public boolean hasOperationTimeMs() { /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return the operation time in ms */ public long getOperationTimeMs() { return operationTimeMs_; @@ -2891,6 +3478,11 @@ public long getOperationTimeMs() { /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @param value + * the new operation time in ms + * + * @return builder to set operation time in ms */ public Builder setOperationTimeMs(long value) { bitField0_ |= 0x00000001; @@ -2901,6 +3493,8 @@ public Builder setOperationTimeMs(long value) { /** * required uint64 operation_time_ms = 1 [default = 0]; + * + * @return builder to clear operation time */ public Builder clearOperationTimeMs() { bitField0_ = (bitField0_ & ~0x00000001); @@ -2921,6 +3515,8 @@ private void ensureMessagesIsMutable() { /** * repeated string messages = 2; + * + * @return the message list */ public java.util.List getMessagesList() { return java.util.Collections.unmodifiableList(messages_); @@ -2928,6 +3524,8 @@ public java.util.List getMessagesList() { /** * repeated string messages = 2; + * + * @return the messages count */ public int getMessagesCount() { return messages_.size(); @@ -2935,6 +3533,11 @@ public int getMessagesCount() { /** * repeated string messages = 2; + * + * @param index + * the index + * + * @return the message at the provided index */ public java.lang.String getMessages(int index) { return messages_.get(index); @@ -2942,6 +3545,11 @@ public java.lang.String getMessages(int index) { /** * repeated string messages = 2; + * + * @param index + * the index + * + * @return the message byte string for the provided index */ public com.google.protobuf.ByteString getMessagesBytes(int index) { return messages_.getByteString(index); @@ -2949,6 +3557,13 @@ public com.google.protobuf.ByteString getMessagesBytes(int index) { /** * repeated string messages = 2; + * + * @param
index + * the index + * @param value + * the message to add + * + * @return builder to set the message at the provided index */ public Builder setMessages(int index, java.lang.String value) { if (value == null) { @@ -2962,6 +3577,11 @@ public Builder setMessages(int index, java.lang.String value) { /** * repeated string messages = 2; + * + * @param value + * the new message + * + * @return builder to add the provided value */ public Builder addMessages(java.lang.String value) { if (value == null) { @@ -2975,6 +3595,11 @@ public Builder addMessages(java.lang.String value) { /** * repeated string messages = 2; + * + * @param values + * iterable of messages to add + * + * @return builder to add the values provided */ public Builder addAllMessages(java.lang.Iterable values) { ensureMessagesIsMutable(); @@ -2985,6 +3610,8 @@ public Builder addAllMessages(java.lang.Iterable values) { /** * repeated string messages = 2; + * + * @return builder to clear messages */ public Builder clearMessages() { messages_ = com.google.protobuf.LazyStringArrayList.EMPTY; @@ -2995,6 +3622,11 @@ public Builder clearMessages() { /** * repeated string messages = 2; + * + * @param value + * message byte string + * + * @return builder to add message byte string */ public Builder addMessagesBytes(com.google.protobuf.ByteString value) { if (value == null) { @@ -3018,6 +3650,8 @@ private void ensureExceptionsIsMutable() { /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return the list of exceptions */ public java.util.List getExceptionsList() { return java.util.Collections.unmodifiableList(exceptions_); @@ -3025,6 +3659,8 @@ public java.util.Listrepeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return the number of exceptions */ public int getExceptionsCount() { return exceptions_.size(); @@ -3032,6 +3668,11 @@ public int getExceptionsCount() { /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * + * @return the exception at the provided index */ public datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType getExceptions(int index) { return exceptions_.get(index); @@ -3039,6 +3680,13 @@ public datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * @param value + * the new exception to add + * + * @return builder to set exception at index to provided value */ public Builder setExceptions(int index, datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType value) { if (value == null) { @@ -3052,6 +3700,13 @@ public Builder setExceptions(int index, datawave.webservice.query.exception.Exce /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * @param builderForValue + * builder for the value + * + * @return builder to set exception at provided index */ public Builder setExceptions(int index, datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType.Builder builderForValue) { ensureExceptionsIsMutable(); @@ -3062,6 +3717,11 @@ public Builder setExceptions(int index, datawave.webservice.query.exception.Exce /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param value + * the exception to add + * + * @return builder to add exception */ public Builder 
addExceptions(datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType value) { if (value == null) { @@ -3075,6 +3735,13 @@ public Builder addExceptions(datawave.webservice.query.exception.ExceptionMessag /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * @param value + * the exception to add + * + * @return builder to add exception to the list */ public Builder addExceptions(int index, datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType value) { if (value == null) { @@ -3088,6 +3755,11 @@ public Builder addExceptions(int index, datawave.webservice.query.exception.Exce /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param builderForValue + * builder to create exception to add + * + * @return builder to add exception */ public Builder addExceptions(datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType.Builder builderForValue) { ensureExceptionsIsMutable(); @@ -3098,6 +3770,13 @@ public Builder addExceptions(datawave.webservice.query.exception.ExceptionMessag /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * @param builderForValue + * builder to create exception + * + * @return builder to add exception at provided index */ public Builder addExceptions(int index, datawave.webservice.query.exception.ExceptionMessages.QueryExceptionType.Builder builderForValue) { ensureExceptionsIsMutable(); @@ -3108,6 +3787,11 @@ public Builder addExceptions(int index, datawave.webservice.query.exception.Exce /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param values + * iterable of exceptions to add + * + * @return builder to add exceptions */ public Builder addAllExceptions(java.lang.Iterable values) { ensureExceptionsIsMutable(); @@ -3118,6 +3802,8 @@ public Builder addAllExceptions(java.lang.Iterablerepeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @return builder to clear exceptions */ public Builder clearExceptions() { exceptions_ = java.util.Collections.emptyList(); @@ -3128,6 +3814,11 @@ public Builder clearExceptions() { /** * repeated .datawave.webservice.query.exception.QueryExceptionType exceptions = 3; + * + * @param index + * the index + * + * @return builder to remove exception at given index */ public Builder removeExceptions(int index) { ensureExceptionsIsMutable(); @@ -3141,6 +3832,8 @@ public Builder removeExceptions(int index) { /** * required string result_class_name = 4; + * + * @return if it has result class name */ public boolean hasResultClassName() { return ((bitField0_ & 0x00000008) == 0x00000008); @@ -3148,6 +3841,8 @@ public boolean hasResultClassName() { /** * required string result_class_name = 4; + * + * @return the result class name */ public java.lang.String getResultClassName() { java.lang.Object ref = resultClassName_; @@ -3162,6 +3857,8 @@ public java.lang.String getResultClassName() { /** * required string result_class_name = 4; + * + * @return result class name byte string */ public com.google.protobuf.ByteString getResultClassNameBytes() { java.lang.Object ref = resultClassName_; @@ -3176,6 +3873,11 @@ public com.google.protobuf.ByteString getResultClassNameBytes() { /** * required string result_class_name = 4; + * + * @param value + * the new result class name + * + * @return builder to set result class name */ public Builder 
setResultClassName(java.lang.String value) { if (value == null) { @@ -3189,6 +3891,8 @@ public Builder setResultClassName(java.lang.String value) { /** * required string result_class_name = 4; + * + * @return builder to clear the result class name */ public Builder clearResultClassName() { bitField0_ = (bitField0_ & ~0x00000008); @@ -3199,6 +3903,11 @@ public Builder clearResultClassName() { /** * required string result_class_name = 4; + * + * @param value + * result class name byte string + * + * @return builder to set result class name */ public Builder setResultClassNameBytes(com.google.protobuf.ByteString value) { if (value == null) { @@ -3215,6 +3924,8 @@ public Builder setResultClassNameBytes(com.google.protobuf.ByteString value) { /** * optional string result_as_string = 5; + * + * @return if it has result as a string */ public boolean hasResultAsString() { return ((bitField0_ & 0x00000010) == 0x00000010); @@ -3222,6 +3933,8 @@ public boolean hasResultAsString() { /** * optional string result_as_string = 5; + * + * @return the result as a string */ public java.lang.String getResultAsString() { java.lang.Object ref = resultAsString_; @@ -3236,6 +3949,8 @@ public java.lang.String getResultAsString() { /** * optional string result_as_string = 5; + * + * @return the result as a byte string */ public com.google.protobuf.ByteString getResultAsStringBytes() { java.lang.Object ref = resultAsString_; @@ -3250,6 +3965,11 @@ public com.google.protobuf.ByteString getResultAsStringBytes() { /** * optional string result_as_string = 5; + * + * @param value + * new result string + * + * @return builder to set result string */ public Builder setResultAsString(java.lang.String value) { if (value == null) { @@ -3263,6 +3983,8 @@ public Builder setResultAsString(java.lang.String value) { /** * optional string result_as_string = 5; + * + * @return builder to clear string result */ public Builder clearResultAsString() { bitField0_ = (bitField0_ & ~0x00000010); @@ -3273,6 +3995,11 @@ public Builder clearResultAsString() { /** * optional string result_as_string = 5; + * + * @param value + * byte string + * + * @return builder to set result to byte string */ public Builder setResultAsStringBytes(com.google.protobuf.ByteString value) { if (value == null) { @@ -3289,6 +4016,8 @@ public Builder setResultAsStringBytes(com.google.protobuf.ByteString value) { /** * optional bool result_as_boolean = 6; + * + * @return if the result is a boolean */ public boolean hasResultAsBoolean() { return ((bitField0_ & 0x00000020) == 0x00000020); @@ -3296,6 +4025,8 @@ public boolean hasResultAsBoolean() { /** * optional bool result_as_boolean = 6; + * + * @return the result as a boolean */ public boolean getResultAsBoolean() { return resultAsBoolean_; @@ -3303,6 +4034,11 @@ public boolean getResultAsBoolean() { /** * optional bool result_as_boolean = 6; + * + * @param value + * the new boolean value + * + * @return builder to set result as boolean */ public Builder setResultAsBoolean(boolean value) { bitField0_ |= 0x00000020; @@ -3313,6 +4049,8 @@ public Builder setResultAsBoolean(boolean value) { /** * optional bool result_as_boolean = 6; + * + * @return builder to clear result as a boolean */ public Builder clearResultAsBoolean() { bitField0_ = (bitField0_ & ~0x00000020); @@ -3326,6 +4064,8 @@ public Builder clearResultAsBoolean() { /** * optional sint32 result_as_int = 7; + * + * @return if it has result as an integer */ public boolean hasResultAsInt() { return ((bitField0_ & 0x00000040) == 0x00000040); @@ -3333,6 +4073,8 @@ public boolean
hasResultAsInt() { /** * optional sint32 result_as_int = 7; + * + * @return the result as an integer */ public int getResultAsInt() { return resultAsInt_; @@ -3340,6 +4082,11 @@ public int getResultAsInt() { /** * optional sint32 result_as_int = 7; + * + * @param value + * the new int value + * + * @return builder to set result to provided value */ public Builder setResultAsInt(int value) { bitField0_ |= 0x00000040; @@ -3350,6 +4097,8 @@ public Builder setResultAsInt(int value) { /** * optional sint32 result_as_int = 7; + * + * @return builder to clear results as int */ public Builder clearResultAsInt() { bitField0_ = (bitField0_ & ~0x00000040); @@ -3363,6 +4112,8 @@ public Builder clearResultAsInt() { /** * optional sint64 result_as_long = 8; + * + * @return if it has result as a long */ public boolean hasResultAsLong() { return ((bitField0_ & 0x00000080) == 0x00000080); @@ -3370,6 +4121,8 @@ public boolean hasResultAsLong() { /** * optional sint64 result_as_long = 8; + * + * @return the result as a long */ public long getResultAsLong() { return resultAsLong_; @@ -3377,6 +4130,11 @@ public long getResultAsLong() { /** * optional sint64 result_as_long = 8; + * + * @param value + * the new long value + * + * @return builder to set result to long value provided */ public Builder setResultAsLong(long value) { bitField0_ |= 0x00000080; @@ -3387,6 +4145,8 @@ public Builder setResultAsLong(long value) { /** * optional sint64 result_as_long = 8; + * + * @return builder to clear result as long */ public Builder clearResultAsLong() { bitField0_ = (bitField0_ & ~0x00000080); @@ -3400,6 +4160,8 @@ public Builder clearResultAsLong() { /** * optional float result_as_float = 9; + * + * @return if the result is a float */ public boolean hasResultAsFloat() { return ((bitField0_ & 0x00000100) == 0x00000100); @@ -3407,6 +4169,8 @@ public boolean hasResultAsFloat() { /** * optional float result_as_float = 9; + * + * @return the result as a float */ public float getResultAsFloat() { return resultAsFloat_; @@ -3414,6 +4178,11 @@ public float getResultAsFloat() { /** * optional float result_as_float = 9; + * + * @param value + * the new float value + * + * @return builder to set result as float provided */ public Builder setResultAsFloat(float value) { bitField0_ |= 0x00000100; @@ -3424,6 +4193,8 @@ public Builder setResultAsFloat(float value) { /** * optional float result_as_float = 9; + * + * @return builder to clear results as float */ public Builder clearResultAsFloat() { bitField0_ = (bitField0_ & ~0x00000100); @@ -3437,6 +4208,8 @@ public Builder clearResultAsFloat() { /** * optional double result_as_double = 10; + * + * @return if it has result as a double */ public boolean hasResultAsDouble() { return ((bitField0_ & 0x00000200) == 0x00000200); @@ -3444,6 +4217,8 @@ public boolean hasResultAsDouble() { /** * optional double result_as_double = 10; + * + * @return results as a double */ public double getResultAsDouble() { return resultAsDouble_; @@ -3451,6 +4226,11 @@ public double getResultAsDouble() { /** * optional double result_as_double = 10; + * + * @param value + * the new double value + * + * @return builder to set result as a double */ public Builder setResultAsDouble(double value) { bitField0_ |= 0x00000200; @@ -3461,6 +4241,8 @@ public Builder setResultAsDouble(double value) { /** * optional double result_as_double = 10; + * + * @return builder to clear result as double */ public Builder clearResultAsDouble() { bitField0_ = (bitField0_ & ~0x00000200); @@ -3474,6 +4256,8 @@ public Builder 
clearResultAsDouble() { /** * optional bytes result_as_bytes = 11; + * + * @return if it has result as byte string */ public boolean hasResultAsBytes() { return ((bitField0_ & 0x00000400) == 0x00000400); @@ -3481,6 +4265,8 @@ public boolean hasResultAsBytes() { /** * optional bytes result_as_bytes = 11; + * + * @return the result as byte string */ public com.google.protobuf.ByteString getResultAsBytes() { return resultAsBytes_; @@ -3488,6 +4274,11 @@ public com.google.protobuf.ByteString getResultAsBytes() { /** * optional bytes result_as_bytes = 11; + * + * @param value + * new value as byte string + * + * @return builder to set result as byte string */ public Builder setResultAsBytes(com.google.protobuf.ByteString value) { if (value == null) { @@ -3501,6 +4292,8 @@ public Builder setResultAsBytes(com.google.protobuf.ByteString value) { /** * optional bytes result_as_bytes = 11; + * + * @return builder to clear result as byte string */ public Builder clearResultAsBytes() { bitField0_ = (bitField0_ & ~0x00000400); @@ -3515,6 +4308,8 @@ public Builder clearResultAsBytes() { /** * optional .datawave.webservice.results.cached.result.Description result_as_description = 12; + * + * @return if it has result as a description */ public boolean hasResultAsDescription() { return ((bitField0_ & 0x00000800) == 0x00000800); @@ -3522,6 +4317,8 @@ public boolean hasResultAsDescription() { /** * optional .datawave.webservice.results.cached.result.Description result_as_description = 12; + * + * @return the result as a description */ public datawave.webservice.results.cached.result.CachedresultMessages.Description getResultAsDescription() { return resultAsDescription_; @@ -3529,6 +4326,11 @@ public datawave.webservice.results.cached.result.CachedresultMessages.Descriptio /** * optional .datawave.webservice.results.cached.result.Description result_as_description = 12; + * + * @param value + * the new description value + * + * @return builder to set result as description provided */ public Builder setResultAsDescription(datawave.webservice.results.cached.result.CachedresultMessages.Description value) { if (value == null) { @@ -3542,6 +4344,11 @@ public Builder setResultAsDescription(datawave.webservice.results.cached.result. /** * optional .datawave.webservice.results.cached.result.Description result_as_description = 12; + * + * @param builderForValue + * builder to create description + * + * @return builder to set result to provided description */ public Builder setResultAsDescription(datawave.webservice.results.cached.result.CachedresultMessages.Description.Builder builderForValue) { resultAsDescription_ = builderForValue.build(); @@ -3552,6 +4359,11 @@ public Builder setResultAsDescription(datawave.webservice.results.cached.result.
/** * optional .datawave.webservice.results.cached.result.Description result_as_description = 12; + * + * @param value + * the new description value + * + * @return builder to merge result as description provided */ public Builder mergeResultAsDescription(datawave.webservice.results.cached.result.CachedresultMessages.Description value) { if (((bitField0_ & 0x00000800) == 0x00000800) @@ -3568,6 +4380,8 @@ public Builder mergeResultAsDescription(datawave.webservice.results.cached.resul /** * optional .datawave.webservice.results.cached.result.Description result_as_description = 12; + * + * @return builder to clear the result as description */ public Builder clearResultAsDescription() { resultAsDescription_ = datawave.webservice.results.cached.result.CachedresultMessages.Description.getDefaultInstance(); diff --git a/web-services/client/src/main/java/datawave/webservice/results/cached/result/CachedresultMessages.java b/web-services/client/src/main/java/datawave/webservice/results/cached/result/CachedresultMessages.java index de2d63157fb..c9d3e2aa47a 100644 --- a/web-services/client/src/main/java/datawave/webservice/results/cached/result/CachedresultMessages.java +++ b/web-services/client/src/main/java/datawave/webservice/results/cached/result/CachedresultMessages.java @@ -13,16 +13,22 @@ public interface ColumnVisibilityOrBuilder extends com.google.protobuf.MessageLi // required string visibility = 1; /** * required string visibility = 1; + * + * @return if it has visibility */ boolean hasVisibility(); /** * required string visibility = 1; + * + * @return the visibility */ java.lang.String getVisibility(); /** * required string visibility = 1; + * + * @return the visibility as a byte string */ com.google.protobuf.ByteString getVisibilityBytes(); } @@ -102,6 +108,8 @@ public com.google.protobuf.Parser getParserForType() { /** * required string visibility = 1; + * + * @return if it has visibility */ public boolean hasVisibility() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -109,6 +117,8 @@ public boolean hasVisibility() { /** * required string visibility = 1; + * + * @return the visibility */ public java.lang.String getVisibility() { java.lang.Object ref = visibility_; @@ -126,6 +136,8 @@ public java.lang.String getVisibility() { /** * required string visibility = 1; + * + * @return the visibility as a byte string */ public com.google.protobuf.ByteString getVisibilityBytes() { java.lang.Object ref = visibility_; @@ -347,6 +359,8 @@ public Builder mergeFrom(com.google.protobuf.CodedInputStream input, com.google. 
/** * required string visibility = 1; + * + * @return if it has visibility */ public boolean hasVisibility() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -354,6 +368,8 @@ public boolean hasVisibility() { /** * required string visibility = 1; + * + * @return the visibility */ public java.lang.String getVisibility() { java.lang.Object ref = visibility_; @@ -368,6 +384,8 @@ public java.lang.String getVisibility() { /** * required string visibility = 1; + * + * @return the visibility byte string */ public com.google.protobuf.ByteString getVisibilityBytes() { java.lang.Object ref = visibility_; @@ -382,6 +400,11 @@ public com.google.protobuf.ByteString getVisibilityBytes() { /** * required string visibility = 1; + * + * @param value + * the new visibility value + * + * @return builder to set visibility to the provided value */ public Builder setVisibility(java.lang.String value) { if (value == null) { @@ -395,6 +418,8 @@ public Builder setVisibility(java.lang.String value) { /** * required string visibility = 1; + * + * @return builder to clear visibility */ public Builder clearVisibility() { bitField0_ = (bitField0_ & ~0x00000001); @@ -405,6 +430,11 @@ public Builder clearVisibility() { /** * required string visibility = 1; + * + * @param value + * visibility byte string + * + * @return builder to set the visibility bytes */ public Builder setVisibilityBytes(com.google.protobuf.ByteString value) { if (value == null) { @@ -432,16 +462,22 @@ public interface AttributeOrBuilder extends com.google.protobuf.MessageLiteOrBui // required string name = 1; /** * required string name = 1; + * + * @return if it has a name */ boolean hasName(); /** * required string name = 1; + * + * @return the name */ java.lang.String getName(); /** * required string name = 1; + * + * @return the name as a byte string */ com.google.protobuf.ByteString getNameBytes(); } @@ -521,6 +557,8 @@ public com.google.protobuf.Parser getParserForType() { /** * required string name = 1; + * + * @return if it has a name */ public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -528,6 +566,8 @@ public boolean hasName() { /** * required string name = 1; + * + * @return the name */ public java.lang.String getName() { java.lang.Object ref = name_; @@ -545,6 +585,8 @@ public java.lang.String getName() { /** * required string name = 1; + * + * @return the name as a byte string */ public com.google.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; @@ -765,6 +807,8 @@ public Builder mergeFrom(com.google.protobuf.CodedInputStream input, com.google. 
/** * required string name = 1; + * + * @return if it has a name */ public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -772,6 +816,8 @@ public boolean hasName() { /** * required string name = 1; + * + * @return the name */ public java.lang.String getName() { java.lang.Object ref = name_; @@ -786,6 +832,8 @@ public java.lang.String getName() { /** * required string name = 1; + * + * @return the name as a byte string */ public com.google.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; @@ -800,6 +848,11 @@ public com.google.protobuf.ByteString getNameBytes() { /** * required string name = 1; + * + * @param value + * the new name value + * + * @return builder to set the name */ public Builder setName(java.lang.String value) { if (value == null) { @@ -813,6 +866,8 @@ public Builder setName(java.lang.String value) { /** * required string name = 1; + * + * @return builder to clear the name */ public Builder clearName() { bitField0_ = (bitField0_ & ~0x00000001); @@ -823,6 +878,11 @@ public Builder clearName() { /** * required string name = 1; + * + * @param value + * the name byte string + * + * @return builder to set the name to the provided byte string */ public Builder setNameBytes(com.google.protobuf.ByteString value) { if (value == null) { @@ -850,48 +910,73 @@ public interface DescriptionOrBuilder extends com.google.protobuf.MessageLiteOrB // required string view = 1; /** * required string view = 1; + * + * @return if it has a view */ boolean hasView(); /** * required string view = 1; + * + * @return the view */ java.lang.String getView(); /** * required string view = 1; + * + * @return the view as a byte string */ com.google.protobuf.ByteString getViewBytes(); // repeated .datawave.webservice.results.cached.result.Attribute attributes = 2; /** * repeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @return list of attributes */ java.util.List getAttributesList(); /** * repeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @param index + * the index + * + * @return the attribute at the provided index */ datawave.webservice.results.cached.result.CachedresultMessages.Attribute getAttributes(int index); /** * repeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @return the number of attributes */ int getAttributesCount(); // repeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; /** * repeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @return the visibility list */ java.util.List getVisibilitiesList(); /** * repeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @param index + * the index + * + * @return the visibility at the provided index */ datawave.webservice.results.cached.result.CachedresultMessages.ColumnVisibility getVisibilities(int index); /** * repeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @return the number of visibilities */ int getVisibilitiesCount(); } @@ -995,6 +1080,8 @@ public com.google.protobuf.Parser getParserForType() { /** * required string view = 1; + * + * @return if there is a view */ public boolean hasView() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -1002,6 +1089,8 @@ public boolean hasView() { /** * required string view = 1; + * + * @return the view */ public java.lang.String getView() { java.lang.Object ref = view_; @@ -1019,6 +1108,8 @@ public
java.lang.String getView() { /** * required string view = 1; + * + * @return the view as a byte string */ public com.google.protobuf.ByteString getViewBytes() { java.lang.Object ref = view_; @@ -1037,6 +1128,8 @@ public com.google.protobuf.ByteString getViewBytes() { /** * repeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @return the attributes list */ public java.util.List getAttributesList() { return attributes_; @@ -1044,6 +1137,8 @@ public java.util.Listrepeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @return list of attributes */ public java.util.List getAttributesOrBuilderList() { return attributes_; @@ -1051,6 +1146,8 @@ public java.util.Listrepeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @return the number of attributes in the list */ public int getAttributesCount() { return attributes_.size(); @@ -1058,6 +1155,11 @@ public int getAttributesCount() { /** * repeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @param index + * the index + * + * @return the attribute at the provided index */ public datawave.webservice.results.cached.result.CachedresultMessages.Attribute getAttributes(int index) { return attributes_.get(index); @@ -1065,6 +1167,11 @@ public datawave.webservice.results.cached.result.CachedresultMessages.Attribute /** * repeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @param index + * the index + * + * @return the attribute at the provided index */ public datawave.webservice.results.cached.result.CachedresultMessages.AttributeOrBuilder getAttributesOrBuilder(int index) { return attributes_.get(index); @@ -1076,6 +1183,8 @@ public datawave.webservice.results.cached.result.CachedresultMessages.AttributeO /** * repeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @return the visibility list */ public java.util.List getVisibilitiesList() { return visibilities_; @@ -1083,6 +1192,8 @@ public java.util.Listrepeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @return the visibility list */ public java.util.List getVisibilitiesOrBuilderList() { return visibilities_; @@ -1090,6 +1201,8 @@ public java.util.Listrepeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @return the number of visibilities in the list */ public int getVisibilitiesCount() { return visibilities_.size(); @@ -1097,6 +1210,11 @@ public int getVisibilitiesCount() { /** * repeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @param index + * the index + * + * @return the visibility at the provided index */ public datawave.webservice.results.cached.result.CachedresultMessages.ColumnVisibility getVisibilities(int index) { return visibilities_.get(index); @@ -1104,6 +1222,11 @@ public datawave.webservice.results.cached.result.CachedresultMessages.ColumnVisi /** * repeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @param index + * the index + * + * @return visibility at the provided index */ public datawave.webservice.results.cached.result.CachedresultMessages.ColumnVisibilityOrBuilder getVisibilitiesOrBuilder(int index) { return visibilities_.get(index); @@ -1390,6 +1513,8 @@ public Builder mergeFrom(com.google.protobuf.CodedInputStream input, com.google. 
/** * required string view = 1; + * + * @return if it has a view */ public boolean hasView() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -1397,6 +1522,8 @@ public boolean hasView() { /** * required string view = 1; + * + * @return the view */ public java.lang.String getView() { java.lang.Object ref = view_; @@ -1411,6 +1538,8 @@ public java.lang.String getView() { /** * required string view = 1; + * + * @return the view as a byte string */ public com.google.protobuf.ByteString getViewBytes() { java.lang.Object ref = view_; @@ -1425,6 +1554,11 @@ public com.google.protobuf.ByteString getViewBytes() { /** * required string view = 1; + * + * @param value + * the new view + * + * @return builder to set the view to the provided value */ public Builder setView(java.lang.String value) { if (value == null) { @@ -1438,6 +1572,8 @@ public Builder setView(java.lang.String value) { /** * required string view = 1; + * + * @return builder to clear the view */ public Builder clearView() { bitField0_ = (bitField0_ & ~0x00000001); @@ -1448,6 +1584,11 @@ public Builder clearView() { /** * required string view = 1; + * + * @param value + * the new view value + * + * @return builder to set view bytes to provided value */ public Builder setViewBytes(com.google.protobuf.ByteString value) { if (value == null) { @@ -1471,6 +1612,8 @@ private void ensureAttributesIsMutable() { /** * repeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @return attributes list */ public java.util.List getAttributesList() { return java.util.Collections.unmodifiableList(attributes_); @@ -1478,6 +1621,8 @@ public java.util.Listrepeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @return the number of attributes in the list */ public int getAttributesCount() { return attributes_.size(); @@ -1485,6 +1630,11 @@ public int getAttributesCount() { /** * repeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @param index + * the index + * + * @return the attribute at the provided index */ public datawave.webservice.results.cached.result.CachedresultMessages.Attribute getAttributes(int index) { return attributes_.get(index); @@ -1492,6 +1642,13 @@ public datawave.webservice.results.cached.result.CachedresultMessages.Attribute /** * repeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @param index + * the index + * @param value + * the new attribute + * + * @return builder to set the attribute at the provided index to the provided value */ public Builder setAttributes(int index, datawave.webservice.results.cached.result.CachedresultMessages.Attribute value) { if (value == null) { @@ -1505,6 +1662,13 @@ public Builder setAttributes(int index, datawave.webservice.results.cached.resul /** * repeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @param index + * the index + * @param builderForValue + * the new attribute builder + * + * @return builder to set the attribute at the provided index to the provided value */ public Builder setAttributes(int index, datawave.webservice.results.cached.result.CachedresultMessages.Attribute.Builder builderForValue) { ensureAttributesIsMutable(); @@ -1515,6 +1679,11 @@ public Builder setAttributes(int index, datawave.webservice.results.cached.resul /** * repeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @param value + * the new attribute + * + * @return builder to add the provided attribute */ public 
Builder addAttributes(datawave.webservice.results.cached.result.CachedresultMessages.Attribute value) { if (value == null) { @@ -1528,6 +1697,13 @@ public Builder addAttributes(datawave.webservice.results.cached.result.Cachedres /** * repeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @param index + * the index + * @param value + * the attribute to add + * + * @return builder to add the provided attribute to the provided index */ public Builder addAttributes(int index, datawave.webservice.results.cached.result.CachedresultMessages.Attribute value) { if (value == null) { @@ -1541,6 +1717,11 @@ public Builder addAttributes(int index, datawave.webservice.results.cached.resul /** * repeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @param builderForValue + * builder to create attribute + * + * @return builder to add attribute provided */ public Builder addAttributes(datawave.webservice.results.cached.result.CachedresultMessages.Attribute.Builder builderForValue) { ensureAttributesIsMutable(); @@ -1551,6 +1732,13 @@ public Builder addAttributes(datawave.webservice.results.cached.result.Cachedres /** * repeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @param index + * the index + * @param builderForValue + * builder to create attribute + * + * @return builder to add attribute at provided index */ public Builder addAttributes(int index, datawave.webservice.results.cached.result.CachedresultMessages.Attribute.Builder builderForValue) { ensureAttributesIsMutable(); @@ -1561,6 +1749,11 @@ public Builder addAttributes(int index, datawave.webservice.results.cached.resul /** * repeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @param values + * iterable of values to add + * + * @return builder to add iterable of attributes */ public Builder addAllAttributes(java.lang.Iterable values) { ensureAttributesIsMutable(); @@ -1571,6 +1764,8 @@ public Builder addAllAttributes(java.lang.Iterablerepeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @return builder to clear attributes */ public Builder clearAttributes() { attributes_ = java.util.Collections.emptyList(); @@ -1581,6 +1776,11 @@ public Builder clearAttributes() { /** * repeated .datawave.webservice.results.cached.result.Attribute attributes = 2; + * + * @param index + * the index + * + * @return builder to remove attributes */ public Builder removeAttributes(int index) { ensureAttributesIsMutable(); @@ -1602,6 +1802,8 @@ private void ensureVisibilitiesIsMutable() { /** * repeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @return list of visibilities */ public java.util.List getVisibilitiesList() { return java.util.Collections.unmodifiableList(visibilities_); @@ -1609,6 +1811,8 @@ public java.util.Listrepeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @return the number of visibilities in the list */ public int getVisibilitiesCount() { return visibilities_.size(); @@ -1616,6 +1820,11 @@ public int getVisibilitiesCount() { /** * repeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @param index + * the index + * + * @return the visibility at the provided index */ public datawave.webservice.results.cached.result.CachedresultMessages.ColumnVisibility getVisibilities(int index) { return visibilities_.get(index); @@ -1623,6 +1832,13 @@ public 
datawave.webservice.results.cached.result.CachedresultMessages.ColumnVisi /** * repeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @param index + * the index + * @param value + * the new visibility + * + * @return builder to set the visibility at the provided index to the provided value */ public Builder setVisibilities(int index, datawave.webservice.results.cached.result.CachedresultMessages.ColumnVisibility value) { if (value == null) { @@ -1636,6 +1852,13 @@ public Builder setVisibilities(int index, datawave.webservice.results.cached.res /** * repeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @param index + * the index + * @param builderForValue + * visibility builder + * + * @return builder to set the visibility at the provided index to the provided value */ public Builder setVisibilities(int index, datawave.webservice.results.cached.result.CachedresultMessages.ColumnVisibility.Builder builderForValue) { ensureVisibilitiesIsMutable(); @@ -1646,6 +1869,11 @@ public Builder setVisibilities(int index, datawave.webservice.results.cached.res /** * repeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @param value + * visibility to add + * + * @return builder to add visibility */ public Builder addVisibilities(datawave.webservice.results.cached.result.CachedresultMessages.ColumnVisibility value) { if (value == null) { @@ -1659,6 +1887,13 @@ public Builder addVisibilities(datawave.webservice.results.cached.result.Cachedr /** * repeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @param index + * the index + * @param value + * the visibility to add + * + * @return builder to add visibility */ public Builder addVisibilities(int index, datawave.webservice.results.cached.result.CachedresultMessages.ColumnVisibility value) { if (value == null) { @@ -1672,6 +1907,11 @@ public Builder addVisibilities(int index, datawave.webservice.results.cached.res /** * repeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @param builderForValue + * visibility builder + * + * @return builder to add visibility */ public Builder addVisibilities(datawave.webservice.results.cached.result.CachedresultMessages.ColumnVisibility.Builder builderForValue) { ensureVisibilitiesIsMutable(); @@ -1682,6 +1922,13 @@ public Builder addVisibilities(datawave.webservice.results.cached.result.Cachedr /** * repeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @param index + * the index + * @param builderForValue + * visibility builder + * + * @return builder to add visibility at provided index */ public Builder addVisibilities(int index, datawave.webservice.results.cached.result.CachedresultMessages.ColumnVisibility.Builder builderForValue) { ensureVisibilitiesIsMutable(); @@ -1692,6 +1939,11 @@ public Builder addVisibilities(int index, datawave.webservice.results.cached.res /** * repeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @param values + * iterable of visibilities + * + * @return builder to add all visibilities provided */ public Builder addAllVisibilities( java.lang.Iterable values) { @@ -1703,6 +1955,8 @@ public Builder addAllVisibilities( /** * repeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @return builder to clear visibilities */ public Builder clearVisibilities() { visibilities_ = java.util.Collections.emptyList(); @@ -1713,6 +1967,11 @@ public Builder clearVisibilities() { /** * repeated .datawave.webservice.results.cached.result.ColumnVisibility visibilities = 3; + * + * @param index + * the index + * + * @return builder to remove the visibility at the provided index */ public Builder removeVisibilities(int index) { ensureVisibilitiesIsMutable();
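The Javadoc added above documents the generated protobuf builder API for the view, attributes, and visibilities fields. A minimal sketch of how the documented methods chain, assuming the enclosing generated message type is named Description and that attr and vis are previously built Attribute and ColumnVisibility messages (all three names are assumptions for illustration, not part of this patch):

    // Illustrative only: "Description", "attr", and "vis" are assumed names.
    CachedresultMessages.Description.Builder builder = CachedresultMessages.Description.newBuilder();
    builder.setView("cachedView");   // required string view = 1
    builder.addAttributes(attr);     // repeated Attribute attributes = 2
    builder.addVisibilities(vis);    // repeated ColumnVisibility visibilities = 3
    CachedresultMessages.Description description = builder.build();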
diff --git a/web-services/client/src/main/java/datawave/webservice/results/mr/MapReduceJobDescription.java b/web-services/client/src/main/java/datawave/webservice/results/mr/MapReduceJobDescription.java index 0f30bf56cff..4bdb108453a 100644 --- a/web-services/client/src/main/java/datawave/webservice/results/mr/MapReduceJobDescription.java +++ b/web-services/client/src/main/java/datawave/webservice/results/mr/MapReduceJobDescription.java @@ -10,6 +10,7 @@ import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlElementWrapper; +import javax.xml.bind.annotation.XmlElements; import javax.xml.bind.annotation.XmlRootElement; import com.fasterxml.jackson.annotation.JsonProperty; @@ -30,16 +31,14 @@ public class MapReduceJobDescription implements Serializable { @XmlElement(name = "JobType", required = true) protected String jobType = null; - @JsonProperty(value = "RequiredRuntimeParameters") // work-around for bug in jackson-databind @XmlElementWrapper(name = "RequiredRuntimeParameters") - @XmlElement(name = "Parameter") + @XmlElements(@XmlElement(name = "Parameter", type = String.class)) protected List<String> requiredRuntimeParameters = null; - @JsonProperty(value = "OptionalRuntimeParameters") // work-around for bug in jackson-databind @XmlElementWrapper(name = "OptionalRuntimeParameters") - @XmlElement(name = "Parameter") + @XmlElements(@XmlElement(name = "Parameter", type = String.class)) protected List<String> optionalRuntimeParameters = null; @JsonProperty(value = "WorkflowAlgorithmDescriptions") diff --git a/web-services/client/src/main/java/datawave/webservice/results/mr/MapReduceJobDescriptionList.java b/web-services/client/src/main/java/datawave/webservice/results/mr/MapReduceJobDescriptionList.java new file mode 100644 index 00000000000..1afcdf65d15 --- /dev/null +++ b/web-services/client/src/main/java/datawave/webservice/results/mr/MapReduceJobDescriptionList.java @@ -0,0 +1,34 @@ +package datawave.webservice.results.mr; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAccessOrder; +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorOrder; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlElementWrapper; +import javax.xml.bind.annotation.XmlRootElement; + +import datawave.webservice.result.BaseResponse; + +@XmlRootElement(name = "MapReduceJobDescriptionList") +@XmlAccessorType(XmlAccessType.NONE) +@XmlAccessorOrder(XmlAccessOrder.ALPHABETICAL) +public class MapReduceJobDescriptionList extends BaseResponse implements Serializable { + private static final long serialVersionUID = 1L; + + @XmlElementWrapper(name = "MapReduceJobDescriptionList") + @XmlElement(name = "MapReduceJobDescription") + List<MapReduceJobDescription> results = new ArrayList<>(); + + public List<MapReduceJobDescription> getResults() { + return results; + } + + public void setResults(List<MapReduceJobDescription> results) { + this.results = results; + } +}
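Swapping the @JsonProperty work-around for @XmlElements above keeps the wrapped list shape intact. Under these annotations the runtime parameters should marshal roughly as follows (a sketch; the parameter values are hypothetical and not part of this patch):

    <RequiredRuntimeParameters>
        <Parameter>paramOne</Parameter>
        <Parameter>paramTwo</Parameter>
    </RequiredRuntimeParameters>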
diff --git a/web-services/common-util/src/main/resources/source-templates/datawave/webservice/common/result/package-info.java b/web-services/client/src/main/resources/source-templates/datawave/webservice/query/map/package-info.java similarity index 74% rename from web-services/common-util/src/main/resources/source-templates/datawave/webservice/common/result/package-info.java rename to web-services/client/src/main/resources/source-templates/datawave/webservice/query/map/package-info.java index 5065eff71f2..a1d62ad9bd3 100644 --- a/web-services/common-util/src/main/resources/source-templates/datawave/webservice/common/result/package-info.java +++ b/web-services/client/src/main/resources/source-templates/datawave/webservice/query/map/package-info.java @@ -1,7 +1,6 @@ @XmlSchema(namespace="${datawave.webservice.namespace}", elementFormDefault=XmlNsForm.QUALIFIED, xmlns={@XmlNs(prefix = "", namespaceURI = "${datawave.webservice.namespace}")}) -package datawave.webservice.common.result; +package datawave.webservice.query.map; import javax.xml.bind.annotation.XmlNs; import javax.xml.bind.annotation.XmlNsForm; -import javax.xml.bind.annotation.XmlSchema; - +import javax.xml.bind.annotation.XmlSchema; \ No newline at end of file diff --git a/web-services/client/src/main/resources/source-templates/datawave/webservice/query/result/metadata/package-info.java b/web-services/client/src/main/resources/source-templates/datawave/webservice/query/result/metadata/package-info.java deleted file mode 100644 index 7c64d7234a9..00000000000 --- a/web-services/client/src/main/resources/source-templates/datawave/webservice/query/result/metadata/package-info.java +++ /dev/null @@ -1,7 +0,0 @@ -@XmlSchema(namespace="${datawave.webservice.namespace}", elementFormDefault=XmlNsForm.QUALIFIED, xmlns={@XmlNs(prefix = "", namespaceURI = "${datawave.webservice.namespace}")}) -package datawave.webservice.query.result.metadata; - -import javax.xml.bind.annotation.XmlNs; -import javax.xml.bind.annotation.XmlNsForm; -import javax.xml.bind.annotation.XmlSchema; - diff --git a/web-services/client/src/test/java/datawave/user/UserAuthorizationsTest.java b/web-services/client/src/test/java/datawave/user/UserAuthorizationsTest.java new file mode 100644 index 00000000000..27e7adab842 --- /dev/null +++ b/web-services/client/src/test/java/datawave/user/UserAuthorizationsTest.java @@ -0,0 +1,82 @@ +package datawave.user; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.powermock.api.support.membermodification.MemberMatcher.field; +import static org.powermock.api.support.membermodification.MemberMatcher.fields; + +import java.lang.reflect.Field; +import java.util.Arrays; +import java.util.TreeSet; + +import org.junit.Before; +import org.junit.Test; + +import io.protostuff.LinkedBuffer; +import io.protostuff.Message; +import io.protostuff.ProtobufIOUtil; + +public class UserAuthorizationsTest { + @Test + public void testFieldConfiguration() { + String[] expecteds = new String[] {"SCHEMA", "auths", "serialVersionUID"}; + testFieldNames(expecteds, UserAuthorizations.class); + } + + @Test + public void testSerialization() throws Exception { + TreeSet<String> auths = new TreeSet<>(); + auths.add("a1"); + auths.add("a2"); + auths.add("a3"); + testRoundTrip(UserAuthorizations.class, new String[] {"auths"}, new Object[] {auths}); + } + + protected LinkedBuffer buffer; + + @Before + public void setUp() { + buffer = LinkedBuffer.allocate(4096); + } + 
protected <T extends Message<T>> void testFieldNames(String[] fieldNames, Class<T> clazz) { + Field[] fields = fields(clazz); + assertEquals("The number of fields in " + clazz.getName() + " has changed. Please update " + getClass().getName() + ".", fieldNames.length, + fields.length); + + String[] actualFieldNames = new String[fields.length]; + for (int i = 0; i < fields.length; ++i) + actualFieldNames[i] = fields[i].getName(); + + Arrays.sort(fieldNames); + Arrays.sort(actualFieldNames); + assertArrayEquals("Serialization/deserialization of " + clazz.getName() + " failed.", fieldNames, actualFieldNames); + } + + protected <T extends Message<T>> void testRoundTrip(Class<T> clazz, String[] fieldNames, Object[] fieldValues) throws Exception { + assertNotNull(fieldNames); + assertNotNull(fieldValues); + assertEquals(fieldNames.length, fieldValues.length); + + T original = clazz.newInstance(); + for (int i = 0; i < fieldNames.length; ++i) + field(clazz, fieldNames[i]).set(original, fieldValues[i]); + + T reconstructed = roundTrip(original); + for (int i = 0; i < fieldNames.length; ++i) + assertEquals(fieldValues[i], field(clazz, fieldNames[i]).get(reconstructed)); + } + + protected <T extends Message<T>> T roundTrip(T message) throws Exception { + byte[] bytes = toProtobufBytes(message); + T response = message.cachedSchema().newMessage(); + ProtobufIOUtil.mergeFrom(bytes, response, message.cachedSchema()); + return response; + } + + protected <T extends Message<T>> byte[] toProtobufBytes(T message) { + return ProtobufIOUtil.toByteArray(message, message.cachedSchema(), buffer); + } + +} diff --git a/web-services/client/src/test/java/datawave/webservice/query/QueryParametersTest.java b/web-services/client/src/test/java/datawave/webservice/query/QueryParametersTest.java index ed250e175ae..dbf6357bb37 100644 --- a/web-services/client/src/test/java/datawave/webservice/query/QueryParametersTest.java +++ b/web-services/client/src/test/java/datawave/webservice/query/QueryParametersTest.java @@ -11,6 +11,10 @@ import org.springframework.util.LinkedMultiValueMap; import org.springframework.util.MultiValueMap; +import datawave.microservice.query.DefaultQueryParameters; +import datawave.microservice.query.QueryParameters; +import datawave.microservice.query.QueryPersistence; + public class QueryParametersTest { private QueryParameters qp; @@ -58,7 +62,7 @@ public void beforeTests() { } private QueryParameters buildQueryParameters() { - QueryParametersImpl qpBuilder = new QueryParametersImpl(); + DefaultQueryParameters qpBuilder = new DefaultQueryParameters(); qpBuilder.setAuths(auths); qpBuilder.setBeginDate(beginDate); qpBuilder.setEndDate(endDate); @@ -99,8 +103,8 @@ public void testAllTheParams() { // Test and validate date formatting, parsing try { - Assert.assertEquals(formatDateCheck, QueryParametersImpl.formatDate(beginDate)); - Assert.assertEquals(parseDateCheck, QueryParametersImpl.parseStartDate(QueryParametersImpl.formatDate(beginDate))); + Assert.assertEquals(formatDateCheck, DefaultQueryParameters.formatDate(beginDate)); + Assert.assertEquals(parseDateCheck, DefaultQueryParameters.parseStartDate(DefaultQueryParameters.formatDate(beginDate))); } catch (ParseException e) { log.error(e); } @@ -115,10 +119,10 @@ public void testAllTheParams() { params.add(QueryParameters.QUERY_PERSISTENCE, "PERSISTENT"); params.add(QueryParameters.QUERY_PAGESIZE, "10"); params.add(QueryParameters.QUERY_AUTHORIZATIONS, "auths"); - params.add(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expDate).toString()); + params.add(QueryParameters.QUERY_EXPIRATION,
DefaultQueryParameters.formatDate(expDate).toString()); params.add(QueryParameters.QUERY_TRACE, "trace"); - params.add(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate).toString()); - params.add(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate).toString()); + params.add(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate).toString()); + params.add(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate).toString()); params.add(QueryParameters.QUERY_PARAMS, "params"); params.add(QueryParameters.QUERY_LOGIC_NAME, "logicName"); } catch (ParseException e) { diff --git a/web-services/client/src/test/java/datawave/webservice/query/TestQueryImpl.java b/web-services/client/src/test/java/datawave/webservice/query/TestQueryImpl.java index 5205127c571..8c1b6eaf3fb 100644 --- a/web-services/client/src/test/java/datawave/webservice/query/TestQueryImpl.java +++ b/web-services/client/src/test/java/datawave/webservice/query/TestQueryImpl.java @@ -7,7 +7,8 @@ import org.junit.Before; import org.junit.Test; -import datawave.webservice.query.QueryImpl.Parameter; +import datawave.microservice.query.QueryImpl; +import datawave.microservice.query.QueryImpl.Parameter; public class TestQueryImpl { diff --git a/web-services/client/src/test/java/datawave/webservice/query/TestQueryParameters.java b/web-services/client/src/test/java/datawave/webservice/query/TestQueryParameters.java index 4d349c98f33..24ae4377a35 100644 --- a/web-services/client/src/test/java/datawave/webservice/query/TestQueryParameters.java +++ b/web-services/client/src/test/java/datawave/webservice/query/TestQueryParameters.java @@ -11,20 +11,23 @@ import org.springframework.util.LinkedMultiValueMap; import org.springframework.util.MultiValueMap; +import datawave.microservice.query.DefaultQueryParameters; +import datawave.microservice.query.QueryParameters; + public class TestQueryParameters { - private QueryParametersImpl qp; + private DefaultQueryParameters qp; private MultiValueMap parameters; @Before public void setup() { - qp = new QueryParametersImpl(); + qp = new DefaultQueryParameters(); parameters = new LinkedMultiValueMap<>(); - parameters.set(QueryParameters.QUERY_AUTHORIZATIONS, "ALL"); - parameters.set(QueryParameters.QUERY_NAME, "Test"); - parameters.set(QueryParameters.QUERY_PERSISTENCE, "TRANSIENT"); - parameters.set(QueryParameters.QUERY_STRING, "FOO == BAR"); - parameters.set(QueryParameters.QUERY_LOGIC_NAME, "LogicName"); + parameters.add(QueryParameters.QUERY_AUTHORIZATIONS, "ALL"); + parameters.add(QueryParameters.QUERY_NAME, "Test"); + parameters.add(QueryParameters.QUERY_PERSISTENCE, "TRANSIENT"); + parameters.add(QueryParameters.QUERY_STRING, "FOO == BAR"); + parameters.add(QueryParameters.QUERY_LOGIC_NAME, "LogicName"); } @Test @@ -38,7 +41,7 @@ public void testNullExpirationDate() { @Test public void test24HoursExpirationDate() { - parameters.set(QueryParameters.QUERY_EXPIRATION, "+24Hours"); + parameters.add(QueryParameters.QUERY_EXPIRATION, "+24Hours"); qp.validate(parameters); SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd HHmmss"); @@ -51,7 +54,7 @@ public void testDaysExpirationDate() { SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd"); SimpleDateFormat msFormat = new SimpleDateFormat("yyyyMMdd HHmmss.SSS"); String expDateString = format.format(DateUtils.addDays(new Date(), 1)); - parameters.set(QueryParameters.QUERY_EXPIRATION, expDateString); + parameters.add(QueryParameters.QUERY_EXPIRATION, expDateString); 
qp.validate(parameters); assertEquals(expDateString + " 235959.999", msFormat.format(qp.getExpirationDate())); } @@ -61,7 +64,7 @@ public void testTimeExpirationDate() { SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd HHmmss"); SimpleDateFormat msFormat = new SimpleDateFormat("yyyyMMdd HHmmss.SSS"); String expDateString = format.format(DateUtils.addDays(new Date(), 1)); - parameters.set(QueryParameters.QUERY_EXPIRATION, expDateString); + parameters.add(QueryParameters.QUERY_EXPIRATION, expDateString); qp.validate(parameters); assertEquals(expDateString + ".999", msFormat.format(qp.getExpirationDate())); } @@ -70,7 +73,7 @@ public void testTimeExpirationDate() { public void testTimeMillisExpirationDate() { SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd HHmmss.SSS"); String expDateString = format.format(DateUtils.addDays(new Date(), 1)); - parameters.set(QueryParameters.QUERY_EXPIRATION, expDateString); + parameters.add(QueryParameters.QUERY_EXPIRATION, expDateString); qp.validate(parameters); assertEquals(expDateString, format.format(qp.getExpirationDate())); } @@ -82,7 +85,7 @@ public void testStartDateNoTime() { String startDateStr = format.format(new Date()); parameters.remove(QueryParameters.QUERY_BEGIN); - parameters.set(QueryParameters.QUERY_BEGIN, startDateStr); + parameters.add(QueryParameters.QUERY_BEGIN, startDateStr); qp.validate(parameters); assertEquals(startDateStr + " 000000.000", msFormat.format(qp.getBeginDate())); } @@ -94,7 +97,7 @@ public void testStartDateNoMs() { String startDateStr = format.format(new Date()); parameters.remove(QueryParameters.QUERY_BEGIN); - parameters.set(QueryParameters.QUERY_BEGIN, startDateStr); + parameters.add(QueryParameters.QUERY_BEGIN, startDateStr); qp.validate(parameters); assertEquals(startDateStr + ".000", msFormat.format(qp.getBeginDate())); } @@ -106,7 +109,7 @@ public void testEndDateNoTime() { String endDateStr = format.format(new Date()); parameters.remove(QueryParameters.QUERY_END); - parameters.set(QueryParameters.QUERY_END, endDateStr); + parameters.add(QueryParameters.QUERY_END, endDateStr); qp.validate(parameters); assertEquals(endDateStr + " 235959.999", msFormat.format(qp.getEndDate())); } @@ -118,7 +121,7 @@ public void testEndDateNoMs() { String endDateStr = format.format(new Date()); parameters.remove(QueryParameters.QUERY_END); - parameters.set(QueryParameters.QUERY_END, endDateStr); + parameters.add(QueryParameters.QUERY_END, endDateStr); qp.validate(parameters); assertEquals(endDateStr + ".999", msFormat.format(qp.getEndDate())); } diff --git a/web-services/common-util/pom.xml b/web-services/common-util/pom.xml index 4b4c7cc541b..220de0a1cdd 100644 --- a/web-services/common-util/pom.xml +++ b/web-services/common-util/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-common-util jar @@ -31,6 +31,29 @@ datawave-common ${project.version} + + gov.nsa.datawave.core + datawave-core-common + ${project.version} + + + gov.nsa.datawave.core + datawave-core-common-util + ${project.version} + jboss + + + gov.nsa.datawave.core + datawave-core-connection-pool + + + + + gov.nsa.datawave.core + datawave-core-connection-pool + ${project.version} + jboss + gov.nsa.datawave.microservice accumulo-utils diff --git a/web-services/common-util/src/main/java/datawave/security/authorization/UserOperations.java b/web-services/common-util/src/main/java/datawave/security/authorization/UserOperations.java deleted file mode 100644 index 0f5b090b46e..00000000000 
--- a/web-services/common-util/src/main/java/datawave/security/authorization/UserOperations.java +++ /dev/null @@ -1,60 +0,0 @@ -package datawave.security.authorization; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.function.Function; -import java.util.stream.Collectors; - -import com.google.common.collect.HashMultimap; -import com.google.common.collect.Multimap; - -import datawave.user.AuthorizationsListBase; -import datawave.webservice.result.GenericResponse; - -/** - * A user operations service is one that can pass calls off to another external user operations endpoint - */ -public interface UserOperations { - - AuthorizationsListBase listEffectiveAuthorizations(Object callerObject) throws AuthorizationException; - - GenericResponse flushCachedCredentials(Object callerObject) throws AuthorizationException; - - default DatawavePrincipal getRemoteUser(DatawavePrincipal principal) throws AuthorizationException { - // get the effective authorizations for this user - AuthorizationsListBase auths = listEffectiveAuthorizations(principal); - - // create a new set of proxied users - List mappedUsers = new ArrayList<>(); - Map localUsers = principal.getProxiedUsers().stream() - .collect(Collectors.toMap(DatawaveUser::getDn, Function.identity(), (v1, v2) -> v2)); - - // create a mapped user for the primary user with the auths returned by listEffectiveAuthorizations - SubjectIssuerDNPair primaryDn = SubjectIssuerDNPair.of(auths.getUserDn(), auths.getIssuerDn()); - DatawaveUser localUser = localUsers.get(primaryDn); - mappedUsers.add(new DatawaveUser(primaryDn, localUser.getUserType(), auths.getAllAuths(), auths.getAuthMapping().keySet(), - toMultimap(auths.getAuthMapping()), System.currentTimeMillis())); - - // for each proxied user, create a new user with the auths returned by listEffectiveAuthorizations - Map> authMap = auths.getAuths(); - for (Map.Entry> entry : authMap.entrySet()) { - SubjectIssuerDNPair pair = SubjectIssuerDNPair.of(entry.getKey().subjectDN, entry.getKey().issuerDN); - if (!pair.equals(primaryDn)) { - mappedUsers.add(new DatawaveUser(pair, DatawaveUser.UserType.SERVER, entry.getValue(), null, null, System.currentTimeMillis())); - } - } - - // return a principal with the mapped users - return new DatawavePrincipal(mappedUsers); - } - - static Multimap toMultimap(Map> map) { - Multimap multimap = HashMultimap.create(); - map.entrySet().stream().forEach(e -> multimap.putAll(e.getKey(), e.getValue())); - return multimap; - } - -} diff --git a/web-services/common-util/src/main/java/datawave/security/util/DnUtils.java b/web-services/common-util/src/main/java/datawave/security/util/DnUtils.java index 14c65ddf508..037f1a48f16 100644 --- a/web-services/common-util/src/main/java/datawave/security/util/DnUtils.java +++ b/web-services/common-util/src/main/java/datawave/security/util/DnUtils.java @@ -31,9 +31,11 @@ public class DnUtils { /** Parsed NPE OU identifiers */ static final List NPE_OU_LIST; + private static final Logger log = LoggerFactory.getLogger(DnUtils.class); private static final datawave.microservice.security.util.DnUtils dnUtils; + static { InputStream in = null; try { diff --git a/web-services/common-util/src/main/java/datawave/security/util/WSAuthorizationsUtil.java b/web-services/common-util/src/main/java/datawave/security/util/WSAuthorizationsUtil.java index 2d2123bb149..de62b9bb600 100644 --- 
a/web-services/common-util/src/main/java/datawave/security/util/WSAuthorizationsUtil.java +++ b/web-services/common-util/src/main/java/datawave/security/util/WSAuthorizationsUtil.java @@ -17,6 +17,7 @@ public class WSAuthorizationsUtil extends AuthorizationsUtil { * Merge principals. This can be used to create a composite view of a principal when including remote systems * * @param principals + * the principal users * @return The merge principal */ public static DatawavePrincipal mergePrincipals(DatawavePrincipal... principals) { diff --git a/web-services/common-util/src/main/java/datawave/webservice/query/cache/ResultsPage.java b/web-services/common-util/src/main/java/datawave/webservice/query/cache/ResultsPage.java deleted file mode 100644 index b0fcad2497c..00000000000 --- a/web-services/common-util/src/main/java/datawave/webservice/query/cache/ResultsPage.java +++ /dev/null @@ -1,45 +0,0 @@ -package datawave.webservice.query.cache; - -import java.util.ArrayList; -import java.util.List; - -/** - * - */ -public class ResultsPage { - public enum Status { - NONE, PARTIAL, COMPLETE - }; - - private List results = null; - private Status status = null; - - public ResultsPage() { - this(new ArrayList<>()); - } - - public ResultsPage(List c) { - this(c, (c.isEmpty() ? Status.NONE : Status.COMPLETE)); - } - - public ResultsPage(List c, Status s) { - setResults(c); - setStatus(s); - } - - public Status getStatus() { - return status; - } - - public void setStatus(Status status) { - this.status = status; - } - - public List getResults() { - return results; - } - - public void setResults(List results) { - this.results = results; - } -} diff --git a/web-services/common/pom.xml b/web-services/common/pom.xml index ff3538043fd..dcf641c672a 100644 --- a/web-services/common/pom.xml +++ b/web-services/common/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-common ejb @@ -43,6 +43,17 @@ gov.nsa.datawave datawave-in-memory-accumulo + + gov.nsa.datawave.core + datawave-core-common + ${project.version} + + + gov.nsa.datawave.core + datawave-core-connection-pool + ${project.version} + jboss + gov.nsa.datawave.microservice audit-api @@ -295,7 +306,7 @@ lib - stream,accumulo-trace,accumulo-tserver,zookeeper,commons-pool2,spring-core,spring-asm,spring-aop,spring-beans,spring-context,spring-context-support,spring-expression,commons-lang,accumulo-core,libthrift,guava,jul-to-slf4j-stub,hadoop-auth,hadoop-common,hadoop-hdfs,hadoop-mapreduce-client-core,hadoop-yarn-api,hadoop-yarn-common,hadoop-mapreduce-client-common,hadoop-mapreduce-client-shuffle,hadoop-mapreduce-client-jobclient,hadoop-yarn-client,httpcore,httpclient,avro,commons-compress,xz,commons-vfs2,accumulo-start,commons-configuration,json-simple,aopalliance,curator-client,curator-framework,curator-recipes + stream,accumulo-trace,accumulo-tserver,zookeeper,commons-pool2,spring-core,spring-asm,spring-aop,spring-beans,spring-context,spring-context-support,spring-expression,commons-lang,accumulo-core,libthrift,guava,jul-to-slf4j-stub,hadoop-auth,hadoop-client-api,hadoop-client-runtime,hadoop-hdfs,hadoop-mapreduce-client-core,hadoop-yarn-api,hadoop-yarn-common,hadoop-mapreduce-client-common,hadoop-mapreduce-client-shuffle,hadoop-mapreduce-client-jobclient,hadoop-yarn-client,httpcore,httpclient,avro,commons-compress,xz,commons-vfs2,accumulo-start,commons-configuration,json-simple,aopalliance,curator-client,curator-framework,curator-recipes true diff --git 
a/web-services/common/src/main/java/datawave/metrics/remote/RemoteQueryMetricService.java b/web-services/common/src/main/java/datawave/metrics/remote/RemoteQueryMetricService.java index 80bc49af096..edc47e3fb9c 100644 --- a/web-services/common/src/main/java/datawave/metrics/remote/RemoteQueryMetricService.java +++ b/web-services/common/src/main/java/datawave/metrics/remote/RemoteQueryMetricService.java @@ -1,5 +1,6 @@ package datawave.metrics.remote; +import java.net.URI; import java.text.SimpleDateFormat; import java.util.Collection; import java.util.Date; @@ -15,7 +16,9 @@ import org.apache.deltaspike.core.api.config.ConfigProperty; import org.apache.http.HttpEntity; +import org.apache.http.client.utils.URIBuilder; import org.apache.http.entity.StringEntity; +import org.xbill.DNS.TextParseException; import com.codahale.metrics.Counter; import com.codahale.metrics.annotation.Metric; @@ -27,11 +30,11 @@ import datawave.configuration.RefreshableScope; import datawave.microservice.querymetric.BaseQueryMetric; import datawave.microservice.querymetric.BaseQueryMetricListResponse; +import datawave.microservice.querymetric.QueryGeometryResponse; import datawave.microservice.querymetric.QueryMetricsSummaryResponse; import datawave.security.authorization.DatawavePrincipal; import datawave.security.system.CallerPrincipal; import datawave.webservice.common.remote.RemoteHttpService; -import datawave.webservice.query.map.QueryGeometryResponse; import datawave.webservice.result.VoidResponse; /** @@ -43,13 +46,13 @@ @Priority(Interceptor.Priority.APPLICATION) public class RemoteQueryMetricService extends RemoteHttpService { - private static final String UPDATE_METRIC_SUFFIX = "updateMetric"; - private static final String UPDATE_METRICS_SUFFIX = "updateMetrics"; - private static final String ID_METRIC_SUFFIX = "id/%s"; - private static final String MAP_METRIC_SUFFIX = "id/%s/map"; - private static final String SUMMARY_ALL_SUFFIX = "summary/all"; - private static final String SUMMARY_USER_SUFFIX = "summary/user"; - private static final String AUTH_HEADER_NAME = "Authorization"; + public static final String UPDATE_METRIC_SUFFIX = "updateMetric"; + public static final String UPDATE_METRICS_SUFFIX = "updateMetrics"; + public static final String ID_METRIC_SUFFIX = "id/%s"; + public static final String MAP_METRIC_SUFFIX = "id/%s/map"; + public static final String SUMMARY_ALL_SUFFIX = "summary/all"; + public static final String SUMMARY_USER_SUFFIX = "summary/user"; + public static final String AUTH_HEADER_NAME = "Authorization"; private ObjectReader baseQueryMetricListResponseReader; private ObjectReader queryGeometryResponseReader; private ObjectReader queryMetricsSummaryResponseReader; @@ -78,6 +81,10 @@ public class RemoteQueryMetricService extends RemoteHttpService { @ConfigProperty(name = "dw.remoteQueryMetricService.port", defaultValue = "8443") private int servicePort; + @Inject + @ConfigProperty(name = "dw.remoteQueryMetricService.useConfiguredURIForRedirect", defaultValue = "false") + private boolean useConfiguredURIForRedirect; + @Inject @ConfigProperty(name = "dw.remoteQueryMetricService.uri", defaultValue = "/querymetric/v1/") private String serviceURI; @@ -224,6 +231,10 @@ public QueryMetricsSummaryResponse summaryUser(Date begin, Date end) { // @formatter:on } + public URIBuilder buildRedirectURI(String suffix, URI baseURI) throws TextParseException { + return buildRedirectURI(suffix, baseURI, useConfiguredURIForRedirect); + } + protected String getBearer() { return "Bearer " + 
jwtTokenHandler.createTokenFromUsers(callerPrincipal.getName(), callerPrincipal.getProxiedUsers()); } diff --git a/web-services/common/src/main/java/datawave/webservice/common/audit/AuditBean.java b/web-services/common/src/main/java/datawave/webservice/common/audit/AuditBean.java index 453688a386b..6a237bee723 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/audit/AuditBean.java +++ b/web-services/common/src/main/java/datawave/webservice/common/audit/AuditBean.java @@ -14,6 +14,7 @@ import org.apache.log4j.Logger; import org.jboss.resteasy.annotations.GZIP; +import org.springframework.util.MultiValueMap; import datawave.webservice.common.exception.DatawaveWebApplicationException; import datawave.webservice.query.exception.DatawaveErrorCode; @@ -55,7 +56,7 @@ public VoidResponse auditRest(MultivaluedMap parameters) { } } - public String audit(MultivaluedMap parameters) throws Exception { + public String audit(MultiValueMap parameters) throws Exception { return auditService.audit(auditParameterBuilder.convertAndValidate(parameters)); } } diff --git a/web-services/common/src/main/java/datawave/webservice/common/audit/AuditParameterBuilder.java b/web-services/common/src/main/java/datawave/webservice/common/audit/AuditParameterBuilder.java index 0b7ac168bf2..6bd8c63b8dd 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/audit/AuditParameterBuilder.java +++ b/web-services/common/src/main/java/datawave/webservice/common/audit/AuditParameterBuilder.java @@ -4,6 +4,8 @@ import javax.ws.rs.core.MultivaluedMap; +import org.springframework.util.MultiValueMap; + /** * A utility to extract parameters from a REST call and convert them, as necessary, into parameters that are required by the auditor. */ @@ -16,7 +18,7 @@ public interface AuditParameterBuilder { * the query parameters * @return validated parameters */ - Map convertAndValidate(MultivaluedMap queryParameters); + Map convertAndValidate(MultiValueMap queryParameters); /** * Builds validated audit parameters for a direct call to the audit service. 
That is, the parameters passed in are expected to be those used by the audit diff --git a/web-services/common/src/main/java/datawave/webservice/common/audit/DefaultAuditParameterBuilder.java b/web-services/common/src/main/java/datawave/webservice/common/audit/DefaultAuditParameterBuilder.java index 8b36f149e3b..80c0bda55a9 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/audit/DefaultAuditParameterBuilder.java +++ b/web-services/common/src/main/java/datawave/webservice/common/audit/DefaultAuditParameterBuilder.java @@ -7,14 +7,16 @@ import org.jboss.resteasy.specimpl.MultivaluedMapImpl; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.springframework.util.MultiValueMap; -import datawave.webservice.query.QueryParameters; +import datawave.core.common.audit.PrivateAuditConstants; +import datawave.microservice.query.QueryParameters; public class DefaultAuditParameterBuilder implements AuditParameterBuilder { private Logger log = LoggerFactory.getLogger(getClass().getName()); @Override - public Map convertAndValidate(MultivaluedMap queryParameters) { + public Map convertAndValidate(MultiValueMap queryParameters) { AuditParameters validatedParams = new AuditParameters(); MultivaluedMapImpl auditParams = new MultivaluedMapImpl<>(); diff --git a/web-services/common/src/main/java/datawave/webservice/common/cache/AccumuloTableCacheBean.java b/web-services/common/src/main/java/datawave/webservice/common/cache/AccumuloTableCacheBean.java new file mode 100644 index 00000000000..0c9bc484c50 --- /dev/null +++ b/web-services/common/src/main/java/datawave/webservice/common/cache/AccumuloTableCacheBean.java @@ -0,0 +1,213 @@ +package datawave.webservice.common.cache; + +import java.util.List; + +import javax.annotation.PostConstruct; +import javax.annotation.PreDestroy; +import javax.annotation.Resource; +import javax.annotation.security.DeclareRoles; +import javax.annotation.security.RolesAllowed; +import javax.annotation.security.RunAs; +import javax.ejb.Local; +import javax.ejb.LocalBean; +import javax.ejb.Lock; +import javax.ejb.LockType; +import javax.ejb.Schedule; +import javax.ejb.Singleton; +import javax.ejb.Startup; +import javax.enterprise.concurrent.ManagedExecutorService; +import javax.inject.Inject; +import javax.interceptor.Interceptors; +import javax.jms.Destination; +import javax.jms.JMSContext; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; + +import org.apache.deltaspike.core.api.config.ConfigProperty; +import org.apache.deltaspike.core.api.exclude.Exclude; +import org.apache.log4j.Logger; +import org.jboss.resteasy.annotations.GZIP; + +import datawave.accumulo.inmemory.InMemoryInstance; +import datawave.annotation.Required; +import datawave.configuration.DatawaveEmbeddedProjectStageHolder; +import datawave.core.common.cache.AccumuloTableCache; +import datawave.core.common.cache.AccumuloTableCacheImpl; +import datawave.core.common.cache.AccumuloTableCacheProperties; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.common.result.AccumuloTableCacheStatus; +import datawave.core.common.result.TableCacheDescription; +import datawave.interceptor.RequiredInterceptor; +import datawave.webservice.common.exception.DatawaveWebApplicationException; +import datawave.webservice.query.exception.QueryException; +import datawave.webservice.result.VoidResponse; + +/** + * Object that caches data from Accumulo tables. 
+ */ +@Path("/Common/AccumuloTableCache") +@RunAs("InternalUser") +@RolesAllowed({"AuthorizedUser", "AuthorizedQueryServer", "AuthorizedServer", "InternalUser", "Administrator", "JBossAdministrator"}) +@DeclareRoles({"AuthorizedUser", "AuthorizedQueryServer", "AuthorizedServer", "InternalUser", "Administrator", "JBossAdministrator"}) +@Local(AccumuloTableCache.class) +@LocalBean +@Startup +// tells the container to initialize on startup +@Singleton +// this is a singleton bean in the container +@Lock(LockType.READ) +@Exclude(ifProjectStage = DatawaveEmbeddedProjectStageHolder.DatawaveEmbedded.class) +public class AccumuloTableCacheBean implements AccumuloTableCache { + + private final Logger log = Logger.getLogger(this.getClass()); + + @Inject + private JMSContext jmsContext; + + @Resource(mappedName = "java:/topic/AccumuloTableCache") + private Destination cacheTopic; + + @Resource + private ManagedExecutorService executorService; + + @Inject + @ConfigProperty(name = "dw.warehouse.zookeepers") + private String zookeepers = null; + @SuppressWarnings("MismatchedQueryAndUpdateOfCollection") + @Inject + @ConfigProperty(name = "dw.cache.tableNames", defaultValue = "DatawaveMetadata,QueryMetrics_m,errorMetadata") + private List tableNames; + @Inject + @ConfigProperty(name = "dw.cache.pool", defaultValue = "WAREHOUSE") + private String poolName; + @Inject + @ConfigProperty(name = "dw.cache.reloadInterval", defaultValue = "86400000") + private long reloadInterval; + @Inject + @ConfigProperty(name = "dw.cacheCoordinator.evictionReaperIntervalSeconds", defaultValue = "30") + private int evictionReaperIntervalInSeconds; + @Inject + @ConfigProperty(name = "dw.cacheCoordinator.numLocks", defaultValue = "300") + private int numLocks; + @Inject + @ConfigProperty(name = "dw.cacheCoordinator.maxRetries", defaultValue = "10") + private int maxRetries; + + private AccumuloTableCacheImpl tableCache; + + public AccumuloTableCacheBean() {} + + @PostConstruct + private void setup() { + AccumuloTableCacheProperties config = new AccumuloTableCacheProperties().withTableNames(tableNames).withPoolName(poolName).withNumLocks(numLocks) + .withZookeepers(zookeepers).withMaxRetries(maxRetries).withReloadInterval(reloadInterval) + .withEvictionReaperIntervalInSeconds(evictionReaperIntervalInSeconds); + + log.debug("Called AccumuloTableCacheBean and accumuloTableCacheConfiguration = " + config); + + tableCache = new AccumuloTableCacheImpl(executorService, config); + } + + @Override + public void setConnectionFactory(AccumuloConnectionFactory connectionFactory) { + tableCache.setConnectionFactory(connectionFactory); + } + + @Override + public InMemoryInstance getInstance() { + return tableCache.getInstance(); + } + + @Schedule(hour = "*", minute = "*", second = "1", persistent = false) + @Override + public void submitReloadTasks() { + tableCache.submitReloadTasks(); + } + + @PreDestroy + public void stop() { + close(); + } + + @Override + public void close() { + tableCache.close(); + tableCache = null; + } + + /** + * JBossAdministrator or Administrator credentials required. 
+ * + * @param tableName + * the name of the table for which the cached version is to be reloaded + * @return datawave.webservice.result.VoidResponse + * @RequestHeader X-ProxiedEntitiesChain use when proxying request for user + * @RequestHeader X-ProxiedIssuersChain required when using X-ProxiedEntitiesChain, specify one issuer DN per subject DN listed in X-ProxiedEntitiesChain + * @RequestHeader query-session-id session id value used for load balancing purposes. query-session-id can be placed in the request in a Cookie header or as + * a query parameter + * @ResponseHeader X-OperationTimeInMS time spent on the server performing the operation, does not account for network or result serialization + * + * @HTTP 200 success + * @HTTP 404 queries not found using {@code id} + * @HTTP 500 internal server error + */ + @GET + @Path("/reload/{tableName}") + @Produces({"application/xml", "text/xml", "application/json", "text/yaml", "text/x-yaml", "application/x-yaml", "application/x-protobuf", + "application/x-protostuff"}) + @GZIP + @Interceptors(RequiredInterceptor.class) + public VoidResponse reloadCache(@Required("tableName") @PathParam("tableName") String tableName) { + VoidResponse response = new VoidResponse(); + try { + reloadTableCache(tableName); + } catch (Exception e) { + response.addException(new QueryException(e).getBottomQueryException()); + throw new DatawaveWebApplicationException(e, response); + } + return response; + } + + @Override + public void reloadTableCache(String tableName) { + tableCache.reloadTableCache(tableName); + sendCacheReloadMessage(tableName); + } + + /** + * JBossAdministrator or Administrator credentials required. + * + * @return datawave.webservice.common.result.AccumuloTableCacheStatus + * @RequestHeader X-ProxiedEntitiesChain use when proxying request for user + * @RequestHeader X-ProxiedIssuersChain required when using X-ProxiedEntitiesChain, specify one issuer DN per subject DN listed in X-ProxiedEntitiesChain + * @RequestHeader query-session-id session id value used for load balancing purposes. 
query-session-id can be placed in the request in a Cookie header or as + * a query parameter + * @ResponseHeader X-OperationTimeInMS time spent on the server performing the operation, does not account for network or result serialization + * + * @HTTP 200 success + */ + @GET + @Path("/") + @Produces({"application/xml", "text/xml", "application/json", "text/yaml", "text/x-yaml", "application/x-yaml", "application/x-protobuf", + "application/x-protostuff", "text/html"}) + @GZIP + public AccumuloTableCacheStatus getStatus() { + AccumuloTableCacheStatus response = new AccumuloTableCacheStatus(); + response.getCaches().addAll(getTableCaches()); + return response; + } + + @Override + public List getTableCaches() { + return tableCache.getTableCaches(); + } + + private void sendCacheReloadMessage(String tableName) { + log.warn("table:" + tableName + " sending cache reload message about table " + tableName); + + jmsContext.createProducer().send(cacheTopic, tableName); + } + +} diff --git a/web-services/common/src/main/java/datawave/webservice/common/cache/AccumuloTableCacheConfiguration.java b/web-services/common/src/main/java/datawave/webservice/common/cache/AccumuloTableCacheConfiguration.java deleted file mode 100644 index b6b98f24776..00000000000 --- a/web-services/common/src/main/java/datawave/webservice/common/cache/AccumuloTableCacheConfiguration.java +++ /dev/null @@ -1,53 +0,0 @@ -package datawave.webservice.common.cache; - -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import javax.annotation.PostConstruct; -import javax.inject.Inject; - -import org.apache.deltaspike.core.api.config.ConfigProperty; - -public class AccumuloTableCacheConfiguration { - - @Inject - @ConfigProperty(name = "dw.warehouse.zookeepers") - private String zookeepers = null; - @SuppressWarnings("MismatchedQueryAndUpdateOfCollection") - @Inject - @ConfigProperty(name = "dw.cache.tableNames", defaultValue = "DatawaveMetadata,QueryMetrics_m,errorMetadata") - private List tableNames; - @Inject - @ConfigProperty(name = "dw.cache.pool", defaultValue = "WAREHOUSE") - private String poolName; - @Inject - @ConfigProperty(name = "dw.cache.reloadInterval", defaultValue = "86400000") - private long reloadInterval; - - private Map caches = new HashMap<>(); - - @PostConstruct - private void initializeCaches() { - for (String tableName : tableNames) { - BaseTableCache cache = new BaseTableCache(); - cache.setTableName(tableName); - cache.setConnectionPoolName(poolName); - cache.setReloadInterval(reloadInterval); - caches.put(tableName, cache); - } - } - - public String getZookeepers() { - return zookeepers; - } - - public void setZookeepers(String zookeepers) { - this.zookeepers = zookeepers; - } - - public Map getCaches() { - return Collections.unmodifiableMap(caches); - } -} diff --git a/web-services/common/src/main/java/datawave/webservice/common/connection/AccumuloConnectionFactoryBean.java b/web-services/common/src/main/java/datawave/webservice/common/connection/AccumuloConnectionFactoryBean.java index 426450eceaf..87cf86b19e3 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/connection/AccumuloConnectionFactoryBean.java +++ b/web-services/common/src/main/java/datawave/webservice/common/connection/AccumuloConnectionFactoryBean.java @@ -1,17 +1,9 @@ package datawave.webservice.common.connection; import java.security.Principal; -import java.text.SimpleDateFormat; -import java.util.ArrayList; import java.util.Collection; -import 
java.util.Collections; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; import javax.annotation.PostConstruct; import javax.annotation.PreDestroy; @@ -20,6 +12,7 @@ import javax.annotation.security.PermitAll; import javax.annotation.security.RolesAllowed; import javax.annotation.security.RunAs; +import javax.ejb.EJB; import javax.ejb.EJBContext; import javax.ejb.Local; import javax.ejb.LocalBean; @@ -35,26 +28,21 @@ import javax.ws.rs.Produces; import org.apache.accumulo.core.client.AccumuloClient; -import org.apache.accumulo.core.client.admin.SecurityOperations; -import org.apache.accumulo.core.client.security.tokens.PasswordToken; -import org.apache.accumulo.core.util.Pair; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.mutable.MutableInt; +import org.apache.deltaspike.core.api.config.ConfigProperty; import org.apache.deltaspike.core.api.exclude.Exclude; import org.apache.deltaspike.core.api.jmx.JmxManaged; import org.apache.deltaspike.core.api.jmx.MBean; import org.apache.log4j.Logger; import org.jboss.resteasy.annotations.GZIP; -import datawave.accumulo.inmemory.InMemoryAccumuloClient; import datawave.configuration.DatawaveEmbeddedProjectStageHolder; +import datawave.core.common.cache.AccumuloTableCache; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.common.connection.AccumuloConnectionFactoryImpl; +import datawave.core.common.result.ConnectionFactoryResponse; +import datawave.core.common.result.ConnectionPool; import datawave.security.authorization.DatawavePrincipal; -import datawave.webservice.common.cache.AccumuloTableCache; -import datawave.webservice.common.connection.config.ConnectionPoolConfiguration; import datawave.webservice.common.connection.config.ConnectionPoolsConfiguration; -import datawave.webservice.common.result.Connection; -import datawave.webservice.common.result.ConnectionFactoryResponse; -import datawave.webservice.common.result.ConnectionPool; @Path("/Common/AccumuloConnectionFactory") @Produces({"application/xml", "text/xml", "application/json", "text/yaml", "text/x-yaml", "application/x-yaml", "text/html"}) @@ -82,126 +70,42 @@ public class AccumuloConnectionFactoryBean implements AccumuloConnectionFactory @Resource private EJBContext context; - @Inject + @EJB private AccumuloTableCache cache; - private Map> pools; + @Inject + @ConfigProperty(name = "dw.connectionPool.default", defaultValue = "WAREHOUSE") + private String defaultPool = null; + @SuppressWarnings("MismatchedQueryAndUpdateOfCollection") @Inject - private ConnectionPoolsConfiguration connectionPoolsConfiguration; + @ConfigProperty(name = "dw.connectionPool.pools", defaultValue = "WAREHOUSE,METRICS") + private List poolNames; - private String defaultPoolName = null; + private AccumuloConnectionFactory factory; @PostConstruct public void init() { - this.pools = new HashMap<>(); - - if (this.connectionPoolsConfiguration == null) { - log.error("connectionPoolsConfiguration was null - aborting init()"); - return; - } - HashMap> instances = new HashMap<>(); - this.defaultPoolName = connectionPoolsConfiguration.getDefaultPool(); - for (Entry entry : connectionPoolsConfiguration.getPools().entrySet()) { - Map p = new HashMap<>(); - ConnectionPoolConfiguration conf = entry.getValue(); - p.put(Priority.ADMIN, createConnectionPool(conf, conf.getAdminPriorityPoolSize())); - p.put(Priority.HIGH, 
createConnectionPool(conf, conf.getHighPriorityPoolSize())); - p.put(Priority.NORMAL, createConnectionPool(conf, conf.getNormalPriorityPoolSize())); - p.put(Priority.LOW, createConnectionPool(conf, conf.getLowPriorityPoolSize())); - this.pools.put(entry.getKey(), Collections.unmodifiableMap(p)); - try { - setupMockAccumuloUser(conf, p.get(Priority.NORMAL), instances); - } catch (Exception e) { - log.error("Error configuring mock accumulo user for AccumuloConnectionFactoryBean.", e); - } - - // Initialize the distributed tracing system. This needs to be done once at application startup. Since - // it is tied to Accumulo connections, we do it here in this singleton bean. - String appName = "datawave_ws"; - try { - appName = System.getProperty("app", "datawave_ws"); - } catch (SecurityException e) { - log.warn("Unable to retrieve system property \"app\": " + e.getMessage()); - } - } - - cache.setConnectionFactory(this); + ConnectionPoolsConfiguration config = new ConnectionPoolsConfiguration().withDefaultPool(defaultPool).withPoolNames(poolNames).build(); + factory = AccumuloConnectionFactoryImpl.getInstance(cache, config); } - private AccumuloClientPool createConnectionPool(ConnectionPoolConfiguration conf, int limit) { - AccumuloClientPoolFactory factory = new AccumuloClientPoolFactory(conf.getUsername(), conf.getPassword(), conf.getZookeepers(), conf.getInstance()); - AccumuloClientPool pool = new AccumuloClientPool(factory); - pool.setTestOnBorrow(true); - pool.setTestOnReturn(true); - pool.setMaxTotal(limit); - pool.setMaxIdle(-1); - - try { - pool.addObject(); - } catch (Exception e) { - log.error("Error pre-populating connection pool", e); - } - - return pool; + @PreDestroy + public void tearDown() { + close(); } - private void setupMockAccumuloUser(ConnectionPoolConfiguration conf, AccumuloClientPool pool, HashMap> instances) - throws Exception { - AccumuloClient c = null; + @Override + public void close() { try { - c = pool.borrowObject(new HashMap<>()); - - Pair pair = instances.get(cache.getInstance().getInstanceID()); - String user = "root"; - PasswordToken password = new PasswordToken(new byte[0]); - if (pair != null && user.equals(pair.getFirst())) - password = pair.getSecond(); - SecurityOperations security = cache.getInstance().getConnector(user, password).securityOperations(); - Set users = security.listLocalUsers(); - if (!users.contains(conf.getUsername())) { - security.createLocalUser(conf.getUsername(), new PasswordToken(conf.getPassword())); - security.changeUserAuthorizations(conf.getUsername(), c.securityOperations().getUserAuthorizations(conf.getUsername())); - } else { - PasswordToken newPassword = new PasswordToken(conf.getPassword()); - // If we're changing root's password, and trying to change then keep track of that. If we have multiple instances - // that specify mismatching passwords, then throw an error. 
-            if (user.equals(conf.getUsername())) {
-                if (pair != null && !newPassword.equals(pair.getSecond()))
-                    throw new IllegalStateException(
-                                    "Invalid AccumuloConnectionFactoryBean configuration--multiple pools are configured with different root passwords!");
-                instances.put(cache.getInstance().getInstanceID(), new Pair<>(conf.getUsername(), newPassword));
-            }
-            // match root's password on mock to the password on the actual Accumulo instance
-            security.changeLocalUserPassword(conf.getUsername(), newPassword);
-        }
+            factory.close();
+        } catch (Exception e) {
+            throw new RuntimeException(e);
         } finally {
-            pool.returnObject(c);
-        }
-    }
-
-    @PreDestroy
-    public void tearDown() {
-        for (Entry<String,Map<Priority,AccumuloClientPool>> entry : this.pools.entrySet()) {
-            for (Entry<Priority,AccumuloClientPool> poolEntry : entry.getValue().entrySet()) {
-                try {
-                    poolEntry.getValue().close();
-                } catch (Exception e) {
-                    log.error("Error closing Accumulo Connection Pool: " + e);
-                }
-            }
+            factory = null;
         }
     }

-    /**
-     * @param poolName
-     *            the name of the pool to query
-     * @return name of the user used in the connection pools
-     */
-    public String getConnectionUserName(String poolName) {
-        return connectionPoolsConfiguration.getPools().get(poolName).getUsername();
-    }
-
     /**
      * Gets a client from the pool with the assigned priority
      *
@@ -209,12 +113,19 @@ public String getConnectionUserName(String poolName) {
      *
      * @param priority
      *            the client's Priority
+     * @param trackingMap
+     *            a map of tracking information to associate with the borrowed client
      * @return accumulo client
      * @throws Exception
      *             if there are issues
      */
     public AccumuloClient getClient(Priority priority, Map<String,String> trackingMap) throws Exception {
-        return getClient(null, priority, trackingMap);
+        return getClient(getCurrentUserDN(), getCurrentProxyServers(), priority, trackingMap);
+    }
+
+    @Override
+    public AccumuloClient getClient(String userDN, Collection<String> proxyServers, Priority priority, Map<String,String> trackingMap) throws Exception {
+        return factory.getClient(userDN, proxyServers, priority, trackingMap);
     }

     /**
@@ -224,39 +135,20 @@ public AccumuloClient getClient(Priority priority, Map<String,String> trackingMap) throws Exception {
      *            the name of the pool to retrieve the client from
      * @param priority
      *            the priority of the client
-     * @param tm
+     * @param trackingMap
      *            the tracking map
      * @return Accumulo client
      * @throws Exception
      *             if there are issues
      */
-    public AccumuloClient getClient(final String cpn, final Priority priority, final Map<String,String> tm) throws Exception {
-        final Map<String,String> trackingMap = (tm != null) ? tm : new HashMap<>();
-        final String poolName = (cpn != null) ? cpn : defaultPoolName;
-
-        if (!priority.equals(Priority.ADMIN)) {
-            final String userDN = getCurrentUserDN();
-            if (userDN != null)
-                trackingMap.put("user.dn", userDN);
+    public AccumuloClient getClient(final String cpn, final Priority priority, final Map<String,String> trackingMap) throws Exception {
+        return getClient(getCurrentUserDN(), getCurrentProxyServers(), cpn, priority, trackingMap);
+    }

-            final Collection<String> proxyServers = getCurrentProxyServers();
-            if (proxyServers != null)
-                trackingMap.put("proxyServers", StringUtils.join(proxyServers, " -> "));
-        }
-        AccumuloClientPool pool = pools.get(poolName).get(priority);
-        AccumuloClient c = pool.borrowObject(trackingMap);
-        AccumuloClient mock = new InMemoryAccumuloClient(pool.getFactory().getUsername(), cache.getInstance());
-        mock.securityOperations().changeLocalUserPassword(pool.getFactory().getUsername(), new PasswordToken(pool.getFactory().getPassword()));
-        WrappedAccumuloClient wrappedAccumuloClient = new WrappedAccumuloClient(c, mock);
-        String classLoaderContext = System.getProperty("dw.accumulo.classLoader.context");
-        if (classLoaderContext != null) {
-            wrappedAccumuloClient.setScannerClassLoaderContext(classLoaderContext);
-        }
-        String timeout = System.getProperty("dw.accumulo.scan.batch.timeout.seconds");
-        if (timeout != null) {
-            wrappedAccumuloClient.setScanBatchTimeoutSeconds(Long.parseLong(timeout));
-        }
-        return wrappedAccumuloClient;
+    @Override
+    public AccumuloClient getClient(String userDN, Collection<String> proxyServers, String cpn, Priority priority, Map<String,String> trackingMap)
+                    throws Exception {
+        return factory.getClient(userDN, proxyServers, cpn, priority, trackingMap);
     }

     /**
@@ -264,40 +156,20 @@ public AccumuloClient getClient(final String cpn, final Priority priority, final Map<String,String> tm) throws Exception {
      *
      * @param client
      *            The client to return
+     * @throws Exception
+     *             if there are issues
      */
     @PermitAll // permit anyone to return a connection
-    public void returnClient(AccumuloClient client) {
-        if (client instanceof WrappedAccumuloClient) {
-            WrappedAccumuloClient wrappedAccumuloClient = (WrappedAccumuloClient) client;
-            wrappedAccumuloClient.clearScannerClassLoaderContext();
-            client = wrappedAccumuloClient.getReal();
-        }
-        for (Entry<String,Map<Priority,AccumuloClientPool>> entry : this.pools.entrySet()) {
-            for (Entry<Priority,AccumuloClientPool> poolEntry : entry.getValue().entrySet()) {
-                if (poolEntry.getValue().connectorCameFromHere(client)) {
-                    poolEntry.getValue().returnObject(client);
-                    return;
-                }
-            }
-        }
-        log.info("returnConnection called with connection that did not come from any AccumuloConnectionPool");
+    public void returnClient(AccumuloClient client) throws Exception {
+        factory.returnClient(client);
     }

     @PermitAll // permit anyone to get the report
     @JmxManaged
     public String report() {
-        StringBuilder buf = new StringBuilder();
-        for (Entry<String,Map<Priority,AccumuloClientPool>> entry : this.pools.entrySet()) {
-            buf.append("**** ").append(entry.getKey()).append(" ****\n");
-            buf.append("ADMIN: ").append(entry.getValue().get(Priority.ADMIN)).append("\n");
-            buf.append("HIGH: ").append(entry.getValue().get(Priority.HIGH)).append("\n");
-            buf.append("NORMAL: ").append(entry.getValue().get(Priority.NORMAL)).append("\n");
-            buf.append("LOW: ").append(entry.getValue().get(Priority.LOW)).append("\n");
-        }
-
-        return buf.toString();
+        return factory.report();
     }

     /**
@@ -314,109 +186,25 @@ public String report() {
     @RolesAllowed({"Administrator", "JBossAdministrator", "InternalUser"})
     public ConnectionFactoryResponse getConnectionFactoryMetrics() {
         ConnectionFactoryResponse response = new ConnectionFactoryResponse();
-        ArrayList<ConnectionPool> connectionPools = new ArrayList<>();
-
-        Set<String> exclude = new HashSet<>();
-        exclude.add("connection.state.start");
-        exclude.add("state");
-        exclude.add("request.location");
-
-        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
-
-        for (Entry<String,Map<Priority,AccumuloClientPool>> entry : this.pools.entrySet()) {
-            for (Entry<Priority,AccumuloClientPool> entry2 : entry.getValue().entrySet()) {
-                String poolName = entry.getKey();
-                Priority priority = entry2.getKey();
-                AccumuloClientPool p = entry2.getValue();
-
-                long now = System.currentTimeMillis();
-                MutableInt maxActive = new MutableInt();
-                MutableInt numActive = new MutableInt();
-                MutableInt maxIdle = new MutableInt();
-                MutableInt numIdle = new MutableInt();
-                MutableInt numWaiting = new MutableInt();
-                // getConnectionPoolStats will collect the tracking maps and maxActive, numActive, maxIdle, numIdle while synchronized
-                // to ensure consistency between the GenericObjectPool and the tracking maps
-                List<Map<String,String>> requestingConnectionsMap = p.getConnectionPoolStats(maxActive, numActive, maxIdle, numIdle, numWaiting);
-
-                ConnectionPool poolInfo = new ConnectionPool();
-                poolInfo.setPriority(priority.name());
-                poolInfo.setMaxActive(maxActive.toInteger());
-                poolInfo.setNumActive(numActive.toInteger());
-                poolInfo.setNumWaiting(numWaiting.toInteger());
-                poolInfo.setMaxIdle(maxIdle.toInteger());
-                poolInfo.setNumIdle(numIdle.toInteger());
-                poolInfo.setPoolName(poolName);
-
-                List<Connection> requestingConnections = new ArrayList<>();
-                for (Map<String,String> m : requestingConnectionsMap) {
-                    Connection c = new Connection();
-                    String state = m.get("state");
-                    if (state != null) {
-                        c.setState(state);
-                    }
-                    String requestLocation = m.get("request.location");
-                    if (requestLocation != null) {
-                        c.setRequestLocation(requestLocation);
-                    }
-                    String stateStart = m.get("connection.state.start");
-                    if (stateStart != null) {
-                        long stateStartLong = Long.parseLong(stateStart);
-                        c.setTimeInState((now - stateStartLong));
-                        Date stateStartDate = new Date(stateStartLong);
-                        c.addProperty("connection.state.start", sdf.format(stateStartDate));
-                    }
-                    for (Map.Entry<String,String> e : m.entrySet()) {
-                        if (!exclude.contains(e.getKey())) {
-                            c.addProperty(e.getKey(), e.getValue());
-                        }
-                    }
-                    requestingConnections.add(c);
-                }
-                Collections.sort(requestingConnections);
-                poolInfo.setConnectionRequests(requestingConnections);
-                connectionPools.add(poolInfo);
-            }
-        }
-        response.setConnectionPools(connectionPools);
+        response.setConnectionPools(getConnectionPools());
         return response;
     }

+    @Override
+    public List<ConnectionPool> getConnectionPools() {
+        return factory.getConnectionPools();
+    }
+
     @PermitAll
     @JmxManaged
     public int getConnectionUsagePercent() {
-        double maxPercentage = 0.0;
-        for (Entry<String,Map<Priority,AccumuloClientPool>> entry : pools.entrySet()) {
-            for (Entry<Priority,AccumuloClientPool> poolEntry : entry.getValue().entrySet()) {
-                // Don't include ADMIN priority connections when computing a usage percentage
-                if (Priority.ADMIN.equals(poolEntry.getKey()))
-                    continue;
-
-                MutableInt maxActive = new MutableInt();
-                MutableInt numActive = new MutableInt();
-                MutableInt numWaiting = new MutableInt();
-                MutableInt unused = new MutableInt();
-                poolEntry.getValue().getConnectionPoolStats(maxActive, numActive, unused, unused, numWaiting);
-
-                double percentage = (numActive.doubleValue() + numWaiting.doubleValue()) / maxActive.doubleValue();
-                if (percentage > maxPercentage) {
-                    maxPercentage = percentage;
-                }
-            }
-        }
-        return (int) (maxPercentage * 100);
+        return factory.getConnectionUsagePercent();
     }

     @Override
     @PermitAll
     public Map<String,String> getTrackingMap(StackTraceElement[] stackTrace) {
-        HashMap<String,String> trackingMap = new HashMap<>();
-        if (stackTrace != null) {
-            StackTraceElement ste = stackTrace[1];
-            trackingMap.put("request.location", ste.getClassName() + "." + ste.getMethodName() + ":" + ste.getLineNumber());
-        }
-
-        return trackingMap;
+        return factory.getTrackingMap(stackTrace);
     }

     public String getCurrentUserDN() {
diff --git a/web-services/common/src/main/java/datawave/webservice/common/connection/config/ConnectionPoolClientConfiguration.java b/web-services/common/src/main/java/datawave/webservice/common/connection/config/ConnectionPoolClientConfiguration.java
new file mode 100644
index 00000000000..7bbb066fe86
--- /dev/null
+++ b/web-services/common/src/main/java/datawave/webservice/common/connection/config/ConnectionPoolClientConfiguration.java
@@ -0,0 +1,41 @@
+package datawave.webservice.common.connection.config;
+
+import java.util.Map;
+
+import org.apache.accumulo.core.client.ScannerBase;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.deltaspike.core.api.config.ConfigResolver;
+import org.apache.log4j.Logger;
+
+import datawave.core.common.result.ConnectionPoolClientProperties;
+import datawave.webservice.common.connection.AccumuloClientConfiguration;
+
+/**
+ * The configuration for the connection pool clients, derived from properties of the form:
+ *
+ * dw.{pool}.client.{tableName}.consistency = IMMEDIATE|EVENTUAL
+ * dw.{pool}.client.{tableName}.{hintName} = {hintValue}
+ */
+public class ConnectionPoolClientConfiguration extends ConnectionPoolClientProperties {
+
+    private static final Logger log = Logger.getLogger(ConnectionPoolClientConfiguration.class);
+
+    public ConnectionPoolClientConfiguration(String poolName) {
+        String prefix = "dw." + poolName + ".client";
+        for (Map.Entry<String,String> property : ConfigResolver.getAllProperties().entrySet()) {
+            if (property.getKey().startsWith(prefix)) {
+                String[] tableAndHint = StringUtils.split(property.getKey().substring(prefix.length()), '.');
+                if (tableAndHint.length == 2) {
+                    if (tableAndHint[1].equals("consistency")) {
+                        config.setConsistency(tableAndHint[0], ScannerBase.ConsistencyLevel.valueOf(property.getValue()));
+                    } else {
+                        config.addHint(tableAndHint[0], tableAndHint[1], property.getValue());
+                    }
+                } else {
+                    log.error("Invalid client hint configuration property " + property.getKey());
+                }
+            }
+        }
+    }
+
+}
diff --git a/web-services/common/src/main/java/datawave/webservice/common/connection/config/ConnectionPoolConfiguration.java b/web-services/common/src/main/java/datawave/webservice/common/connection/config/ConnectionPoolConfiguration.java
index a4823d00beb..d858119bacd 100644
--- a/web-services/common/src/main/java/datawave/webservice/common/connection/config/ConnectionPoolConfiguration.java
+++ b/web-services/common/src/main/java/datawave/webservice/common/connection/config/ConnectionPoolConfiguration.java
@@ -1,24 +1,11 @@
 package datawave.webservice.common.connection.config;

 import org.apache.deltaspike.core.api.config.ConfigResolver;
-import org.apache.log4j.Logger;

-import datawave.webservice.common.logging.ThreadConfigurableLogger;
-import datawave.webservice.util.EnvProvider;
-
-public class ConnectionPoolConfiguration {
-
-    private static final Logger log = ThreadConfigurableLogger.getLogger(ConnectionPoolConfiguration.class);
-
-    private String username;
-    private String password;
-    private String instance;
-    private String zookeepers;
-    private int lowPriorityPoolSize;
-    private int normalPriorityPoolSize;
-    private int highPriorityPoolSize;
-    private int adminPriorityPoolSize;
+import datawave.core.common.result.ConnectionPoolProperties;
+import datawave.core.common.util.EnvProvider;

+public class ConnectionPoolConfiguration extends ConnectionPoolProperties {
     public ConnectionPoolConfiguration(String poolName) {
         username = ConfigResolver.getPropertyValue("dw." + poolName + ".accumulo.userName");
         password = resolvePassword(poolName);
@@ -41,37 +28,4 @@ protected String resolvePassword(String poolName) {
         String value = ConfigResolver.getPropertyValue("dw." + poolName + ".accumulo.password");
         return EnvProvider.resolve(value);
     }
-
-    public String getUsername() {
-        return username;
-    }
-
-    public String getPassword() {
-        return password;
-    }
-
-    public String getInstance() {
-        return instance;
-    }
-
-    public String getZookeepers() {
-        return zookeepers;
-    }
-
-    public int getLowPriorityPoolSize() {
-        return lowPriorityPoolSize;
-    }
-
-    public int getNormalPriorityPoolSize() {
-        return normalPriorityPoolSize;
-    }
-
-    public int getHighPriorityPoolSize() {
-        return highPriorityPoolSize;
-    }
-
-    public int getAdminPriorityPoolSize() {
-        return adminPriorityPoolSize;
-    }
-
 }
diff --git a/web-services/common/src/main/java/datawave/webservice/common/connection/config/ConnectionPoolsConfiguration.java b/web-services/common/src/main/java/datawave/webservice/common/connection/config/ConnectionPoolsConfiguration.java
index de9e8b94b01..8ba055ac1a5 100644
--- a/web-services/common/src/main/java/datawave/webservice/common/connection/config/ConnectionPoolsConfiguration.java
+++ b/web-services/common/src/main/java/datawave/webservice/common/connection/config/ConnectionPoolsConfiguration.java
@@ -1,41 +1,29 @@
 package datawave.webservice.common.connection.config;

 import java.util.Collections;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;

-import javax.annotation.PostConstruct;
-import javax.inject.Inject;
+import datawave.core.common.result.ConnectionPoolsProperties;

-import org.apache.deltaspike.core.api.config.ConfigProperty;
-
-public class ConnectionPoolsConfiguration {
-
-    @Inject
-    @ConfigProperty(name = "dw.connectionPool.default", defaultValue = "WAREHOUSE")
-    private String defaultPool = null;
-
-    @SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
-    @Inject
-    @ConfigProperty(name = "dw.connectionPool.pools", defaultValue = "WAREHOUSE,METRICS")
+public class ConnectionPoolsConfiguration extends ConnectionPoolsProperties {
     private List<String> poolNames;

-    private Map<String,ConnectionPoolConfiguration> pools = new HashMap<>();
-
-    @PostConstruct
-    private void initializePools() {
+    public ConnectionPoolsConfiguration build() {
         for (String poolName : poolNames) {
             pools.put(poolName, new ConnectionPoolConfiguration(poolName.toLowerCase()));
+            configs.put(poolName, new ConnectionPoolClientConfiguration(poolName.toLowerCase()));
         }
+        return this;
     }

-    public String getDefaultPool() {
-        return defaultPool;
+    public ConnectionPoolsConfiguration withPoolNames(List<String> poolNames) {
+        this.poolNames = poolNames;
+        return this;
     }

-    public Map<String,ConnectionPoolConfiguration> getPools() {
-        return Collections.unmodifiableMap(pools);
+    public ConnectionPoolsConfiguration withDefaultPool(String defaultPool) {
+        this.defaultPool = defaultPool;
+        return this;
     }
-
 }
diff --git a/web-services/common/src/main/java/datawave/webservice/common/health/HealthBean.java b/web-services/common/src/main/java/datawave/webservice/common/health/HealthBean.java
index e7d993fee68..d496e870179
--- a/web-services/common/src/main/java/datawave/webservice/common/health/HealthBean.java
+++
b/web-services/common/src/main/java/datawave/webservice/common/health/HealthBean.java @@ -46,7 +46,7 @@ import com.sun.management.OperatingSystemMXBean; import datawave.configuration.DatawaveEmbeddedProjectStageHolder; -import datawave.webservice.common.connection.AccumuloConnectionFactoryBean; +import datawave.core.common.connection.AccumuloConnectionFactory; import datawave.webservice.result.GenericResponse; @PermitAll @@ -66,7 +66,7 @@ public class HealthBean { private static String status = "ready"; @Inject - private AccumuloConnectionFactoryBean accumuloConnectionFactoryBean; + private AccumuloConnectionFactory accumuloConnectionFactoryBean; @Inject @ConfigProperty(name = "dw.health.connection.percent.limit", defaultValue = "200") diff --git a/web-services/common/src/main/java/datawave/webservice/common/json/DefaultMapperDecorator.java b/web-services/common/src/main/java/datawave/webservice/common/json/DefaultMapperDecorator.java index bc9de595280..64f19398674 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/json/DefaultMapperDecorator.java +++ b/web-services/common/src/main/java/datawave/webservice/common/json/DefaultMapperDecorator.java @@ -1,14 +1,17 @@ package datawave.webservice.common.json; +import com.fasterxml.jackson.databind.AnnotationIntrospector; import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.MapperFeature; import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.introspect.JacksonAnnotationIntrospector; import com.fasterxml.jackson.databind.module.SimpleModule; import com.fasterxml.jackson.datatype.guava.GuavaModule; +import com.fasterxml.jackson.module.jaxb.JaxbAnnotationIntrospector; import com.fasterxml.jackson.module.jaxb.JaxbAnnotationModule; import datawave.microservice.querymetric.BaseQueryMetricListResponse; -import datawave.microservice.querymetric.QueryMetricsDetailListResponse; +import datawave.microservice.querymetric.QueryMetricListResponse; import datawave.webservice.response.objects.DefaultKey; import datawave.webservice.response.objects.KeyBase; @@ -24,6 +27,8 @@ public ObjectMapper decorate(ObjectMapper mapper) { mapper.registerModule(new GuavaModule()); mapper.registerModule(new JaxbAnnotationModule()); mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES); + mapper.setAnnotationIntrospector( + AnnotationIntrospector.pair(new JacksonAnnotationIntrospector(), new JaxbAnnotationIntrospector(mapper.getTypeFactory()))); registerAbstractTypes(mapper); @@ -33,7 +38,7 @@ public ObjectMapper decorate(ObjectMapper mapper) { protected void registerAbstractTypes(ObjectMapper mapper) { SimpleModule module = new SimpleModule(KeyBase.class.getName()); module.addAbstractTypeMapping(KeyBase.class, DefaultKey.class); - module.addAbstractTypeMapping(BaseQueryMetricListResponse.class, QueryMetricsDetailListResponse.class); + module.addAbstractTypeMapping(BaseQueryMetricListResponse.class, QueryMetricListResponse.class); mapper.registerModule(module); } } diff --git a/web-services/common/src/main/java/datawave/webservice/common/remote/RemoteHttpService.java b/web-services/common/src/main/java/datawave/webservice/common/remote/RemoteHttpService.java index 248285118e8..46616b3fda4 100644 --- a/web-services/common/src/main/java/datawave/webservice/common/remote/RemoteHttpService.java +++ b/web-services/common/src/main/java/datawave/webservice/common/remote/RemoteHttpService.java @@ -4,6 +4,7 @@ import java.io.InputStream; import java.io.InputStreamReader; 
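// Aside: a minimal standalone sketch of the annotation-introspector pairing added to DefaultMapperDecorator above.
// Assumes jackson-databind and jackson-module-jaxb-annotations on the classpath; Jackson annotations are consulted
// first, with JAXB annotations as the fallback. The class and method names below are illustrative, not from this patch.
import com.fasterxml.jackson.databind.AnnotationIntrospector;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.introspect.JacksonAnnotationIntrospector;
import com.fasterxml.jackson.module.jaxb.JaxbAnnotationIntrospector;

public class MapperSketch {
    public static ObjectMapper build() {
        ObjectMapper mapper = new ObjectMapper();
        // pair(primary, fallback): Jackson annotations win when both are present on a property
        mapper.setAnnotationIntrospector(AnnotationIntrospector.pair(
                        new JacksonAnnotationIntrospector(),
                        new JaxbAnnotationIntrospector(mapper.getTypeFactory())));
        return mapper;
    }
}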
 import java.net.ConnectException;
+import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.UnknownHostException;
 import java.security.Key;
@@ -176,6 +177,11 @@ protected void init() {
             defaultHeaders.add(new BasicHeader("X-SSL-clientcert-issuer", DnUtils.normalizeDN(certs[0].getIssuerX500Principal().getName())));
         }

+        List<Class<? extends IOException>> nonRetriableClasses = getNonRetriableClasses();
+        List<Class<? extends IOException>> unavailableRetryClasses = getUnavailableRetryClasses();
+        DefaultHttpRequestRetryHandler datawaveRetryHandler = new DatawaveRetryHandler(retryCount(), unavailableRetryCount(), unavailableRetryDelay(),
+                        retryCounter(), nonRetriableClasses, unavailableRetryClasses);
+
         // @formatter:off
         client = HttpClients.custom()
                 .setSSLContext(ctx)
@@ -183,7 +189,7 @@ protected void init() {
                 .setDefaultHeaders(defaultHeaders)
                 .setMaxConnTotal(maxConnections())
                 .setMaxConnPerRoute(maxConnections())
-                .setRetryHandler(new DatawaveRetryHandler(retryCount(), unavailableRetryCount(), unavailableRetryDelay(), retryCounter()))
+                .setRetryHandler(datawaveRetryHandler)
                 .setServiceUnavailableRetryStrategy(new DatawaveUnavailableRetryStrategy(unavailableRetryCount(), unavailableRetryDelay(), retryCounter()))
                 .build();
         // @formatter:on
@@ -258,6 +264,17 @@ public URIBuilder buildURI(String suffix) throws TextParseException {
         return buildURI().setPath(serviceURI() + suffix);
     }

+    public URIBuilder buildRedirectURI(String suffix, URI baseURI, boolean useConfiguredBaseURI) throws TextParseException {
+        URIBuilder builder;
+        if (useConfiguredBaseURI) {
+            builder = buildURI();
+        } else {
+            builder = new URIBuilder(baseURI);
+        }
+        builder.setPath(serviceURI() + suffix);
+        return builder;
+    }
+
     protected <T> T executeGetMethod(Consumer<URIBuilder> uriCustomizer, Consumer<HttpGet> requestCustomizer, IOFunction<T> resultConverter,
                     Supplier<String> errorSupplier) throws URISyntaxException, IOException {
         return executeGetMethod("", uriCustomizer, requestCustomizer, resultConverter, errorSupplier);
@@ -347,7 +364,7 @@ public <T> T readResponse(HttpEntity entity, ObjectReader reader1, ObjectReader
                 log.error("Failed to read entity content. Trying as a VoidResponse.", ioe);
                 log.error(content);
                 VoidResponse response = voidResponseReader.readValue(content);
-                throw new RuntimeException(response.getMessages().toString());
+                throw new RuntimeException(String.valueOf(response.getMessages()), ioe1);
             }
         }
     }
@@ -370,11 +387,7 @@ public VoidResponse readVoidResponse(HttpEntity entity) throws IOException {
             return null;
         } else {
             VoidResponse response = voidResponseReader.readValue(entity.getContent());
-            if (response.getHasResults()) {
-                return response;
-            } else {
-                throw new RuntimeException(response.getMessages().toString());
-            }
+            return response;
         }
     }
@@ -562,30 +575,61 @@ public void setConfig(RemoteHttpServiceConfiguration config) {
         this.config = config;
     }

+    /**
+     * Classes that are instances of IOException that will cause DatawaveRetryHandler to retry with delay. Subclasses of RemoteHttpService should override this
+     * method if necessary.
+     */
+    protected List<Class<? extends IOException>> getUnavailableRetryClasses() {
+        return Arrays.asList(ConnectException.class);
+    }
+
+    /**
+     * Classes that are instances of IOException that should not cause a retry. Subclasses of RemoteHttpService should override this method if necessary. The
+     * default list of classes in DefaultHttpRequestRetryHandler is: InterruptedIOException.class, UnknownHostException.class, ConnectException.class,
+     * SSLException.class.
+     */
+    protected List<Class<? extends IOException>> getNonRetriableClasses() {
+        return Arrays.asList(UnknownHostException.class, SSLException.class);
+    }
+
     private static class DatawaveRetryHandler extends DefaultHttpRequestRetryHandler {
+        private static final Logger log = LoggerFactory.getLogger(DatawaveRetryHandler.class);
         private final int unavailableRetryCount;
         private final int unavailableRetryDelay;
         private final Counter retryCounter;
+        private List<Class<? extends IOException>> unavailableRetryClasses;

-        public DatawaveRetryHandler(int retryCount, int unavailableRetryCount, int unavailableRetryDelay, Counter retryCounter) {
-            super(retryCount, false, Arrays.asList(UnknownHostException.class, SSLException.class));
+        public DatawaveRetryHandler(int retryCount, int unavailableRetryCount, int unavailableRetryDelay, Counter retryCounter,
+                        List<Class<? extends IOException>> nonRetriableClasses, List<Class<? extends IOException>> unavailableRetryClasses) {
+            super(retryCount, false, nonRetriableClasses);
             this.unavailableRetryCount = unavailableRetryCount;
             this.unavailableRetryDelay = unavailableRetryDelay;
             this.retryCounter = retryCounter;
+            this.unavailableRetryClasses = unavailableRetryClasses;
         }

         @Override
         public boolean retryRequest(IOException exception, int executionCount, HttpContext context) {
             boolean shouldRetry = super.retryRequest(exception, executionCount, context);
-            if (exception instanceof ConnectException) {
+            // if any configured class is the same class as the exception, or a superclass of it, then retry with delay
+            boolean retryWithDelay = unavailableRetryClasses.stream().anyMatch(e -> e.isAssignableFrom(exception.getClass()));
+            if (retryWithDelay) {
                 shouldRetry = (executionCount <= unavailableRetryCount);
                 if (shouldRetry) {
                     try {
+                        if (log.isTraceEnabled()) {
+                            log.trace("retrying call after exception {}, executionCount {}, sleeping for {}ms", exception.getClass().getName(),
+                                            executionCount, unavailableRetryDelay);
+                        }
                         Thread.sleep(unavailableRetryDelay);
                     } catch (InterruptedException e) {
                         // Ignore -- we'll just end up retrying a little too fast
                     }
                 }
+            } else {
+                if (log.isTraceEnabled()) {
+                    log.trace("retrying call after exception {}, executionCount {}", exception.getClass().getName(), executionCount);
+                }
             }
             if (shouldRetry) {
                 retryCounter.inc();
@@ -595,6 +639,7 @@ public boolean retryRequest(IOException exception, int executionCount, HttpContext context) {
     }

     private static class DatawaveUnavailableRetryStrategy extends DefaultServiceUnavailableRetryStrategy {
+        private static final Logger log = LoggerFactory.getLogger(DatawaveUnavailableRetryStrategy.class);
         private final int maxRetries;
         private final Counter retryCounter;

@@ -607,10 +652,13 @@ private DatawaveUnavailableRetryStrategy(int maxRetries, int retryInterval, Counter retryCounter) {
         @Override
         public boolean retryRequest(HttpResponse response, int executionCount, HttpContext context) {
             // Note that a 404 can happen during service startup, so we want to retry.
- boolean shouldRetry = executionCount <= maxRetries && (response.getStatusLine().getStatusCode() == HttpStatus.SC_SERVICE_UNAVAILABLE - || response.getStatusLine().getStatusCode() == HttpStatus.SC_NOT_FOUND); + int statusCode = response.getStatusLine().getStatusCode(); + boolean shouldRetry = executionCount <= maxRetries && (statusCode == HttpStatus.SC_SERVICE_UNAVAILABLE || statusCode == HttpStatus.SC_NOT_FOUND); if (shouldRetry) { retryCounter.inc(); + if (log.isTraceEnabled()) { + log.trace("retrying call after statusCode {}, executionCount {}", statusCode, executionCount); + } } return shouldRetry; } @@ -619,5 +667,4 @@ public boolean retryRequest(HttpResponse response, int executionCount, HttpConte protected interface IOFunction { T apply(HttpEntity entity) throws IOException; } - } diff --git a/web-services/deploy/application/pom.xml b/web-services/deploy/application/pom.xml index e5958714b9e..3902115f4b9 100644 --- a/web-services/deploy/application/pom.xml +++ b/web-services/deploy/application/pom.xml @@ -4,11 +4,14 @@ gov.nsa.datawave.webservices datawave-ws-deploy-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-deploy-application ear ${project.artifactId} + + true + commons-configuration @@ -33,6 +36,12 @@ gov.nsa.datawave datawave-ingest-configuration ${project.version} + + + gov.nsa.datawave.core + datawave-core-common-util + + gov.nsa.datawave @@ -43,12 +52,28 @@ gov.nsa.datawave datawave-query-core ${project.version} + + + gov.nsa.datawave.core + datawave-core-common-util + + + gov.nsa.datawave.core + datawave-core-connection-pool + + gov.nsa.datawave.webservices datawave-ws-accumulo ${project.version} ejb + + + gov.nsa.datawave.core + datawave-core-common-util + + gov.nsa.datawave.webservices @@ -82,6 +107,12 @@ datawave-ws-common ${project.version} ejb + + + gov.nsa.datawave.core + datawave-core-connection-pool + + gov.nsa.datawave.webservices @@ -106,6 +137,12 @@ ${project.version} ejb + + gov.nsa.datawave.webservices + datawave-ws-metrics + ${project.version} + ejb + gov.nsa.datawave.webservices datawave-ws-model @@ -117,12 +154,32 @@ datawave-ws-modification ${project.version} ejb + + + gov.nsa.datawave.core + datawave-core-common-util + + + gov.nsa.datawave.core + datawave-core-connection-pool + + gov.nsa.datawave.webservices datawave-ws-query ${project.version} ejb + + + gov.nsa.datawave.core + datawave-core-common-util + + + gov.nsa.datawave.core + datawave-core-connection-pool + + gov.nsa.datawave.webservices @@ -347,6 +404,17 @@ ${project.basedir}/src/main/docker true + + runtime-config.clis + + + + + ${project.basedir}/src/main/docker + false + + runtime-config.clis + @@ -504,8 +572,20 @@ org.apache.hadoop - hadoop-common + hadoop-client-api + + + org.apache.hadoop + hadoop-client-runtime + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + + + org.apache.hadoop.thirdparty + hadoop-shaded-protobuf_3_7 + org.apache.hadoop.thirdparty hadoop-shaded-guava diff --git a/web-services/deploy/application/src/main/docker/Dockerfile b/web-services/deploy/application/src/main/docker/Dockerfile index 2078ce5a204..cc8a11b768d 100644 --- a/web-services/deploy/application/src/main/docker/Dockerfile +++ b/web-services/deploy/application/src/main/docker/Dockerfile @@ -1,7 +1,8 @@ -FROM azul/zulu-openjdk-centos:11.0.18-11.62.17 +FROM rockylinux/rockylinux:8 -RUN yum -y install deltarpm centos-release-sc && \ - chmod -R 777 /usr/lib/jvm/zulu11/bin/* && \ +RUN dnf -y install dnf-plugins-core epel-release java-11-openjdk-devel && \ + dnf config-manager --set-enabled powertools 
&& \ + chmod -R 777 /usr/lib/jvm/java-11/bin/* && \ groupadd -r jboss -g 1000 && \ useradd -u 1000 -r -g jboss -m -d /opt/jboss -s /sbin/nologin -c "JBoss User" jboss && \ groupadd -r hadoop && \ @@ -10,13 +11,12 @@ RUN yum -y install deltarpm centos-release-sc && \ chmod -R ug+rX,o-rx /opt/datawave /opt/jboss ENV JAVA_VERSION=11 \ - JAVA_BUILD=11.0.18 \ - JAVA_HOME=/usr/lib/jvm/zulu11 + JAVA_HOME=/usr/lib/jvm/java-11 + LABEL version="11" -RUN yum update -y && \ - yum install -y which less bind-utils net-tools lsof nethogs dstat strace htop iperf iperf3 socat iftop xmlstarlet saxon augeas bsdtar unzip && \ - yum -y erase deltarpm +RUN dnf update -y && \ + dnf install -y which less bind-utils net-tools lsof nethogs dstat strace htop iperf iperf3 socat iftop xmlstarlet augeas bsdtar unzip ENV WILDFLY_VERSION 17.0.1.Final \ JBOSS_HOME /opt/jboss/wildfly \ @@ -38,7 +38,6 @@ ENV WILDFLY_HOME=/opt/jboss/wildfly \ WORKDIR $WILDFLY_HOME - COPY overlay $WILDFLY_HOME/ COPY mysql $WILDFLY_HOME/mysql COPY *.cli $WILDFLY_HOME/tools/ diff --git a/web-services/deploy/application/src/main/docker/docker-entrypoint.sh b/web-services/deploy/application/src/main/docker/docker-entrypoint.sh index 005692bac18..ce9c8721ad0 100644 --- a/web-services/deploy/application/src/main/docker/docker-entrypoint.sh +++ b/web-services/deploy/application/src/main/docker/docker-entrypoint.sh @@ -15,7 +15,7 @@ wait_and_shutdown(){ TIMEOUT_MINUTES=75 fi - echo "Sending wait and shutdown command to the web service. Will wait up to $TIMEOUT_MINUTES for queries to complete." + echo "Sending wait and shutdown command to the web service. Will wait up to $TIMEOUT_MINUTES minutes for queries to complete." curl --fail -s -o /tmp/curl_shutdown.log http://localhost:8080/DataWave/Common/Health/shutdown?timeoutMinutes=$TIMEOUT_MINUTES CURL_STATUS=$? 
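# Aside: with --fail, curl exits non-zero on HTTP errors, so CURL_STATUS distinguishes an accepted
# shutdown request from a rejected one. For example (same URL as above, timeout value hypothetical):
#   curl --fail -s -o /dev/null "http://localhost:8080/DataWave/Common/Health/shutdown?timeoutMinutes=75"
#   echo $?    # 0 for a 2xx response, 22 for an HTTP error such as 503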
if [ $CURL_STATUS -ne 0 ]; then @@ -29,6 +29,12 @@ wait_and_shutdown(){ fi } + +echo "Capturing ENV Properties" +printenv > env.properties +echo "Setting Runtime Config" +$WILDFLY_HOME/bin/jboss-cli.sh --file=./runtime-config.cli --properties=env.properties + if [[ "$@" != *"bin/standalone.sh"* ]]; then exec "$@" else diff --git a/web-services/deploy/application/src/main/wildfly/add-datawave-configuration.cli b/web-services/deploy/application/src/main/wildfly/add-datawave-configuration.cli index efcef99eca7..13f4256a956 100644 --- a/web-services/deploy/application/src/main/wildfly/add-datawave-configuration.cli +++ b/web-services/deploy/application/src/main/wildfly/add-datawave-configuration.cli @@ -56,6 +56,8 @@ module add --name=com.mysql.driver --dependencies=javax.api,javax.transaction.ap /system-property=dw.hornetq.system.password:add(value=${hornetq.system.password}) /system-property=dw.modification.cache.mdb.pool.size:add(value=${modification.cache.mdb.pool.size}) /system-property=dw.trusted.header.authentication:add(value=${trusted.header.login}) +/system-property=dw.trusted.header.issuerDn:add(value=${trusted.header.issuer-header-name}) +/system-property=dw.trusted.header.subjectDn:add(value=${trusted.header.subject-header-name}) /system-property=dw.transport.guarantee:add(value=${webapp.transport.guarantee}) /system-property=dw.connectionPool.default:add(value=WAREHOUSE) /system-property=dw.connectionPool.pools:add(value="WAREHOUSE,METRICS,UUID") @@ -116,6 +118,21 @@ module add --name=com.mysql.driver --dependencies=javax.api,javax.transaction.ap /system-property=dw.remoteDatawaveUserService.host:add(value=${security.remoteuserservice.host}) /system-property=dw.remoteDatawaveUserService.port:add(value=${security.remoteuserservice.port}) +# Audit microservice config +/system-property=dw.audit.use.remoteauditservice:add(value=${auditing.use.remoteauditservice}) +/system-property=dw.remoteDatawaveAuditService.useSrvDnsLookup:add(value=${auditing.remoteauditservice.srv.lookup.enabled}) +/system-property=dw.remoteDatawaveAuditService.srvDnsServers:add(value=${auditing.remoteauditservice.srv.lookup.servers}) +/system-property=dw.remoteDatawaveAuditService.srvDnsPort:add(value=${auditing.remoteauditservice.srv.lookup.port}) +/system-property=dw.remoteDatawaveAuditService.scheme:add(value=${auditing.remoteauditservice.scheme}) +/system-property=dw.remoteDatawaveAuditService.host:add(value=${auditing.remoteauditservice.host}) +/system-property=dw.remoteDatawaveAuditService.port:add(value=${auditing.remoteauditservice.port}) + +# Dictionary microservice config +/system-property=dw.remoteDictionary.scheme:add(value=${dictionary.remoteservice.scheme}) +/system-property=dw.remoteDictionary.host:add(value=${dictionary.remoteservice.host}) +/system-property=dw.remoteDictionary.port:add(value=${dictionary.remoteservice.port}) +/system-property=dw.remoteDictionary.useConfiguredURIForRedirect:add(value=${dictionary.remoteservice.useConfiguredURIForRedirect}) + # Accumulo microservice config /system-property=dw.remoteAccumuloService.useSrvDnsLookup:add(value=${accumulo.remoteservice.srv.lookup.enabled}) /system-property=dw.remoteAccumuloService.srvDnsServers:add(value=${accumulo.remoteservice.srv.lookup.servers}) @@ -131,6 +148,7 @@ module add --name=com.mysql.driver --dependencies=javax.api,javax.transaction.ap /system-property=dw.remoteQueryMetricService.scheme:add(value=${querymetric.remoteservice.scheme}) 
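# Aside: each system property added here is resolved on the Java side through DeltaSpike's ConfigResolver.
# For example (hypothetical value), /system-property=dw.remoteDictionary.host:add(value=dictionary.example.com)
# would be read as: String host = ConfigResolver.getPropertyValue("dw.remoteDictionary.host");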
/system-property=dw.remoteQueryMetricService.host:add(value=${querymetric.remoteservice.host}) /system-property=dw.remoteQueryMetricService.port:add(value=${querymetric.remoteservice.port}) +/system-property=dw.remoteQueryMetricService.useConfiguredURIForRedirect:add(value=${querymetric.remoteservice.useConfiguredURIForRedirect}) # Disable SASL client authentication in zookeeper /system-property=zookeeper.sasl.client:add(value=false) diff --git a/web-services/deploy/application/src/main/wildfly/overlay/modules/org/apache/hadoop/common/main/module.xml b/web-services/deploy/application/src/main/wildfly/overlay/modules/org/apache/hadoop/common/main/module.xml index 1a1c16ca7d2..7e55a7eb489 100644 --- a/web-services/deploy/application/src/main/wildfly/overlay/modules/org/apache/hadoop/common/main/module.xml +++ b/web-services/deploy/application/src/main/wildfly/overlay/modules/org/apache/hadoop/common/main/module.xml @@ -2,7 +2,8 @@ - + + diff --git a/web-services/deploy/application/src/main/wildfly/runtime-config.cli b/web-services/deploy/application/src/main/wildfly/runtime-config.cli new file mode 100644 index 00000000000..23cd2e4a8b1 --- /dev/null +++ b/web-services/deploy/application/src/main/wildfly/runtime-config.cli @@ -0,0 +1,30 @@ + +# Start processing batch commands (MUST BE FIRST) +batch +# Run an embedded server to perform the configuration +embed-server --server-config=standalone-full.xml + + +/system-property=dw.warehouse.accumulo.password:add(value=${env.ACCUMULO_PASSWORD}) +/system-property=dw.metrics.accumulo.password:add(value=${env.ACCUMULO_PASSWORD}) +/system-property=dw.uuid.accumulo.password:add(value=${env.ACCUMULO_PASSWORD}) +/system-property=dw.mapreduce.securitydomain.keyStoreURL:add(value=file://${env.KEYSTORE}) +/system-property=dw.mapreduce.securitydomain.keyStoreType:add(value="${env.KEYSTORE_TYPE}") +/system-property=dw.mapreduce.securitydomain.keyStorePassword:add(value="${env.KEYSTORE_PASSWORD}") +/system-property=dw.mapreduce.securitydomain.trustStoreURL:add(value=file://${env.TRUSTSTORE}) +/system-property=dw.mapreduce.securitydomain.trustStoreType:add(value="${env.TRUSTSTORE_TYPE}") +/system-property=dw.mapreduce.securitydomain.trustStorePassword:add(value="${env.TRUSTSTORE_PASSWORD}") + + +/system-property=dw.mapreduce.securitydomain.keyStoreURL:add(value=file://${env.KEYSTORE}) +/system-property=dw.mapreduce.securitydomain.keyStoreType:add(value="${env.KEYSTORE_TYPE}") +/system-property=dw.mapreduce.securitydomain.keyStorePassword:add(value="${env.KEYSTORE_PASSWORD}") +/system-property=dw.mapreduce.securitydomain.trustStoreURL:add(value=file://${env.TRUSTSTORE}) +/system-property=dw.mapreduce.securitydomain.trustStoreType:add(value="${env.TRUSTSTORE_TYPE}") +/system-property=dw.mapreduce.securitydomain.trustStorePassword:add(value="${env.TRUSTSTORE_PASSWORD}") +/core-service=management/security-realm=SSLRealm/server-identity=ssl/:add(enabled-protocols=["TLSv1.1","TLSv1.2"],keystore-path=${env.KEYSTORE},keystore-provider=${env.KEYSTORE_TYPE},keystore-password="${env.KEYSTORE_PASSWORD}") +/core-service=management/security-realm=SSLRealm/authentication=truststore/:add(keystore-path=${env.TRUSTSTORE},keystore-provider=${env.TRUSTSTORE_TYPE},keystore-password="${env.TRUSTSTORE_PASSWORD}") 
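# Aside: the ${env.*} expressions above are resolved from the container environment, which the entrypoint
# also captures via "printenv > env.properties" for the jboss-cli invocation. For example (hypothetical
# paths), KEYSTORE=/opt/certs/keystore.p12 and KEYSTORE_TYPE=PKCS12 would yield
# keystore-path=/opt/certs/keystore.p12 and keystore-provider=PKCS12.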
+/subsystem=security/security-domain=datawave/jsse=classic:add(keystore={type="${env.KEYSTORE_TYPE}",password="${env.KEYSTORE_PASSWORD}",url="file://${env.KEYSTORE}"},truststore={type="${env.TRUSTSTORE_TYPE}",password="${env.TRUSTSTORE_PASSWORD}",url="file://${env.TRUSTSTORE}"}) + +# Run the batch commands (MUST BE LAST) +run-batch \ No newline at end of file diff --git a/web-services/deploy/configuration/pom.xml b/web-services/deploy/configuration/pom.xml index 40381a6accc..86c6b5604d1 100644 --- a/web-services/deploy/configuration/pom.xml +++ b/web-services/deploy/configuration/pom.xml @@ -4,10 +4,13 @@ gov.nsa.datawave.webservices datawave-ws-deploy-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-deploy-configuration jar + + true + ${project.artifactId} diff --git a/web-services/deploy/configuration/src/main/resources/datawave/mapreduce/MapReduceJobs.xml b/web-services/deploy/configuration/src/main/resources/datawave/mapreduce/MapReduceJobs.xml index dc9315ad322..3817a39f851 100644 --- a/web-services/deploy/configuration/src/main/resources/datawave/mapreduce/MapReduceJobs.xml +++ b/web-services/deploy/configuration/src/main/resources/datawave/mapreduce/MapReduceJobs.xml @@ -15,7 +15,7 @@ - org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat + org.apache.accumulo.hadoop.mapreduce.AccumuloInputFormat datawave.mr.bulk.BulkInputFormat @@ -48,7 +48,7 @@ - + diff --git a/web-services/deploy/configuration/src/main/resources/datawave/modification/ExampleModificationServices.xml b/web-services/deploy/configuration/src/main/resources/datawave/modification/ExampleModificationServices.xml index 762919576ea..0be73c2a3d6 100644 --- a/web-services/deploy/configuration/src/main/resources/datawave/modification/ExampleModificationServices.xml +++ b/web-services/deploy/configuration/src/main/resources/datawave/modification/ExampleModificationServices.xml @@ -19,7 +19,7 @@ --> - + @@ -51,4 +51,4 @@ - \ No newline at end of file + diff --git a/web-services/deploy/configuration/src/main/resources/datawave/modification/ModificationServices.xml b/web-services/deploy/configuration/src/main/resources/datawave/modification/ModificationServices.xml index 36fc620d46f..c574aa40e84 100644 --- a/web-services/deploy/configuration/src/main/resources/datawave/modification/ModificationServices.xml +++ b/web-services/deploy/configuration/src/main/resources/datawave/modification/ModificationServices.xml @@ -21,13 +21,13 @@ --> - + - + - + AuthorizedUser @@ -66,7 +66,7 @@ - + diff --git a/web-services/deploy/configuration/src/main/resources/datawave/query/EdgeQueryLogicFactory.xml b/web-services/deploy/configuration/src/main/resources/datawave/query/EdgeQueryLogicFactory.xml index ee701cd9289..09a505404dc 100644 --- a/web-services/deploy/configuration/src/main/resources/datawave/query/EdgeQueryLogicFactory.xml +++ b/web-services/deploy/configuration/src/main/resources/datawave/query/EdgeQueryLogicFactory.xml @@ -11,6 +11,7 @@ Provides a fall-back model in the event that the named query model 'modelName' isn't defined in DatawaveMetadata for whatever reason... 
--> + @@ -18,6 +19,7 @@ + diff --git a/web-services/deploy/configuration/src/main/resources/datawave/query/QueryExpiration.xml b/web-services/deploy/configuration/src/main/resources/datawave/query/QueryExpiration.xml index b7998745fd7..045e5d0480c 100644 --- a/web-services/deploy/configuration/src/main/resources/datawave/query/QueryExpiration.xml +++ b/web-services/deploy/configuration/src/main/resources/datawave/query/QueryExpiration.xml @@ -11,16 +11,16 @@ http://www.springframework.org/schema/util http://www.springframework.org/schema/util/spring-util-4.0.xsd"> - + - + - + - + - + diff --git a/web-services/deploy/configuration/src/main/resources/datawave/query/QueryLogicFactory.xml b/web-services/deploy/configuration/src/main/resources/datawave/query/QueryLogicFactory.xml index fb142291aaf..08cc4b74187 100644 --- a/web-services/deploy/configuration/src/main/resources/datawave/query/QueryLogicFactory.xml +++ b/web-services/deploy/configuration/src/main/resources/datawave/query/QueryLogicFactory.xml @@ -11,8 +11,6 @@ http://www.springframework.org/schema/util http://www.springframework.org/schema/util/spring-util-4.0.xsd"> - - @@ -39,6 +37,7 @@ + @@ -151,11 +150,11 @@ + - - + - + @@ -164,21 +163,21 @@ - + - + - + - - + + @@ -242,12 +241,6 @@ - - - - @@ -268,6 +261,8 @@ + + @@ -344,12 +339,17 @@ + + + + ${lookup.uuid.uuidTypes} + @@ -398,9 +398,14 @@ - + + + + + + - + @@ -446,7 +451,7 @@ - + @@ -526,6 +531,7 @@ + @@ -544,6 +550,7 @@ + @@ -586,6 +593,8 @@ + + @@ -598,12 +607,6 @@ - - - - @@ -664,10 +667,6 @@ - - - - - + @@ -18,4 +18,29 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/web-services/deploy/docs/docs/index.html b/web-services/deploy/docs/docs/index.html index 460a3bba961..7cedff3a0bd 100644 --- a/web-services/deploy/docs/docs/index.html +++ b/web-services/deploy/docs/docs/index.html @@ -23,7 +23,7 @@ API Documents
  • @@ -45,11 +45,11 @@

    Welcome to DataWave

    diff --git a/web-services/deploy/docs/pom.xml b/web-services/deploy/docs/pom.xml index 9c42361c703..65cdbab6453 100644 --- a/web-services/deploy/docs/pom.xml +++ b/web-services/deploy/docs/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-deploy-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-deploy-docs war @@ -18,6 +18,23 @@ ${project.artifactId} + + maven-antrun-plugin + + + create-enunciate-directory + + run + + process-resources + + + + + + + + maven-dependency-plugin diff --git a/web-services/deploy/pom.xml b/web-services/deploy/pom.xml index aa4b2e09892..c39e8701963 100644 --- a/web-services/deploy/pom.xml +++ b/web-services/deploy/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT gov.nsa.datawave.webservices datawave-ws-deploy-parent diff --git a/web-services/deploy/spring-framework-integration/pom.xml b/web-services/deploy/spring-framework-integration/pom.xml index 6091bf69ab0..837e3fa8dc7 100644 --- a/web-services/deploy/spring-framework-integration/pom.xml +++ b/web-services/deploy/spring-framework-integration/pom.xml @@ -4,10 +4,13 @@ gov.nsa.datawave.webservices datawave-ws-deploy-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT spring-framework-integration ${project.artifactId} + + true + org.easymock @@ -46,6 +49,12 @@ datawave-query-core ${project.version} test + + + jakarta.validation + jakarta.validation-api + + gov.nsa.datawave.webservices diff --git a/web-services/deploy/spring-framework-integration/src/test/java/datawave/springframework/integration/WiredQueryExecutorBeanTest.java b/web-services/deploy/spring-framework-integration/src/test/java/datawave/springframework/integration/WiredQueryExecutorBeanTest.java index 23141c05818..682c2069ab4 100644 --- a/web-services/deploy/spring-framework-integration/src/test/java/datawave/springframework/integration/WiredQueryExecutorBeanTest.java +++ b/web-services/deploy/spring-framework-integration/src/test/java/datawave/springframework/integration/WiredQueryExecutorBeanTest.java @@ -19,6 +19,10 @@ import org.junit.runner.RunWith; import org.springframework.context.ApplicationContext; +import datawave.core.query.logic.QueryLogic; +import datawave.core.query.logic.composite.CompositeQueryLogic; +import datawave.core.query.result.event.DefaultResponseObjectFactory; +import datawave.microservice.query.config.QueryExpirationProperties; import datawave.query.discovery.DiscoveryLogic; import datawave.query.metrics.QueryMetricQueryLogic; import datawave.query.planner.BooleanChunkingQueryPlanner; @@ -35,16 +39,10 @@ import datawave.query.util.DateIndexHelperFactory; import datawave.security.authorization.DatawavePrincipal; import datawave.security.system.CallerPrincipal; +import datawave.security.system.ServerPrincipal; import datawave.webservice.common.json.DefaultMapperDecorator; -import datawave.webservice.edgedictionary.EdgeDictionaryResponseTypeProducer; import datawave.webservice.edgedictionary.RemoteEdgeDictionary; -import datawave.webservice.query.cache.QueryExpirationConfiguration; -import datawave.webservice.query.logic.DatawaveRoleManager; -import datawave.webservice.query.logic.EasyRoleManager; -import datawave.webservice.query.logic.QueryLogic; import datawave.webservice.query.logic.QueryLogicFactoryImpl; -import datawave.webservice.query.logic.composite.CompositeQueryLogic; -import datawave.webservice.query.result.event.DefaultResponseObjectFactory; import datawave.webservice.results.cached.CachedResultsConfiguration; /** @@ -58,6 +56,12 @@ public 
class WiredQueryExecutorBeanTest { @Inject ApplicationContext ctx; + @Produces + @ServerPrincipal + public DatawavePrincipal produceServerPrincipal() { + return new DatawavePrincipal(); + } + @Deployment public static JavaArchive createDeployment() throws Exception { System.setProperty("cdi.bean.context", "springFrameworkBeanRefContext.xml"); @@ -71,12 +75,11 @@ public static JavaArchive createDeployment() throws Exception { return ShrinkWrap.create(JavaArchive.class) .addPackages(true, "org.apache.deltaspike", "io.astefanutti.metrics.cdi", "datawave.data.type", "datawave.query.language.parser.jexl", "datawave.query.language.functions.jexl", "datawave.webservice.query.configuration", "datawave.configuration") - .addClasses(DefaultResponseObjectFactory.class, QueryExpirationConfiguration.class, FacetedQueryPlanner.class, FacetedQueryLogic.class, + .addClasses(DefaultResponseObjectFactory.class, QueryExpirationProperties.class, FacetedQueryPlanner.class, FacetedQueryLogic.class, DefaultQueryPlanner.class, BooleanChunkingQueryPlanner.class, ShardQueryLogic.class, CountingShardQueryLogic.class, EventQueryDataDecoratorTransformer.class, FieldIndexCountQueryLogic.class, CompositeQueryLogic.class, QueryMetricQueryLogic.class, TLDQueryLogic.class, ParentQueryLogic.class, DiscoveryLogic.class, IndexQueryLogic.class, - QueryLogicFactoryImpl.class, DatawaveRoleManager.class, EasyRoleManager.class, CachedResultsConfiguration.class, - DateIndexHelperFactory.class, EdgeDictionaryResponseTypeProducer.class, RemoteEdgeDictionary.class, + QueryLogicFactoryImpl.class, CachedResultsConfiguration.class, DateIndexHelperFactory.class, DefaultMapperDecorator.class) .addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml"); } @@ -87,15 +90,18 @@ public void testCreatingContext() throws Exception { Assert.assertNotNull(defaultQueryPlanner); } + // This test ensures that we can + // 1) generate a query logic via xml configuration + // 2) inject an xml generated reference bean into the generated query logic @Test public void testCreatingPrototypeBeans() { String[] names = ctx.getBeanNamesForType(QueryLogic.class); for (String name : names) { QueryLogic ql = ctx.getBean(name, QueryLogic.class); - if (ql.getRoleManager() == null) { - log.error("role manager is null for " + name + " and " + ql + " named " + ql.getLogicName() + " and " + ql.getClass()); + if (ql.getResponseObjectFactory() == null) { + log.error("response object factory is null for " + name + " and " + ql + " named " + ql.getLogicName() + " and " + ql.getClass()); } - Assert.assertNotNull(ql.getRoleManager()); + Assert.assertNotNull(ql.getResponseObjectFactory()); log.debug("got " + ql); } } @@ -103,6 +109,8 @@ public void testCreatingPrototypeBeans() { private static JSSESecurityDomain mockJsseSecurityDomain = EasyMock.createMock(JSSESecurityDomain.class); private static DatawavePrincipal mockDatawavePrincipal = EasyMock.createMock(DatawavePrincipal.class); + private static RemoteEdgeDictionary mockRemoteEdgeDictionary = EasyMock.createMock(RemoteEdgeDictionary.class); + public static class Producer { @Produces public static JSSESecurityDomain produceSecurityDomain() { @@ -114,5 +122,10 @@ public static JSSESecurityDomain produceSecurityDomain() { public static DatawavePrincipal produceDatawavePrincipal() { return mockDatawavePrincipal; } + + @Produces + public static RemoteEdgeDictionary produceRemoteEdgeDictionary() { + return mockRemoteEdgeDictionary; + } } } diff --git a/web-services/dictionary/pom.xml b/web-services/dictionary/pom.xml index 
40b9205f76a..c26e60c41ca 100644 --- a/web-services/dictionary/pom.xml +++ b/web-services/dictionary/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-dictionary ejb @@ -20,6 +20,10 @@ datawave-edge-dictionary-core ${project.version} + + org.jboss.resteasy + resteasy-jaxrs + ${project.artifactId} diff --git a/web-services/dictionary/src/main/java/datawave/webservice/dictionary/DataDictionaryBean.java b/web-services/dictionary/src/main/java/datawave/webservice/dictionary/DataDictionaryBean.java index e53b33a4013..26f946f3773 100644 --- a/web-services/dictionary/src/main/java/datawave/webservice/dictionary/DataDictionaryBean.java +++ b/web-services/dictionary/src/main/java/datawave/webservice/dictionary/DataDictionaryBean.java @@ -50,7 +50,7 @@ public Response getDataDictionaryWithSuffix(@PathParam("suffix") String suffix, } private Response sendRedirect(String suffix, UriInfo uriInfo) throws TextParseException, URISyntaxException { - URIBuilder builder = remoteDataDictionary.buildURI(suffix); + URIBuilder builder = remoteDataDictionary.buildRedirectURI(suffix, uriInfo.getBaseUri()); uriInfo.getQueryParameters().forEach((pname, valueList) -> valueList.forEach(pvalue -> builder.addParameter(pname, pvalue))); return Response.temporaryRedirect(builder.build()).build(); } diff --git a/web-services/dictionary/src/main/java/datawave/webservice/dictionary/EdgeDictionaryBean.java b/web-services/dictionary/src/main/java/datawave/webservice/dictionary/EdgeDictionaryBean.java index cd3ed651e62..e2359d82f1b 100644 --- a/web-services/dictionary/src/main/java/datawave/webservice/dictionary/EdgeDictionaryBean.java +++ b/web-services/dictionary/src/main/java/datawave/webservice/dictionary/EdgeDictionaryBean.java @@ -12,6 +12,7 @@ import javax.ws.rs.core.Response; import javax.ws.rs.core.UriInfo; +import org.apache.deltaspike.core.api.config.ConfigProperty; import org.apache.http.client.utils.URIBuilder; import org.xbill.DNS.TextParseException; @@ -42,7 +43,7 @@ public class EdgeDictionaryBean { @GET @Path("/") public Response getEdgeDictionary(@Context UriInfo uriInfo) throws TextParseException, URISyntaxException { - URIBuilder builder = remoteEdgeDictionary.buildURI(""); + URIBuilder builder = remoteEdgeDictionary.buildRedirectURI("", uriInfo.getBaseUri()); uriInfo.getQueryParameters().forEach((pname, valueList) -> valueList.forEach(pvalue -> builder.addParameter(pname, pvalue))); return Response.temporaryRedirect(builder.build()).build(); } diff --git a/web-services/examples/client-login/pom.xml b/web-services/examples/client-login/pom.xml index 95556c5e4af..894dc78d8af 100644 --- a/web-services/examples/client-login/pom.xml +++ b/web-services/examples/client-login/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-examples-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-examples-client-login ejb diff --git a/web-services/examples/http-client/pom.xml b/web-services/examples/http-client/pom.xml index 394b66954f0..1e6d633a372 100644 --- a/web-services/examples/http-client/pom.xml +++ b/web-services/examples/http-client/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-examples-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-examples-http-client jar diff --git a/web-services/examples/jms-client/pom.xml b/web-services/examples/jms-client/pom.xml index 7ab307c6810..cb1b0e29a30 100644 --- a/web-services/examples/jms-client/pom.xml +++ b/web-services/examples/jms-client/pom.xml @@ -4,7 +4,7 @@ 
gov.nsa.datawave.webservices datawave-ws-examples-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-examples-jms-client jar @@ -13,7 +13,7 @@ com.sun.xml.bind jaxb-impl - 2.3.3 + ${version.jaxb-impl} gov.nsa.datawave.webservices @@ -27,7 +27,7 @@ jakarta.xml.bind jakarta.xml.bind-api - 2.3.3 + ${version.jakarta} org.jboss.spec.javax.jms diff --git a/web-services/examples/pom.xml b/web-services/examples/pom.xml index f4efa872507..ff8dc100e50 100644 --- a/web-services/examples/pom.xml +++ b/web-services/examples/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-examples-parent pom diff --git a/web-services/examples/query-war/pom.xml b/web-services/examples/query-war/pom.xml index d5af2d54b29..cd043708f21 100644 --- a/web-services/examples/query-war/pom.xml +++ b/web-services/examples/query-war/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-examples-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-examples-query-war war diff --git a/web-services/map-reduce-embedded/pom.xml b/web-services/map-reduce-embedded/pom.xml index a6ee0cd6809..7374807a9f0 100644 --- a/web-services/map-reduce-embedded/pom.xml +++ b/web-services/map-reduce-embedded/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-map-reduce-embedded jar diff --git a/web-services/map-reduce-embedded/src/main/java/datawave/webservice/common/connection/EmbeddedAccumuloConnectionFactory.java b/web-services/map-reduce-embedded/src/main/java/datawave/webservice/common/connection/EmbeddedAccumuloConnectionFactory.java index 6c455e28ff1..8e0df930645 100644 --- a/web-services/map-reduce-embedded/src/main/java/datawave/webservice/common/connection/EmbeddedAccumuloConnectionFactory.java +++ b/web-services/map-reduce-embedded/src/main/java/datawave/webservice/common/connection/EmbeddedAccumuloConnectionFactory.java @@ -1,6 +1,9 @@ package datawave.webservice.common.connection; +import java.util.Collection; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import javax.annotation.PostConstruct; @@ -9,6 +12,11 @@ import org.apache.deltaspike.core.api.config.ConfigProperty; import org.apache.log4j.Logger; +import datawave.core.common.connection.AccumuloClientPool; +import datawave.core.common.connection.AccumuloClientPoolFactory; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.common.result.ConnectionPool; + public class EmbeddedAccumuloConnectionFactory implements AccumuloConnectionFactory { private Logger log = Logger.getLogger(this.getClass()); @@ -50,17 +58,13 @@ private AccumuloClientPool createClientPool(int limit) { } @Override - public String getConnectionUserName(String poolName) { - return this.userName; - } - - @Override - public AccumuloClient getClient(Priority priority, Map trackingMap) throws Exception { + public AccumuloClient getClient(String userDN, Collection proxiedDNs, Priority priority, Map trackingMap) throws Exception { return pool.borrowObject(trackingMap); } @Override - public AccumuloClient getClient(String poolName, Priority priority, Map trackingMap) throws Exception { + public AccumuloClient getClient(String userDN, Collection proxiedDNs, String poolName, Priority priority, Map trackingMap) + throws Exception { return pool.borrowObject(trackingMap); } @@ -69,6 +73,21 @@ public void returnClient(AccumuloClient client) throws Exception { pool.returnObject(client); } + 
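// Aside: usage sketch for the revised AccumuloConnectionFactory interface (the pool name, DNs, and
// variable names here are hypothetical; the tracking map comes from the factory itself):
//   Map<String,String> trackingMap = factory.getTrackingMap(Thread.currentThread().getStackTrace());
//   AccumuloClient client = factory.getClient(userDN, proxiedDNs, "WAREHOUSE", Priority.NORMAL, trackingMap);
//   try {
//       // ... use the client ...
//   } finally {
//       factory.returnClient(client);
//   }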
@Override + public String report() { + return pool.toString(); + } + + @Override + public List getConnectionPools() { + return Collections.EMPTY_LIST; + } + + @Override + public int getConnectionUsagePercent() { + return 0; + } + @Override public Map getTrackingMap(StackTraceElement[] stackTrace) { HashMap trackingMap = new HashMap<>(); @@ -79,4 +98,9 @@ public Map getTrackingMap(StackTraceElement[] stackTrace) { return trackingMap; } + + @Override + public void close() throws Exception { + pool.close(); + } } diff --git a/web-services/map-reduce-status/pom.xml b/web-services/map-reduce-status/pom.xml index 7616f4276e9..9920d1b606d 100644 --- a/web-services/map-reduce-status/pom.xml +++ b/web-services/map-reduce-status/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-map-reduce-status ejb diff --git a/web-services/map-reduce/pom.xml b/web-services/map-reduce/pom.xml index 5cd431de063..8f1c89a09b6 100644 --- a/web-services/map-reduce/pom.xml +++ b/web-services/map-reduce/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-map-reduce ejb @@ -14,6 +14,21 @@ commons-configuration commons-configuration + + gov.nsa.datawave.core + datawave-core-map-reduce + ${project.version} + + + * + * + + + + + gov.nsa.datawave.microservice + mapreduce-query-api + javax.enterprise cdi-api @@ -42,10 +57,6 @@ org.apache.hadoop hadoop-yarn-api - - org.apache.hadoop.thirdparty - hadoop-shaded-protobuf_3_7 - org.apache.oozie oozie-client @@ -127,6 +138,11 @@ commons-configuration2 provided + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + provided + org.jboss.resteasy resteasy-jaxrs @@ -161,7 +177,7 @@ org.codehaus.jackson jackson-mapper-asl - 1.9.13 + ${version.jackson-mapper-asl} runtime @@ -174,6 +190,11 @@ junit test + + org.apache.hadoop.thirdparty + hadoop-shaded-protobuf_3_7 + test + org.springframework spring-expression diff --git a/web-services/map-reduce/src/main/java/datawave/webservice/mr/MapReduceBean.java b/web-services/map-reduce/src/main/java/datawave/webservice/mr/MapReduceBean.java index c5c0d21f288..d0943b3d0d4 100644 --- a/web-services/map-reduce/src/main/java/datawave/webservice/mr/MapReduceBean.java +++ b/web-services/map-reduce/src/main/java/datawave/webservice/mr/MapReduceBean.java @@ -43,6 +43,7 @@ import javax.ws.rs.core.MultivaluedMap; import javax.ws.rs.core.StreamingOutput; +import org.apache.accumulo.core.client.mapred.AccumuloInputFormat; import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; import org.apache.commons.lang.StringUtils; @@ -67,6 +68,9 @@ import datawave.annotation.Required; import datawave.configuration.DatawaveEmbeddedProjectStageHolder; import datawave.configuration.spring.SpringBean; +import datawave.core.common.audit.PrivateAuditConstants; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.query.logic.QueryLogicFactory; import datawave.marking.SecurityMarking; import datawave.security.authorization.DatawavePrincipal; import datawave.security.system.ServerPrincipal; @@ -74,8 +78,6 @@ import datawave.webservice.common.audit.AuditBean; import datawave.webservice.common.audit.AuditParameters; import datawave.webservice.common.audit.Auditor; -import datawave.webservice.common.audit.PrivateAuditConstants; -import datawave.webservice.common.connection.AccumuloConnectionFactory; import 
datawave.webservice.common.connection.config.ConnectionPoolsConfiguration; import datawave.webservice.common.exception.BadRequestException; import datawave.webservice.common.exception.DatawaveWebApplicationException; @@ -101,7 +103,7 @@ import datawave.webservice.query.exception.QueryException; import datawave.webservice.query.exception.UnauthorizedQueryException; import datawave.webservice.query.factory.Persister; -import datawave.webservice.query.logic.QueryLogicFactory; +import datawave.webservice.query.util.MapUtils; import datawave.webservice.result.BaseResponse; import datawave.webservice.result.GenericResponse; import datawave.webservice.result.VoidResponse; @@ -297,7 +299,7 @@ public GenericResponse ooziesubmit(MultivaluedMap queryPa if (!queryParameters.containsKey(AuditParameters.AUDIT_ID)) { queryParameters.putSingle(AuditParameters.AUDIT_ID, id); } - auditor.audit(queryParameters); + auditor.audit(MapUtils.toMultiValueMap(queryParameters)); } catch (IllegalArgumentException e) { log.error("Error validating audit parameters", e); BadRequestQueryException qe = new BadRequestQueryException(DatawaveErrorCode.MISSING_REQUIRED_PARAMETER, e); @@ -762,7 +764,7 @@ public StreamingOutput getResultFile(@PathParam("jobId") String jobId, @PathPara FSDataInputStream fis; try { - if (!fs.exists(resultFile) || !fs.isFile(resultFile)) { + if (!fs.exists(resultFile) || !fs.getFileStatus(resultFile).isFile()) { NotFoundQueryException qe = new NotFoundQueryException(DatawaveErrorCode.FILE_NOT_FOUND, MessageFormat.format("{0} at path {1}", fileName, resultsDir)); response.addException(qe); diff --git a/web-services/map-reduce/src/main/java/datawave/webservice/mr/bulkresults/map/ApplicationContextAwareMapper.java b/web-services/map-reduce/src/main/java/datawave/webservice/mr/bulkresults/map/ApplicationContextAwareMapper.java deleted file mode 100644 index b10053892c5..00000000000 --- a/web-services/map-reduce/src/main/java/datawave/webservice/mr/bulkresults/map/ApplicationContextAwareMapper.java +++ /dev/null @@ -1,23 +0,0 @@ -package datawave.webservice.mr.bulkresults.map; - -import org.apache.hadoop.mapreduce.Mapper; -import org.springframework.context.ApplicationContext; -import org.springframework.context.support.ClassPathXmlApplicationContext; - -public class ApplicationContextAwareMapper extends Mapper { - - public static final String SPRING_CONFIG_LOCATIONS = "spring.config.locations"; - - protected ApplicationContext applicationContext; - - /** - * Create a Spring Application Context - * - * @param contextPath - * is a possibly CSV of spring config file locations - */ - protected void setApplicationContext(String contextPath) { - this.applicationContext = new ClassPathXmlApplicationContext(contextPath.split(",")); - } - -} diff --git a/web-services/map-reduce/src/main/java/datawave/webservice/mr/bulkresults/map/SerializationFormat.java b/web-services/map-reduce/src/main/java/datawave/webservice/mr/bulkresults/map/SerializationFormat.java deleted file mode 100644 index ff6ec573921..00000000000 --- a/web-services/map-reduce/src/main/java/datawave/webservice/mr/bulkresults/map/SerializationFormat.java +++ /dev/null @@ -1,5 +0,0 @@ -package datawave.webservice.mr.bulkresults.map; - -public enum SerializationFormat { - JSON, PROTOBUF, XML, YAML -} diff --git a/web-services/map-reduce/src/main/java/datawave/webservice/mr/bulkresults/map/WeldBulkResultsFileOutputMapper.java b/web-services/map-reduce/src/main/java/datawave/webservice/mr/bulkresults/map/WeldBulkResultsFileOutputMapper.java new file 
mode 100644 index 00000000000..a0aebec7f1a --- /dev/null +++ b/web-services/map-reduce/src/main/java/datawave/webservice/mr/bulkresults/map/WeldBulkResultsFileOutputMapper.java @@ -0,0 +1,32 @@ +package datawave.webservice.mr.bulkresults.map; + +import java.io.IOException; + +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Value; +import org.jboss.weld.environment.se.Weld; + +public class WeldBulkResultsFileOutputMapper extends datawave.core.mapreduce.bulkresults.map.BulkResultsFileOutputMapper { + private Weld weld; + + @Override + protected void setup(org.apache.hadoop.mapreduce.Mapper.Context context) throws IOException, InterruptedException { + if (System.getProperty("ignore.weld.startMain") == null) { + System.setProperty("com.sun.jersey.server.impl.cdi.lookupExtensionInBeanManager", "true"); // Disable CDI extensions in Jersey libs + + weld = new Weld("STATIC_INSTANCE"); + weld.initialize(); + } + + super.setup(context); + } + + @Override + protected void cleanup(Context context) throws IOException, InterruptedException { + super.cleanup(context); + + if (weld != null) { + weld.shutdown(); + } + } +} diff --git a/web-services/map-reduce/src/main/java/datawave/webservice/mr/configuration/BulkResultsJobConfiguration.java b/web-services/map-reduce/src/main/java/datawave/webservice/mr/configuration/BulkResultsJobConfiguration.java index be7eeec63bc..8e1f920b8d0 100644 --- a/web-services/map-reduce/src/main/java/datawave/webservice/mr/configuration/BulkResultsJobConfiguration.java +++ b/web-services/map-reduce/src/main/java/datawave/webservice/mr/configuration/BulkResultsJobConfiguration.java @@ -7,6 +7,7 @@ import java.security.Principal; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -42,25 +43,26 @@ import org.apache.log4j.Logger; import org.jboss.security.JSSESecurityDomain; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.mapreduce.bulkresults.map.BulkResultsTableOutputMapper; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.core.query.configuration.QueryData; +import datawave.core.query.logic.QueryLogic; +import datawave.core.query.logic.QueryLogicFactory; +import datawave.microservice.authorization.util.AuthorizationsUtil; +import datawave.microservice.mapreduce.bulkresults.map.SerializationFormat; +import datawave.microservice.query.Query; import datawave.mr.bulk.BulkInputFormat; import datawave.security.authorization.DatawavePrincipal; import datawave.security.authorization.UserOperations; import datawave.security.iterator.ConfigurableVisibilityFilter; import datawave.security.util.WSAuthorizationsUtil; -import datawave.webservice.common.connection.AccumuloConnectionFactory; import datawave.webservice.common.exception.NoResultsException; -import datawave.webservice.mr.bulkresults.map.BulkResultsFileOutputMapper; -import datawave.webservice.mr.bulkresults.map.BulkResultsTableOutputMapper; -import datawave.webservice.mr.bulkresults.map.SerializationFormat; -import datawave.webservice.query.Query; +import datawave.webservice.mr.bulkresults.map.WeldBulkResultsFileOutputMapper; import datawave.webservice.query.cache.QueryCache; -import datawave.webservice.query.configuration.GenericQueryConfiguration; -import datawave.webservice.query.configuration.QueryData; import datawave.webservice.query.exception.DatawaveErrorCode; import 
datawave.webservice.query.exception.QueryException; import datawave.webservice.query.factory.Persister; -import datawave.webservice.query.logic.QueryLogic; -import datawave.webservice.query.logic.QueryLogicFactory; import datawave.webservice.query.runner.RunningQuery; public class BulkResultsJobConfiguration extends MapReduceJobConfiguration implements NeedCallerDetails, NeedAccumuloConnectionFactory, NeedAccumuloDetails, @@ -169,8 +171,8 @@ public void _initializeConfiguration(Job job, Path jobDir, String jobId, Map iter = queryConfig.getQueries(); + Iterator iter = queryConfig.getQueriesIter(); while (iter.hasNext()) { queryData = iter.next(); ranges.addAll(queryData.getRanges()); @@ -294,18 +296,18 @@ private void setupJob(Job job, Path jobDir, GenericQueryConfiguration queryConfi BulkInputFormat.addIterator(job.getConfiguration(), cfg); } - job.getConfiguration().set(BulkResultsFileOutputMapper.QUERY_LOGIC_SETTINGS, base64EncodedQuery); - job.getConfiguration().set(BulkResultsFileOutputMapper.QUERY_IMPL_CLASS, queryImplClass.getName()); - job.getConfiguration().set(BulkResultsFileOutputMapper.QUERY_LOGIC_NAME, logic.getLogicName()); + job.getConfiguration().set(WeldBulkResultsFileOutputMapper.QUERY_LOGIC_SETTINGS, base64EncodedQuery); + job.getConfiguration().set(WeldBulkResultsFileOutputMapper.QUERY_IMPL_CLASS, queryImplClass.getName()); + job.getConfiguration().set(WeldBulkResultsFileOutputMapper.QUERY_LOGIC_NAME, logic.getLogicName()); - job.getConfiguration().set(BulkResultsFileOutputMapper.APPLICATION_CONTEXT_PATH, + job.getConfiguration().set(WeldBulkResultsFileOutputMapper.APPLICATION_CONTEXT_PATH, "classpath*:datawave/configuration/spring/CDIBeanPostProcessor.xml," + "classpath*:datawave/query/*QueryLogicFactory.xml," + "classpath*:/MarkingFunctionsContext.xml," + "classpath*:/MetadataHelperContext.xml," + "classpath*:/CacheContext.xml"); - job.getConfiguration().set(BulkResultsFileOutputMapper.SPRING_CONFIG_LOCATIONS, - job.getConfiguration().get(BulkResultsFileOutputMapper.APPLICATION_CONTEXT_PATH)); + job.getConfiguration().set(WeldBulkResultsFileOutputMapper.SPRING_CONFIG_LOCATIONS, + job.getConfiguration().get(WeldBulkResultsFileOutputMapper.APPLICATION_CONTEXT_PATH)); // Tell the Mapper/Reducer to use a specific set of application context files when doing Spring-CDI integration. - String cdiOpts = "'-Dcdi.spring.configs=" + job.getConfiguration().get(BulkResultsFileOutputMapper.APPLICATION_CONTEXT_PATH) + "'"; + String cdiOpts = "'-Dcdi.spring.configs=" + job.getConfiguration().get(WeldBulkResultsFileOutputMapper.APPLICATION_CONTEXT_PATH) + "'"; // Pass our server DN along to the child VM so it can be made available for injection. cdiOpts += " '-Dserver.principal=" + encodePrincipal(serverPrincipal) + "'"; cdiOpts += " '-Dcaller.principal=" + encodePrincipal((DatawavePrincipal) principal) + "'"; @@ -339,27 +341,41 @@ private QuerySettings setupQuery(String sid, String queryId, Principal principal throw new QueryException("This query does not belong to you. 
expected: " + q.getOwner() + ", value: " + sid, Response.Status.UNAUTHORIZED.getStatusCode()); + String userDN = null; + Collection proxyServers = null; + if (principal instanceof DatawavePrincipal) { + DatawavePrincipal dp = (DatawavePrincipal) principal; + userDN = dp.getUserDN().subjectDN(); + proxyServers = dp.getProxyServers(); + } + // will throw IllegalArgumentException if not defined - logic = queryFactory.getQueryLogic(q.getQueryLogicName(), principal); + logic = queryFactory.getQueryLogic(q.getQueryLogicName(), (DatawavePrincipal) principal); // Get an accumulo connection Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - client = connectionFactory.getClient(logic.getConnectionPriority(), trackingMap); + client = connectionFactory.getClient(userDN, proxyServers, logic.getConnectionPriority(), trackingMap); + if (q.getQueryAuthorizations() == null) { + logic.preInitialize(q, AuthorizationsUtil.buildAuthorizations(null)); + } else { + logic.preInitialize(q, + AuthorizationsUtil.buildAuthorizations(Collections.singleton(AuthorizationsUtil.splitAuths(q.getQueryAuthorizations())))); + } // Merge user auths with the auths that they use in the Query // the query principal is our local principal unless the query logic has a different user operations - DatawavePrincipal queryPrincipal = (logic.getUserOperations() == null) ? (DatawavePrincipal) principal - : logic.getUserOperations().getRemoteUser((DatawavePrincipal) principal); + DatawavePrincipal queryPrincipal = (DatawavePrincipal) ((logic.getUserOperations() == null) ? principal + : logic.getUserOperations().getRemoteUser((DatawavePrincipal) principal)); // the overall principal (the one with combined auths across remote user operations) is our own user operations (probably the UserOperationsBean) - DatawavePrincipal overallPrincipal = (userOperations == null) ? (DatawavePrincipal) principal - : userOperations.getRemoteUser((DatawavePrincipal) principal); + DatawavePrincipal overallPrincipal = (DatawavePrincipal) ((userOperations == null) ? 
principal + : userOperations.getRemoteUser((DatawavePrincipal) principal)); Set runtimeQueryAuthorizations = WSAuthorizationsUtil.getDowngradedAuthorizations(q.getQueryAuthorizations(), overallPrincipal, queryPrincipal); // Initialize the logic so that the configuration contains all of the iterator options GenericQueryConfiguration queryConfig = logic.initialize(client, q, runtimeQueryAuthorizations); - String base64EncodedQuery = BulkResultsFileOutputMapper.serializeQuery(q); + String base64EncodedQuery = WeldBulkResultsFileOutputMapper.serializeQuery(q); return new QuerySettings(logic, queryConfig, base64EncodedQuery, q.getClass(), runtimeQueryAuthorizations); } finally { diff --git a/web-services/map-reduce/src/main/java/datawave/webservice/mr/configuration/MapReduceJobConfiguration.java b/web-services/map-reduce/src/main/java/datawave/webservice/mr/configuration/MapReduceJobConfiguration.java index 477ff17c8b5..d95d6105873 100644 --- a/web-services/map-reduce/src/main/java/datawave/webservice/mr/configuration/MapReduceJobConfiguration.java +++ b/web-services/map-reduce/src/main/java/datawave/webservice/mr/configuration/MapReduceJobConfiguration.java @@ -9,8 +9,6 @@ import java.net.URL; import java.text.MessageFormat; import java.util.ArrayList; -import java.util.HashSet; -import java.util.Iterator; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; diff --git a/web-services/map-reduce/src/main/java/datawave/webservice/mr/configuration/NeedAccumuloConnectionFactory.java b/web-services/map-reduce/src/main/java/datawave/webservice/mr/configuration/NeedAccumuloConnectionFactory.java index 6c4107ad8ea..fd3e6674eb3 100644 --- a/web-services/map-reduce/src/main/java/datawave/webservice/mr/configuration/NeedAccumuloConnectionFactory.java +++ b/web-services/map-reduce/src/main/java/datawave/webservice/mr/configuration/NeedAccumuloConnectionFactory.java @@ -1,6 +1,6 @@ package datawave.webservice.mr.configuration; -import datawave.webservice.common.connection.AccumuloConnectionFactory; +import datawave.core.common.connection.AccumuloConnectionFactory; public interface NeedAccumuloConnectionFactory { diff --git a/web-services/map-reduce/src/main/java/datawave/webservice/mr/configuration/NeedQueryLogicFactory.java b/web-services/map-reduce/src/main/java/datawave/webservice/mr/configuration/NeedQueryLogicFactory.java index 547ab0f4f5e..b54c98cf947 100644 --- a/web-services/map-reduce/src/main/java/datawave/webservice/mr/configuration/NeedQueryLogicFactory.java +++ b/web-services/map-reduce/src/main/java/datawave/webservice/mr/configuration/NeedQueryLogicFactory.java @@ -1,6 +1,6 @@ package datawave.webservice.mr.configuration; -import datawave.webservice.query.logic.QueryLogicFactory; +import datawave.core.query.logic.QueryLogicFactory; public interface NeedQueryLogicFactory { diff --git a/web-services/map-reduce/src/main/java/datawave/webservice/mr/state/MapReduceStatePersisterBean.java b/web-services/map-reduce/src/main/java/datawave/webservice/mr/state/MapReduceStatePersisterBean.java index 76928e2ff0d..7827b2b50d3 100644 --- a/web-services/map-reduce/src/main/java/datawave/webservice/mr/state/MapReduceStatePersisterBean.java +++ b/web-services/map-reduce/src/main/java/datawave/webservice/mr/state/MapReduceStatePersisterBean.java @@ -48,9 +48,9 @@ import org.apache.log4j.Logger; import datawave.configuration.DatawaveEmbeddedProjectStageHolder; +import datawave.core.common.connection.AccumuloConnectionFactory; import datawave.security.authorization.DatawavePrincipal; 
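The setupQuery() changes above follow a pattern this patch applies broadly: pull the caller's subject DN and proxy chain off the DatawavePrincipal, then hand both to the connection factory ahead of the priority and tracking map. A minimal sketch of that pattern, assuming getTrackingMap returns Map<String,String> and getProxyServers returns Collection<String> (generic parameters are elided in this rendering of the patch), that the factory methods throw Exception, and with the wrapper class, method name, and LOW priority chosen purely for illustration (call sites pick their own priority):

    import java.security.Principal;
    import java.util.Collection;
    import java.util.Map;

    import org.apache.accumulo.core.client.AccumuloClient;

    import datawave.core.common.connection.AccumuloConnectionFactory;
    import datawave.security.authorization.DatawavePrincipal;

    // Hypothetical helper mirroring the identity-aware lookup in BulkResultsJobConfiguration.setupQuery().
    final class CallerAwareClientLookup {

        static AccumuloClient lookup(Principal principal, AccumuloConnectionFactory connectionFactory) throws Exception {
            String userDN = null;
            Collection<String> proxyServers = null;
            if (principal instanceof DatawavePrincipal) {
                DatawavePrincipal dp = (DatawavePrincipal) principal;
                userDN = dp.getUserDN().subjectDN(); // subject DN of the calling entity
                proxyServers = dp.getProxyServers(); // servers proxying the call, if any
            }
            Map<String,String> trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace());
            // The new five-argument form: identity first, then connection priority and the tracking map.
            return connectionFactory.getClient(userDN, proxyServers, AccumuloConnectionFactory.Priority.LOW, trackingMap);
        }
    }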
import datawave.security.util.ScannerHelper; -import datawave.webservice.common.connection.AccumuloConnectionFactory; import datawave.webservice.query.exception.DatawaveErrorCode; import datawave.webservice.query.exception.NotFoundQueryException; import datawave.webservice.query.exception.QueryException; @@ -64,7 +64,7 @@ * * MapReduce Table *
* [flattened javadoc table: MapReduce Table — DataTypeHandler, columns Schema Type / Use]
- * + * * * * @@ -117,7 +117,7 @@ * * MapReduce Index *
* [flattened javadoc table: MapReduce Index — MapReduceTable / Row / ColF]
    - * + * * * * @@ -197,7 +197,7 @@ public void create(String id, String hdfsUri, String jobTracker, String workingD AccumuloClient c = null; try { Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - c = connectionFactory.getClient(AccumuloConnectionFactory.Priority.ADMIN, trackingMap); + c = connectionFactory.getClient(null, null, AccumuloConnectionFactory.Priority.ADMIN, trackingMap); tableCheck(c); // Not using a MultiTableBatchWriter here because its not implemented yet // in Mock Accumulo. @@ -248,7 +248,7 @@ public void updateState(String mapReduceJobId, MapReduceState state) throws Quer AccumuloClient c = null; try { Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - c = connectionFactory.getClient(AccumuloConnectionFactory.Priority.ADMIN, trackingMap); + c = connectionFactory.getClient(null, null, AccumuloConnectionFactory.Priority.ADMIN, trackingMap); tableCheck(c); try (Scanner scanner = ScannerHelper.createScanner(c, INDEX_TABLE_NAME, Collections.singleton(new Authorizations()))) { Range range = new Range(mapReduceJobId, mapReduceJobId); @@ -285,7 +285,7 @@ public void updateState(String mapReduceJobId, MapReduceState state) throws Quer BatchWriter writer = null; try { Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - c = connectionFactory.getClient(AccumuloConnectionFactory.Priority.ADMIN, trackingMap); + c = connectionFactory.getClient(null, null, AccumuloConnectionFactory.Priority.ADMIN, trackingMap); tableCheck(c); writer = c.createBatchWriter(TABLE_NAME, new BatchWriterConfig().setMaxLatency(10, TimeUnit.SECONDS).setMaxMemory(10240L).setMaxWriteThreads(1)); @@ -331,7 +331,7 @@ public MapReduceInfoResponseList find() { AccumuloClient c = null; try { Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - c = connectionFactory.getClient(AccumuloConnectionFactory.Priority.ADMIN, trackingMap); + c = connectionFactory.getClient(null, null, AccumuloConnectionFactory.Priority.ADMIN, trackingMap); tableCheck(c); try (Scanner scanner = ScannerHelper.createScanner(c, TABLE_NAME, auths)) { scanner.fetchColumnFamily(new Text(sid)); @@ -401,7 +401,7 @@ public MapReduceInfoResponseList findById(String id) { AccumuloClient c = null; try { Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - c = connectionFactory.getClient(AccumuloConnectionFactory.Priority.ADMIN, trackingMap); + c = connectionFactory.getClient(null, null, AccumuloConnectionFactory.Priority.ADMIN, trackingMap); tableCheck(c); try (Scanner scanner = ScannerHelper.createScanner(c, TABLE_NAME, auths)) { Range range = new Range(id); @@ -536,7 +536,7 @@ public void addJob(String id, String mapReduceJobId) throws QueryException { AccumuloClient c = null; try { Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - c = connectionFactory.getClient(AccumuloConnectionFactory.Priority.ADMIN, trackingMap); + c = connectionFactory.getClient(null, null, AccumuloConnectionFactory.Priority.ADMIN, trackingMap); tableCheck(c); // Not using a MultiTableBatchWriter here because its not implemented yet // in Mock Accumulo. 
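Every ADMIN-priority acquisition in this bean now passes null for both identity arguments, since these are system operations with no end user to attribute. Reduced from the surrounding code to its call shape — so connectionFactory and tableCheck refer to the bean's own members, the method name is invented, Map<String,String> is assumed for the tracking map, and error handling is elided:

    // Call shape used throughout MapReduceStatePersisterBean for system-level table access.
    private void withAdminClient() throws Exception {
        AccumuloClient c = null;
        try {
            Map<String,String> trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace());
            // No user DN, no proxy chain: both identity arguments are null for ADMIN operations.
            c = connectionFactory.getClient(null, null, AccumuloConnectionFactory.Priority.ADMIN, trackingMap);
            tableCheck(c); // ensure the MapReduce state tables exist before reading or writing
            // ... scans and batch writes against TABLE_NAME / INDEX_TABLE_NAME ...
        } finally {
            if (c != null) {
                connectionFactory.returnClient(c);
            }
        }
    }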
@@ -620,7 +620,7 @@ public void remove(String id) throws QueryException { AccumuloClient c = null; try { Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - c = connectionFactory.getClient(AccumuloConnectionFactory.Priority.ADMIN, trackingMap); + c = connectionFactory.getClient(null, null, AccumuloConnectionFactory.Priority.ADMIN, trackingMap); tableCheck(c); // using BatchWriter instead of MultiTableBatchWriter because Mock CB does not support // MultiTableBatchWriter diff --git a/web-services/map-reduce/src/test/java/datawave/webservice/mr/MapReduceBeanTest.java b/web-services/map-reduce/src/test/java/datawave/webservice/mr/MapReduceBeanTest.java index 28559cd17c4..781106a8063 100644 --- a/web-services/map-reduce/src/test/java/datawave/webservice/mr/MapReduceBeanTest.java +++ b/web-services/map-reduce/src/test/java/datawave/webservice/mr/MapReduceBeanTest.java @@ -25,12 +25,13 @@ import org.powermock.reflect.Whitebox; import org.springframework.context.support.ClassPathXmlApplicationContext; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.query.logic.QueryLogicFactory; import datawave.security.authorization.DatawavePrincipal; import datawave.security.authorization.DatawaveUser; import datawave.security.authorization.DatawaveUser.UserType; import datawave.security.authorization.SubjectIssuerDNPair; import datawave.security.util.DnUtils; -import datawave.webservice.common.connection.AccumuloConnectionFactory; import datawave.webservice.common.connection.config.ConnectionPoolsConfiguration; import datawave.webservice.common.exception.BadRequestException; import datawave.webservice.common.exception.DatawaveWebApplicationException; @@ -43,7 +44,6 @@ import datawave.webservice.query.exception.DatawaveErrorCode; import datawave.webservice.query.exception.QueryException; import datawave.webservice.query.factory.Persister; -import datawave.webservice.query.logic.QueryLogicFactory; import datawave.webservice.results.mr.MapReduceJobDescription; @RunWith(EasyMockRunner.class) diff --git a/web-services/map-reduce/src/test/java/datawave/webservice/mr/state/MapReduceStatePersisterTest.java b/web-services/map-reduce/src/test/java/datawave/webservice/mr/state/MapReduceStatePersisterTest.java index 3ffdaf06464..fc1fe24279b 100644 --- a/web-services/map-reduce/src/test/java/datawave/webservice/mr/state/MapReduceStatePersisterTest.java +++ b/web-services/map-reduce/src/test/java/datawave/webservice/mr/state/MapReduceStatePersisterTest.java @@ -36,12 +36,12 @@ import datawave.accumulo.inmemory.InMemoryAccumuloClient; import datawave.accumulo.inmemory.InMemoryInstance; +import datawave.core.common.connection.AccumuloConnectionFactory; import datawave.security.authorization.DatawavePrincipal; import datawave.security.authorization.DatawaveUser; import datawave.security.authorization.DatawaveUser.UserType; import datawave.security.authorization.SubjectIssuerDNPair; import datawave.security.util.DnUtils; -import datawave.webservice.common.connection.AccumuloConnectionFactory; import datawave.webservice.mr.state.MapReduceStatePersisterBean.MapReduceState; import datawave.webservice.results.mr.MapReduceInfoResponse; import datawave.webservice.results.mr.MapReduceInfoResponseList; @@ -96,7 +96,8 @@ public void testPersistentCreate() throws Exception { HashMap trackingMap = new HashMap<>(); expect(connectionFactory.getTrackingMap(EasyMock.anyObject())).andReturn(trackingMap); - 
expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), EasyMock.eq(trackingMap))).andReturn(client); + expect(connectionFactory.getClient(EasyMock.eq(null), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); replayAll(); bean.create(id, hdfs, jt, workingDirectory, mapReduceJobId, resultsDirectory, runtimeParameters, jobName); @@ -192,10 +193,12 @@ public void testUpdateState() throws Exception { // Get ready to call updateState HashMap trackingMap = new HashMap<>(); expect(connectionFactory.getTrackingMap(EasyMock.anyObject())).andReturn(trackingMap); - expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), EasyMock.eq(trackingMap))).andReturn(client); + expect(connectionFactory.getClient(EasyMock.eq(null), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); expect(connectionFactory.getTrackingMap(EasyMock.anyObject())).andReturn(trackingMap); - expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), EasyMock.eq(trackingMap))).andReturn(client); + expect(connectionFactory.getClient(EasyMock.eq(null), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); replayAll(); @@ -238,7 +241,8 @@ public void testFind() throws Exception { EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal); HashMap trackingMap = new HashMap<>(); expect(connectionFactory.getTrackingMap(EasyMock.anyObject())).andReturn(trackingMap); - expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), EasyMock.eq(trackingMap))).andReturn(client); + expect(connectionFactory.getClient(EasyMock.eq(null), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); replayAll(); @@ -254,7 +258,8 @@ public void testFindNoResults() throws Exception { EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal); HashMap trackingMap = new HashMap<>(); expect(connectionFactory.getTrackingMap(EasyMock.anyObject())).andReturn(trackingMap); - expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), EasyMock.eq(trackingMap))).andReturn(client); + expect(connectionFactory.getClient(EasyMock.eq(null), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); replayAll(); @@ -283,7 +288,8 @@ public void testDontFindSomeoneElsesResults() throws Exception { EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal); HashMap trackingMap = new HashMap<>(); expect(connectionFactory.getTrackingMap(EasyMock.anyObject())).andReturn(trackingMap); - expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), EasyMock.eq(trackingMap))).andReturn(client); + expect(connectionFactory.getClient(EasyMock.eq(null), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); replayAll(); @@ -305,7 +311,8 @@ public void testDontFindSomeoneElsesJob() throws Exception { 
EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal); HashMap trackingMap = new HashMap<>(); expect(connectionFactory.getTrackingMap(EasyMock.anyObject())).andReturn(trackingMap); - expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), EasyMock.eq(trackingMap))).andReturn(client); + expect(connectionFactory.getClient(EasyMock.eq(null), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); replayAll(); @@ -325,7 +332,8 @@ public void testFindById() throws Exception { EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal); HashMap trackingMap = new HashMap<>(); expect(connectionFactory.getTrackingMap(EasyMock.anyObject())).andReturn(trackingMap); - expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), EasyMock.eq(trackingMap))).andReturn(client); + expect(connectionFactory.getClient(EasyMock.eq(null), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); replayAll(); MapReduceInfoResponseList result = bean.findById(id); @@ -356,12 +364,14 @@ public void testRemove() throws Exception { // Get ready to call remove HashMap trackingMap = new HashMap<>(); expect(connectionFactory.getTrackingMap(EasyMock.anyObject())).andReturn(trackingMap); - expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), EasyMock.eq(trackingMap))).andReturn(client); + expect(connectionFactory.getClient(EasyMock.eq(null), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal); EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal); expect(connectionFactory.getTrackingMap(EasyMock.anyObject())).andReturn(trackingMap); - expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), EasyMock.eq(trackingMap))).andReturn(client); + expect(connectionFactory.getClient(EasyMock.eq(null), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); replayAll(); diff --git a/web-services/map-reduce/src/test/resources/datawave/mapreduce/MapReduceJobs.xml b/web-services/map-reduce/src/test/resources/datawave/mapreduce/MapReduceJobs.xml index b8b4fc5d158..c5fa0ba6742 100644 --- a/web-services/map-reduce/src/test/resources/datawave/mapreduce/MapReduceJobs.xml +++ b/web-services/map-reduce/src/test/resources/datawave/mapreduce/MapReduceJobs.xml @@ -58,7 +58,7 @@ - + diff --git a/web-services/metrics/pom.xml b/web-services/metrics/pom.xml new file mode 100644 index 00000000000..8b7d3c42bd0 --- /dev/null +++ b/web-services/metrics/pom.xml @@ -0,0 +1,53 @@ + + + 4.0.0 + + gov.nsa.datawave.webservices + datawave-ws-parent + 7.13.0-SNAPSHOT + + datawave-ws-metrics + ejb + ${project.artifactId} + + + gov.nsa.datawave + datawave-query-core + ${project.version} + + + gov.nsa.datawave.webservices + datawave-ws-common + ${project.version} + + + gov.nsa.datawave.webservices + datawave-ws-query + ${project.version} + + + gov.nsa.datawave + datawave-ingest-configuration + ${project.version} + test + + + + ${project.artifactId} + + + org.apache.maven.plugins + maven-ejb-plugin + + 3.2 + + + true + true + + + + + + 
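The datawave-ws-metrics module declared above packages the handlers that the following diffs relocate out of warehouse/query-core. SimpleQueryGeometryHandler stays CDI-managed (note the ApplicationScoped and Inject imports it keeps), so in-container consumers resolve it through its interface; a minimal sketch, assuming the handler method takes List<BaseQueryMetric> (generics are elided in this rendering) and with the consumer class invented for illustration:

    import java.util.List;

    import javax.enterprise.context.ApplicationScoped;
    import javax.inject.Inject;

    import datawave.core.query.map.QueryGeometryHandler;
    import datawave.microservice.querymetric.BaseQueryMetric;
    import datawave.microservice.querymetric.QueryGeometryResponse;

    // Hypothetical consumer: injection is by interface, so callers are insulated from the
    // concrete SimpleQueryGeometryHandler now living in datawave-ws-metrics.
    @ApplicationScoped
    public class QueryGeometryLookup {

        @Inject
        private QueryGeometryHandler geometryHandler;

        public QueryGeometryResponse geometriesFor(String queryId, List<BaseQueryMetric> metrics) {
            return geometryHandler.getQueryGeometryResponse(queryId, metrics);
        }
    }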
+ diff --git a/warehouse/query-core/src/main/java/datawave/query/map/SimpleQueryGeometryHandler.java b/web-services/metrics/src/main/java/datawave/query/map/SimpleQueryGeometryHandler.java similarity index 89% rename from warehouse/query-core/src/main/java/datawave/query/map/SimpleQueryGeometryHandler.java rename to web-services/metrics/src/main/java/datawave/query/map/SimpleQueryGeometryHandler.java index 0f82484bdb6..fd1e78ddcd7 100644 --- a/warehouse/query-core/src/main/java/datawave/query/map/SimpleQueryGeometryHandler.java +++ b/web-services/metrics/src/main/java/datawave/query/map/SimpleQueryGeometryHandler.java @@ -10,23 +10,23 @@ import javax.enterprise.context.ApplicationScoped; import javax.inject.Inject; -import org.apache.commons.jexl2.parser.JexlNode; +import org.apache.commons.jexl3.parser.JexlNode; import org.apache.deltaspike.core.api.config.ConfigProperty; import org.apache.deltaspike.core.api.exclude.Exclude; import org.apache.log4j.Logger; import datawave.configuration.DatawaveEmbeddedProjectStageHolder; +import datawave.core.common.logging.ThreadConfigurableLogger; +import datawave.core.query.map.QueryGeometryHandler; +import datawave.microservice.query.QueryImpl; import datawave.microservice.querymetric.BaseQueryMetric; +import datawave.microservice.querymetric.QueryGeometry; +import datawave.microservice.querymetric.QueryGeometryResponse; import datawave.query.jexl.JexlASTHelper; import datawave.query.jexl.visitors.GeoFeatureVisitor; import datawave.query.language.parser.ParseException; import datawave.query.language.parser.jexl.LuceneToJexlQueryParser; import datawave.query.metrics.ShardTableQueryMetricHandler; -import datawave.webservice.common.logging.ThreadConfigurableLogger; -import datawave.webservice.query.QueryImpl; -import datawave.webservice.query.map.QueryGeometry; -import datawave.webservice.query.map.QueryGeometryHandler; -import datawave.webservice.query.map.QueryGeometryResponse; /** * This class is used to extract query geometries from the query metrics in an effort to provide those geometries for subsequent display to the user. 
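The first hunk below adapts to the microservice QueryGeometryResponse, which is populated through setters rather than the old (id, basemaps) convenience constructor. The change in miniature, assuming both fields behave as plain bean properties:

    // Old warehouse/query-core API (removed by this patch):
    //     QueryGeometryResponse response = new QueryGeometryResponse(id, basemaps);

    // New datawave.microservice.querymetric API, as used below:
    QueryGeometryResponse response = new QueryGeometryResponse();
    response.setQueryId(id);
    response.setBasemaps(basemaps);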
@@ -47,8 +47,9 @@ public class SimpleQueryGeometryHandler implements QueryGeometryHandler { @Override public QueryGeometryResponse getQueryGeometryResponse(String id, List metrics) { - QueryGeometryResponse response = new QueryGeometryResponse(id, basemaps); - + QueryGeometryResponse response = new QueryGeometryResponse(); + response.setQueryId(id); + response.setBasemaps(basemaps); if (metrics != null) { Set queryGeometries = new LinkedHashSet<>(); for (BaseQueryMetric metric : metrics) { diff --git a/warehouse/query-core/src/main/java/datawave/query/metrics/ShardTableQueryMetricHandler.java b/web-services/metrics/src/main/java/datawave/query/metrics/ShardTableQueryMetricHandler.java similarity index 96% rename from warehouse/query-core/src/main/java/datawave/query/metrics/ShardTableQueryMetricHandler.java rename to web-services/metrics/src/main/java/datawave/query/metrics/ShardTableQueryMetricHandler.java index 791c85f7802..4ce461b03d9 100644 --- a/warehouse/query-core/src/main/java/datawave/query/metrics/ShardTableQueryMetricHandler.java +++ b/web-services/metrics/src/main/java/datawave/query/metrics/ShardTableQueryMetricHandler.java @@ -60,6 +60,13 @@ import datawave.configuration.DatawaveEmbeddedProjectStageHolder; import datawave.configuration.spring.SpringBean; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.common.connection.AccumuloConnectionFactory.Priority; +import datawave.core.common.logging.ThreadConfigurableLogger; +import datawave.core.query.cache.ResultsPage; +import datawave.core.query.logic.QueryLogic; +import datawave.core.query.logic.QueryLogicFactory; +import datawave.core.query.util.QueryUtil; import datawave.data.hash.UID; import datawave.data.hash.UIDBuilder; import datawave.ingest.config.RawRecordContainerImpl; @@ -72,6 +79,9 @@ import datawave.ingest.mapreduce.job.writer.LiveContextWriter; import datawave.ingest.table.config.TableConfigHelper; import datawave.marking.MarkingFunctions; +import datawave.microservice.query.Query; +import datawave.microservice.query.QueryImpl; +import datawave.microservice.query.QueryImpl.Parameter; import datawave.microservice.querymetric.BaseQueryMetric; import datawave.microservice.querymetric.BaseQueryMetric.Lifecycle; import datawave.microservice.querymetric.BaseQueryMetric.PageMetric; @@ -79,7 +89,6 @@ import datawave.microservice.querymetric.QueryMetric; import datawave.microservice.querymetric.QueryMetricFactory; import datawave.microservice.querymetric.QueryMetricListResponse; -import datawave.microservice.querymetric.QueryMetricsDetailListResponse; import datawave.microservice.querymetric.QueryMetricsSummaryResponse; import datawave.query.iterator.QueryOptions; import datawave.query.jexl.visitors.JexlFormattedStringBuildingVisitor; @@ -87,21 +96,11 @@ import datawave.query.map.SimpleQueryGeometryHandler; import datawave.security.authorization.DatawavePrincipal; import datawave.security.util.WSAuthorizationsUtil; -import datawave.webservice.common.connection.AccumuloConnectionFactory; -import datawave.webservice.common.connection.AccumuloConnectionFactory.Priority; -import datawave.webservice.common.logging.ThreadConfigurableLogger; -import datawave.webservice.query.Query; -import datawave.webservice.query.QueryImpl; -import datawave.webservice.query.QueryImpl.Parameter; -import datawave.webservice.query.cache.ResultsPage; import datawave.webservice.query.exception.QueryException; import datawave.webservice.query.exception.QueryExceptionType; -import 
datawave.webservice.query.logic.QueryLogic; -import datawave.webservice.query.logic.QueryLogicFactory; import datawave.webservice.query.result.event.EventBase; import datawave.webservice.query.result.event.FieldBase; import datawave.webservice.query.runner.RunningQuery; -import datawave.webservice.query.util.QueryUtil; import datawave.webservice.result.BaseQueryResponse; import datawave.webservice.result.BaseResponse; import datawave.webservice.result.EventQueryResponseBase; @@ -174,7 +173,7 @@ private void initialize() { AccumuloClient client = null; try { - client = connectionFactory.getClient(Priority.ADMIN, new HashMap<>()); + client = connectionFactory.getClient(null, null, Priority.ADMIN, new HashMap<>()); connectorAuthorizations = client.securityOperations().getUserAuthorizations(client.whoami()).toString(); connectorAuthorizationCollection = Lists.newArrayList(StringUtils.split(connectorAuthorizations, ",")); reload(); @@ -209,7 +208,7 @@ private void verifyTables() { AccumuloClient client = null; try { - client = this.connectionFactory.getClient(Priority.ADMIN, new HashMap<>()); + client = this.connectionFactory.getClient(null, null, Priority.ADMIN, new HashMap<>()); AbstractColumnBasedHandler handler = new ContentQueryMetricsHandler<>(); createAndConfigureTablesIfNecessary(handler.getTableNames(conf), client.tableOperations(), conf); } catch (Exception e) { @@ -452,11 +451,8 @@ private List getQueryMetrics(BaseResponse response, Query query, Da try { Map trackingMap = this.connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - client = this.connectionFactory.getClient(Priority.ADMIN, trackingMap); + client = this.connectionFactory.getClient(null, null, Priority.ADMIN, trackingMap); QueryLogic queryLogic = queryLogicFactory.getQueryLogic(query.getQueryLogicName(), datawavePrincipal); - if (queryLogic instanceof QueryMetricQueryLogic) { - ((QueryMetricQueryLogic) queryLogic).setRolesSets(datawavePrincipal.getPrimaryUser().getRoles()); - } runningQuery = new RunningQuery(null, client, Priority.ADMIN, queryLogic, query, query.getQueryAuthorizations(), datawavePrincipal, metricFactory); boolean done = false; @@ -524,8 +520,8 @@ private List getQueryMetrics(BaseResponse response, Query query, Da } @Override - public QueryMetricsDetailListResponse query(String user, String queryId, DatawavePrincipal datawavePrincipal) { - QueryMetricsDetailListResponse response = new QueryMetricsDetailListResponse(); + public QueryMetricListResponse query(String user, String queryId, DatawavePrincipal datawavePrincipal) { + QueryMetricListResponse response = new QueryMetricListResponse(); try { enableLogs(false); diff --git a/web-services/metrics/src/main/resources/META-INF/beans.xml b/web-services/metrics/src/main/resources/META-INF/beans.xml new file mode 100644 index 00000000000..4ca201f8ff2 --- /dev/null +++ b/web-services/metrics/src/main/resources/META-INF/beans.xml @@ -0,0 +1,9 @@ + + + + \ No newline at end of file diff --git a/web-services/metrics/src/test/java/datawave/query/map/SimpleQueryGeometryHandlerTest.java b/web-services/metrics/src/test/java/datawave/query/map/SimpleQueryGeometryHandlerTest.java new file mode 100644 index 00000000000..99ce02ccc03 --- /dev/null +++ b/web-services/metrics/src/test/java/datawave/query/map/SimpleQueryGeometryHandlerTest.java @@ -0,0 +1,231 @@ +package datawave.query.map; + +import static datawave.query.QueryParameters.QUERY_SYNTAX; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; 
+ +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import datawave.microservice.query.QueryImpl; +import datawave.microservice.querymetric.QueryGeometry; +import datawave.microservice.querymetric.QueryGeometryResponse; +import datawave.microservice.querymetric.QueryMetric; +import datawave.webservice.query.exception.QueryExceptionType; + +public class SimpleQueryGeometryHandlerTest { + + private SimpleQueryGeometryHandler handler; + + private String commonId; + + private Set luceneParams; + private Set jexlParams; + private Set emptyParams; + + @Before + public void setup() { + handler = new SimpleQueryGeometryHandler(); + + commonId = "super-special-query-id"; + + luceneParams = new HashSet<>(); + luceneParams.add(new QueryImpl.Parameter(QUERY_SYNTAX, "LUCENE")); + + jexlParams = new HashSet<>(); + jexlParams.add(new QueryImpl.Parameter(QUERY_SYNTAX, "JEXL")); + + emptyParams = new HashSet<>(); + } + + public QueryGeometryResponse generateResponse(String id, String query, Set params) { + List queryMetrics = new ArrayList<>(); + + QueryMetric qm = new QueryMetric(); + qm.setQueryId(id); + qm.setQuery(query); + qm.setParameters(params); + queryMetrics.add(qm); + + return handler.getQueryGeometryResponse(id, queryMetrics); + } + + @Test + public void validQueryJexlTest() { + QueryGeometryResponse resp = generateResponse(commonId, "geowave:contains(field1, 'POINT(0 0)')", jexlParams); + + Assert.assertEquals(1, resp.getResult().size()); + Assert.assertNull(resp.getExceptions()); + + QueryGeometry queryGeometry = resp.getResult().get(0); + Assert.assertEquals("{\"type\":\"Point\",\"coordinates\":[0.0,0.0]}", queryGeometry.getGeometry()); + Assert.assertEquals("geowave:contains(field1, 'POINT(0 0)')", queryGeometry.getFunction()); + } + + @Test + public void validGeoQueryJexlTest() { + QueryGeometryResponse resp = generateResponse(commonId, "geo:within_bounding_box(field1, '0_0', '10_10')", jexlParams); + + Assert.assertEquals(1, resp.getResult().size()); + Assert.assertNull(resp.getExceptions()); + + QueryGeometry queryGeometry = resp.getResult().get(0); + Assert.assertEquals("{\"type\":\"Polygon\",\"coordinates\":[[[0.0,0.0],[10,0.0],[10,10],[0.0,10],[0.0,0.0]]]}", queryGeometry.getGeometry()); + Assert.assertEquals("geo:within_bounding_box(field1, '0_0', '10_10')", queryGeometry.getFunction()); + } + + @Test + public void validQueryLuceneTest() { + QueryGeometryResponse resp = generateResponse(commonId, "#COVERS(field2, 'POINT(1 1)')", luceneParams); + + Assert.assertEquals(1, resp.getResult().size()); + Assert.assertNull(resp.getExceptions()); + + QueryGeometry queryGeometry = resp.getResult().get(0); + Assert.assertEquals("{\"type\":\"Point\",\"coordinates\":[1,1]}", queryGeometry.getGeometry()); + Assert.assertEquals("#COVERS(field2, 'POINT(1 1)')", queryGeometry.getFunction()); + } + + @Test + public void validGeoBoxQueryLuceneTest() { + QueryGeometryResponse resp = generateResponse(commonId, "#GEO(bounding_box, field1, '0_0', '10_10')", luceneParams); + + Assert.assertEquals(1, resp.getResult().size()); + Assert.assertNull(resp.getExceptions()); + + QueryGeometry queryGeometry = resp.getResult().get(0); + Assert.assertEquals("{\"type\":\"Polygon\",\"coordinates\":[[[0.0,0.0],[10,0.0],[10,10],[0.0,10],[0.0,0.0]]]}", queryGeometry.getGeometry()); + Assert.assertEquals("#GEO(bounding_box, field1, '0_0', '10_10')", queryGeometry.getFunction()); + } + + @Test + public void validGeoCircleQueryLuceneTest() { + QueryGeometryResponse resp = 
generateResponse(commonId, "#GEO(circle, field1, '0_0', 10)", luceneParams); + + Assert.assertEquals(1, resp.getResult().size()); + Assert.assertNull(resp.getExceptions()); + + QueryGeometry queryGeometry = resp.getResult().get(0); + Assert.assertEquals( + "{\"type\":\"Polygon\",\"coordinates\":[[[10,0.0],[9.9452,1.0453],[9.7815,2.0791],[9.5106,3.0902],[9.1355,4.0674],[8.6603,5],[8.0902,5.8779],[7.4314,6.6913],[6.6913,7.4314],[5.8779,8.0902],[5,8.6603],[4.0674,9.1355],[3.0902,9.5106],[2.0791,9.7815],[1.0453,9.9452],[6.0E-16,10],[-1.0453,9.9452],[-2.0791,9.7815],[-3.0902,9.5106],[-4.0674,9.1355],[-5,8.6603],[-5.8779,8.0902],[-6.6913,7.4314],[-7.4314,6.6913],[-8.0902,5.8779],[-8.6603,5],[-9.1355,4.0674],[-9.5106,3.0902],[-9.7815,2.0791],[-9.9452,1.0453],[-10,1.2E-15],[-9.9452,-1.0453],[-9.7815,-2.0791],[-9.5106,-3.0902],[-9.1355,-4.0674],[-8.6603,-5],[-8.0902,-5.8779],[-7.4314,-6.6913],[-6.6913,-7.4314],[-5.8779,-8.0902],[-5,-8.6603],[-4.0674,-9.1355],[-3.0902,-9.5106],[-2.0791,-9.7815],[-1.0453,-9.9452],[-1.8E-15,-10],[1.0453,-9.9452],[2.0791,-9.7815],[3.0902,-9.5106],[4.0674,-9.1355],[5,-8.6603],[5.8779,-8.0902],[6.6913,-7.4314],[7.4314,-6.6913],[8.0902,-5.8779],[8.6603,-5],[9.1355,-4.0674],[9.5106,-3.0902],[9.7815,-2.0791],[9.9452,-1.0453],[10,0.0]]]}", + queryGeometry.getGeometry()); + Assert.assertEquals("#GEO(circle, field1, '0_0', 10)", queryGeometry.getFunction()); + } + + @Test + public void validJexlQueryUndefinedSyntaxTest() { + QueryGeometryResponse resp = generateResponse(commonId, "geowave:covered_by(field3, 'POINT(2 2)')", emptyParams); + + Assert.assertEquals(1, resp.getResult().size()); + Assert.assertNull(resp.getExceptions()); + + QueryGeometry queryGeometry = resp.getResult().get(0); + Assert.assertEquals("{\"type\":\"Point\",\"coordinates\":[2,2]}", queryGeometry.getGeometry()); + Assert.assertEquals("geowave:covered_by(field3, 'POINT(2 2)')", queryGeometry.getFunction()); + } + + @Test + public void validLuceneQueryUndefinedSyntaxTest() { + QueryGeometryResponse resp = generateResponse(commonId, "#CROSSES(field4, 'POINT(3 3)')", emptyParams); + + Assert.assertEquals(0, resp.getResult().size()); + Assert.assertEquals(1, resp.getExceptions().size()); + + QueryExceptionType queryExceptionType = resp.getExceptions().get(0); + Assert.assertEquals("Unable to parse the geo features", queryExceptionType.getMessage()); + } + + @Test + public void validMultiFunctionQueryJexlTest() { + QueryGeometryResponse resp = generateResponse(commonId, "geowave:intersects(field5, 'POINT(4 4)') || geowave:overlaps(field6, 'POINT(5 5)')", + jexlParams); + + Assert.assertEquals(2, resp.getResult().size()); + Assert.assertNull(resp.getExceptions()); + + QueryGeometry queryGeometry = resp.getResult().get(0); + Assert.assertEquals("{\"type\":\"Point\",\"coordinates\":[4,4]}", queryGeometry.getGeometry()); + Assert.assertEquals("geowave:intersects(field5, 'POINT(4 4)')", queryGeometry.getFunction()); + + queryGeometry = resp.getResult().get(1); + Assert.assertEquals("{\"type\":\"Point\",\"coordinates\":[5,5]}", queryGeometry.getGeometry()); + Assert.assertEquals("geowave:overlaps(field6, 'POINT(5 5)')", queryGeometry.getFunction()); + } + + @Test + public void validMultiFunctionQueryLuceneTest() { + QueryGeometryResponse resp = generateResponse(commonId, "#INTERSECTS(field7, 'POINT(6 6)') || #WITHIN(field8, 'POINT(7 7)')", luceneParams); + + Assert.assertEquals(2, resp.getResult().size()); + Assert.assertNull(resp.getExceptions()); + + QueryGeometry queryGeometry = resp.getResult().get(0); + 
Assert.assertEquals("{\"type\":\"Point\",\"coordinates\":[6,6]}", queryGeometry.getGeometry()); + Assert.assertEquals("#INTERSECTS(field7, 'POINT(6 6)')", queryGeometry.getFunction()); + + queryGeometry = resp.getResult().get(1); + Assert.assertEquals("{\"type\":\"Point\",\"coordinates\":[7,7]}", queryGeometry.getGeometry()); + Assert.assertEquals("#WITHIN(field8, 'POINT(7 7)')", queryGeometry.getFunction()); + } + + @Test + public void validNonGeoQueryLuceneTest() { + QueryGeometryResponse resp = generateResponse(commonId, "field9: 'term'", luceneParams); + + Assert.assertEquals(0, resp.getResult().size()); + Assert.assertNull(resp.getExceptions()); + } + + @Test + public void invalidQueryJexlTest() { + QueryGeometryResponse resp = generateResponse(commonId, "geowave:intersects(field11, 3000)", jexlParams); + + Assert.assertEquals(0, resp.getResult().size()); + Assert.assertNull(resp.getExceptions()); + } + + @Test + public void invalidQueryLuceneTest() { + QueryGeometryResponse resp = generateResponse(commonId, "#INTERSECTS(field12, 5000)", luceneParams); + + Assert.assertEquals(0, resp.getResult().size()); + Assert.assertNull(resp.getExceptions()); + } + + @Test + public void multipleQueryMetricsTest() { + List queryMetrics = new ArrayList<>(); + + // Valid query, Lucene syntax QueryMetric + QueryMetric qm = new QueryMetric(); + qm.setQueryId(commonId); + qm.setQuery("#COVERS(field1, 'POINT(1 1)')"); + qm.setParameters(luceneParams); + queryMetrics.add(qm); + + // Valid query, unique query id, Jexl syntax QueryMetric + qm = new QueryMetric(); + qm.setQueryId("special-snowflake-id"); + qm.setQuery("geowave:intersects(field2, 'POINT(2 2)')"); + qm.setParameters(jexlParams); + queryMetrics.add(qm); + + QueryGeometryResponse resp = handler.getQueryGeometryResponse(commonId, queryMetrics); + + Assert.assertEquals(2, resp.getResult().size()); + + QueryGeometry queryGeometry = resp.getResult().get(0); + Assert.assertEquals("{\"type\":\"Point\",\"coordinates\":[1,1]}", queryGeometry.getGeometry()); + Assert.assertEquals("#COVERS(field1, 'POINT(1 1)')", queryGeometry.getFunction()); + + queryGeometry = resp.getResult().get(1); + Assert.assertEquals("{\"type\":\"Point\",\"coordinates\":[2,2]}", queryGeometry.getGeometry()); + Assert.assertEquals("geowave:intersects(field2, 'POINT(2 2)')", queryGeometry.getFunction()); + + System.out.println("done!"); + } +} diff --git a/web-services/metrics/src/test/resources/log4j.properties b/web-services/metrics/src/test/resources/log4j.properties new file mode 100644 index 00000000000..6646cecab8b --- /dev/null +++ b/web-services/metrics/src/test/resources/log4j.properties @@ -0,0 +1,31 @@ +log4j.rootLogger=DEBUG, R +log4j.appender.R=org.apache.log4j.ConsoleAppender +log4j.appender.R.layout=org.apache.log4j.PatternLayout +log4j.appender.R.layout.ConversionPattern=%d %p %C:%L %t %m%n +log4j.appender.R.encoding=UTF-8 + +#log4j.logger.org.apache.commons.jci=OFF +#log4j.logger.datawave.query.iterators.bb.BoundingBoxIterator=OFF +#log4j.logger.datawave.query.iterators.bb.SelectorSearchingIterator=OFF +log4j.logger.datawave=info +log4j.logger.datawave.query=info +log4j.logger.datawave.query.*=info + +log4j.logger.datawave.ingest.data.normalizer=FATAL +log4j.logger.org.apache.commons.jexl2.JexlEngine=ERROR +log4j.logger.org.springframework=WARN +log4j.logger.org.apache.hadoop=WARN + +log4j.logger.datawave.ingest=WARN +log4j.logger.datawave.query.testframework=INFO +# enable dump of tables by setting value to debug +log4j.logger.datawave.helpers.PrintUtility=info + 
+# set DefaultQueryPlanner to debug for analysis of query plan +log4j.logger.datawave.query.planner.DefaultQueryPlanner=info +log4j.logger.datawave.query.tables.ShardQueryLogic=info +log4j.logger.org.apache.commons.beanutils=INFO +log4j.logger.org.apache.accumulo=INFO + +#log4j.logger.datawave.query.iterator.facets=DEBUG +#log4j.logger.datawave.query.tables.facets=DEBUG diff --git a/web-services/model/pom.xml b/web-services/model/pom.xml index 982a8dc993f..c1f606e4cfc 100644 --- a/web-services/model/pom.xml +++ b/web-services/model/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-model ejb @@ -88,6 +88,11 @@ commons-configuration2 provided + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + provided + org.jboss.logging jboss-logging diff --git a/web-services/model/src/main/java/datawave/webservice/query/model/ModelBean.java b/web-services/model/src/main/java/datawave/webservice/query/model/ModelBean.java index c597d0e79fd..94d6da61b78 100644 --- a/web-services/model/src/main/java/datawave/webservice/query/model/ModelBean.java +++ b/web-services/model/src/main/java/datawave/webservice/query/model/ModelBean.java @@ -3,6 +3,7 @@ import java.security.Principal; import java.util.Collection; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; @@ -50,14 +51,14 @@ import com.google.common.collect.Sets; import datawave.annotation.Required; +import datawave.core.common.cache.AccumuloTableCache; +import datawave.core.common.connection.AccumuloConnectionFactory; import datawave.interceptor.RequiredInterceptor; import datawave.interceptor.ResponseInterceptor; import datawave.query.model.FieldMapping; import datawave.query.model.ModelKeyParser; import datawave.security.authorization.DatawavePrincipal; import datawave.security.util.ScannerHelper; -import datawave.webservice.common.cache.AccumuloTableCache; -import datawave.webservice.common.connection.AccumuloConnectionFactory; import datawave.webservice.common.exception.DatawaveWebApplicationException; import datawave.webservice.common.exception.NotFoundException; import datawave.webservice.common.exception.PreConditionFailedException; @@ -151,7 +152,7 @@ public ModelList listModelNames(@QueryParam("modelTableName") String modelTableN HashSet modelNames = new HashSet<>(); try { Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - client = connectionFactory.getClient(AccumuloConnectionFactory.Priority.LOW, trackingMap); + client = connectionFactory.getClient(getCurrentUserDN(), getCurrentProxyServers(), AccumuloConnectionFactory.Priority.LOW, trackingMap); try (Scanner scanner = ScannerHelper.createScanner(client, this.checkModelTableName(modelTableName), cbAuths)) { for (Entry entry : scanner) { String colf = entry.getKey().getColumnFamily().toString(); @@ -354,7 +355,7 @@ public datawave.webservice.model.Model getModel(@Required("name") @PathParam("na AccumuloClient client = null; try { Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - client = connectionFactory.getClient(AccumuloConnectionFactory.Priority.LOW, trackingMap); + client = connectionFactory.getClient(getCurrentUserDN(), getCurrentProxyServers(), AccumuloConnectionFactory.Priority.LOW, trackingMap); try (Scanner scanner = ScannerHelper.createScanner(client, this.checkModelTableName(modelTableName), cbAuths)) { IteratorSetting cfg = new IteratorSetting(21, 
"colfRegex", RegExFilter.class.getName()); cfg.addOption(RegExFilter.COLF_REGEX, "^" + name + "(\\x00.*)?"); @@ -422,7 +423,7 @@ public VoidResponse insertMapping(datawave.webservice.model.Model model, @QueryP String tableName = this.checkModelTableName(modelTableName); try { Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - client = connectionFactory.getClient(AccumuloConnectionFactory.Priority.LOW, trackingMap); + client = connectionFactory.getClient(getCurrentUserDN(), getCurrentProxyServers(), AccumuloConnectionFactory.Priority.LOW, trackingMap); writer = client.createBatchWriter(tableName, new BatchWriterConfig().setMaxLatency(BATCH_WRITER_MAX_LATENCY, TimeUnit.MILLISECONDS) .setMaxMemory(BATCH_WRITER_MAX_MEMORY).setMaxWriteThreads(BATCH_WRITER_MAX_THREADS)); for (FieldMapping mapping : model.getFields()) { @@ -453,7 +454,7 @@ public VoidResponse insertMapping(datawave.webservice.model.Model model, @QueryP } } } - cache.reloadCache(tableName); + cache.reloadTableCache(tableName); return response; } @@ -495,7 +496,7 @@ private VoidResponse deleteMapping(datawave.webservice.model.Model model, String String tableName = this.checkModelTableName(modelTableName); try { Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - client = connectionFactory.getClient(AccumuloConnectionFactory.Priority.LOW, trackingMap); + client = connectionFactory.getClient(getCurrentUserDN(), getCurrentProxyServers(), AccumuloConnectionFactory.Priority.LOW, trackingMap); writer = client.createBatchWriter(tableName, new BatchWriterConfig().setMaxLatency(BATCH_WRITER_MAX_LATENCY, TimeUnit.MILLISECONDS) .setMaxMemory(BATCH_WRITER_MAX_MEMORY).setMaxWriteThreads(BATCH_WRITER_MAX_THREADS)); for (FieldMapping mapping : model.getFields()) { @@ -527,7 +528,7 @@ private VoidResponse deleteMapping(datawave.webservice.model.Model model, String } } if (reloadCache) - cache.reloadCache(tableName); + cache.reloadTableCache(tableName); return response; } @@ -543,4 +544,28 @@ private String checkModelTableName(String tableName) { else return tableName; } + + public String getCurrentUserDN() { + + String currentUserDN = null; + Principal p = ctx.getCallerPrincipal(); + + if (p != null && p instanceof DatawavePrincipal) { + currentUserDN = ((DatawavePrincipal) p).getUserDN().subjectDN(); + } + + return currentUserDN; + } + + public Collection getCurrentProxyServers() { + List currentProxyServers = null; + Principal p = ctx.getCallerPrincipal(); + + if (p != null && p instanceof DatawavePrincipal) { + currentProxyServers = ((DatawavePrincipal) p).getProxyServers(); + } + + return currentProxyServers; + } + } diff --git a/web-services/model/src/test/java/datawave/webservice/query/model/ModelBeanTest.java b/web-services/model/src/test/java/datawave/webservice/query/model/ModelBeanTest.java index 721c4ba0f62..b9aed896f46 100644 --- a/web-services/model/src/test/java/datawave/webservice/query/model/ModelBeanTest.java +++ b/web-services/model/src/test/java/datawave/webservice/query/model/ModelBeanTest.java @@ -37,6 +37,8 @@ import datawave.accumulo.inmemory.InMemoryAccumuloClient; import datawave.accumulo.inmemory.InMemoryInstance; +import datawave.core.common.cache.AccumuloTableCache; +import datawave.core.common.connection.AccumuloConnectionFactory; import datawave.query.model.ModelKeyParser; import datawave.security.authorization.DatawavePrincipal; import datawave.security.authorization.DatawaveUser; @@ -44,8 +46,6 @@ import 
datawave.security.authorization.SubjectIssuerDNPair; import datawave.security.util.DnUtils; import datawave.security.util.ScannerHelper; -import datawave.webservice.common.cache.AccumuloTableCache; -import datawave.webservice.common.connection.AccumuloConnectionFactory; import datawave.webservice.common.exception.DatawaveWebApplicationException; import datawave.webservice.model.ModelList; @@ -134,9 +134,10 @@ public void tearDown() { public void testModelImportNoTable() throws Exception { HashMap trackingMap = new HashMap<>(); EasyMock.expect(connectionFactory.getTrackingMap((StackTraceElement[]) EasyMock.anyObject())).andReturn(trackingMap); - EasyMock.expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), EasyMock.eq(trackingMap))).andReturn(client); + EasyMock.expect(connectionFactory.getClient(EasyMock.eq(userDN.toLowerCase()), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); - EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal); + EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal).anyTimes(); PowerMock.replayAll(); bean.importModel(MODEL_ONE, (String) null); @@ -148,16 +149,18 @@ private void importModels() throws Exception { HashMap trackingMap = new HashMap<>(); EasyMock.expect(connectionFactory.getTrackingMap((StackTraceElement[]) EasyMock.anyObject())).andReturn(trackingMap); - EasyMock.expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), EasyMock.eq(trackingMap))).andReturn(client); + EasyMock.expect(connectionFactory.getClient(EasyMock.eq(userDN.toLowerCase()), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); - EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal); + EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal).anyTimes(); EasyMock.expect(connectionFactory.getTrackingMap((StackTraceElement[]) EasyMock.anyObject())).andReturn(trackingMap); - EasyMock.expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), EasyMock.eq(trackingMap))).andReturn(client); + EasyMock.expect(connectionFactory.getClient(EasyMock.eq(userDN.toLowerCase()), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), + EasyMock.eq(trackingMap))).andReturn(client); EasyMock.expect(System.currentTimeMillis()).andReturn(TIMESTAMP); connectionFactory.returnClient(client); EasyMock.expect(System.currentTimeMillis()).andReturn(TIMESTAMP); EasyMock.expect(System.currentTimeMillis()).andReturn(TIMESTAMP); - EasyMock.expect(cache.reloadCache(ModelBean.DEFAULT_MODEL_TABLE_NAME)).andReturn(null); + cache.reloadTableCache(ModelBean.DEFAULT_MODEL_TABLE_NAME); PowerMock.replayAll(); bean.importModel(MODEL_ONE, (String) null); @@ -165,17 +168,19 @@ private void importModels() throws Exception { PowerMock.resetAll(); EasyMock.expect(connectionFactory.getTrackingMap((StackTraceElement[]) EasyMock.anyObject())).andReturn(trackingMap); - EasyMock.expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), EasyMock.eq(trackingMap))).andReturn(client); + EasyMock.expect(connectionFactory.getClient(EasyMock.eq(userDN.toLowerCase()), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); - 
EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal); + EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal).anyTimes(); EasyMock.expect(connectionFactory.getTrackingMap((StackTraceElement[]) EasyMock.anyObject())).andReturn(trackingMap); - EasyMock.expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), EasyMock.eq(trackingMap))).andReturn(client); + EasyMock.expect(connectionFactory.getClient(EasyMock.eq(userDN.toLowerCase()), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), + EasyMock.eq(trackingMap))).andReturn(client); EasyMock.expect(System.currentTimeMillis()).andReturn(TIMESTAMP); connectionFactory.returnClient(client); EasyMock.expect(System.currentTimeMillis()).andReturn(TIMESTAMP); EasyMock.expect(System.currentTimeMillis()).andReturn(TIMESTAMP); EasyMock.expect(System.currentTimeMillis()).andReturn(TIMESTAMP); - EasyMock.expect(cache.reloadCache(ModelBean.DEFAULT_MODEL_TABLE_NAME)).andReturn(null); + cache.reloadTableCache(ModelBean.DEFAULT_MODEL_TABLE_NAME); PowerMock.replayAll(); bean.importModel(MODEL_TWO, (String) null); @@ -188,10 +193,11 @@ public void testListModels() throws Exception { importModels(); PowerMock.resetAll(); - EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal); + EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal).anyTimes(); HashMap trackingMap = new HashMap<>(); EasyMock.expect(connectionFactory.getTrackingMap((StackTraceElement[]) EasyMock.anyObject())).andReturn(trackingMap); - EasyMock.expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), EasyMock.eq(trackingMap))).andReturn(client); + EasyMock.expect(connectionFactory.getClient(EasyMock.eq(userDN.toLowerCase()), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); PowerMock.replayAll(); @@ -208,10 +214,11 @@ public void testModelGet() throws Exception { importModels(); PowerMock.resetAll(); - EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal); + EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal).anyTimes(); HashMap trackingMap = new HashMap<>(); EasyMock.expect(connectionFactory.getTrackingMap((StackTraceElement[]) EasyMock.anyObject())).andReturn(trackingMap); - EasyMock.expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), EasyMock.eq(trackingMap))).andReturn(client); + EasyMock.expect(connectionFactory.getClient(EasyMock.eq(userDN.toLowerCase()), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); PowerMock.replayAll(); @@ -226,22 +233,24 @@ public void testModelDelete() throws Exception { importModels(); PowerMock.resetAll(); - EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal); + EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal).anyTimes(); HashMap trackingMap = new HashMap<>(); EasyMock.expect(connectionFactory.getTrackingMap((StackTraceElement[]) EasyMock.anyObject())).andReturn(trackingMap); - EasyMock.expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), EasyMock.eq(trackingMap))).andReturn(client); + EasyMock.expect(connectionFactory.getClient(EasyMock.eq(userDN.toLowerCase()), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); 
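Every expectation rewritten in this test follows the same shape; spelled out once with the arguments annotated (connectionFactory, trackingMap, client, and userDN are the test fixtures defined earlier in this file):

    // Expectation for the identity-aware lookup: ModelBean now passes the caller's
    // lower-cased subject DN, a null proxy list, LOW priority, and the tracking map.
    EasyMock.expect(connectionFactory.getClient(
            EasyMock.eq(userDN.toLowerCase()),                    // subject DN from getCurrentUserDN()
            EasyMock.eq(null),                                    // no proxy servers for the test principal
            EasyMock.eq(AccumuloConnectionFactory.Priority.LOW),  // model operations use LOW priority
            EasyMock.eq(trackingMap)))
            .andReturn(client);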
EasyMock.expect(connectionFactory.getTrackingMap((StackTraceElement[]) EasyMock.anyObject())).andReturn(trackingMap); - EasyMock.expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), EasyMock.eq(trackingMap))).andReturn(client); + EasyMock.expect(connectionFactory.getClient(EasyMock.eq(userDN.toLowerCase()), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); - EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal); EasyMock.expect(connectionFactory.getTrackingMap((StackTraceElement[]) EasyMock.anyObject())).andReturn(trackingMap); - EasyMock.expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), EasyMock.eq(trackingMap))).andReturn(client); + EasyMock.expect(connectionFactory.getClient(EasyMock.eq(userDN.toLowerCase()), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), + EasyMock.eq(trackingMap))).andReturn(client); EasyMock.expect(System.currentTimeMillis()).andReturn(TIMESTAMP); EasyMock.expect(System.currentTimeMillis()).andReturn(TIMESTAMP); EasyMock.expect(System.currentTimeMillis()).andReturn(TIMESTAMP); connectionFactory.returnClient(client); - EasyMock.expect(cache.reloadCache(ModelBean.DEFAULT_MODEL_TABLE_NAME)).andReturn(null); + cache.reloadTableCache(ModelBean.DEFAULT_MODEL_TABLE_NAME); EasyMock.expect(System.currentTimeMillis()).andReturn(TIMESTAMP); EasyMock.expect(System.currentTimeMillis()).andReturn(TIMESTAMP); EasyMock.expect(System.currentTimeMillis()).andReturn(TIMESTAMP); @@ -251,9 +260,10 @@ public void testModelDelete() throws Exception { PowerMock.verifyAll(); PowerMock.resetAll(); - EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal); + EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal).anyTimes(); EasyMock.expect(connectionFactory.getTrackingMap((StackTraceElement[]) EasyMock.anyObject())).andReturn(trackingMap); - EasyMock.expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), EasyMock.eq(trackingMap))).andReturn(client); + EasyMock.expect(connectionFactory.getClient(EasyMock.eq(userDN.toLowerCase()), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); PowerMock.replayAll(); try { @@ -271,9 +281,10 @@ public void testModelDelete() throws Exception { PowerMock.verifyAll(); PowerMock.resetAll(); // Ensure model one still intact - EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal); + EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal).anyTimes(); EasyMock.expect(connectionFactory.getTrackingMap((StackTraceElement[]) EasyMock.anyObject())).andReturn(trackingMap); - EasyMock.expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), EasyMock.eq(trackingMap))).andReturn(client); + EasyMock.expect(connectionFactory.getClient(EasyMock.eq(userDN.toLowerCase()), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); PowerMock.replayAll(); datawave.webservice.model.Model model1 = bean.getModel(MODEL_ONE.getName(), (String) null); @@ -284,10 +295,11 @@ public void testModelDelete() throws Exception { @Test(expected = DatawaveWebApplicationException.class) public void testModelGetInvalidModelName() throws Exception { - 
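The recurring change in these test hunks tracks the refactored AccumuloConnectionFactory contract: getClient now takes the caller's user DN and proxied-entity list ahead of the pool priority, and the model table cache's reloadCache(...) expectation becomes a plain void call to reloadTableCache(...). A minimal, self-contained sketch of the new expectation shape follows; the helper class name is illustrative only, while the calls and argument order are taken from the hunks above.

```java
// Sketch only: mirrors the updated mock expectations above, assuming EasyMock and
// the refactored datawave.core.common.connection.AccumuloConnectionFactory.
import java.util.HashMap;
import java.util.Map;

import org.apache.accumulo.core.client.AccumuloClient;
import org.easymock.EasyMock;

import datawave.core.common.connection.AccumuloConnectionFactory;

public class ConnectionFactoryExpectations {
    public static void expectLowPriorityClient(AccumuloConnectionFactory connectionFactory, AccumuloClient client, String userDN) throws Exception {
        Map<String,String> trackingMap = new HashMap<>();
        EasyMock.expect(connectionFactory.getTrackingMap((StackTraceElement[]) EasyMock.anyObject())).andReturn(trackingMap);
        // Caller identity (user DN, then proxied entities) now precedes the pool priority.
        EasyMock.expect(connectionFactory.getClient(EasyMock.eq(userDN.toLowerCase()), EasyMock.eq(null),
                        EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), EasyMock.eq(trackingMap))).andReturn(client);
        connectionFactory.returnClient(client);
    }
}
```

Because getCallerPrincipal() is now stubbed with anyTimes(), the bean may look up the principal more than once per request without breaking replay, which is why the one-shot expectations above were relaxed.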
-        EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal);
+        EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal).anyTimes();
         HashMap trackingMap = new HashMap<>();
         EasyMock.expect(connectionFactory.getTrackingMap((StackTraceElement[]) EasyMock.anyObject())).andReturn(trackingMap);
-        EasyMock.expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), EasyMock.eq(trackingMap))).andReturn(client);
+        EasyMock.expect(connectionFactory.getClient(EasyMock.eq(userDN.toLowerCase()), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.LOW),
+                        EasyMock.eq(trackingMap))).andReturn(client);
         connectionFactory.returnClient(client);
         PowerMock.replayAll();
@@ -300,18 +312,20 @@ public void testCloneModel() throws Exception {
         importModels();
         PowerMock.resetAll();
-        EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal);
+        EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal).anyTimes();
         HashMap trackingMap = new HashMap<>();
         EasyMock.expect(connectionFactory.getTrackingMap((StackTraceElement[]) EasyMock.anyObject())).andReturn(trackingMap);
-        EasyMock.expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), EasyMock.eq(trackingMap))).andReturn(client);
+        EasyMock.expect(connectionFactory.getClient(EasyMock.eq(userDN.toLowerCase()), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.LOW),
+                        EasyMock.eq(trackingMap))).andReturn(client);
         connectionFactory.returnClient(client);
         EasyMock.expect(connectionFactory.getTrackingMap((StackTraceElement[]) EasyMock.anyObject())).andReturn(trackingMap);
-        EasyMock.expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), EasyMock.eq(trackingMap))).andReturn(client);
+        EasyMock.expect(connectionFactory.getClient(EasyMock.eq(userDN.toLowerCase()), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.LOW),
+                        EasyMock.eq(trackingMap))).andReturn(client);
         connectionFactory.returnClient(client);
-        EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal);
         EasyMock.expect(connectionFactory.getTrackingMap((StackTraceElement[]) EasyMock.anyObject())).andReturn(trackingMap);
-        EasyMock.expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), EasyMock.eq(trackingMap))).andReturn(client);
-        EasyMock.expect(cache.reloadCache(ModelBean.DEFAULT_MODEL_TABLE_NAME)).andReturn(null);
+        EasyMock.expect(connectionFactory.getClient(EasyMock.eq(userDN.toLowerCase()), EasyMock.eq(null), EasyMock.eq(AccumuloConnectionFactory.Priority.LOW),
+                        EasyMock.eq(trackingMap))).andReturn(client);
+        cache.reloadTableCache(ModelBean.DEFAULT_MODEL_TABLE_NAME);
         EasyMock.expect(System.currentTimeMillis()).andReturn(TIMESTAMP);
         connectionFactory.returnClient(client);
         EasyMock.expect(System.currentTimeMillis()).andReturn(TIMESTAMP);
@@ -321,9 +335,10 @@ public void testCloneModel() throws Exception {
         bean.cloneModel(MODEL_ONE.getName(), "MODEL2", (String) null);
         PowerMock.verifyAll();
         PowerMock.resetAll();
-        EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal);
+        EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal).anyTimes();
         EasyMock.expect(connectionFactory.getTrackingMap((StackTraceElement[]) EasyMock.anyObject())).andReturn(trackingMap);
-        EasyMock.expect(connectionFactory.getClient(EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), EasyMock.eq(trackingMap))).andReturn(client);
+        EasyMock.expect(connectionFactory.getClient(EasyMock.eq(userDN.toLowerCase()), EasyMock.eq(null),
EasyMock.eq(AccumuloConnectionFactory.Priority.LOW), + EasyMock.eq(trackingMap))).andReturn(client); connectionFactory.returnClient(client); PowerMock.replayAll(); diff --git a/web-services/modification/pom.xml b/web-services/modification/pom.xml index 0c000c154b1..bfc58214da7 100644 --- a/web-services/modification/pom.xml +++ b/web-services/modification/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-modification ejb @@ -19,6 +19,11 @@ datawave-query-core ${project.version} + + gov.nsa.datawave.core + datawave-core-modification + ${project.version} + javax.enterprise cdi-api diff --git a/web-services/modification/src/main/java/datawave/webservice/modification/ModificationBean.java b/web-services/modification/src/main/java/datawave/webservice/modification/ModificationBean.java index f20a9ac9d60..5021835c563 100644 --- a/web-services/modification/src/main/java/datawave/webservice/modification/ModificationBean.java +++ b/web-services/modification/src/main/java/datawave/webservice/modification/ModificationBean.java @@ -1,16 +1,6 @@ package datawave.webservice.modification; -import static java.util.Map.Entry; - -import java.security.Principal; -import java.text.MessageFormat; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; import java.util.List; -import java.util.Map; -import java.util.Set; import javax.annotation.Resource; import javax.annotation.security.DeclareRoles; @@ -30,31 +20,22 @@ import javax.ws.rs.Path; import javax.ws.rs.PathParam; import javax.ws.rs.Produces; -import javax.ws.rs.core.MultivaluedMap; -import org.apache.accumulo.core.client.AccumuloClient; -import org.apache.accumulo.core.security.Authorizations; import org.apache.log4j.Logger; import org.jboss.resteasy.annotations.GZIP; -import org.jboss.resteasy.specimpl.MultivaluedMapImpl; import datawave.annotation.Required; import datawave.configuration.spring.SpringBean; +import datawave.core.common.connection.AccumuloConnectionFactory; import datawave.interceptor.RequiredInterceptor; import datawave.interceptor.ResponseInterceptor; +import datawave.modification.DatawaveModificationException; +import datawave.modification.ModificationService; +import datawave.modification.configuration.ModificationConfiguration; import datawave.security.authorization.DatawavePrincipal; -import datawave.webservice.common.audit.AuditParameterBuilder; -import datawave.webservice.common.connection.AccumuloConnectionFactory; -import datawave.webservice.common.exception.BadRequestException; import datawave.webservice.common.exception.DatawaveWebApplicationException; -import datawave.webservice.common.exception.UnauthorizedException; import datawave.webservice.modification.cache.ModificationCacheBean; -import datawave.webservice.modification.configuration.ModificationConfiguration; -import datawave.webservice.modification.configuration.ModificationServiceConfiguration; -import datawave.webservice.query.exception.BadRequestQueryException; -import datawave.webservice.query.exception.DatawaveErrorCode; import datawave.webservice.query.exception.QueryException; -import datawave.webservice.query.exception.UnauthorizedQueryException; import datawave.webservice.query.runner.QueryExecutorBean; import datawave.webservice.result.VoidResponse; import datawave.webservice.results.modification.ModificationConfigurationResponse; @@ -86,8 +67,15 @@ public class ModificationBean { @SpringBean(refreshable = true) private 
ModificationConfiguration modificationConfiguration; - @Inject - private AuditParameterBuilder auditParameterBuilder; + private ModificationService service; + + private ModificationService getService() { + if (service == null) { + service = new ModificationService(modificationConfiguration, cache.getCache(), connectionFactory, + new QueryExecutorBeanService(queryService).getFactory()); + } + return service; + } /** * Returns a list of the Modification service names and their configurations @@ -104,16 +92,7 @@ public class ModificationBean { @GZIP @Interceptors({RequiredInterceptor.class, ResponseInterceptor.class}) public List listConfigurations() { - List configs = new ArrayList<>(); - for (Entry entry : this.modificationConfiguration.getConfigurations().entrySet()) { - ModificationConfigurationResponse r = new ModificationConfigurationResponse(); - r.setName(entry.getKey()); - r.setRequestClass(entry.getValue().getRequestClass().getName()); - r.setDescription(entry.getValue().getDescription()); - r.setAuthorizedRoles(entry.getValue().getAuthorizedRoles()); - configs.add(r); - } - return configs; + return getService().listConfigurations(); } /** @@ -141,84 +120,15 @@ public List listConfigurations() { @Interceptors({RequiredInterceptor.class, ResponseInterceptor.class}) public VoidResponse submit(@Required("modificationServiceName") @PathParam("serviceName") String modificationServiceName, @Required("request") ModificationRequestBase request) { - VoidResponse response = new VoidResponse(); - - // Find out who/what called this method - Principal p = ctx.getCallerPrincipal(); - String user; - Set cbAuths = new HashSet<>(); - Collection userRoles = Collections.emptySet(); - if (p instanceof DatawavePrincipal) { - DatawavePrincipal dp = (DatawavePrincipal) p; - user = dp.getShortName(); - userRoles = dp.getPrimaryUser().getRoles(); - for (Collection c : dp.getAuthorizations()) - cbAuths.add(new Authorizations(c.toArray(new String[c.size()]))); - } else { - QueryException qe = new QueryException(DatawaveErrorCode.UNEXPECTED_PRINCIPAL_ERROR, MessageFormat.format("Class: {0}", p.getClass().getName())); - response.addException(qe); - throw new DatawaveWebApplicationException(qe, response); - } - - AccumuloClient client = null; - AccumuloConnectionFactory.Priority priority; try { - // Get the Modification Service from the configuration - ModificationServiceConfiguration service = modificationConfiguration.getConfiguration(modificationServiceName); - if (!request.getClass().equals(service.getRequestClass())) { - BadRequestQueryException qe = new BadRequestQueryException(DatawaveErrorCode.INVALID_REQUEST_CLASS, - MessageFormat.format("Requires: {0}", service.getRequestClass().getName())); + DatawavePrincipal p = (DatawavePrincipal) ctx.getCallerPrincipal(); + return getService().submit(p, modificationServiceName, request); + } catch (DatawaveModificationException dme) { + VoidResponse response = new VoidResponse(); + for (QueryException qe : dme.getExceptions()) { response.addException(qe); - throw new BadRequestException(qe, response); - } - - priority = service.getPriority(); - - // Ensure that the user is in the list of authorized roles - if (null != service.getAuthorizedRoles()) { - boolean authorized = !Collections.disjoint(userRoles, service.getAuthorizedRoles()); - if (!authorized) { - // Then the user does not have any of the authorized roles - UnauthorizedQueryException qe = new UnauthorizedQueryException(DatawaveErrorCode.JOB_EXECUTION_UNAUTHORIZED, - MessageFormat.format("Requires one of: 
{0}", service.getAuthorizedRoles())); - response.addException(qe); - throw new UnauthorizedException(qe, response); - } } - - if (service.getRequiresAudit()) { - try { - MultivaluedMap requestMap = new MultivaluedMapImpl<>(); - requestMap.putAll(request.toMap()); - auditParameterBuilder.convertAndValidate(requestMap); - } catch (Exception e) { - QueryException qe = new QueryException(DatawaveErrorCode.QUERY_AUDITING_ERROR, e); - log.error(qe); - response.addException(qe.getBottomQueryException()); - } - } - - // Process the modification - Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - client = connectionFactory.getClient(modificationConfiguration.getPoolName(), priority, trackingMap); - service.setQueryService(queryService); - log.info("Processing modification request from user=" + user + ": \n" + request); - service.process(client, request, cache.getCachedMutableFieldList(), cbAuths, user); - return response; - } catch (DatawaveWebApplicationException e) { - throw e; - } catch (Exception e) { - QueryException qe = new QueryException(DatawaveErrorCode.MODIFICATION_ERROR, e); - log.error(qe); - response.addException(qe.getBottomQueryException()); - throw new DatawaveWebApplicationException(e, response); - } finally { - if (null != client) - try { - connectionFactory.returnClient(client); - } catch (Exception e) { - log.error("Error returning connection", e); - } + throw new DatawaveWebApplicationException(dme, response); } } diff --git a/web-services/modification/src/main/java/datawave/webservice/modification/QueryExecutorBeanService.java b/web-services/modification/src/main/java/datawave/webservice/modification/QueryExecutorBeanService.java new file mode 100644 index 00000000000..039ae0fd998 --- /dev/null +++ b/web-services/modification/src/main/java/datawave/webservice/modification/QueryExecutorBeanService.java @@ -0,0 +1,43 @@ +package datawave.webservice.modification; + +import java.util.List; +import java.util.Map; + +import datawave.modification.query.ModificationQueryService; +import datawave.security.authorization.ProxiedUserDetails; +import datawave.webservice.query.runner.QueryExecutorBean; +import datawave.webservice.query.util.MapUtils; +import datawave.webservice.result.BaseQueryResponse; +import datawave.webservice.result.GenericResponse; + +public class QueryExecutorBeanService implements ModificationQueryService { + private final QueryExecutorBean queryService; + + public QueryExecutorBeanService(QueryExecutorBean queryService) { + this.queryService = queryService; + } + + @Override + public GenericResponse createQuery(String logicName, Map> paramsToMap) { + return queryService.createQuery(logicName, MapUtils.toMultivaluedMap(paramsToMap)); + } + + @Override + public BaseQueryResponse next(String id) { + return queryService.next(id); + } + + @Override + public void close(String id) { + queryService.close(id); + } + + public ModificationQueryServiceFactory getFactory() { + return new ModificationQueryServiceFactory() { + @Override + public ModificationQueryService createService(ProxiedUserDetails userDetails) { + return QueryExecutorBeanService.this; + } + }; + } +} diff --git a/web-services/modification/src/main/java/datawave/webservice/modification/cache/ModificationCacheBean.java b/web-services/modification/src/main/java/datawave/webservice/modification/cache/ModificationCacheBean.java index 6b1cc1eb2b0..348529cf481 100644 --- 
a/web-services/modification/src/main/java/datawave/webservice/modification/cache/ModificationCacheBean.java +++ b/web-services/modification/src/main/java/datawave/webservice/modification/cache/ModificationCacheBean.java @@ -1,11 +1,6 @@ package datawave.webservice.modification.cache; -import static datawave.webservice.common.connection.AccumuloConnectionFactory.Priority; - import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; @@ -15,7 +10,6 @@ import javax.annotation.security.DeclareRoles; import javax.annotation.security.RolesAllowed; import javax.annotation.security.RunAs; -import javax.ejb.EJBException; import javax.ejb.LocalBean; import javax.ejb.Lock; import javax.ejb.LockType; @@ -27,11 +21,6 @@ import javax.ws.rs.Path; import javax.ws.rs.Produces; -import org.apache.accumulo.core.client.AccumuloClient; -import org.apache.accumulo.core.client.BatchScanner; -import org.apache.accumulo.core.data.Key; -import org.apache.accumulo.core.data.Range; -import org.apache.accumulo.core.data.Value; import org.apache.deltaspike.core.api.jmx.JmxManaged; import org.apache.deltaspike.core.api.jmx.MBean; import org.apache.hadoop.io.Text; @@ -39,11 +28,11 @@ import org.jboss.resteasy.annotations.GZIP; import datawave.configuration.spring.SpringBean; +import datawave.core.common.connection.AccumuloConnectionFactory; import datawave.interceptor.RequiredInterceptor; import datawave.interceptor.ResponseInterceptor; -import datawave.security.util.ScannerHelper; -import datawave.webservice.common.connection.AccumuloConnectionFactory; -import datawave.webservice.modification.configuration.ModificationConfiguration; +import datawave.modification.cache.ModificationCache; +import datawave.modification.configuration.ModificationConfiguration; import datawave.webservice.result.VoidResponse; import datawave.webservice.results.modification.MutableFieldListResponse; @@ -65,7 +54,7 @@ public class ModificationCacheBean { private Logger log = Logger.getLogger(this.getClass()); - private Map> cache = new HashMap<>(); + private ModificationCache cache; @Inject private AccumuloConnectionFactory connectionFactory; @@ -76,11 +65,7 @@ public class ModificationCacheBean { @PostConstruct public void init() { - if (modificationConfiguration != null) { - reloadMutableFieldCache(); - } else { - log.error("modificationConfiguration was null"); - } + cache = new ModificationCache(connectionFactory, modificationConfiguration); } /** @@ -102,53 +87,13 @@ public void init() { @GZIP @JmxManaged public VoidResponse reloadMutableFieldCache() { - this.clearCache(); - log.trace("cleared cache"); - final VoidResponse resp = new VoidResponse(); - AccumuloClient client = null; - BatchScanner s = null; - try { - Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - log.trace("getting mutable list from table " + this.modificationConfiguration.getTableName()); - log.trace("modificationConfiguration.getPoolName() = " + modificationConfiguration.getPoolName()); - client = connectionFactory.getClient(modificationConfiguration.getPoolName(), Priority.ADMIN, trackingMap); - log.trace("got connection"); - s = ScannerHelper.createBatchScanner(client, this.modificationConfiguration.getTableName(), - Collections.singleton(client.securityOperations().getUserAuthorizations(client.whoami())), 8); - s.setRanges(Collections.singleton(new Range())); - 
s.fetchColumnFamily(MODIFICATION_COLUMN); - for (Entry e : s) { - // Field name is in the row and datatype is in the colq. - String datatype = e.getKey().getColumnQualifier().toString(); - log.trace("datatype = " + datatype); - String fieldName = e.getKey().getRow().toString(); - log.trace("fieldname = " + fieldName); - if (null == cache.get(datatype)) - cache.put(datatype, new HashSet<>()); - cache.get(datatype).add(fieldName); - } - log.trace("cache size = " + cache.size()); - for (Entry> e : cache.entrySet()) { - log.trace("datatype = " + e.getKey() + ", fieldcount = " + e.getValue().size()); - } - } catch (Exception e) { - log.error("Error during initialization of ModificationCacheBean", e); - throw new EJBException("Error during initialization of ModificationCacheBean", e); - } finally { - if (null != s) - s.close(); - try { - connectionFactory.returnClient(client); - } catch (Exception e) { - log.error("Error returning connection to pool", e); - } - } - return resp; + this.cache.reloadMutableFieldCache(); + return new VoidResponse(); } @JmxManaged public String listMutableFields() { - return cache.toString(); + return cache.listMutableFields(); } /** @@ -161,8 +106,7 @@ public String listMutableFields() { * @return true if field is mutable for the given datatype */ public boolean isFieldMutable(String datatype, String field) { - log.trace("datatype = " + datatype + ", field = " + field); - return cache.get(datatype).contains(field); + return cache.isFieldMutable(datatype, field); } @GET @@ -173,7 +117,7 @@ public boolean isFieldMutable(String datatype, String field) { @Interceptors({RequiredInterceptor.class, ResponseInterceptor.class}) public List getMutableFieldList() { List lists = new ArrayList<>(); - for (Entry> entry : this.cache.entrySet()) { + for (Entry> entry : this.cache.getCachedMutableFieldList().entrySet()) { MutableFieldListResponse r = new MutableFieldListResponse(); r.setDatatype(entry.getKey()); r.setMutableFields(entry.getValue()); @@ -183,17 +127,15 @@ public List getMutableFieldList() { } public Map> getCachedMutableFieldList() { - log.trace("cache = " + cache); - return Collections.unmodifiableMap(cache); + return cache.getCachedMutableFieldList(); } public ModificationConfiguration getModificationConfiguration() { return modificationConfiguration; } - protected void clearCache() { - log.trace("cleared the cache"); - this.cache.clear(); + public ModificationCache getCache() { + return cache; } } diff --git a/web-services/modification/src/main/java/datawave/webservice/modification/cache/ModificationCacheMessageBean.java b/web-services/modification/src/main/java/datawave/webservice/modification/cache/ModificationCacheMessageBean.java index 52b92497645..ab7aab8b84d 100644 --- a/web-services/modification/src/main/java/datawave/webservice/modification/cache/ModificationCacheMessageBean.java +++ b/web-services/modification/src/main/java/datawave/webservice/modification/cache/ModificationCacheMessageBean.java @@ -16,9 +16,9 @@ import org.apache.log4j.Logger; import datawave.configuration.spring.SpringBean; -import datawave.webservice.modification.MutableMetadataHandler; -import datawave.webservice.modification.configuration.ModificationConfiguration; -import datawave.webservice.modification.configuration.ModificationServiceConfiguration; +import datawave.modification.MutableMetadataHandler; +import datawave.modification.configuration.ModificationConfiguration; +import datawave.modification.configuration.ModificationServiceConfiguration; @RunAs("InternalUser") 
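The two beans above shrink to facades: ModificationBean hands request processing to the relocated datawave.modification.ModificationService (built lazily from the configuration, the cache, the connection factory, and the QueryExecutorBeanService adapter), and ModificationCacheBean delegates its mutable-field bookkeeping to datawave.modification.cache.ModificationCache, inside which the Accumulo scan formerly in reloadMutableFieldCache() now lives. A minimal sketch of that delegation shape, assuming only the ModificationCache methods referenced in these hunks; the wrapper class name is hypothetical.

```java
// Sketch under stated assumptions; not the actual ModificationCacheBean source.
import java.util.Map;
import java.util.Set;

import datawave.modification.cache.ModificationCache;

public class ModificationCacheFacade {
    // In the bean this is constructed once in @PostConstruct from the
    // connection factory and the ModificationConfiguration.
    private final ModificationCache cache;

    public ModificationCacheFacade(ModificationCache cache) {
        this.cache = cache;
    }

    public void reload() {
        // The modification-table scan happens inside the core class now.
        cache.reloadMutableFieldCache();
    }

    public boolean isFieldMutable(String datatype, String field) {
        return cache.isFieldMutable(datatype, field);
    }

    public Map<String,Set<String>> mutableFields() {
        return cache.getCachedMutableFieldList();
    }
}
```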
@MessageDriven(name = "ModificationCacheMessageBean", diff --git a/web-services/pom.xml b/web-services/pom.xml index 7fca3139553..958283e8255 100644 --- a/web-services/pom.xml +++ b/web-services/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave datawave-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT gov.nsa.datawave.webservices datawave-ws-parent @@ -21,6 +21,7 @@ map-reduce map-reduce-status map-reduce-embedded + metrics atom query-websocket web-root @@ -44,8 +45,9 @@ 1.21 1.7 3.2 - 3.6 + 3.9.0 2.6.1 + 2.1.8 1.1 1.0.1 1.1 @@ -114,6 +116,11 @@ + + dnsjava + dnsjava + ${version.dnsjava} + gov.nsa.datawave datawave-core @@ -356,8 +363,16 @@ ${version.zookeeper} - * - * + org.slf4j + slf4j-log4j12 + + + log4j + log4j + + + ch.qos.logback + logback-classic @@ -495,7 +510,14 @@ org.powermock powermock-module-junit4-rule-agent - 1.6.1 + 2.0.2 + test + true + + + org.powermock + powermock-reflect + 2.0.2 test true @@ -506,6 +528,11 @@ gov.nsa.datawave datawave-common-test + + org.junit.jupiter + junit-jupiter-engine + test + @@ -624,6 +651,18 @@ /dev/null + + + org.junit.jupiter + junit-jupiter-engine + ${version.junit.bom} + + + org.junit.vintage + junit-vintage-engine + ${version.junit.bom} + + integration-test diff --git a/web-services/query-websocket/pom.xml b/web-services/query-websocket/pom.xml index 6fa5b91f00c..040a8f2c011 100644 --- a/web-services/query-websocket/pom.xml +++ b/web-services/query-websocket/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-query-websocket war diff --git a/web-services/query/pom.xml b/web-services/query/pom.xml index 74a36752760..96b02b5e777 100644 --- a/web-services/query/pom.xml +++ b/web-services/query/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-query ejb @@ -62,6 +62,12 @@ + + gov.nsa.datawave.core + datawave-core-query + ${project.version} + jboss + gov.nsa.datawave.microservice query-metric-api @@ -70,6 +76,12 @@ gov.nsa.datawave.webservices datawave-ws-client ${project.version} + + + jakarta.validation + jakarta.validation-api + + io.protostuff @@ -89,7 +101,7 @@ --> org.apache.commons - commons-jexl + commons-jexl3 org.apache.commons @@ -101,6 +113,10 @@ org.easymock easymock + + org.mockito + mockito-core + org.powermock powermock-api-easymock @@ -215,6 +231,20 @@ jboss-transaction-api_1.2_spec provided + + gov.nsa.datawave + datawave-core + ${project.version} + tests + test-jar + test + + + org.apache.logging.log4j + log4j-slf4j-impl + + + gov.nsa.datawave datawave-in-memory-accumulo diff --git a/web-services/query/src/main/java/datawave/webservice/query/cache/CreatedQueryLogicCacheBean.java b/web-services/query/src/main/java/datawave/webservice/query/cache/CreatedQueryLogicCacheBean.java index 8f9f16c022d..b23309bad13 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/cache/CreatedQueryLogicCacheBean.java +++ b/web-services/query/src/main/java/datawave/webservice/query/cache/CreatedQueryLogicCacheBean.java @@ -27,8 +27,8 @@ import com.google.common.collect.Maps; import datawave.configuration.DatawaveEmbeddedProjectStageHolder; -import datawave.webservice.common.connection.AccumuloConnectionFactory; -import datawave.webservice.query.logic.QueryLogic; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.query.logic.QueryLogic; @Startup @Singleton @@ -99,6 +99,7 @@ public boolean equals(Object o) { * @param logic * the query logic * @param client + * accumulo client 
* @return true if there was no previous mapping for the given queryId in the cache. */ public boolean add(String queryId, String userId, QueryLogic logic, AccumuloClient client) { diff --git a/web-services/query/src/main/java/datawave/webservice/query/cache/QueryCacheBean.java b/web-services/query/src/main/java/datawave/webservice/query/cache/QueryCacheBean.java index 93e2acefbef..e2158a046e9 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/cache/QueryCacheBean.java +++ b/web-services/query/src/main/java/datawave/webservice/query/cache/QueryCacheBean.java @@ -22,7 +22,7 @@ import org.jboss.resteasy.annotations.GZIP; import datawave.configuration.DatawaveEmbeddedProjectStageHolder; -import datawave.webservice.query.logic.QueryLogic; +import datawave.core.query.logic.QueryLogic; import datawave.webservice.query.runner.QueryExecutorBean; import datawave.webservice.query.runner.RunningQuery; import datawave.webservice.result.VoidResponse; diff --git a/web-services/query/src/main/java/datawave/webservice/query/cache/QueryExpirationBean.java b/web-services/query/src/main/java/datawave/webservice/query/cache/QueryExpirationBean.java index 2fc63efbb05..9f69ddd0a1a 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/cache/QueryExpirationBean.java +++ b/web-services/query/src/main/java/datawave/webservice/query/cache/QueryExpirationBean.java @@ -18,8 +18,9 @@ import datawave.configuration.DatawaveEmbeddedProjectStageHolder; import datawave.configuration.spring.SpringBean; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.microservice.query.config.QueryExpirationProperties; import datawave.microservice.querymetric.QueryMetric; -import datawave.webservice.common.connection.AccumuloConnectionFactory; import datawave.webservice.query.exception.DatawaveErrorCode; import datawave.webservice.query.exception.QueryException; import datawave.webservice.query.metric.QueryMetricsBean; @@ -40,17 +41,17 @@ public class QueryExpirationBean { private static final Logger log = Logger.getLogger(QueryExpirationBean.class); @Inject - private QueryCache cache; + QueryCache cache; @Inject @SpringBean(refreshable = true) - private QueryExpirationConfiguration conf; + QueryExpirationProperties conf; @Inject - private AccumuloConnectionFactory connectionFactory; + AccumuloConnectionFactory connectionFactory; @Inject - private CreatedQueryLogicCacheBean qlCache; + CreatedQueryLogicCacheBean qlCache; @Inject private QueryMetricsBean metrics; @@ -64,7 +65,7 @@ public void init() { } if (conf == null) { - throw new IllegalArgumentException("QueryExpirationConfiguration is null"); + throw new IllegalArgumentException("QueryExpirationProperties is null"); } } @@ -90,7 +91,7 @@ public void removeIdleOrExpired() { } long now = System.currentTimeMillis(); clearQueries(now); - qlCache.clearQueryLogics(now, conf.getCallTimeInMS()); + qlCache.clearQueryLogics(now, conf.getCallTimeoutMillis()); } private void clearQueries(long now) { @@ -161,11 +162,11 @@ private void clearQueries(long now) { private boolean isIdleTooLong(RunningQuery query, long currentTime) { long difference = currentTime - query.getLastUsed(); if (log.isDebugEnabled()) { - long countDown = (conf.getIdleTimeInMS() / 1000) - (difference / 1000); + long countDown = (conf.getIdleTimeoutMillis() / 1000) - (difference / 1000); log.debug("Query: " + query.getSettings().getOwner() + " - " + query.getSettings().getId() + " will be evicted in: " + countDown + " seconds."); } - return difference > 
conf.getIdleTimeInMS(); + return difference > conf.getIdleTimeoutMillis(); } /** @@ -186,7 +187,7 @@ private boolean isNextTooLong(RunningQuery query, long currentTime) { query.touch(); // Since we know we're still in a call, go ahead and reset the idle time. long difference = currentTime - query.getTimeOfCurrentCall(); - if (difference > conf.getCallTimeInMS()) { + if (difference > conf.getCallTimeoutMillis()) { log.warn("Query " + query.getSettings().getOwner() + " - " + query.getSettings().getId() + " has been in a call for " + (difference / 1000) + "s. We are evicting this query from the cache."); return true; diff --git a/web-services/query/src/main/java/datawave/webservice/query/cache/QueryExpirationConfiguration.java b/web-services/query/src/main/java/datawave/webservice/query/cache/QueryExpirationConfiguration.java deleted file mode 100644 index 077836799cc..00000000000 --- a/web-services/query/src/main/java/datawave/webservice/query/cache/QueryExpirationConfiguration.java +++ /dev/null @@ -1,91 +0,0 @@ -package datawave.webservice.query.cache; - -import datawave.configuration.RefreshableScope; - -/** - * Configuration file is located at: datawave/query/QueryExpiration.xml - */ -@RefreshableScope -public class QueryExpirationConfiguration { - - public static final int PAGE_TIMEOUT_MIN_DEFAULT = 60; - public static final int IDLE_TIME_MIN_DEFAULT = 15; - - private long idleTimeMinutes = IDLE_TIME_MIN_DEFAULT; - private long callTimeMinutes = PAGE_TIMEOUT_MIN_DEFAULT; - private long pageSizeShortCircuitCheckTimeMinutes = PAGE_TIMEOUT_MIN_DEFAULT / 2; - private long pageShortCircuitTimeoutMinutes = Math.round(0.97 * PAGE_TIMEOUT_MIN_DEFAULT); - private int maxLongRunningTimeoutRetries = 3; - - public long getIdleTimeMinutes() { - return idleTimeMinutes; - } - - public long getIdleTimeInMS() { - return idleTimeMinutes * 60 * 1000; - } - - public void setIdleTime(long idleTimeMinutes) { - this.idleTimeMinutes = idleTimeMinutes; - } - - public void setIdleTimeMinutes(long idleTimeMinutes) { - this.idleTimeMinutes = idleTimeMinutes; - } - - public long getCallTimeMinutes() { - return callTimeMinutes; - } - - public long getCallTimeInMS() { - return callTimeMinutes * 60 * 1000; - } - - public void setCallTime(long callTimeMinutes) { - this.callTimeMinutes = callTimeMinutes; - } - - public void setCallTimeMinutes(long callTimeMinutes) { - this.callTimeMinutes = callTimeMinutes; - } - - public float getPageSizeShortCircuitCheckTimeMinutes() { - return pageSizeShortCircuitCheckTimeMinutes; - } - - public long getPageSizeShortCircuitCheckTimeInMS() { - return pageSizeShortCircuitCheckTimeMinutes * 60 * 1000; - } - - public void setPageSizeShortCircuitCheckTime(long pageSizeShortCircuitCheckTimeMinutes) { - this.pageSizeShortCircuitCheckTimeMinutes = pageSizeShortCircuitCheckTimeMinutes; - } - - public void setPageSizeShortCircuitCheckTimeMinutes(long pageSizeShortCircuitCheckTimeMinutes) { - this.pageSizeShortCircuitCheckTimeMinutes = pageSizeShortCircuitCheckTimeMinutes; - } - - public long getPageShortCircuitTimeoutMinutes() { - return pageShortCircuitTimeoutMinutes; - } - - public long getPageShortCircuitTimeoutInMS() { - return pageShortCircuitTimeoutMinutes * 60 * 1000; - } - - public void setPageShortCircuitTimeout(long pageShortCircuitTimeoutMinutes) { - this.pageShortCircuitTimeoutMinutes = pageShortCircuitTimeoutMinutes; - } - - public void setPageShortCircuitTimeoutMinutes(long pageShortCircuitTimeoutMinutes) { - this.pageShortCircuitTimeoutMinutes = pageShortCircuitTimeoutMinutes; - 
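The QueryExpirationConfiguration class being deleted here is superseded by datawave.microservice.query.config.QueryExpirationProperties, and the bean's checks switch from the *InMS accessors to the *Millis ones. A small sketch of the renamed timeout arithmetic, assuming only the accessors that appear in the QueryExpirationBean hunk above; the helper class is illustrative.

```java
// Sketch: the post-rename timeout checks from the QueryExpirationBean hunk.
import datawave.microservice.query.config.QueryExpirationProperties;

class ExpirationChecks {
    static boolean idleTooLong(QueryExpirationProperties conf, long lastUsed, long now) {
        return (now - lastUsed) > conf.getIdleTimeoutMillis();   // was conf.getIdleTimeInMS()
    }

    static boolean callTooLong(QueryExpirationProperties conf, long callStart, long now) {
        return (now - callStart) > conf.getCallTimeoutMillis();  // was conf.getCallTimeInMS()
    }
}
```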
} - - public int getMaxLongRunningTimeoutRetries() { - return maxLongRunningTimeoutRetries; - } - - public void setMaxLongRunningTimeoutRetries(int maxLongRunningTimeoutRetries) { - this.maxLongRunningTimeoutRetries = maxLongRunningTimeoutRetries; - } -} diff --git a/web-services/query/src/main/java/datawave/webservice/query/cache/RunningQueryTimingImpl.java b/web-services/query/src/main/java/datawave/webservice/query/cache/RunningQueryTimingImpl.java index 66a53b04cc3..968bb75b58e 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/cache/RunningQueryTimingImpl.java +++ b/web-services/query/src/main/java/datawave/webservice/query/cache/RunningQueryTimingImpl.java @@ -1,5 +1,6 @@ package datawave.webservice.query.cache; +import datawave.microservice.query.config.QueryExpirationProperties; import datawave.webservice.query.runner.RunningQuery.RunningQueryTiming; public class RunningQueryTimingImpl implements RunningQueryTiming { @@ -13,9 +14,8 @@ public class RunningQueryTimingImpl implements RunningQueryTiming { // The maximum number of times to continue running a long running query after the timeout is reached. private int maxLongRunningTimeoutRetries = 3; - public RunningQueryTimingImpl(QueryExpirationConfiguration conf, int pageTimeout) { - this(conf.getCallTimeInMS(), conf.getPageSizeShortCircuitCheckTimeInMS(), conf.getPageShortCircuitTimeoutInMS(), - conf.getMaxLongRunningTimeoutRetries()); + public RunningQueryTimingImpl(QueryExpirationProperties conf, int pageTimeout) { + this(conf.getCallTimeoutMillis(), conf.getShortCircuitCheckTimeMillis(), conf.getShortCircuitTimeoutMillis(), conf.getMaxLongRunningTimeoutRetries()); if (pageTimeout > 0) { maxCallMs = pageTimeout * 60 * 1000; diff --git a/web-services/query/src/main/java/datawave/webservice/query/configuration/GenericQueryConfiguration.java b/web-services/query/src/main/java/datawave/webservice/query/configuration/GenericQueryConfiguration.java deleted file mode 100644 index 55d8627f024..00000000000 --- a/web-services/query/src/main/java/datawave/webservice/query/configuration/GenericQueryConfiguration.java +++ /dev/null @@ -1,228 +0,0 @@ -package datawave.webservice.query.configuration; - -import java.util.Collections; -import java.util.Date; -import java.util.Iterator; -import java.util.Set; - -import org.apache.accumulo.core.client.AccumuloClient; -import org.apache.accumulo.core.client.BatchScanner; -import org.apache.accumulo.core.security.Authorizations; -import org.apache.log4j.Logger; - -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.google.common.collect.Iterators; - -import datawave.util.TableName; -import datawave.webservice.common.logging.ThreadConfigurableLogger; -import datawave.webservice.query.logic.BaseQueryLogic; -import datawave.webservice.util.EnvProvider; - -/** - *
- * <pre>
- * A basic query configuration object that contains the information needed to run a query.
- * </pre>
- *
- * <pre>
- * Provides some "expected" default values for parameters. This configuration object also encapsulates iterators and their options that would be set on a
- * {@link BatchScanner}.
- * </pre>
    - * - */ -public abstract class GenericQueryConfiguration { - - private static final Logger log = ThreadConfigurableLogger.getLogger(GenericQueryConfiguration.class); - - @JsonIgnore - private transient AccumuloClient client = null; - private Set authorizations = Collections.singleton(Authorizations.EMPTY); - // Leave in a top-level query for backwards-compatibility purposes - private String queryString = null; - - private Date beginDate = null; - private Date endDate = null; - - // The max number of next + seek calls made by the underlying iterators - private Long maxWork = -1L; - - protected int baseIteratorPriority = 100; - - // Table name - private String tableName = TableName.SHARD; - - @JsonIgnore - private transient Iterator queries = Collections.emptyIterator(); - - protected boolean bypassAccumulo; - - // use a value like 'env:PASS' to pull from the environment - private String accumuloPassword = ""; - - /** - * Empty default constructor - */ - public GenericQueryConfiguration() { - - } - - /** - * Pulls the table name, max query results, and max rows to scan from the provided argument - * - * @param configuredLogic - * A pre-configured BaseQueryLogic to initialize the Configuration with - */ - public GenericQueryConfiguration(BaseQueryLogic configuredLogic) { - this(configuredLogic.getConfig()); - } - - public GenericQueryConfiguration(GenericQueryConfiguration genericConfig) { - this.setBaseIteratorPriority(genericConfig.getBaseIteratorPriority()); - this.setBypassAccumulo(genericConfig.getBypassAccumulo()); - this.setAccumuloPassword(genericConfig.getAccumuloPassword()); - this.setAuthorizations(genericConfig.getAuthorizations()); - this.setBeginDate(genericConfig.getBeginDate()); - this.setClient(genericConfig.getClient()); - this.setEndDate(genericConfig.getEndDate()); - this.setMaxWork(genericConfig.getMaxWork()); - this.setQueries(genericConfig.getQueries()); - this.setQueryString(genericConfig.getQueryString()); - this.setTableName(genericConfig.getTableName()); - } - - /** - * Return the configured {@code Iterator} - * - * @return the configured iterator - */ - public Iterator getQueries() { - return Iterators.unmodifiableIterator(this.queries); - } - - /** - * Set the queries to be run. 
- * - * @param queries - * the queries - */ - public void setQueries(Iterator queries) { - this.queries = queries; - } - - public AccumuloClient getClient() { - return client; - } - - public void setClient(AccumuloClient client) { - this.client = client; - } - - public void setQueryString(String query) { - this.queryString = query; - } - - public String getQueryString() { - return queryString; - } - - public Set getAuthorizations() { - return authorizations; - } - - public void setAuthorizations(Set auths) { - this.authorizations = auths; - } - - public int getBaseIteratorPriority() { - return baseIteratorPriority; - } - - public void setBaseIteratorPriority(final int baseIteratorPriority) { - this.baseIteratorPriority = baseIteratorPriority; - } - - public Date getBeginDate() { - return beginDate; - } - - public void setBeginDate(Date beginDate) { - this.beginDate = beginDate; - } - - public Date getEndDate() { - return endDate; - } - - public void setEndDate(Date endDate) { - this.endDate = endDate; - } - - public Long getMaxWork() { - return maxWork; - } - - public void setMaxWork(Long maxWork) { - this.maxWork = maxWork; - } - - public String getTableName() { - return tableName; - } - - public void setTableName(String tableName) { - this.tableName = tableName; - } - - public boolean getBypassAccumulo() { - return bypassAccumulo; - } - - public void setBypassAccumulo(boolean bypassAccumulo) { - this.bypassAccumulo = bypassAccumulo; - } - - /** - * @return - the accumulo password - */ - public String getAccumuloPassword() { - return this.accumuloPassword; - } - - /** - * Sets configured password for accumulo access - * - * @param password - * the password used to connect to accumulo - */ - public void setAccumuloPassword(String password) { - this.accumuloPassword = EnvProvider.resolve(password); - } - - /** - * Checks for non-null, sane values for the configured values - * - * @return True if all of the encapsulated values have legitimate values, otherwise false - */ - public boolean canRunQuery() { - // Ensure we were given connector and authorizations - if (null == this.getClient() || null == this.getAuthorizations()) { - return false; - } - - // Ensure valid dates - if (null == this.getBeginDate() || null == this.getEndDate() || endDate.before(beginDate)) { - return false; - } - - // A non-empty table was given - if (null == getTableName() || this.getTableName().isEmpty()) { - return false; - } - - // At least one QueryData was provided - if (null == this.queries) { - return false; - } - - return true; - } -} diff --git a/web-services/query/src/main/java/datawave/webservice/query/configuration/IdTranslatorConfiguration.java b/web-services/query/src/main/java/datawave/webservice/query/configuration/IdTranslatorConfiguration.java index 2e3706ed92c..3e1c0aa8ffa 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/configuration/IdTranslatorConfiguration.java +++ b/web-services/query/src/main/java/datawave/webservice/query/configuration/IdTranslatorConfiguration.java @@ -8,8 +8,8 @@ import org.jboss.resteasy.specimpl.MultivaluedMapImpl; import org.springframework.stereotype.Component; +import datawave.microservice.query.QueryParameters; import datawave.query.data.UUIDType; -import datawave.webservice.query.QueryParameters; @Component("idTranslatorConfiguration") public class IdTranslatorConfiguration { @@ -42,7 +42,7 @@ public void setUuidTypes(List uuidTypes) { List goodTypes = new ArrayList<>(); if (uuidTypes != null) { for (UUIDType uuidType : uuidTypes) { - if 
(uuidType.getDefinedView().equalsIgnoreCase("LuceneUUIDEventQuery")) { + if ("LuceneUUIDEventQuery".equalsIgnoreCase(uuidType.getQueryLogic("default"))) { goodTypes.add(uuidType); } } diff --git a/web-services/query/src/main/java/datawave/webservice/query/configuration/LookupUUIDConfiguration.java b/web-services/query/src/main/java/datawave/webservice/query/configuration/LookupUUIDConfiguration.java index 7c87e099994..7559e015ee9 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/configuration/LookupUUIDConfiguration.java +++ b/web-services/query/src/main/java/datawave/webservice/query/configuration/LookupUUIDConfiguration.java @@ -1,13 +1,14 @@ package datawave.webservice.query.configuration; import java.util.List; +import java.util.Map; import javax.ws.rs.core.MultivaluedMap; import org.jboss.resteasy.specimpl.MultivaluedMapImpl; +import datawave.microservice.query.QueryParameters; import datawave.query.data.UUIDType; -import datawave.webservice.query.QueryParameters; import datawave.webservice.query.util.LookupUUIDConstants; /** @@ -16,6 +17,8 @@ public class LookupUUIDConfiguration { protected List uuidTypes = null; + protected Map contentLookupTypes = null; + protected int batchLookupUpperLimit = LookupUUIDConstants.DEFAULT_BATCH_LOOKUP_UPPER_LIMIT; protected String beginDate = null; protected String columnVisibility; @@ -37,6 +40,10 @@ public String getColumnVisibility() { return this.columnVisibility; } + public Map getContentLookupTypes() { + return this.contentLookupTypes; + } + public List getUuidTypes() { return this.uuidTypes; } @@ -63,6 +70,10 @@ public void setUuidTypes(List uuidTypes) { this.uuidTypes = uuidTypes; } + public void setContentLookupTypes(Map contentLookupTypes) { + this.contentLookupTypes = contentLookupTypes; + } + public MultivaluedMap optionalParamsToMap() { MultivaluedMap p = new MultivaluedMapImpl<>(); if (this.columnVisibility != null) { diff --git a/web-services/query/src/main/java/datawave/webservice/query/configuration/QueryData.java b/web-services/query/src/main/java/datawave/webservice/query/configuration/QueryData.java deleted file mode 100644 index f039f104d18..00000000000 --- a/web-services/query/src/main/java/datawave/webservice/query/configuration/QueryData.java +++ /dev/null @@ -1,85 +0,0 @@ -package datawave.webservice.query.configuration; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - -import org.apache.accumulo.core.client.IteratorSetting; -import org.apache.accumulo.core.data.Range; - -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; - -/** - * Class to encapsulate all required information to run a query. 
- * - */ -public class QueryData { - List settings = Lists.newArrayList(); - String query; - Collection ranges = Sets.newHashSet(); - Collection columnFamilies = Sets.newHashSet(); - - public QueryData() {} - - public QueryData(String query, Collection ranges, List settings) { - setQuery(query); - setRanges(ranges); - setSettings(settings); - } - - public QueryData(QueryData other) { - this(other.getQuery(), other.getRanges(), other.getSettings()); - } - - public QueryData(QueryData other, Collection ranges) { - setQuery(other.getQuery()); - setSettings(other.getSettings()); - setRanges(ranges); - } - - public QueryData(String queryString, ArrayList ranges, List settings, Collection columnFamilies) { - this(queryString, ranges, settings); - this.columnFamilies.addAll(columnFamilies); - } - - public List getSettings() { - return settings; - } - - public void setSettings(List settings) { - this.settings = Lists.newArrayList(settings); - } - - public String getQuery() { - return query; - } - - public void setQuery(String query) { - this.query = query; - } - - public Collection getRanges() { - return ranges; - } - - public Collection getColumnFamilies() { - return columnFamilies; - } - - public void setRanges(Collection ranges) { - if (null != ranges) - this.ranges.addAll(ranges); - } - - public void addIterator(IteratorSetting cfg) { - this.settings.add(cfg); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(256); - sb.append("Query: '").append(this.query).append("', Ranges: ").append(this.ranges).append(", Settings: ").append(this.settings); - return sb.toString(); - } -} diff --git a/web-services/query/src/main/java/datawave/webservice/query/dashboard/DashboardBean.java b/web-services/query/src/main/java/datawave/webservice/query/dashboard/DashboardBean.java index 3b2afca68be..72cb6ae12f2 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/dashboard/DashboardBean.java +++ b/web-services/query/src/main/java/datawave/webservice/query/dashboard/DashboardBean.java @@ -33,12 +33,13 @@ import org.jboss.resteasy.annotations.GZIP; import datawave.configuration.DatawaveEmbeddedProjectStageHolder; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.common.extjs.ExtJsResponse; +import datawave.core.query.dashboard.DashboardSummary; import datawave.interceptor.ResponseInterceptor; import datawave.security.authorization.DatawavePrincipal; import datawave.security.util.ScannerHelper; import datawave.security.util.WSAuthorizationsUtil; -import datawave.webservice.common.connection.AccumuloConnectionFactory; -import datawave.webservice.common.extjs.ExtJsResponse; import datawave.webservice.query.runner.QueryExecutorBean; @Path("/Query/Metrics/dashboard") @@ -153,7 +154,15 @@ private Scanner createScanner(AccumuloClient accumuloClient) throws TableNotFoun } private AccumuloClient createClient() throws Exception { + Principal p = ctx.getCallerPrincipal(); + String userDn = null; + Collection proxyServers = null; + if (p instanceof DatawavePrincipal) { + DatawavePrincipal dp = (DatawavePrincipal) p; + userDn = dp.getUserDN().subjectDN(); + proxyServers = dp.getProxyServers(); + } Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - return connectionFactory.getClient(AccumuloConnectionFactory.Priority.LOW, trackingMap); + return connectionFactory.getClient(userDn, proxyServers, AccumuloConnectionFactory.Priority.LOW, trackingMap); } } diff --git 
a/web-services/query/src/main/java/datawave/webservice/query/dashboard/DashboardQuery.java b/web-services/query/src/main/java/datawave/webservice/query/dashboard/DashboardQuery.java index 5dd77c82563..2f2c240bc57 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/dashboard/DashboardQuery.java +++ b/web-services/query/src/main/java/datawave/webservice/query/dashboard/DashboardQuery.java @@ -3,15 +3,15 @@ import java.text.ParseException; import java.util.Date; -import javax.ws.rs.core.MultivaluedMap; - import org.apache.commons.lang.time.DateUtils; -import org.jboss.resteasy.specimpl.MultivaluedMapImpl; -import datawave.webservice.common.extjs.ExtJsResponse; -import datawave.webservice.query.QueryParametersImpl; -import datawave.webservice.query.QueryPersistence; +import datawave.core.common.extjs.ExtJsResponse; +import datawave.core.query.dashboard.DashboardFields; +import datawave.core.query.dashboard.DashboardSummary; +import datawave.microservice.query.DefaultQueryParameters; +import datawave.microservice.query.QueryPersistence; import datawave.webservice.query.runner.QueryExecutor; +import datawave.webservice.query.util.MapUtils; public class DashboardQuery { @@ -33,9 +33,9 @@ private DashboardQuery() {} public static ExtJsResponse createQuery(QueryExecutor queryExecutor, String auths, Date beginDate, Date endDate, Date now) throws ParseException { - MultivaluedMap paramsMap = new MultivaluedMapImpl<>(); - paramsMap.putAll(QueryParametersImpl.paramsToMap(logicName, queryString, queryName, columnVisibility, beginDate, endDate, auths, - DateUtils.addDays(now, 1), pageSize, pageTimeout, maxResultsOverride, persistence, systemFrom, parameters, trace)); - return (ExtJsResponse) queryExecutor.createQueryAndNext(logicName, paramsMap); + return (ExtJsResponse) queryExecutor.createQueryAndNext(logicName, + MapUtils.toMultivaluedMap(DefaultQueryParameters.paramsToMap(logicName, queryString, queryName, columnVisibility, beginDate, endDate, + auths, DateUtils.addDays(now, 1), pageSize, pageTimeout, maxResultsOverride, persistence, systemFrom, parameters, + trace))); } } diff --git a/web-services/query/src/main/java/datawave/webservice/query/factory/Persister.java b/web-services/query/src/main/java/datawave/webservice/query/factory/Persister.java index 727f7bb9941..2f5363c930d 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/factory/Persister.java +++ b/web-services/query/src/main/java/datawave/webservice/query/factory/Persister.java @@ -49,19 +49,19 @@ import com.google.protobuf.InvalidProtocolBufferException; import datawave.configuration.DatawaveEmbeddedProjectStageHolder; -import datawave.configuration.spring.SpringBean; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.common.connection.AccumuloConnectionFactory.Priority; +import datawave.core.query.util.QueryUtil; import datawave.marking.SecurityMarking; +import datawave.microservice.query.Query; +import datawave.microservice.query.QueryParameters; +import datawave.microservice.query.QueryPersistence; import datawave.query.iterator.QueriesTableAgeOffIterator; import datawave.security.authorization.DatawavePrincipal; import datawave.security.util.ScannerHelper; -import datawave.webservice.common.connection.AccumuloConnectionFactory; -import datawave.webservice.common.connection.AccumuloConnectionFactory.Priority; -import datawave.webservice.query.Query; -import datawave.webservice.query.QueryParameters; -import datawave.webservice.query.QueryPersistence; import 
datawave.webservice.query.result.event.ResponseObjectFactory; +import datawave.webservice.query.util.MapUtils; import datawave.webservice.query.util.QueryUncaughtExceptionHandler; -import datawave.webservice.query.util.QueryUtil; /** * Object that creates and updates QueryImpl objects using a table structure: @@ -104,13 +104,12 @@ public Q apply(final Entry entry) { protected EJBContext ctx; @Inject - @SpringBean(name = "ResponseObjectFactory") private ResponseObjectFactory responseObjectFactory; public Query create(String userDN, List dnList, SecurityMarking marking, String queryLogicName, QueryParameters qp, MultivaluedMap optionalQueryParameters) { Query q = responseObjectFactory.getQueryImpl(); - q.initialize(userDN, dnList, queryLogicName, qp, optionalQueryParameters); + q.initialize(userDN, dnList, queryLogicName, qp, MapUtils.toMultiValueMap(optionalQueryParameters)); q.setColumnVisibility(marking.toColumnVisibilityString()); q.setUncaughtExceptionHandler(new QueryUncaughtExceptionHandler()); Thread.currentThread().setUncaughtExceptionHandler(q.getUncaughtExceptionHandler()); @@ -145,7 +144,7 @@ private void create(Query query) { AccumuloClient c = null; try { Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - c = connectionFactory.getClient(Priority.ADMIN, trackingMap); + c = connectionFactory.getClient(null, null, Priority.ADMIN, trackingMap); tableCheck(c); try (BatchWriter writer = c.createBatchWriter(TABLE_NAME, new BatchWriterConfig().setMaxLatency(10, TimeUnit.SECONDS).setMaxMemory(10240L).setMaxWriteThreads(1))) { @@ -208,7 +207,7 @@ public void remove(Query query) throws Exception { BatchDeleter deleter = null; try { Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - c = connectionFactory.getClient(Priority.ADMIN, trackingMap); + c = connectionFactory.getClient(null, null, Priority.ADMIN, trackingMap); if (!c.tableOperations().exists(TABLE_NAME)) { return; } @@ -265,7 +264,7 @@ public List findById(String id) { try { Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - client = connectionFactory.getClient(Priority.ADMIN, trackingMap); + client = connectionFactory.getClient(null, null, Priority.ADMIN, trackingMap); tableCheck(client); IteratorSetting regex = new IteratorSetting(21, RegExFilter.class); @@ -313,7 +312,7 @@ public List findByName(String name) { AccumuloClient c = null; try { Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - c = connectionFactory.getClient(Priority.ADMIN, trackingMap); + c = connectionFactory.getClient(null, null, Priority.ADMIN, trackingMap); tableCheck(c); try (Scanner scanner = ScannerHelper.createScanner(c, TABLE_NAME, auths)) { Range range = new Range(shortName, shortName); @@ -359,7 +358,7 @@ public List findByUser() { AccumuloClient c = null; try { Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - c = connectionFactory.getClient(Priority.ADMIN, trackingMap); + c = connectionFactory.getClient(null, null, Priority.ADMIN, trackingMap); tableCheck(c); try (Scanner scanner = ScannerHelper.createScanner(c, TABLE_NAME, auths)) { Range range = new Range(sid, sid); @@ -411,7 +410,7 @@ public List findByUser(String user) { AccumuloClient c = null; try { Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - c = connectionFactory.getClient(Priority.ADMIN, trackingMap); + c = 
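A pattern worth calling out across the Persister and DashboardBean hunks: every getClient call now carries caller identity. User-facing paths derive the DN and proxy chain from the DatawavePrincipal, while system-level work passes null for both and draws from the ADMIN pool. A hedged sketch of the two acquisition paths, using only the calls shown in these hunks; the wrapper class is illustrative.

```java
// Sketch under stated assumptions; mirrors the acquisition calls in this diff.
import java.util.Collection;
import java.util.Map;

import org.apache.accumulo.core.client.AccumuloClient;

import datawave.core.common.connection.AccumuloConnectionFactory;
import datawave.security.authorization.DatawavePrincipal;

class ClientAcquisition {
    // User-facing path (DashboardBean): bill the connection to the caller.
    static AccumuloClient forUser(AccumuloConnectionFactory factory, DatawavePrincipal dp) throws Exception {
        Map<String,String> trackingMap = factory.getTrackingMap(Thread.currentThread().getStackTrace());
        String userDn = dp.getUserDN().subjectDN();
        Collection<String> proxyServers = dp.getProxyServers();
        return factory.getClient(userDn, proxyServers, AccumuloConnectionFactory.Priority.LOW, trackingMap);
    }

    // System path (Persister): no caller identity, ADMIN pool.
    static AccumuloClient forSystem(AccumuloConnectionFactory factory) throws Exception {
        Map<String,String> trackingMap = factory.getTrackingMap(Thread.currentThread().getStackTrace());
        return factory.getClient(null, null, AccumuloConnectionFactory.Priority.ADMIN, trackingMap);
    }
}
```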
connectionFactory.getClient(null, null, Priority.ADMIN, trackingMap); tableCheck(c); try (Scanner scanner = ScannerHelper.createScanner(c, TABLE_NAME, auths)) { Range range = new Range(user, user); @@ -445,7 +444,7 @@ public List adminFindById(final String queryId) { try { final Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - client = connectionFactory.getClient(Priority.ADMIN, trackingMap); + client = connectionFactory.getClient(null, null, Priority.ADMIN, trackingMap); tableCheck(client); final IteratorSetting regex = new IteratorSetting(21, RegExFilter.class); diff --git a/web-services/query/src/main/java/datawave/webservice/query/hud/HudBean.java b/web-services/query/src/main/java/datawave/webservice/query/hud/HudBean.java index 2eea342a4a3..89f6a0372d0 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/hud/HudBean.java +++ b/web-services/query/src/main/java/datawave/webservice/query/hud/HudBean.java @@ -25,12 +25,13 @@ import com.google.gson.Gson; import datawave.configuration.DatawaveEmbeddedProjectStageHolder; +import datawave.microservice.query.Query; import datawave.microservice.querymetric.BaseQueryMetric; import datawave.microservice.querymetric.BaseQueryMetric.PageMetric; +import datawave.microservice.querymetric.BaseQueryMetricListResponse; import datawave.microservice.querymetric.QueryMetricSummary; import datawave.microservice.querymetric.QueryMetricsSummaryResponse; import datawave.security.authorization.DatawavePrincipal; -import datawave.webservice.query.Query; import datawave.webservice.query.factory.Persister; import datawave.webservice.query.metric.QueryMetricsBean; import datawave.webservice.query.runner.QueryExecutorBean; @@ -91,19 +92,21 @@ public String getRunningQueries(@PathParam("userid") String userId) throws Excep List queryMetricsList; - queryMetricsList = queryMetrics.query(queryId).getResult(); - - if (queryMetricsList != null && !queryMetricsList.isEmpty()) { - BaseQueryMetric qm = queryMetricsList.get(0); - - List pageMetrics = qm.getPageTimes(); - summary.setPageMetrics(pageMetrics); - - summary.setCreateDate(qm.getCreateDate().getTime()); - summary.setNumPages(qm.getNumPages()); - summary.setNumResults(qm.getNumResults()); - summary.setLastUpdated(qm.getLastUpdated().getTime()); - summary.setLifeCycle(qm.getLifecycle().toString()); + BaseQueryMetricListResponse response = queryMetrics.query(queryId); + if (response != null) { + queryMetricsList = response.getResult(); + if (queryMetricsList != null && !queryMetricsList.isEmpty()) { + BaseQueryMetric qm = queryMetricsList.get(0); + + List pageMetrics = qm.getPageTimes(); + summary.setPageMetrics(pageMetrics); + + summary.setCreateDate(qm.getCreateDate().getTime()); + summary.setNumPages(qm.getNumPages()); + summary.setNumResults(qm.getNumResults()); + summary.setLastUpdated(qm.getLastUpdated().getTime()); + summary.setLifeCycle(qm.getLifecycle().toString()); + } } querySummaryList.add(summary); @@ -125,20 +128,24 @@ private DatawavePrincipal getPrincipal() { @Path("/summaryall") @GET @RolesAllowed({"Administrator", "MetricsAdministrator"}) - public String getSummaryQueryStats() throws Exception { + public String getSummaryQueryStats() { QueryMetricsSummaryResponse summaryResp = queryMetrics.getQueryMetricsSummary(null, null); - QueryMetricSummary hour1 = summaryResp.getHour1(); - QueryMetricSummary hour6 = summaryResp.getHour6(); - QueryMetricSummary hour12 = summaryResp.getHour12(); - QueryMetricSummary day1 = summaryResp.getDay1(); - - 
List metricSummaryList = new ArrayList<>(); - metricSummaryList.add(metricSummaryBuilder.buildMetricsSummary(1L, hour1)); - metricSummaryList.add(metricSummaryBuilder.buildMetricsSummary(6L, hour6)); - metricSummaryList.add(metricSummaryBuilder.buildMetricsSummary(12L, hour12)); - metricSummaryList.add(metricSummaryBuilder.buildMetricsSummary(24L, day1)); - - return gson.toJson(metricSummaryList); + if (summaryResp == null) { + return null; + } else { + QueryMetricSummary hour1 = summaryResp.getHour1(); + QueryMetricSummary hour6 = summaryResp.getHour6(); + QueryMetricSummary hour12 = summaryResp.getHour12(); + QueryMetricSummary day1 = summaryResp.getDay1(); + + List metricSummaryList = new ArrayList<>(); + metricSummaryList.add(metricSummaryBuilder.buildMetricsSummary(1L, hour1)); + metricSummaryList.add(metricSummaryBuilder.buildMetricsSummary(6L, hour6)); + metricSummaryList.add(metricSummaryBuilder.buildMetricsSummary(12L, hour12)); + metricSummaryList.add(metricSummaryBuilder.buildMetricsSummary(24L, day1)); + + return gson.toJson(metricSummaryList); + } } @Path("/activeusers") diff --git a/web-services/query/src/main/java/datawave/webservice/query/hud/HudQuerySummaryBuilder.java b/web-services/query/src/main/java/datawave/webservice/query/hud/HudQuerySummaryBuilder.java index 52b17835b58..96786cbd377 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/hud/HudQuerySummaryBuilder.java +++ b/web-services/query/src/main/java/datawave/webservice/query/hud/HudQuerySummaryBuilder.java @@ -1,6 +1,6 @@ package datawave.webservice.query.hud; -import datawave.webservice.query.Query; +import datawave.microservice.query.Query; /** * diff --git a/web-services/query/src/main/java/datawave/webservice/query/interceptor/QueryMetricsEnrichmentInterceptor.java b/web-services/query/src/main/java/datawave/webservice/query/interceptor/QueryMetricsEnrichmentInterceptor.java index 10f6c4481a5..4c18427d648 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/interceptor/QueryMetricsEnrichmentInterceptor.java +++ b/web-services/query/src/main/java/datawave/webservice/query/interceptor/QueryMetricsEnrichmentInterceptor.java @@ -19,14 +19,14 @@ import org.jboss.resteasy.util.FindAnnotation; import datawave.configuration.DatawaveEmbeddedProjectStageHolder; +import datawave.core.query.logic.BaseQueryLogic; +import datawave.core.query.logic.QueryLogic; import datawave.microservice.querymetric.BaseQueryMetric; import datawave.microservice.querymetric.BaseQueryMetric.PageMetric; import datawave.resteasy.interceptor.BaseMethodStatsInterceptor; import datawave.webservice.query.annotation.EnrichQueryMetrics; import datawave.webservice.query.annotation.EnrichQueryMetrics.MethodType; import datawave.webservice.query.cache.QueryCache; -import datawave.webservice.query.logic.BaseQueryLogic; -import datawave.webservice.query.logic.QueryLogic; import datawave.webservice.query.metric.QueryMetricsBean; import datawave.webservice.query.runner.QueryExecutorBean; import datawave.webservice.query.runner.RunningQuery; diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/DatawaveRoleManager.java b/web-services/query/src/main/java/datawave/webservice/query/logic/DatawaveRoleManager.java deleted file mode 100644 index 3bd47bcd97f..00000000000 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/DatawaveRoleManager.java +++ /dev/null @@ -1,43 +0,0 @@ -package datawave.webservice.query.logic; - -import java.security.Principal; -import java.util.Collection; 
-import java.util.Collections; -import java.util.HashSet; -import java.util.Set; - -import com.google.common.collect.Sets; - -import datawave.security.authorization.DatawavePrincipal; - -public class DatawaveRoleManager implements RoleManager { - - private Set requiredRoles; - - public DatawaveRoleManager() {} - - public DatawaveRoleManager(Collection requiredRoles) { - this.requiredRoles = Collections.unmodifiableSet(Sets.newHashSet(requiredRoles)); - } - - @Override - public boolean canRunQuery(QueryLogic queryLogic, Principal principal) { - if (principal instanceof DatawavePrincipal == false) - return false; - DatawavePrincipal datawavePrincipal = (DatawavePrincipal) principal; - if (requiredRoles != null && !requiredRoles.isEmpty()) { - Set usersRoles = new HashSet<>(datawavePrincipal.getPrimaryUser().getRoles()); - return usersRoles.containsAll(requiredRoles); - } - return true; - } - - public Set getRequiredRoles() { - return requiredRoles; - } - - public void setRequiredRoles(Set requiredRoles) { - this.requiredRoles = requiredRoles; - } - -} diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/EasyRoleManager.java b/web-services/query/src/main/java/datawave/webservice/query/logic/EasyRoleManager.java deleted file mode 100644 index 653eac3ffbf..00000000000 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/EasyRoleManager.java +++ /dev/null @@ -1,23 +0,0 @@ -package datawave.webservice.query.logic; - -import java.security.Principal; -import java.util.Collections; -import java.util.Set; - -public class EasyRoleManager implements RoleManager { - - @Override - public boolean canRunQuery(QueryLogic queryLogic, Principal principal) { - return true; - } - - @Override - public void setRequiredRoles(Set requiredRoles) { - // TODO Auto-generated method stub - } - - @Override - public Set getRequiredRoles() { - return Collections.emptySet(); - } -} diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/QueryLogicFactory.java b/web-services/query/src/main/java/datawave/webservice/query/logic/QueryLogicFactory.java deleted file mode 100644 index 4ad56232d43..00000000000 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/QueryLogicFactory.java +++ /dev/null @@ -1,23 +0,0 @@ -package datawave.webservice.query.logic; - -import java.security.Principal; -import java.util.List; - -public interface QueryLogicFactory { - - /** - * - * @param name - * name of query logic - * @param principal - * the principal - * @return new instance of QueryLogic class - * @throws IllegalArgumentException - * if query logic name does not exist - * @throws CloneNotSupportedException - * if the clone is not supported - */ - QueryLogic getQueryLogic(String name, Principal principal) throws IllegalArgumentException, CloneNotSupportedException; - - List> getQueryLogicList(); -} diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/QueryLogicFactoryConfiguration.java b/web-services/query/src/main/java/datawave/webservice/query/logic/QueryLogicFactoryConfiguration.java index c9f3cca3b0c..e722baf9272 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/QueryLogicFactoryConfiguration.java +++ b/web-services/query/src/main/java/datawave/webservice/query/logic/QueryLogicFactoryConfiguration.java @@ -2,6 +2,8 @@ import java.util.Map; +import datawave.core.query.logic.QueryLogic; + public class QueryLogicFactoryConfiguration { // The logicMap is the list of logics that can be loaded. 
// This is a map of logic name to bean name. diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/QueryLogicFactoryImpl.java b/web-services/query/src/main/java/datawave/webservice/query/logic/QueryLogicFactoryImpl.java index b39bc7ee4b5..07514cf7ba5 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/QueryLogicFactoryImpl.java +++ b/web-services/query/src/main/java/datawave/webservice/query/logic/QueryLogicFactoryImpl.java @@ -1,11 +1,11 @@ package datawave.webservice.query.logic; -import java.security.Principal; import java.util.ArrayList; import java.util.HashMap; -import java.util.Iterator; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import javax.inject.Inject; @@ -13,6 +13,11 @@ import org.springframework.context.ApplicationContext; import datawave.configuration.spring.SpringBean; +import datawave.core.query.logic.QueryLogic; +import datawave.core.query.logic.QueryLogicFactory; +import datawave.security.authorization.DatawavePrincipal; +import datawave.security.authorization.ProxiedUserDetails; +import datawave.security.system.ServerPrincipal; import datawave.webservice.common.exception.UnauthorizedException; import datawave.webservice.result.VoidResponse; @@ -28,9 +33,22 @@ public class QueryLogicFactoryImpl implements QueryLogicFactory { @Inject private ApplicationContext applicationContext; + @Inject + @ServerPrincipal + private DatawavePrincipal serverPrincipal; + + @Override + public QueryLogic getQueryLogic(String name, ProxiedUserDetails currentUser) throws IllegalArgumentException, CloneNotSupportedException { + return getQueryLogic(name, currentUser, true); + } + @Override - public QueryLogic getQueryLogic(String queryLogic, Principal principal) throws IllegalArgumentException, CloneNotSupportedException { + public QueryLogic getQueryLogic(String name) throws IllegalArgumentException, CloneNotSupportedException { + return getQueryLogic(name, null, false); + } + public QueryLogic getQueryLogic(String queryLogic, ProxiedUserDetails currentUser, boolean checkRoles) + throws IllegalArgumentException, CloneNotSupportedException { String beanName = queryLogic; if (queryLogicFactoryConfiguration.hasLogicMap()) { beanName = queryLogicFactoryConfiguration.getLogicMap().get(queryLogic); @@ -42,7 +60,6 @@ public QueryLogic getQueryLogic(String queryLogic, Principal principal) throw QueryLogic logic; try { logic = (QueryLogic) applicationContext.getBean(beanName); - logic.setPrincipal(principal); } catch (ClassCastException | NoSuchBeanDefinitionException cce) { if (beanName.equals(queryLogic)) { throw new IllegalArgumentException("Logic name '" + queryLogic + "' does not exist in the configuration"); @@ -51,9 +68,9 @@ public QueryLogic getQueryLogic(String queryLogic, Principal principal) throw } } - if (!logic.canRunQuery(principal)) { - throw new UnauthorizedException(new IllegalAccessException("User does not have required role(s): " + logic.getRoleManager().getRequiredRoles()), - new VoidResponse()); + Set userRoles = (currentUser == null) ? null : new HashSet<>(currentUser.getPrimaryUser().getRoles()); + if (checkRoles && !logic.canRunQuery(userRoles)) { + throw new UnauthorizedException(new IllegalAccessException("User does not have required role(s): " + logic.getRequiredRoles()), new VoidResponse()); } logic.setLogicName(queryLogic); @@ -63,6 +80,10 @@ public QueryLogic getQueryLogic(String queryLogic, Principal principal) throw if (logic.getPageByteTrigger() == 0) {
logic.setPageByteTrigger(queryLogicFactoryConfiguration.getPageByteTrigger()); } + + logic.setCurrentUser(currentUser); + logic.setServerUser(serverPrincipal); + return logic; } diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/RoleManager.java b/web-services/query/src/main/java/datawave/webservice/query/logic/RoleManager.java deleted file mode 100644 index ab15f595e8d..00000000000 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/RoleManager.java +++ /dev/null @@ -1,14 +0,0 @@ -package datawave.webservice.query.logic; - -import java.security.Principal; -import java.util.Set; - -public interface RoleManager { - - boolean canRunQuery(QueryLogic queryLogic, Principal principal); - - void setRequiredRoles(Set requiredRoles); - - Set getRequiredRoles(); - -} diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/RoleManagerImpl.java b/web-services/query/src/main/java/datawave/webservice/query/logic/RoleManagerImpl.java deleted file mode 100644 index f42b5ee0330..00000000000 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/RoleManagerImpl.java +++ /dev/null @@ -1,5 +0,0 @@ -package datawave.webservice.query.logic; - -public class RoleManagerImpl { - -} diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/WritesQueryMetrics.java b/web-services/query/src/main/java/datawave/webservice/query/logic/WritesQueryMetrics.java deleted file mode 100644 index 049bd144779..00000000000 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/WritesQueryMetrics.java +++ /dev/null @@ -1,9 +0,0 @@ -package datawave.webservice.query.logic; - -import datawave.microservice.querymetric.BaseQueryMetric; - -public interface WritesQueryMetrics { - - void writeQueryMetrics(BaseQueryMetric metric); - -} diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeLogicException.java b/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeLogicException.java deleted file mode 100644 index c57abd66a55..00000000000 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeLogicException.java +++ /dev/null @@ -1,50 +0,0 @@ -package datawave.webservice.query.logic.composite; - -import java.util.Collection; -import java.util.Collections; -import java.util.Map; - -import datawave.webservice.query.exception.QueryException; - -public class CompositeLogicException extends RuntimeException { - public CompositeLogicException(String message, String logicName, Exception exception) { - super(getMessage(message, Collections.singletonMap(logicName, exception)), exception); - } - - public CompositeLogicException(String message, Map exceptions) { - super(getMessage(message, exceptions), getQueryException(exceptions.values())); - if (exceptions.size() > 1) { - exceptions.values().stream().forEach(e -> addSuppressed(e)); - } - } - - // looking for an exception that has a nested QueryException such that we may return an error code - private static Exception getQueryException(Collection exceptions) { - if (exceptions.size() == 1) { - return exceptions.iterator().next(); - } - Exception e = null; - for (Exception test : exceptions) { - if (e == null) { - e = test; - } else if (isQueryException(test)) { - e = test; - } - if (isQueryException(e)) { - break; - } - } - return e; - } - - private static boolean isQueryException(Exception e) { - return new QueryException(e).getQueryExceptionsInStack().size() > 1; - } - - 
private static String getMessage(String message, Map exceptions) { - StringBuilder builder = new StringBuilder(); - builder.append(message).append(":"); - exceptions.entrySet().stream().forEach(e -> builder.append('\n').append(e.getKey()).append(": ").append(e.getValue().getMessage())); - return builder.toString(); - } -} diff --git a/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryLogicTransformer.java b/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryLogicTransformer.java deleted file mode 100644 index 8219789b5a9..00000000000 --- a/web-services/query/src/main/java/datawave/webservice/query/logic/composite/CompositeQueryLogicTransformer.java +++ /dev/null @@ -1,86 +0,0 @@ -package datawave.webservice.query.logic.composite; - -import java.util.List; - -import org.apache.log4j.Logger; - -import com.google.common.base.Throwables; - -import datawave.webservice.query.cache.ResultsPage; -import datawave.webservice.query.cachedresults.CacheableLogic; -import datawave.webservice.query.cachedresults.CacheableQueryRow; -import datawave.webservice.query.exception.QueryException; -import datawave.webservice.query.logic.AbstractQueryLogicTransformer; -import datawave.webservice.query.logic.QueryLogicTransformer; -import datawave.webservice.result.BaseQueryResponse; - -public class CompositeQueryLogicTransformer extends AbstractQueryLogicTransformer implements CacheableLogic { - - protected static final Logger log = Logger.getLogger(CompositeQueryLogicTransformer.class); - - private List> delegates = null; - - public CompositeQueryLogicTransformer(List> delegates) { - this.delegates = delegates; - } - - @Override - public O transform(I input) { - // The objects put into the pageQueue have already been transformed, so no transformation required here. 
- return (O) input; - } - - @Override - public List writeToCache(Object o) throws QueryException { - List result = null; - for (QueryLogicTransformer t : delegates) { - if (t instanceof CacheableLogic) { - CacheableLogic c = (CacheableLogic) t; - try { - result = c.writeToCache(o); - } catch (Exception e) { - log.warn("Error calling writeToCache on delegate, continuing...", e); - } - } - } - return result; - } - - @Override - public List readFromCache(List row) { - List result = null; - for (QueryLogicTransformer t : delegates) { - if (t instanceof CacheableLogic) { - CacheableLogic c = (CacheableLogic) t; - try { - result = c.readFromCache(row); - } catch (Exception e) { - log.warn("Error calling writeToCache on delegate, continuing...", e); - } - } - } - return result; - } - - @Override - public BaseQueryResponse createResponse(List resultList) { - Exception lastFailure = null; - for (QueryLogicTransformer t : delegates) { - if (t instanceof AbstractQueryLogicTransformer) { - AbstractQueryLogicTransformer a = (AbstractQueryLogicTransformer) t; - try { - log.trace("createResponse List"); - return a.createResponse(resultList); - } catch (Exception e) { - log.warn("Error calling createResponse on delegate, trying the next one", e); - lastFailure = e; - } - } - } - if (lastFailure != null) { - Throwables.propagate(lastFailure); - } - return null; - } - -} diff --git a/web-services/query/src/main/java/datawave/webservice/query/metric/QueryMetricsBean.java b/web-services/query/src/main/java/datawave/webservice/query/metric/QueryMetricsBean.java index 3f11a5b3476..b2370ce0505 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/metric/QueryMetricsBean.java +++ b/web-services/query/src/main/java/datawave/webservice/query/metric/QueryMetricsBean.java @@ -1,5 +1,11 @@ package datawave.webservice.query.metric; +import static datawave.metrics.remote.RemoteQueryMetricService.ID_METRIC_SUFFIX; +import static datawave.metrics.remote.RemoteQueryMetricService.MAP_METRIC_SUFFIX; +import static datawave.metrics.remote.RemoteQueryMetricService.SUMMARY_ALL_SUFFIX; +import static datawave.metrics.remote.RemoteQueryMetricService.SUMMARY_USER_SUFFIX; + +import java.net.URISyntaxException; import java.security.Principal; import java.util.Calendar; import java.util.Date; @@ -19,23 +25,33 @@ import javax.ejb.TransactionManagementType; import javax.inject.Inject; import javax.interceptor.Interceptors; +import javax.servlet.http.HttpServletRequest; import javax.ws.rs.GET; import javax.ws.rs.POST; import javax.ws.rs.Path; import javax.ws.rs.PathParam; import javax.ws.rs.Produces; import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.UriInfo; import org.apache.commons.lang.time.DateUtils; import org.apache.deltaspike.core.api.config.ConfigProperty; import org.apache.deltaspike.core.api.exclude.Exclude; +import org.apache.http.client.utils.URIBuilder; import org.apache.log4j.Logger; import org.jboss.resteasy.annotations.GZIP; +import org.springframework.http.MediaType; +import org.xbill.DNS.TextParseException; import datawave.annotation.DateFormat; import datawave.annotation.Required; import datawave.configuration.DatawaveEmbeddedProjectStageHolder; import datawave.configuration.spring.SpringBean; +import datawave.core.query.map.QueryGeometryHandler; +import datawave.core.query.metric.QueryMetricHandler; import datawave.interceptor.RequiredInterceptor; import 
datawave.interceptor.ResponseInterceptor; import datawave.metrics.remote.RemoteQueryMetricService; @@ -46,8 +62,6 @@ import datawave.security.authorization.DatawavePrincipal; import datawave.webservice.query.exception.DatawaveErrorCode; import datawave.webservice.query.exception.QueryException; -import datawave.webservice.query.map.QueryGeometryHandler; -import datawave.webservice.query.map.QueryGeometryResponse; @Path("/Query/Metrics") @Produces({"application/xml", "text/xml", "application/json", "text/yaml", "text/x-yaml", "application/x-yaml", "text/html"}) @@ -107,6 +121,22 @@ public void updateMetric(BaseQueryMetric metric) throws Exception { } } + /* + * Used from HudBean to 1) ensure no redirect 2) return BaseQueryMetricListResponse + */ + public BaseQueryMetricListResponse query(String id) { + if (queryMetricsWriterConfiguration.getUseRemoteService()) { + return remoteQueryMetricService.id(id); + } else { + try { + return (BaseQueryMetricListResponse) query(id, null, null); + } catch (Exception e) { + log.error(e.getMessage(), e); + return null; + } + } + } + /** * Returns metrics for the current users queries that are identified by the id * @@ -124,9 +154,10 @@ public void updateMetric(BaseQueryMetric metric) throws Exception { @POST @Path("/id/{id}") @Interceptors({RequiredInterceptor.class, ResponseInterceptor.class}) - public BaseQueryMetricListResponse query(@PathParam("id") @Required("id") String id) { - if (queryMetricsWriterConfiguration.getUseRemoteService()) { - return remoteQueryMetricService.id(id); + public Object query(@PathParam("id") @Required("id") String id, @Context HttpServletRequest request, @Context UriInfo uriInfo) + throws TextParseException, URISyntaxException { + if (queryMetricsWriterConfiguration.getUseRemoteService() || isHtmlResponse(request)) { + return sendRedirect(String.format(ID_METRIC_SUFFIX, id), uriInfo); } else { // Find out who/what called this method DatawavePrincipal dp = null; @@ -144,11 +175,10 @@ public BaseQueryMetricListResponse query(@PathParam("id") @Required("id") String @POST @Path("/id/{id}/map") @Interceptors({RequiredInterceptor.class, ResponseInterceptor.class}) - public QueryGeometryResponse map(@PathParam("id") @Required("id") String id) { - if (queryMetricsWriterConfiguration.getUseRemoteService()) { - QueryGeometryResponse response = remoteQueryMetricService.map(id); - response.setBasemaps(this.basemaps); - return response; + public Object map(@PathParam("id") @Required("id") String id, @Context HttpServletRequest request, @Context UriInfo uriInfo) + throws TextParseException, URISyntaxException { + if (queryMetricsWriterConfiguration.getUseRemoteService() || isHtmlResponse(request)) { + return sendRedirect(String.format(MAP_METRIC_SUFFIX, id), uriInfo); } else { // Find out who/what called this method DatawavePrincipal dp = null; @@ -162,6 +192,18 @@ public QueryGeometryResponse map(@PathParam("id") @Required("id") String id) { } } + /* + * Used from HudBean to 1) ensure no redirect 2) return QueryMetricsSummaryResponse + */ + public QueryMetricsSummaryResponse getQueryMetricsSummary(Date begin, Date end) { + try { + return (QueryMetricsSummaryResponse) getQueryMetricsSummary(begin, end, null, null); + } catch (Exception e) { + log.error(e.getMessage(), e); + return null; + } + } + /** * * Returns a summary of the query metrics @@ -182,10 +224,11 @@ public QueryGeometryResponse map(@PathParam("id") @Required("id") String id) { @Path("/summary/all") @Interceptors(ResponseInterceptor.class) @RolesAllowed({"Administrator", 
"MetricsAdministrator"}) - public QueryMetricsSummaryResponse getQueryMetricsSummary(@QueryParam("begin") @DateFormat(defaultTime = "000000", defaultMillisec = "000") Date begin, - @QueryParam("end") @DateFormat(defaultTime = "235959", defaultMillisec = "999") Date end) { - if (queryMetricsWriterConfiguration.getUseRemoteService()) { - return remoteQueryMetricService.summaryAll(begin, end); + public Object getQueryMetricsSummary(@QueryParam("begin") @DateFormat(defaultTime = "000000", defaultMillisec = "000") Date begin, + @QueryParam("end") @DateFormat(defaultTime = "235959", defaultMillisec = "999") Date end, @Context HttpServletRequest request, + @Context UriInfo uriInfo) throws TextParseException, URISyntaxException { + if (queryMetricsWriterConfiguration.getUseRemoteService() || isHtmlResponse(request)) { + return sendRedirect(SUMMARY_ALL_SUFFIX, uriInfo); } else { return queryMetricsSummary(begin, end, false); } @@ -212,14 +255,10 @@ public QueryMetricsSummaryResponse getQueryMetricsSummary(@QueryParam("begin") @ @Path("/summary") @Interceptors(ResponseInterceptor.class) @RolesAllowed({"Administrator", "MetricsAdministrator"}) - public QueryMetricsSummaryResponse getQueryMetricsSummaryDeprecated1( - @QueryParam("begin") @DateFormat(defaultTime = "000000", defaultMillisec = "000") Date begin, - @QueryParam("end") @DateFormat(defaultTime = "235959", defaultMillisec = "999") Date end) { - if (queryMetricsWriterConfiguration.getUseRemoteService()) { - return remoteQueryMetricService.summaryAll(begin, end); - } else { - return queryMetricsSummary(begin, end, false); - } + public Object getQueryMetricsSummaryDeprecated1(@QueryParam("begin") @DateFormat(defaultTime = "000000", defaultMillisec = "000") Date begin, + @QueryParam("end") @DateFormat(defaultTime = "235959", defaultMillisec = "999") Date end, @Context HttpServletRequest request, + @Context UriInfo uriInfo) throws TextParseException, URISyntaxException { + return getQueryMetricsSummary(begin, end, request, uriInfo); } /** @@ -243,14 +282,10 @@ public QueryMetricsSummaryResponse getQueryMetricsSummaryDeprecated1( @Path("/summaryCounts") @Interceptors(ResponseInterceptor.class) @RolesAllowed({"Administrator", "MetricsAdministrator"}) - public QueryMetricsSummaryResponse getQueryMetricsSummaryDeprecated2( - @QueryParam("begin") @DateFormat(defaultTime = "000000", defaultMillisec = "000") Date begin, - @QueryParam("end") @DateFormat(defaultTime = "235959", defaultMillisec = "999") Date end) { - if (queryMetricsWriterConfiguration.getUseRemoteService()) { - return remoteQueryMetricService.summaryAll(begin, end); - } else { - return queryMetricsSummary(begin, end, false); - } + public Object getQueryMetricsSummaryDeprecated2(@QueryParam("begin") @DateFormat(defaultTime = "000000", defaultMillisec = "000") Date begin, + @QueryParam("end") @DateFormat(defaultTime = "235959", defaultMillisec = "999") Date end, @Context HttpServletRequest request, + @Context UriInfo uriInfo) throws TextParseException, URISyntaxException { + return getQueryMetricsSummary(begin, end, request, uriInfo); } /** @@ -272,10 +307,11 @@ public QueryMetricsSummaryResponse getQueryMetricsSummaryDeprecated2( @GET @Path("/summary/user") @Interceptors(ResponseInterceptor.class) - public QueryMetricsSummaryResponse getQueryMetricsUserSummary(@QueryParam("begin") @DateFormat(defaultTime = "000000", defaultMillisec = "000") Date begin, - @QueryParam("end") @DateFormat(defaultTime = "235959", defaultMillisec = "999") Date end) { - if 
(queryMetricsWriterConfiguration.getUseRemoteService()) { - return remoteQueryMetricService.summaryUser(begin, end); + public Object getQueryMetricsUserSummary(@QueryParam("begin") @DateFormat(defaultTime = "000000", defaultMillisec = "000") Date begin, + @QueryParam("end") @DateFormat(defaultTime = "235959", defaultMillisec = "999") Date end, @Context HttpServletRequest request, + @Context UriInfo uriInfo) throws TextParseException, URISyntaxException { + if (queryMetricsWriterConfiguration.getUseRemoteService() || isHtmlResponse(request)) { + return sendRedirect(SUMMARY_USER_SUFFIX, uriInfo); } else { return queryMetricsSummary(begin, end, true); } @@ -301,14 +337,10 @@ public QueryMetricsSummaryResponse getQueryMetricsUserSummary(@QueryParam("begin @GET @Path("/summaryCounts/user") @Interceptors(ResponseInterceptor.class) - public QueryMetricsSummaryResponse getQueryMetricsUserSummaryDeprecated( - @QueryParam("begin") @DateFormat(defaultTime = "000000", defaultMillisec = "000") Date begin, - @QueryParam("end") @DateFormat(defaultTime = "235959", defaultMillisec = "999") Date end) { - if (queryMetricsWriterConfiguration.getUseRemoteService()) { - return remoteQueryMetricService.summaryUser(begin, end); - } else { - return queryMetricsSummary(begin, end, true); - } + public Object getQueryMetricsUserSummaryDeprecated(@QueryParam("begin") @DateFormat(defaultTime = "000000", defaultMillisec = "000") Date begin, + @QueryParam("end") @DateFormat(defaultTime = "235959", defaultMillisec = "999") Date end, @Context HttpServletRequest request, + @Context UriInfo uriInfo) throws TextParseException, URISyntaxException { + return getQueryMetricsUserSummary(begin, end, request, uriInfo); } private QueryMetricsSummaryResponse queryMetricsSummary(Date begin, Date end, boolean onlyCurrentUser) { @@ -356,4 +388,18 @@ private DatawavePrincipal getPrincipal() { } return dp; } + + private boolean isHtmlResponse(HttpServletRequest request) { + if (request == null) { + return false; + } else { + return MediaType.parseMediaTypes(request.getHeader(HttpHeaders.ACCEPT)).contains(MediaType.TEXT_HTML); + } + } + + private Response sendRedirect(String suffix, UriInfo uriInfo) throws TextParseException, URISyntaxException { + URIBuilder builder = remoteQueryMetricService.buildRedirectURI(suffix, uriInfo.getBaseUri()); + uriInfo.getQueryParameters().forEach((pname, valueList) -> valueList.forEach(pvalue -> builder.addParameter(pname, pvalue))); + return Response.temporaryRedirect(builder.build()).build(); + } } diff --git a/web-services/query/src/main/java/datawave/webservice/query/metric/QueryMetricsWriter.java b/web-services/query/src/main/java/datawave/webservice/query/metric/QueryMetricsWriter.java index e011bb4a589..daf82f4b24e 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/metric/QueryMetricsWriter.java +++ b/web-services/query/src/main/java/datawave/webservice/query/metric/QueryMetricsWriter.java @@ -39,6 +39,7 @@ import datawave.configuration.DatawaveEmbeddedProjectStageHolder; import datawave.configuration.RefreshEvent; import datawave.configuration.spring.SpringBean; +import datawave.core.query.metric.QueryMetricHandler; import datawave.metrics.remote.RemoteQueryMetricService; import datawave.microservice.querymetric.BaseQueryMetric; import datawave.microservice.querymetric.BaseQueryMetric.Lifecycle; @@ -183,8 +184,10 @@ public void addMetricToQueue(QueryMetricHolder queryMetricHolder) { * Poll the blocking queue for new query metric updates until the number of updates reached 
batchSize or maxLatency is reached. * * @param batchSize + * the size of the batch * @param maxLatency - * @return + * maximum latency + * @return list of query metric holders */ private List getMetricsFromQueue(int batchSize, long maxLatency) { List metricHolderList = new ArrayList<>(); @@ -322,6 +325,7 @@ public void run() { * Process query metric updates using either the RemoteQueryMetricService or the ShardTableQueryMetricHandler * * @param metricHolderList + * query metric updates */ private void processQueryMetrics(List metricHolderList) { if (!metricHolderList.isEmpty()) { @@ -337,6 +341,7 @@ private void processQueryMetrics(List metricHolderList) { * Process query metric updates using the RemoteQueryMetricService * * @param metricHolderList + * query metric updates */ private void processQueryMetricsWithRemoteService(List metricHolderList) { List metricList = metricHolderList.stream().map(QueryMetricHolder::getQueryMetric).collect(Collectors.toList()); @@ -358,6 +363,7 @@ private void processQueryMetricsWithRemoteService(List metric * Process query metric updates using the ShardTableQueryMetricHandler * * @param metricHolderList + * query metric updates */ private void processQueryMetricsWithHandler(List metricHolderList) { List currentFailures = new ArrayList<>(); @@ -392,7 +398,8 @@ private void processQueryMetricsWithHandler(List metricHolder * Attempt to send metrics that previously failed to send * * @param failedMetrics - * @return + * list of metrics that failed + * @return true if the failed metrics were successfully written */ private boolean writeFailedMetrics(List failedMetrics) { Iterator itr = failedMetrics.iterator(); @@ -426,6 +433,7 @@ private boolean writeFailedMetrics(List failedMetrics) { * Determine if we should discard failed query metric updates or keep retrying * * @param anySuccessful + * whether any of the failed metric updates were sent successfully */ private void processFailedMetricList(boolean anySuccessful) { long discardForFailureCount = 0; @@ -467,7 +475,9 @@ private void processFailedMetricList(boolean anySuccessful) { * Wraps the sending of query metric updates to the RemoteQueryMetricService Failure is indicated by throwing an Exception * * @param updatedMetrics + * updated metrics * @throws Exception + * when problems arise */ private void writeMetricsToRemoteService(List updatedMetrics) throws Exception { if (!updatedMetrics.isEmpty()) { @@ -483,8 +493,10 @@ private void writeMetricsToRemoteService(List updatedMetrics) t * Wraps the sending of query metric updates to the ShardTableQueryMetricHandler Failure is indicated by returning a list of failed query metric updates * * @param queryMetricHandler + * metric handler * @param metricQueue - * @return + * list of metrics to process + * @return list of failed query metrics */ private List writeMetricsToHandler(QueryMetricHandler queryMetricHandler, List metricQueue) { List failedMetrics = new ArrayList<>(); @@ -511,6 +523,7 @@ private List writeMetricsToHandler(QueryMetricHandler queryMe * service handles this for the path that uses the RemoteQueryMetricService * * @param queryMetric + * query metric */ private synchronized void sendMetricsToTimely(BaseQueryMetric queryMetric) { diff --git a/web-services/query/src/main/java/datawave/webservice/query/predicate/AuthorizationsPredicate.java b/web-services/query/src/main/java/datawave/webservice/query/predicate/AuthorizationsPredicate.java deleted file mode 100644 index c90f0b69ebd..00000000000 --- a/web-services/query/src/main/java/datawave/webservice/query/predicate/AuthorizationsPredicate.java +++ /dev/null
@@ -1,53 +0,0 @@ -package datawave.webservice.query.predicate; - -import java.util.function.Predicate; - -import org.apache.accumulo.core.security.Authorizations; -import org.apache.accumulo.core.security.ColumnVisibility; -import org.apache.accumulo.core.security.VisibilityEvaluator; -import org.apache.accumulo.core.security.VisibilityParseException; - -/** - * This is a predicate that will test the auths against a specified visibility (as defined by accumulo's ColumnVisibility). In addition to the visibility, one - * can specify that only the first of the authorizations is matched (presumably the user). - */ -public class AuthorizationsPredicate implements Predicate { - - // A visibility string to be matched against the auths being used for the query - private ColumnVisibility visibility; - - public AuthorizationsPredicate() {} - - public AuthorizationsPredicate(String visibility) { - setVisibility(visibility); - } - - @Override - public boolean test(Authorizations auths) { - // match the visibility against the auths. - ColumnVisibility vis = getVisibility(); - VisibilityEvaluator ve = new VisibilityEvaluator(auths); - try { - return (ve.evaluate(vis)); - } catch (VisibilityParseException e) { - throw new RuntimeException(e); - } - } - - public ColumnVisibility getVisibility() { - return visibility; - } - - public void setVisibility(ColumnVisibility visibility) { - this.visibility = visibility; - } - - public void setVisibility(String visibility) { - setVisibility(new ColumnVisibility(visibility)); - } - - @Override - public String toString() { - return "(auths =~ " + visibility + ')'; - } -} diff --git a/web-services/query/src/main/java/datawave/webservice/query/remote/RemoteQueryServiceImpl.java b/web-services/query/src/main/java/datawave/webservice/query/remote/RemoteQueryServiceImpl.java index e0c5741f41d..0e354bac945 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/remote/RemoteQueryServiceImpl.java +++ b/web-services/query/src/main/java/datawave/webservice/query/remote/RemoteQueryServiceImpl.java @@ -22,10 +22,11 @@ import com.fasterxml.jackson.databind.ObjectReader; +import datawave.core.query.remote.RemoteQueryService; import datawave.security.auth.DatawaveAuthenticationMechanism; import datawave.security.authorization.DatawavePrincipal; +import datawave.security.authorization.ProxiedUserDetails; import datawave.webservice.common.remote.RemoteHttpService; -import datawave.webservice.common.remote.RemoteQueryService; import datawave.webservice.result.BaseQueryResponse; import datawave.webservice.result.GenericResponse; import datawave.webservice.result.VoidResponse; @@ -50,10 +51,12 @@ public class RemoteQueryServiceImpl extends RemoteHttpService implements RemoteQ private ObjectReader baseQueryResponseReader; - private ObjectReader eventQueryResponseReader; + private ObjectReader nextQueryResponseReader; private boolean initialized = false; + private Class nextQueryResponseClass; + @Override @PostConstruct public void init() { @@ -61,22 +64,26 @@ public void init() { super.init(); genericResponseReader = objectMapper.readerFor(GenericResponse.class); baseQueryResponseReader = objectMapper.readerFor(BaseQueryResponse.class); - eventQueryResponseReader = objectMapper.readerFor(responseObjectFactory.getEventQueryResponse().getClass()); + if (nextQueryResponseClass == null) { + nextQueryResponseReader = objectMapper.readerFor(responseObjectFactory.getEventQueryResponse().getClass()); + } else { + nextQueryResponseReader = 
objectMapper.readerFor(nextQueryResponseClass); + } initialized = true; } } @Override - public GenericResponse createQuery(String queryLogicName, Map> queryParameters, Object callerObject) { + public GenericResponse createQuery(String queryLogicName, Map> queryParameters, ProxiedUserDetails callerObject) { return query(CREATE, queryLogicName, queryParameters, callerObject); } @Override - public GenericResponse planQuery(String queryLogicName, Map> queryParameters, Object callerObject) { + public GenericResponse planQuery(String queryLogicName, Map> queryParameters, ProxiedUserDetails callerObject) { return query(PLAN, queryLogicName, queryParameters, callerObject); } - private GenericResponse query(String endPoint, String queryLogicName, Map> queryParameters, Object callerObject) { + private GenericResponse query(String endPoint, String queryLogicName, Map> queryParameters, ProxiedUserDetails callerObject) { init(); final DatawavePrincipal principal = getDatawavePrincipal(callerObject); @@ -112,7 +119,7 @@ private GenericResponse query(String endPoint, String queryLogicName, Ma } @Override - public BaseQueryResponse next(String id, Object callerObject) { + public BaseQueryResponse next(String id, ProxiedUserDetails callerObject) { init(); final DatawavePrincipal principal = getDatawavePrincipal(callerObject); @@ -122,12 +129,12 @@ public BaseQueryResponse next(String id, Object callerObject) { httpGet.setHeader(PROXIED_ENTITIES_HEADER, getProxiedEntities(principal)); httpGet.setHeader(PROXIED_ISSUERS_HEADER, getProxiedIssuers(principal)); }, entity -> { - return readResponse(entity, eventQueryResponseReader, baseQueryResponseReader); + return readResponse(entity, nextQueryResponseReader, baseQueryResponseReader); }, () -> suffix); } @Override - public VoidResponse close(String id, Object callerObject) { + public VoidResponse close(String id, ProxiedUserDetails callerObject) { init(); final DatawavePrincipal principal = getDatawavePrincipal(callerObject); @@ -142,7 +149,7 @@ public VoidResponse close(String id, Object callerObject) { } @Override - public GenericResponse planQuery(String id, Object callerObject) { + public GenericResponse planQuery(String id, ProxiedUserDetails callerObject) { init(); final DatawavePrincipal principal = getDatawavePrincipal(callerObject); @@ -168,11 +175,18 @@ public URI getQueryMetricsURI(String id) { } - private DatawavePrincipal getDatawavePrincipal(Object callerObject) { + private DatawavePrincipal getDatawavePrincipal(ProxiedUserDetails callerObject) { if (callerObject instanceof DatawavePrincipal) { return (DatawavePrincipal) callerObject; } throw new RuntimeException("Cannot handle a " + callerObject.getClass() + ". 
Only DatawavePrincipal is accepted"); } + public Class getNextQueryResponseClass() { + return nextQueryResponseClass; + } + + public void setNextQueryResponseClass(Class nextQueryResponseClass) { + this.nextQueryResponseClass = nextQueryResponseClass; + } } diff --git a/web-services/query/src/main/java/datawave/webservice/query/runner/AccumuloConnectionRequestBean.java b/web-services/query/src/main/java/datawave/webservice/query/runner/AccumuloConnectionRequestBean.java index 0be1a28c60c..8d2c794a541 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/runner/AccumuloConnectionRequestBean.java +++ b/web-services/query/src/main/java/datawave/webservice/query/runner/AccumuloConnectionRequestBean.java @@ -1,76 +1,29 @@ package datawave.webservice.query.runner; -import java.security.Principal; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - import javax.annotation.Resource; import javax.ejb.EJBContext; import javax.inject.Singleton; -import org.apache.accumulo.core.util.Pair; import org.apache.log4j.Logger; +import datawave.core.query.runner.AccumuloConnectionRequestMap; + /** * For storing a map of queryId to Thread that is requesting an AccumuloConnection */ @Singleton // CDI singleton -public class AccumuloConnectionRequestBean { +public class AccumuloConnectionRequestBean extends AccumuloConnectionRequestMap { private static Logger log = Logger.getLogger(AccumuloConnectionRequestBean.class); @Resource private EJBContext ctx; - private Map> getConnectionThreadMap = new ConcurrentHashMap<>(); + private AccumuloConnectionRequestMap getConnectionThreadMap = new AccumuloConnectionRequestMap(); public boolean cancelConnectionRequest(String id) { - return cancelConnectionRequest(id, ctx.getCallerPrincipal()); + return cancelConnectionRequest(id, ctx.getCallerPrincipal().getName()); } - public boolean cancelConnectionRequest(String id, Principal principal) { - // this call checks that the Principal used for the connection request and th connection cancel are the same - // if query is waiting for an accumulo connection in create or reset, then interrupt it - boolean connectionRequestCanceled = false; - try { - Pair connectionRequestPair = getConnectionThreadMap.get(id); - if (connectionRequestPair != null) { - String connectionRequestPrincipalName = principal.getName(); - String connectionCancelPrincipalName = connectionRequestPair.getFirst().getName(); - if (connectionRequestPrincipalName.equals(connectionCancelPrincipalName)) { - connectionRequestPair.getSecond().interrupt(); - connectionRequestCanceled = true; - } - } - } catch (Exception e) { - log.error(e.getMessage(), e); - } - return connectionRequestCanceled; - } - - public boolean adminCancelConnectionRequest(String id) { - // it is assumed that admin status is already checked, so this call does not check the calling Principals - // if query is waiting for an accumulo connection in create or reset, then interrupt it - boolean connectionRequestCanceled = false; - try { - Pair connectionRequestPair = getConnectionThreadMap.get(id); - if (connectionRequestPair != null) { - connectionRequestPair.getSecond().interrupt(); - connectionRequestCanceled = true; - } - } catch (Exception e) { - log.error(e.getMessage(), e); - } - return connectionRequestCanceled; - } - - public void requestBegin(String id) { - Pair connectionRequestPair = new Pair<>(ctx.getCallerPrincipal(), Thread.currentThread()); - getConnectionThreadMap.put(id, connectionRequestPair); - } - - public void requestEnd(String id) { - 
getConnectionThreadMap.remove(id); - } } diff --git a/web-services/query/src/main/java/datawave/webservice/query/runner/BasicQueryBean.java b/web-services/query/src/main/java/datawave/webservice/query/runner/BasicQueryBean.java index 02058f9d6c9..4cba1a8b22b 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/runner/BasicQueryBean.java +++ b/web-services/query/src/main/java/datawave/webservice/query/runner/BasicQueryBean.java @@ -1,7 +1,6 @@ package datawave.webservice.query.runner; import java.lang.reflect.Method; -import java.security.Principal; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -44,19 +43,18 @@ import datawave.annotation.GenerateQuerySessionId; import datawave.annotation.Required; import datawave.configuration.DatawaveEmbeddedProjectStageHolder; -import datawave.configuration.spring.SpringBean; +import datawave.core.query.logic.BaseQueryLogic; +import datawave.core.query.logic.QueryLogic; +import datawave.core.query.logic.QueryLogicFactory; import datawave.interceptor.RequiredInterceptor; import datawave.interceptor.ResponseInterceptor; +import datawave.microservice.query.Query; import datawave.resteasy.interceptor.CreateQuerySessionIDFilter; import datawave.security.authorization.AuthorizationException; import datawave.security.authorization.DatawavePrincipal; import datawave.security.authorization.UserOperations; import datawave.security.util.WSAuthorizationsUtil; -import datawave.webservice.query.Query; import datawave.webservice.query.exception.QueryException; -import datawave.webservice.query.logic.BaseQueryLogic; -import datawave.webservice.query.logic.QueryLogic; -import datawave.webservice.query.logic.QueryLogicFactory; import datawave.webservice.query.result.event.ResponseObjectFactory; import datawave.webservice.query.result.logic.QueryLogicDescription; import datawave.webservice.result.BaseQueryResponse; @@ -109,7 +107,6 @@ public class BasicQueryBean { private SessionContext sessionContext; @Inject - @SpringBean(name = "ResponseObjectFactory") private ResponseObjectFactory responseObjectFactory; @PostConstruct @@ -166,10 +163,10 @@ public QueryWizardStep1Response showQueryWizardStep1() { if (exampleQueries != null) { d.setExampleQueries(new ArrayList<>(exampleQueries)); } - Set requiredRoles = l.getRoleManager().getRequiredRoles(); + Set requiredRoles = l.getRequiredRoles(); if (requiredRoles != null) { List requiredRolesList = new ArrayList<>(); - requiredRolesList.addAll(l.getRoleManager().getRequiredRoles()); + requiredRolesList.addAll(l.getRequiredRoles()); d.setRequiredRoles(requiredRolesList); } @@ -265,10 +262,10 @@ public QueryWizardStep2Response showQueryWizardStep2(MultivaluedMap(exampleQueries)); } - Set requiredRoles = l.getRoleManager().getRequiredRoles(); + Set requiredRoles = l.getRequiredRoles(); if (requiredRoles != null) { List requiredRolesList = new ArrayList<>(); - requiredRolesList.addAll(l.getRoleManager().getRequiredRoles()); + requiredRolesList.addAll(l.getRequiredRoles()); d.setRequiredRoles(requiredRolesList); } @@ -307,8 +304,8 @@ public QueryWizardStep2Response showQueryWizardStep2(MultivaluedMap logic = queryLogicFactory.getQueryLogic(logicName, principal); + QueryLogic logic = queryLogicFactory.getQueryLogic(logicName, (DatawavePrincipal) principal); // the query principal is our local principal unless the query logic has a different user operations - DatawavePrincipal queryPrincipal = (logic.getUserOperations() == null) ? 
(DatawavePrincipal) principal - : logic.getUserOperations().getRemoteUser((DatawavePrincipal) principal); + DatawavePrincipal queryPrincipal = (DatawavePrincipal) ((logic.getUserOperations() == null) ? principal + : logic.getUserOperations().getRemoteUser((DatawavePrincipal) principal)); userAuths = WSAuthorizationsUtil.buildUserAuthorizationString(queryPrincipal); } catch (Exception e) { log.error("Failed to get user query authorizations", e); diff --git a/web-services/query/src/main/java/datawave/webservice/query/runner/NoOpQueryPredictor.java b/web-services/query/src/main/java/datawave/webservice/query/runner/NoOpQueryPredictor.java deleted file mode 100644 index 20f0435b71d..00000000000 --- a/web-services/query/src/main/java/datawave/webservice/query/runner/NoOpQueryPredictor.java +++ /dev/null @@ -1,14 +0,0 @@ -package datawave.webservice.query.runner; - -import java.util.Set; - -import datawave.microservice.querymetric.BaseQueryMetric; -import datawave.microservice.querymetric.BaseQueryMetric.Prediction; - -public class NoOpQueryPredictor implements QueryPredictor { - - @Override - public Set predict(BaseQueryMetric query) throws PredictionException { - return null; - } -} diff --git a/web-services/query/src/main/java/datawave/webservice/query/runner/QueryExecutor.java b/web-services/query/src/main/java/datawave/webservice/query/runner/QueryExecutor.java index 2f3a6e1ccc6..56ecc391850 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/runner/QueryExecutor.java +++ b/web-services/query/src/main/java/datawave/webservice/query/runner/QueryExecutor.java @@ -7,7 +7,7 @@ import javax.ws.rs.core.StreamingOutput; import javax.ws.rs.core.UriInfo; -import datawave.webservice.query.QueryPersistence; +import datawave.microservice.query.QueryPersistence; import datawave.webservice.result.BaseQueryResponse; import datawave.webservice.result.GenericResponse; import datawave.webservice.result.QueryImplListResponse; diff --git a/web-services/query/src/main/java/datawave/webservice/query/runner/QueryExecutorBean.java b/web-services/query/src/main/java/datawave/webservice/query/runner/QueryExecutorBean.java index 6ecbdfc1a1d..ddb81dc9a7d 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/runner/QueryExecutorBean.java +++ b/web-services/query/src/main/java/datawave/webservice/query/runner/QueryExecutorBean.java @@ -72,11 +72,13 @@ import org.apache.accumulo.core.client.AccumuloClient; import org.apache.accumulo.core.security.Authorizations; import org.apache.accumulo.core.util.Pair; -import org.apache.commons.jexl2.parser.TokenMgrError; +import org.apache.commons.jexl3.parser.TokenMgrException; import org.apache.deltaspike.core.api.exclude.Exclude; import org.apache.log4j.Logger; import org.jboss.resteasy.annotations.GZIP; import org.jboss.resteasy.specimpl.MultivaluedMapImpl; +import org.springframework.util.LinkedMultiValueMap; +import org.springframework.util.MultiValueMap; import org.springframework.util.StringUtils; import com.codahale.metrics.annotation.Timed; @@ -87,7 +89,6 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.introspect.JacksonAnnotationIntrospector; import com.fasterxml.jackson.module.jaxb.JaxbAnnotationIntrospector; -import com.google.common.base.Throwables; import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; import com.google.common.io.CountingOutputStream; @@ -98,9 +99,23 @@ import datawave.annotation.Required; import 
datawave.configuration.DatawaveEmbeddedProjectStageHolder; import datawave.configuration.spring.SpringBean; +import datawave.core.common.audit.PrivateAuditConstants; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.query.cache.ResultsPage; +import datawave.core.query.logic.QueryLogic; +import datawave.core.query.logic.QueryLogicFactory; +import datawave.core.query.logic.QueryLogicTransformer; +import datawave.core.query.predict.QueryPredictor; +import datawave.core.query.util.QueryUtil; import datawave.interceptor.RequiredInterceptor; import datawave.interceptor.ResponseInterceptor; import datawave.marking.SecurityMarking; +import datawave.microservice.query.Query; +import datawave.microservice.query.QueryImpl; +import datawave.microservice.query.QueryImpl.Parameter; +import datawave.microservice.query.QueryParameters; +import datawave.microservice.query.QueryPersistence; +import datawave.microservice.query.config.QueryExpirationProperties; import datawave.microservice.querymetric.BaseQueryMetric; import datawave.microservice.querymetric.BaseQueryMetric.PageMetric; import datawave.microservice.querymetric.BaseQueryMetric.Prediction; @@ -114,8 +129,6 @@ import datawave.webservice.common.audit.AuditBean; import datawave.webservice.common.audit.AuditParameters; import datawave.webservice.common.audit.Auditor.AuditType; -import datawave.webservice.common.audit.PrivateAuditConstants; -import datawave.webservice.common.connection.AccumuloConnectionFactory; import datawave.webservice.common.exception.BadRequestException; import datawave.webservice.common.exception.DatawaveWebApplicationException; import datawave.webservice.common.exception.NoResultsException; @@ -123,18 +136,11 @@ import datawave.webservice.common.exception.PreConditionFailedException; import datawave.webservice.common.exception.QueryCanceledException; import datawave.webservice.common.exception.UnauthorizedException; -import datawave.webservice.query.Query; -import datawave.webservice.query.QueryImpl; -import datawave.webservice.query.QueryImpl.Parameter; -import datawave.webservice.query.QueryParameters; -import datawave.webservice.query.QueryPersistence; import datawave.webservice.query.annotation.EnrichQueryMetrics; import datawave.webservice.query.cache.ClosedQueryCache; import datawave.webservice.query.cache.CreatedQueryLogicCacheBean; import datawave.webservice.query.cache.QueryCache; -import datawave.webservice.query.cache.QueryExpirationConfiguration; import datawave.webservice.query.cache.QueryTraceCache; -import datawave.webservice.query.cache.ResultsPage; import datawave.webservice.query.cache.RunningQueryTimingImpl; import datawave.webservice.query.configuration.LookupUUIDConfiguration; import datawave.webservice.query.exception.BadRequestQueryException; @@ -145,18 +151,15 @@ import datawave.webservice.query.exception.QueryException; import datawave.webservice.query.exception.UnauthorizedQueryException; import datawave.webservice.query.factory.Persister; -import datawave.webservice.query.logic.QueryLogic; -import datawave.webservice.query.logic.QueryLogicFactory; -import datawave.webservice.query.logic.QueryLogicTransformer; import datawave.webservice.query.metric.QueryMetricsBean; import datawave.webservice.query.result.event.ResponseObjectFactory; import datawave.webservice.query.result.logic.QueryLogicDescription; import datawave.webservice.query.util.GetUUIDCriteria; import datawave.webservice.query.util.LookupUUIDUtil; +import datawave.webservice.query.util.MapUtils; 
import datawave.webservice.query.util.NextContentCriteria; import datawave.webservice.query.util.PostUUIDCriteria; import datawave.webservice.query.util.QueryUncaughtExceptionHandler; -import datawave.webservice.query.util.QueryUtil; import datawave.webservice.query.util.UIDQueryCriteria; import datawave.webservice.result.BaseQueryResponse; import datawave.webservice.result.BaseResponse; @@ -188,6 +191,7 @@ public class QueryExecutorBean implements QueryExecutor { */ public static final String EXPAND_VALUES = "expand.values"; public static final String EXPAND_FIELDS = "expand.fields"; + public static final String CONTEXT_PARAMETER = "context"; private final Logger log = Logger.getLogger(QueryExecutorBean.class); @@ -214,7 +218,7 @@ public class QueryExecutorBean implements QueryExecutor { @Inject @SpringBean(refreshable = true) - private QueryExpirationConfiguration queryExpirationConf; + private QueryExpirationProperties queryExpirationConf; @Inject private Persister persister; @@ -233,7 +237,6 @@ public class QueryExecutorBean implements QueryExecutor { private SecurityMarking marking; @Inject - @SpringBean(name = "ResponseObjectFactory") private ResponseObjectFactory responseObjectFactory; private LookupUUIDUtil lookupUUIDUtil; @@ -260,7 +263,7 @@ public class QueryExecutorBean implements QueryExecutor { private ClosedQueryCache closedQueryCache; private final int PAGE_TIMEOUT_MIN = 1; - private final int PAGE_TIMEOUT_MAX = QueryExpirationConfiguration.PAGE_TIMEOUT_MIN_DEFAULT; + private final int PAGE_TIMEOUT_MAX = 60; private final String UUID_REGEX_RULE = "[a-fA-F\\d-]+"; private final String INVALID_PAGESIZE = "page.size"; @@ -346,10 +349,10 @@ public QueryLogicResponse listQueryLogic() { if (exampleQueries != null) { d.setExampleQueries(new ArrayList<>(exampleQueries)); } - Set requiredRoles = l.getRoleManager().getRequiredRoles(); + Set requiredRoles = l.getRequiredRoles(); if (requiredRoles != null) { List requiredRolesList = new ArrayList<>(); - requiredRolesList.addAll(l.getRoleManager().getRequiredRoles()); + requiredRolesList.addAll(l.getRequiredRoles()); d.setRequiredRoles(requiredRolesList); } @@ -415,6 +418,32 @@ private void handleIncorrectPageSize() { throwBadRequest(DatawaveErrorCode.INVALID_PAGE_SIZE, response); } + /** + * Set up the caller data in the QueryData object + * + * @param p the caller principal + * @param qd the QueryData instance to populate + * @return qd, populated with the caller's identity data + */ + private QueryData setUserData(Principal p, QueryData qd) { + // Find out who/what called this method + qd.proxyServers = null; + qd.p = p; + qd.userDn = qd.p.getName(); + qd.userid = qd.userDn; + qd.dnList = Collections.singletonList(qd.userid); + if (qd.p instanceof DatawavePrincipal) { + DatawavePrincipal dp = (DatawavePrincipal) qd.p; + qd.userid = dp.getShortName(); + qd.userDn = dp.getUserDN().subjectDN(); + String[] dns = dp.getDNs(); + Arrays.sort(dns); + qd.dnList = Arrays.asList(dns); + qd.proxyServers = dp.getProxyServers(); + } + return qd; + } + /** * This method will provide some initial query validation for the define and create query calls. * @@ -437,7 +466,7 @@ private QueryData validateQuery(String queryLogicName, MultivaluedMap response = new GenericResponse<>(); - response.addException(qe); - throw new UnauthorizedException(qe, response); - } + // Verify that the calling principal has access to the query logic iff being called externally (i.e.
Principal instanceof DatawavePrincipal) + if (qd.p instanceof DatawavePrincipal && !qd.logic.containsDNWithAccess(qd.dnList)) { + UnauthorizedQueryException qe = new UnauthorizedQueryException("None of the DNs used have access to this query logic: " + qd.dnList, 401); + GenericResponse response = new GenericResponse<>(); + response.addException(qe); + throw new UnauthorizedException(qe, response); } log.trace(qd.userid + " has authorizations " + ((qd.p instanceof DatawavePrincipal) ? ((DatawavePrincipal) qd.p).getAuthorizations() : "")); @@ -552,12 +570,21 @@ private QueryData validateQuery(String queryLogicName, MultivaluedMap= 0) { - if (!ctx.isCallerInRole(PRIVILEGED_USER) || !ctx.isCallerInRole(UNLIMITED_QUERY_RESULTS_USER)) { - if (qp.getMaxResultsOverride() < 0 || (qd.logic.getMaxResults() < qp.getMaxResultsOverride())) { - log.error("Invalid max results override: " + qp.getMaxResultsOverride() + " vs " + qd.logic.getMaxResults()); + // Init a query instance in order to properly compute max results for the user... + // TODO: consider refactoring such that query init happens only once here via Persister, and cache in QueryData instance + MultivaluedMap optionalQueryParameters = new MultivaluedMapImpl<>(); + optionalQueryParameters.putAll(qp.getUnknownParameters(queryParameters)); + Query q = responseObjectFactory.getQueryImpl(); + q.initialize(qd.userDn, qd.dnList, queryLogicName, qp, optionalQueryParameters); + + long resultLimit = qd.logic.getResultLimit(q); + + // validate the user's max results override in the context of all currently configured overrides + // privileged users and unlimited max results users are exempt from limitations + if (qp.isMaxResultsOverridden() && resultLimit >= 0) { + if (!ctx.isCallerInRole(PRIVILEGED_USER) && !ctx.isCallerInRole(UNLIMITED_QUERY_RESULTS_USER)) { + if (qp.getMaxResultsOverride() < 0 || (resultLimit < qp.getMaxResultsOverride())) { + log.error("Invalid max results override: " + qp.getMaxResultsOverride() + " vs " + resultLimit); GenericResponse response = new GenericResponse<>(); throwBadRequest(DatawaveErrorCode.INVALID_MAX_RESULTS_OVERRIDE, response); } @@ -604,9 +631,8 @@ public GenericResponse defineQuery(@Required("logicName") @PathParam("lo // will not exist when reset is called. 
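The reworked validation above compares the user's max-results override against the per-query limit from logic.getResultLimit(q) rather than the logic's static maximum, and exempts privileged and unlimited-results users. A self-contained sketch of that decision logic, with the bean's fields reduced to plain parameters and the web-layer error response replaced by an exception for illustration:

// Condensed stand-in for the override validation in validateQuery; not the real bean
final class MaxResultsOverrideCheck {
    // resultLimit < 0 means unlimited results, so an override needs no validation
    static void validate(long resultLimit, boolean overridden, long override, boolean exemptUser) {
        if (overridden && resultLimit >= 0 && !exemptUser) {
            if (override < 0 || resultLimit < override) {
                // the bean logs this and throws INVALID_MAX_RESULTS_OVERRIDE instead
                throw new IllegalArgumentException("Invalid max results override: " + override + " vs " + resultLimit);
            }
        }
    }

    public static void main(String[] args) {
        validate(10_000L, true, 5_000L, false);  // accepted: override within the computed limit
        validate(10_000L, true, 50_000L, false); // throws: override exceeds the computed limit
    }
}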
RunningQuery rq; try { - MultivaluedMap optionalQueryParameters = new MultivaluedMapImpl<>(); - optionalQueryParameters.putAll(qp.getUnknownParameters(queryParameters)); - Query q = persister.create(qd.userDn, qd.dnList, marking, queryLogicName, qp, optionalQueryParameters); + Map> optionalQueryParameters = qp.getUnknownParameters(MapUtils.toMultiValueMap(queryParameters)); + Query q = persister.create(qd.userDn, qd.dnList, marking, queryLogicName, qp, MapUtils.toMultivaluedMap(optionalQueryParameters)); response.setResult(q.getId().toString()); boolean shouldTraceQuery = shouldTraceQuery(qp.getQuery(), qd.userid, false); if (shouldTraceQuery) { @@ -677,9 +703,8 @@ public GenericResponse createQuery(@Required("logicName") @PathParam("lo AuditType auditType = qd.logic.getAuditType(null); try { - MultivaluedMap optionalQueryParameters = new MultivaluedMapImpl<>(); - optionalQueryParameters.putAll(qp.getUnknownParameters(queryParameters)); - q = persister.create(qd.userDn, qd.dnList, marking, queryLogicName, qp, optionalQueryParameters); + Map> optionalQueryParameters = qp.getUnknownParameters(MapUtils.toMultiValueMap(queryParameters)); + q = persister.create(qd.userDn, qd.dnList, marking, queryLogicName, qp, MapUtils.toMultivaluedMap(optionalQueryParameters)); auditType = qd.logic.getAuditType(q); } finally { queryParameters.add(PrivateAuditConstants.AUDIT_TYPE, auditType.name()); @@ -699,7 +724,7 @@ public GenericResponse createQuery(@Required("logicName") @PathParam("lo if (!queryParameters.containsKey(AuditParameters.AUDIT_ID) && q != null) { queryParameters.putSingle(AuditParameters.AUDIT_ID, q.getId().toString()); } - auditor.audit(queryParameters); + auditor.audit(MapUtils.toMultiValueMap(queryParameters)); } catch (IllegalArgumentException e) { log.error("Error validating audit parameters", e); BadRequestQueryException qe = new BadRequestQueryException(DatawaveErrorCode.MISSING_REQUIRED_PARAMETER, e); @@ -717,9 +742,9 @@ public GenericResponse createQuery(@Required("logicName") @PathParam("lo priority = qd.logic.getConnectionPriority(); Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); q.populateTrackingMap(trackingMap); - accumuloConnectionRequestBean.requestBegin(q.getId().toString()); + accumuloConnectionRequestBean.requestBegin(q.getId().toString(), qd.userDn, trackingMap); try { - client = connectionFactory.getClient(qd.logic.getConnPoolName(), priority, trackingMap); + client = connectionFactory.getClient(qd.userDn, qd.proxyServers, qd.logic.getConnPoolName(), priority, trackingMap); } finally { accumuloConnectionRequestBean.requestEnd(q.getId().toString()); } @@ -780,7 +805,7 @@ public GenericResponse createQuery(@Required("logicName") @PathParam("lo /* * Allow web services to throw their own WebApplicationExceptions */ - if (t instanceof Error && !(t instanceof TokenMgrError)) { + if (t instanceof Error && !(t instanceof TokenMgrException)) { log.error(queryId + ": " + t.getMessage(), t); throw (Error) t; } else if (t instanceof WebApplicationException) { @@ -848,9 +873,8 @@ public GenericResponse planQuery(@Required("logicName") @PathParam("logi AuditType auditType = qd.logic.getAuditType(null); try { - MultivaluedMap optionalQueryParameters = new MultivaluedMapImpl<>(); - optionalQueryParameters.putAll(qp.getUnknownParameters(queryParameters)); - q = persister.create(qd.userDn, qd.dnList, marking, queryLogicName, qp, optionalQueryParameters); + Map> optionalQueryParameters = 
qp.getUnknownParameters(MapUtils.toMultiValueMap(queryParameters)); + q = persister.create(qd.userDn, qd.dnList, marking, queryLogicName, qp, MapUtils.toMultivaluedMap(optionalQueryParameters)); auditType = qd.logic.getAuditType(q); } finally { queryParameters.add(PrivateAuditConstants.AUDIT_TYPE, auditType.name()); @@ -871,7 +895,7 @@ public GenericResponse planQuery(@Required("logicName") @PathParam("logi if (!queryParameters.containsKey(AuditParameters.AUDIT_ID)) { queryParameters.putSingle(AuditParameters.AUDIT_ID, q.getId().toString()); } - auditor.audit(queryParameters); + auditor.audit(MapUtils.toMultiValueMap(queryParameters)); } catch (IllegalArgumentException e) { log.error("Error validating audit parameters", e); BadRequestQueryException qe = new BadRequestQueryException(DatawaveErrorCode.MISSING_REQUIRED_PARAMETER, e); @@ -889,9 +913,9 @@ public GenericResponse planQuery(@Required("logicName") @PathParam("logi priority = qd.logic.getConnectionPriority(); Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); q.populateTrackingMap(trackingMap); - accumuloConnectionRequestBean.requestBegin(q.getId().toString()); + accumuloConnectionRequestBean.requestBegin(q.getId().toString(), qd.userDn, trackingMap); try { - client = connectionFactory.getClient(qd.logic.getConnPoolName(), priority, trackingMap); + client = connectionFactory.getClient(qd.userDn, qd.proxyServers, qd.logic.getConnPoolName(), priority, trackingMap); } finally { accumuloConnectionRequestBean.requestEnd(q.getId().toString()); } @@ -902,10 +926,10 @@ public GenericResponse planQuery(@Required("logicName") @PathParam("logi } else { qd.logic.preInitialize(q, WSAuthorizationsUtil.buildAuthorizations(null)); } - DatawavePrincipal queryPrincipal = (qd.logic.getUserOperations() == null) ? (DatawavePrincipal) qd.p - : qd.logic.getUserOperations().getRemoteUser((DatawavePrincipal) qd.p); + DatawavePrincipal queryPrincipal = (DatawavePrincipal) ((qd.logic.getUserOperations() == null) ? 
qd.p + : qd.logic.getUserOperations().getRemoteUser((DatawavePrincipal) qd.p)); // the overall principal (the one with combined auths across remote user operations) is our own user operations bean - DatawavePrincipal overallPrincipal = userOperationsBean.getRemoteUser((DatawavePrincipal) qd.p); + DatawavePrincipal overallPrincipal = (DatawavePrincipal) userOperationsBean.getRemoteUser((DatawavePrincipal) qd.p); Set calculatedAuths = WSAuthorizationsUtil.getDowngradedAuthorizations(qp.getAuths(), overallPrincipal, queryPrincipal); String plan = qd.logic.getPlan(client, q, calculatedAuths, expandFields, expandValues); response.setResult(plan); @@ -917,7 +941,7 @@ public GenericResponse planQuery(@Required("logicName") @PathParam("logi /* * Allow web services to throw their own WebApplicationExceptions */ - if (t instanceof Error && !(t instanceof TokenMgrError)) { + if (t instanceof Error && !(t instanceof TokenMgrException)) { log.error(t.getMessage(), t); throw (Error) t; } else if (t instanceof WebApplicationException) { @@ -983,9 +1007,8 @@ public GenericResponse predictQuery(@Required("logicName") @PathParam("l if (predictor != null) { try { qp.setPersistenceMode(QueryPersistence.TRANSIENT); - MultivaluedMap optionalQueryParameters = new MultivaluedMapImpl<>(); - optionalQueryParameters.putAll(qp.getUnknownParameters(queryParameters)); - Query q = persister.create(qd.userDn, qd.dnList, marking, queryLogicName, qp, optionalQueryParameters); + Map> optionalQueryParameters = qp.getUnknownParameters(MapUtils.toMultiValueMap(queryParameters)); + Query q = persister.create(qd.userDn, qd.dnList, marking, queryLogicName, qp, MapUtils.toMultivaluedMap(optionalQueryParameters)); BaseQueryMetric metric = metricFactory.createMetric(); metric.populate(q); @@ -1008,7 +1031,7 @@ public GenericResponse predictQuery(@Required("logicName") @PathParam("l * Allow web services to throw their own WebApplicationExceptions */ - if (t instanceof Error && !(t instanceof TokenMgrError)) { + if (t instanceof Error && !(t instanceof TokenMgrException)) { log.error(t.getMessage(), t); throw (Error) t; } else if (t instanceof WebApplicationException) { @@ -1073,7 +1096,7 @@ private List getQueryByName(String name) throws Exception { } // will throw IllegalArgumentException if not defined - QueryLogic logic = queryLogicFactory.getQueryLogic(q.getQueryLogicName(), p); + QueryLogic logic = queryLogicFactory.getQueryLogic(q.getQueryLogicName(), (DatawavePrincipal) p); AccumuloConnectionFactory.Priority priority = logic.getConnectionPriority(); RunningQuery query = new RunningQuery(metrics, null, priority, logic, q, q.getQueryAuthorizations(), p, new RunningQueryTimingImpl(queryExpirationConf, qp.getPageTimeout()), this.predictor, this.userOperationsBean, this.metricFactory); @@ -1110,7 +1133,7 @@ private RunningQuery getQueryById(String id, Principal principal) throws Excepti Query q = queries.get(0); // will throw IllegalArgumentException if not defined - QueryLogic logic = queryLogicFactory.getQueryLogic(q.getQueryLogicName(), principal); + QueryLogic logic = queryLogicFactory.getQueryLogic(q.getQueryLogicName(), (DatawavePrincipal) principal); AccumuloConnectionFactory.Priority priority = logic.getConnectionPriority(); query = new RunningQuery(metrics, null, priority, logic, q, q.getQueryAuthorizations(), principal, new RunningQueryTimingImpl(queryExpirationConf, qp.getPageTimeout()), this.predictor, this.userOperationsBean, @@ -1145,7 +1168,8 @@ private RunningQuery adminGetQueryById(String id) throws Exception { 
final String auths = q.getQueryAuthorizations(); // will throw IllegalArgumentException if not defined - final QueryLogic logic = queryLogicFactory.getQueryLogic(q.getQueryLogicName(), ctx.getCallerPrincipal()); + Principal principal = ctx.getCallerPrincipal(); + final QueryLogic logic = queryLogicFactory.getQueryLogic(q.getQueryLogicName(), (DatawavePrincipal) principal); final AccumuloConnectionFactory.Priority priority = logic.getConnectionPriority(); query = RunningQuery.createQueryWithAuthorizations(metrics, null, priority, logic, q, auths, new RunningQueryTimingImpl(queryExpirationConf, qp.getPageTimeout()), this.predictor, this.metricFactory); @@ -1165,7 +1189,6 @@ private RunningQuery adminGetQueryById(String id) throws Exception { * the ID of the query to reload/reset * @return an empty response * - * @return datawave.webservice.result.VoidResponse * @RequestHeader X-ProxiedEntitiesChain use when proxying request for user, by specifying a chain of DNs of the identities to proxy * @RequestHeader X-ProxiedIssuersChain required when using X-ProxiedEntitiesChain, specify one issuer DN per subject DN listed in X-ProxiedEntitiesChain * @ResponseHeader query-session-id this header and value will be in the Set-Cookie header, subsequent calls for this session will need to supply the @@ -1214,14 +1237,12 @@ public VoidResponse reset(@Required("id") @PathParam("id") String id) { query.closeConnection(connectionFactory); } else { AuditType auditType = query.getLogic().getAuditType(query.getSettings()); - MultivaluedMap queryParameters = new MultivaluedMapImpl<>(); - queryParameters.putAll(query.getSettings().toMap()); - - queryParameters.putSingle(PrivateAuditConstants.AUDIT_TYPE, auditType.name()); - queryParameters.putSingle(PrivateAuditConstants.LOGIC_CLASS, query.getLogic().getLogicName()); - queryParameters.putSingle(PrivateAuditConstants.USER_DN, query.getSettings().getUserDN()); - queryParameters.putSingle(PrivateAuditConstants.COLUMN_VISIBILITY, query.getSettings().getColumnVisibility()); + MultiValueMap queryParameters = new LinkedMultiValueMap<>(query.getSettings().toMap()); + queryParameters.set(PrivateAuditConstants.AUDIT_TYPE, auditType.name()); + queryParameters.set(PrivateAuditConstants.LOGIC_CLASS, query.getLogic().getLogicName()); + queryParameters.set(PrivateAuditConstants.USER_DN, query.getSettings().getUserDN()); + queryParameters.set(PrivateAuditConstants.COLUMN_VISIBILITY, query.getSettings().getColumnVisibility()); if (!auditType.equals(AuditType.NONE)) { try { try { @@ -1234,7 +1255,7 @@ public VoidResponse reset(@Required("id") @PathParam("id") String id) { } // if the user didn't set an audit id, use the query id if (!queryParameters.containsKey(AuditParameters.AUDIT_ID)) { - queryParameters.putSingle(AuditParameters.AUDIT_ID, id); + queryParameters.set(AuditParameters.AUDIT_ID, id); } auditor.audit(queryParameters); } catch (IllegalArgumentException e) { @@ -1255,9 +1276,10 @@ public VoidResponse reset(@Required("id") @PathParam("id") String id) { priority = query.getConnectionPriority(); Map trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); query.getSettings().populateTrackingMap(trackingMap); - accumuloConnectionRequestBean.requestBegin(id); + QueryData qd = setUserData(ctx.getCallerPrincipal(), new QueryData()); + accumuloConnectionRequestBean.requestBegin(id, qd.userDn, trackingMap); try { - client = connectionFactory.getClient(query.getLogic().getConnPoolName(), priority, trackingMap); + client = 
connectionFactory.getClient(qd.userDn, qd.proxyServers, query.getLogic().getConnPoolName(), priority, trackingMap); } finally { accumuloConnectionRequestBean.requestEnd(id); } @@ -1455,13 +1477,18 @@ private T lookupContentByUUID(String uuidType, String uuid, MultivaluedMap T lookupContentByUUIDBatch(MultivaluedMap queryParamet } // Create the criteria for looking up the respective events, which we need to get the shard IDs and column families // required for the content lookup - final PostUUIDCriteria criteria = new PostUUIDCriteria(uuidPairs, queryParameters); + final PostUUIDCriteria criteria = new PostUUIDCriteria(uuidPairs, MapUtils.toMultiValueMap(queryParameters)); // Set the HTTP headers if a streamed response is required if (streamingOutput) { @@ -1588,17 +1615,18 @@ T lookupUUID(String uuidType, String uuid, MultivaluedMap que if (!StringUtils.isEmpty(streaming)) { streamingOutput = Boolean.parseBoolean(streaming); } + String uuidTypeContext = queryParameters.getFirst(CONTEXT_PARAMETER); final UUIDType matchingType = this.lookupUUIDUtil.getUUIDType(uuidType); String queryId = null; T response; try { // Construct the criteria used to perform the query final GetUUIDCriteria criteria; - final String view = (null != matchingType) ? matchingType.getDefinedView() : null; + final String view = (null != matchingType) ? matchingType.getQueryLogic(uuidTypeContext) : null; if ((LookupUUIDUtil.UID_QUERY.equals(view) || LookupUUIDUtil.LOOKUP_UID_QUERY.equals(view))) { - criteria = new UIDQueryCriteria(uuid, uuidType, queryParameters); + criteria = new UIDQueryCriteria(uuid, uuidType, MapUtils.toMultiValueMap(queryParameters)); } else { - criteria = new GetUUIDCriteria(uuid, uuidType, queryParameters); + criteria = new GetUUIDCriteria(uuid, uuidType, MapUtils.toMultiValueMap(queryParameters)); } // Add the HTTP headers in case streaming is required @@ -1606,6 +1634,10 @@ T lookupUUID(String uuidType, String uuid, MultivaluedMap que criteria.setStreamingOutputHeaders(httpHeaders); } + if (!StringUtils.isEmpty(uuidTypeContext)) { + criteria.setUUIDTypeContext(uuidTypeContext); + } + // Perform the query and get the first set of results response = this.lookupUUIDUtil.createUUIDQueryAndNext(criteria); if (response instanceof BaseQueryResponse) { @@ -1661,7 +1693,7 @@ public T lookupUUIDBatch(MultivaluedMap queryParameters, @Req if (!StringUtils.isEmpty(streaming)) { streamingOutput = Boolean.parseBoolean(streaming); } - final PostUUIDCriteria criteria = new PostUUIDCriteria(uuidPairs, queryParameters); + final PostUUIDCriteria criteria = new PostUUIDCriteria(uuidPairs, MapUtils.toMultiValueMap(queryParameters)); if (streamingOutput) { criteria.setStreamingOutputHeaders(httpHeaders); } @@ -2156,8 +2188,9 @@ public VoidResponse close(@Required("id") @PathParam("id") String id) { private VoidResponse close(String id, Principal principal) { VoidResponse response = new VoidResponse(); try { - boolean connectionRequestCanceled = accumuloConnectionRequestBean.cancelConnectionRequest(id, principal); - Pair,AccumuloClient> tuple = qlCache.pollIfOwnedBy(id, ((DatawavePrincipal) principal).getShortName()); + QueryData qd = setUserData(ctx.getCallerPrincipal(), new QueryData()); + boolean connectionRequestCanceled = accumuloConnectionRequestBean.cancelConnectionRequest(id, qd.userDn); + Pair,AccumuloClient> tuple = qlCache.pollIfOwnedBy(id, qd.userid); if (!id.matches(UUID_REGEX_RULE)) { log.error("Invalid query id: " + id); GenericResponse genericResponse = new GenericResponse<>(); @@ -2203,6 +2236,8 @@ 
private VoidResponse close(String id, Principal principal) { response.addException(qe.getBottomQueryException()); int statusCode = qe.getBottomQueryException().getStatusCode(); throw new DatawaveWebApplicationException(qe, response, statusCode); + } catch (Throwable t) { + throw t; } } @@ -2313,7 +2348,8 @@ public VoidResponse cancel(@Required("id") @PathParam("id") String id) { VoidResponse response = new VoidResponse(); try { boolean connectionRequestCanceled = accumuloConnectionRequestBean.cancelConnectionRequest(id); - Pair,AccumuloClient> tuple = qlCache.pollIfOwnedBy(id, ctx.getCallerPrincipal().getName()); + QueryData qd = setUserData(ctx.getCallerPrincipal(), new QueryData()); + Pair,AccumuloClient> tuple = qlCache.pollIfOwnedBy(id, qd.userid); if (tuple == null) { try { @@ -2698,7 +2734,8 @@ public GenericResponse duplicateQuery(@PathParam("id") String id, @Requi // TODO: add validation for all these sets // maybe set variables instead of stuffing in query if (newQueryLogicName != null) { - q.setQueryLogicName(queryLogicFactory.getQueryLogic(newQueryLogicName, ctx.getCallerPrincipal()).getLogicName()); + Principal principal = ctx.getCallerPrincipal(); + q.setQueryLogicName(queryLogicFactory.getQueryLogic(newQueryLogicName, (DatawavePrincipal) principal).getLogicName()); } if (newQuery != null) { q.setQuery(newQuery); @@ -2720,6 +2757,7 @@ public GenericResponse duplicateQuery(@PathParam("id") String id, @Requi } if (newMaxResultsOverride != null) { q.setMaxResultsOverride(newMaxResultsOverride); + q.setMaxResultsOverridden(true); } if (newPageTimeout != null) { q.setPageTimeout(newPageTimeout); @@ -2734,8 +2772,7 @@ public GenericResponse duplicateQuery(@PathParam("id") String id, @Requi } } } - MultivaluedMap newSettings = new MultivaluedMapImpl<>(); - newSettings.putAll(q.toMap()); + MultivaluedMap newSettings = MapUtils.toMultivaluedMap(q.toMap()); newSettings.putSingle(QueryParameters.QUERY_PERSISTENCE, persistence.name()); return createQuery(q.getQueryLogicName(), newSettings); } catch (DatawaveWebApplicationException e) { @@ -2781,7 +2818,7 @@ public GenericResponse duplicateQuery(@PathParam("id") String id, @Requi * @param parameters * - optional parameters to the query, a semi-colon separated list name=value pairs (optional, auditing required if changed) * @see datawave.webservice.query.runner.QueryExecutorBean#updateQuery(String, String, String, String, java.util.Date, java.util.Date, String, - * java.util.Date, Integer, Integer, Long, datawave.webservice.query.QueryPersistence, String) + * java.util.Date, Integer, Integer, Long, datawave.microservice.query.QueryPersistence, String) * * @return {@code datawave.webservice.result.GenericResponse} * @RequestHeader X-ProxiedEntitiesChain use when proxying request for user, by specifying a chain of DNs of the identities to proxy @@ -2874,11 +2911,10 @@ private void updateQuery(GenericResponse response, RunningQuery runningQ AuditType auditType = runningQuery.getLogic().getAuditType(runningQuery.getSettings()); if (!auditType.equals(AuditType.NONE)) { try { - MultivaluedMap queryParameters = new MultivaluedMapImpl<>(); - queryParameters.putAll(duplicate.toMap()); + MultiValueMap queryParameters = new LinkedMultiValueMap<>(duplicate.toMap()); // if the user didn't set an audit id, use the query id if (!queryParameters.containsKey(AuditParameters.AUDIT_ID)) { - queryParameters.putSingle(AuditParameters.AUDIT_ID, q.getId().toString()); + queryParameters.set(AuditParameters.AUDIT_ID, q.getId().toString()); } 
auditor.audit(queryParameters); } catch (IllegalArgumentException e) { @@ -2916,11 +2952,11 @@ private void updateQuery(GenericResponse response, RunningQuery runningQ } private void updateQueryParams(Query q, String queryLogicName, String query, Date beginDate, Date endDate, String queryAuthorizations, Date expirationDate, - Integer pagesize, Integer pageTimeout, Long maxResultsOverride, String parameters) throws CloneNotSupportedException { + Integer pagesize, Integer pageTimeout, Long maxResultsOverride, String parameters) throws QueryException, CloneNotSupportedException { Principal p = ctx.getCallerPrincipal(); // TODO: add validation for all these sets if (queryLogicName != null) { - QueryLogic logic = queryLogicFactory.getQueryLogic(queryLogicName, p); + QueryLogic logic = queryLogicFactory.getQueryLogic(queryLogicName, (DatawavePrincipal) p); q.setQueryLogicName(logic.getLogicName()); } if (query != null) { @@ -2946,6 +2982,7 @@ private void updateQueryParams(Query q, String queryLogicName, String query, Dat } if (maxResultsOverride != null) { q.setMaxResultsOverride(maxResultsOverride); + q.setMaxResultsOverridden(true); } if (parameters != null) { Set params = new HashSet<>(); @@ -2960,6 +2997,26 @@ private void updateQueryParams(Query q, String queryLogicName, String query, Dat } } + /** + * @param queryLogicName + * the logic name + * @param queryParameters + * the query parameters + * @return the generic response + */ + @POST + @Produces({"application/xml", "text/xml", "application/json", "text/yaml", "text/x-yaml", "application/x-yaml", "application/x-protobuf", + "application/x-protostuff"}) + @Path("/{logicName}/validate") + @Interceptors({RequiredInterceptor.class, ResponseInterceptor.class}) + @Timed(name = "dw.query.validateQuery", absolute = true) + public GenericResponse validateQuery(@Required("logicName") @PathParam("logicName") String queryLogicName, + MultivaluedMap queryParameters) { + GenericResponse response = new GenericResponse<>(); + response.setMessages(Collections.singletonList("Query validator coming soon.")); + throw new DatawaveWebApplicationException(new UnsupportedOperationException("Query validator not implemented"), response, 501); + } + /** * Administrator credentials required. 
Returns list of queries for some other user * @@ -3255,7 +3312,7 @@ public StreamingOutput execute(@PathParam("logicName") String logicName, Multiva // Find the response class Class responseClass; try { - QueryLogic l = queryLogicFactory.getQueryLogic(logicName, p); + QueryLogic l = queryLogicFactory.getQueryLogic(logicName, (DatawavePrincipal) p); QueryLogicTransformer t = l.getEnrichedTransformer(q); BaseResponse refResponse = t.createResponse(emptyList); responseClass = refResponse.getClass(); diff --git a/web-services/query/src/main/java/datawave/webservice/query/runner/RunningQuery.java b/web-services/query/src/main/java/datawave/webservice/query/runner/RunningQuery.java index ca33d79528e..3b44d65d656 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/runner/RunningQuery.java +++ b/web-services/query/src/main/java/datawave/webservice/query/runner/RunningQuery.java @@ -4,7 +4,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.ArrayBlockingQueue; @@ -23,6 +22,15 @@ import org.apache.log4j.Logger; import org.jboss.logging.NDC; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.query.cache.ResultsPage; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.core.query.logic.BaseQueryLogic; +import datawave.core.query.logic.QueryLogic; +import datawave.core.query.logic.WritesQueryMetrics; +import datawave.core.query.logic.WritesResultCardinalities; +import datawave.core.query.predict.QueryPredictor; +import datawave.microservice.query.Query; import datawave.microservice.querymetric.BaseQueryMetric; import datawave.microservice.querymetric.BaseQueryMetric.Prediction; import datawave.microservice.querymetric.QueryMetric; @@ -30,20 +38,13 @@ import datawave.microservice.querymetric.QueryMetricFactoryImpl; import datawave.security.authorization.DatawavePrincipal; import datawave.security.authorization.UserOperations; +import datawave.security.authorization.remote.RemoteUserOperationsImpl; import datawave.security.util.WSAuthorizationsUtil; -import datawave.webservice.common.connection.AccumuloConnectionFactory; -import datawave.webservice.query.Query; -import datawave.webservice.query.QueryImpl; +import datawave.webservice.common.connection.WrappedAccumuloClient; import datawave.webservice.query.cache.AbstractRunningQuery; -import datawave.webservice.query.cache.ResultsPage; -import datawave.webservice.query.configuration.GenericQueryConfiguration; import datawave.webservice.query.data.ObjectSizeOf; import datawave.webservice.query.exception.DatawaveErrorCode; import datawave.webservice.query.exception.QueryException; -import datawave.webservice.query.logic.BaseQueryLogic; -import datawave.webservice.query.logic.QueryLogic; -import datawave.webservice.query.logic.WritesQueryMetrics; -import datawave.webservice.query.logic.WritesResultCardinalities; import datawave.webservice.query.metric.QueryMetricsBean; import datawave.webservice.query.result.event.EventBase; import datawave.webservice.query.util.QueryUncaughtExceptionHandler; @@ -128,11 +129,14 @@ public RunningQuery(QueryMetricsBean queryMetrics, AccumuloClient client, Accumu } else { logic.preInitialize(settings, WSAuthorizationsUtil.buildAuthorizations(null)); } - DatawavePrincipal queryPrincipal = (logic.getUserOperations() == null) ? 
(DatawavePrincipal) principal - : logic.getUserOperations().getRemoteUser((DatawavePrincipal) principal); + DatawavePrincipal queryPrincipal = (DatawavePrincipal) ((logic.getUserOperations() == null) ? principal + : logic.getUserOperations().getRemoteUser((DatawavePrincipal) principal)); // the overall principal (the one with combined auths across remote user operations) is our own user operations (probably the UserOperationsBean) - DatawavePrincipal overallPrincipal = (userOperations == null) ? (DatawavePrincipal) principal - : userOperations.getRemoteUser((DatawavePrincipal) principal); + // don't call remote user operations if it's asked not to + DatawavePrincipal overallPrincipal = (userOperations == null + || "false".equalsIgnoreCase(settings.findParameter(RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES).getParameterValue())) + ? (DatawavePrincipal) principal + : userOperations.getRemoteUser((DatawavePrincipal) principal); this.calculatedAuths = WSAuthorizationsUtil.getDowngradedAuthorizations(methodAuths, overallPrincipal, queryPrincipal); this.timing = timing; this.executor = Executors.newSingleThreadExecutor(); @@ -191,6 +195,9 @@ public void setClient(AccumuloClient client) throws Exception { addNDC(); applyPrediction(null); this.client = client; + if (this.client instanceof WrappedAccumuloClient && this.logic.getClientConfig() != null) { + ((WrappedAccumuloClient) this.client).updateClientConfig(this.logic.getClientConfig()); + } long start = System.currentTimeMillis(); GenericQueryConfiguration configuration = this.logic.initialize(this.client, this.settings, this.calculatedAuths); this.lastPageNumber = 0; @@ -579,7 +586,7 @@ public ResultsPage next() throws Exception { log.info("Returning final empty page"); terminateResultsThread(); // This query is done, we have no more results to return. 
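The constructor changes above let a query opt out of the remote user-operations call by setting the parameter named by RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES to "false". A self-contained sketch of the guard, using simplified stand-in types rather than the real DataWave interfaces, and a hypothetical parameter key:

import java.util.Map;

public class RemoteUserOptOutSketch {
    interface UserOps {
        String getRemoteUser(String localPrincipal); // stand-in for UserOperations
    }

    static String resolveOverallPrincipal(UserOps userOps, Map<String,String> params, String localPrincipal) {
        String include = params.get("include.remote.services"); // hypothetical parameter key
        // don't call remote user operations when the query asks us not to
        if (userOps == null || "false".equalsIgnoreCase(include)) {
            return localPrincipal;
        }
        // the overall principal carries combined auths across remote user operations
        return userOps.getRemoteUser(localPrincipal);
    }

    public static void main(String[] args) {
        UserOps remote = p -> p + "+remoteAuths";
        System.out.println(resolveOverallPrincipal(remote, Map.of("include.remote.services", "false"), "alice")); // alice
        System.out.println(resolveOverallPrincipal(remote, Map.of(), "alice")); // alice+remoteAuths
    }
}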
- return new ResultsPage(); + return new ResultsPage(Collections.emptyList(), ResultsPage.Status.NONE); } } } diff --git a/web-services/query/src/main/java/datawave/webservice/query/util/AbstractUUIDLookupCriteria.java b/web-services/query/src/main/java/datawave/webservice/query/util/AbstractUUIDLookupCriteria.java index 837da62827a..2f490dabdfb 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/util/AbstractUUIDLookupCriteria.java +++ b/web-services/query/src/main/java/datawave/webservice/query/util/AbstractUUIDLookupCriteria.java @@ -1,11 +1,11 @@ package datawave.webservice.query.util; import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.MultivaluedMap; -import org.jboss.resteasy.specimpl.MultivaluedMapImpl; +import org.springframework.util.LinkedMultiValueMap; +import org.springframework.util.MultiValueMap; -import datawave.webservice.query.Query; +import datawave.microservice.query.Query; /** * Abstract implementation of criteria used for UUID lookup queries @@ -14,7 +14,9 @@ public abstract class AbstractUUIDLookupCriteria { private boolean allEventLookup; private boolean contentLookup; private HttpHeaders headersForStreamedResponse; - private MultivaluedMap queryParameters; + private MultiValueMap queryParameters; + + private String uuidTypeContext; /** * Constructor @@ -24,12 +26,12 @@ public abstract class AbstractUUIDLookupCriteria { */ public AbstractUUIDLookupCriteria(final Query settings) { if (null != settings) { - this.queryParameters = new MultivaluedMapImpl<>(); + this.queryParameters = new LinkedMultiValueMap<>(); this.queryParameters.putAll(settings.toMap()); } } - public AbstractUUIDLookupCriteria(final MultivaluedMap queryParameters) { + public AbstractUUIDLookupCriteria(final MultiValueMap queryParameters) { this.queryParameters = queryParameters; } @@ -68,9 +70,24 @@ public void setStreamingOutputHeaders(final HttpHeaders headers) { this.headersForStreamedResponse = headers; } - public MultivaluedMap getQueryParameters() { + public MultiValueMap getQueryParameters() { return queryParameters; } public abstract String getRawQueryString(); + + /** + * Returns the context for the lookup request, if any was specified in the request. The lookup context is used to obtain alternate query logics for the lookup + * requests to use. This can be used to modify the types of responses the query operations provide (e.g., plaintext responses). + * + * @return the lookup context, or null if none was specified + */ + public String getUUIDTypeContext() { + return uuidTypeContext; + } + + public void setUUIDTypeContext(String uuidTypeContext) { + this.uuidTypeContext = uuidTypeContext; + } + } diff --git a/web-services/query/src/main/java/datawave/webservice/query/util/GetUUIDCriteria.java b/web-services/query/src/main/java/datawave/webservice/query/util/GetUUIDCriteria.java index 6c581ac376f..6ea639ce523 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/util/GetUUIDCriteria.java +++ b/web-services/query/src/main/java/datawave/webservice/query/util/GetUUIDCriteria.java @@ -1,6 +1,6 @@ package datawave.webservice.query.util; -import javax.ws.rs.core.MultivaluedMap; +import org.springframework.util.MultiValueMap; /** * Lookup criteria for one and only one UUID @@ -9,7 +9,7 @@ public class GetUUIDCriteria extends AbstractUUIDLookupCriteria { protected final String uuid; protected final String uuidType; - public GetUUIDCriteria(final String uuid, final String uuidType, MultivaluedMap queryParameters) { + public GetUUIDCriteria(final String uuid, final String uuidType, MultiValueMap queryParameters) { super(queryParameters); this.uuid = uuid; diff --git a/web-services/query/src/main/java/datawave/webservice/query/util/LookupUUIDUtil.java b/web-services/query/src/main/java/datawave/webservice/query/util/LookupUUIDUtil.java index e19a889177c..425476a073c 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/util/LookupUUIDUtil.java +++ b/web-services/query/src/main/java/datawave/webservice/query/util/LookupUUIDUtil.java @@ -20,27 +20,30 @@ import javax.ws.rs.core.StreamingOutput; import org.apache.commons.lang.time.DateUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.log4j.Logger; import org.jboss.resteasy.specimpl.MultivaluedMapImpl; +import datawave.core.query.logic.QueryLogic; +import datawave.core.query.logic.QueryLogicFactory; +import datawave.core.query.util.QueryUtil; +import datawave.microservice.query.DefaultQueryParameters; +import datawave.microservice.query.Query; +import datawave.microservice.query.QueryImpl; +import datawave.microservice.query.QueryParameters; +import datawave.microservice.query.QueryPersistence; import datawave.query.data.UUIDType; import datawave.security.authorization.DatawavePrincipal; import datawave.security.authorization.UserOperations; +import datawave.security.authorization.remote.RemoteUserOperationsImpl; import datawave.security.util.WSAuthorizationsUtil; import datawave.util.time.DateHelper; import datawave.webservice.common.audit.AuditParameters; import datawave.webservice.common.exception.DatawaveWebApplicationException; import datawave.webservice.common.exception.NoResultsException; -import datawave.webservice.query.Query; -import datawave.webservice.query.QueryImpl; -import datawave.webservice.query.QueryParameters; -import datawave.webservice.query.QueryParametersImpl; -import datawave.webservice.query.QueryPersistence; import datawave.webservice.query.configuration.LookupUUIDConfiguration; import datawave.webservice.query.exception.DatawaveErrorCode; import datawave.webservice.query.exception.QueryException; -import datawave.webservice.query.logic.QueryLogic; -import datawave.webservice.query.logic.QueryLogicFactory; import datawave.webservice.query.result.event.EventBase; import datawave.webservice.query.result.event.FieldBase; import datawave.webservice.query.result.event.Metadata; @@ -89,6 +92,7 @@ public class LookupUUIDUtil { public static final String LOOKUP_UID_QUERY =
"LookupUIDQuery"; protected static final String UUID_TERM_DELIMITER = ":"; + public static final String DEFAULT_CONTENT_LOOKUP_TYPE = "default"; private Date beginAsDate = null; private EJBContext ctx; @@ -109,6 +113,7 @@ public class LookupUUIDUtil { private final UserOperations userOperations; private Map uuidTypes = Collections.synchronizedMap(new HashMap<>()); + private Map contentLookupTypes = Collections.synchronizedMap(new HashMap<>()); MultivaluedMap defaultOptionalParams; @@ -153,17 +158,21 @@ public LookupUUIDUtil(final LookupUUIDConfiguration configuration, final QueryEx // set the query logic factory this.queryLogicFactory = queryLogicFactory; - // Populate the UUIDType map - final List types = this.lookupUUIDConfiguration.getUuidTypes(); this.uuidTypes.clear(); + + // load uuidTypes from the flat list + final List types = this.lookupUUIDConfiguration.getUuidTypes(); + if (null != types) { for (final UUIDType type : types) { - if (null != type) { - this.uuidTypes.put(type.getFieldName().toUpperCase(), type); - } + addUUIDType(type.getFieldName(), type); } } + // Populate the content lookup types map + this.contentLookupTypes.clear(); + this.contentLookupTypes = this.lookupUUIDConfiguration.getContentLookupTypes(); + // Assign the begin date try { this.beginAsDate = DateHelper.parseWithGMT(this.lookupUUIDConfiguration.getBeginDate()); @@ -373,7 +382,7 @@ public T createUUIDQueryAndNext(final AbstractUUIDLookupCriteria unvalidated queryParameters.putSingle(QueryParameters.QUERY_NAME, queryName); try { - queryParameters.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(this.beginAsDate)); + queryParameters.putSingle(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(this.beginAsDate)); } catch (ParseException e) { throw new RuntimeException("Unable to format new query begin date: " + this.beginAsDate); } @@ -383,7 +392,7 @@ public T createUUIDQueryAndNext(final AbstractUUIDLookupCriteria unvalidated queryParameters.remove(QueryParameters.QUERY_END); } try { - queryParameters.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); + queryParameters.putSingle(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); } catch (ParseException e) { throw new RuntimeException("Unable to format new query end date: " + endDate); } @@ -393,7 +402,7 @@ public T createUUIDQueryAndNext(final AbstractUUIDLookupCriteria unvalidated queryParameters.remove(QueryParameters.QUERY_EXPIRATION); } try { - queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expireDate)); + queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expireDate)); } catch (ParseException e) { throw new RuntimeException("Unable to format new query expr date: " + expireDate); } @@ -431,7 +440,7 @@ public T createUUIDQueryAndNext(final AbstractUUIDLookupCriteria unvalidated return response; } - public Query createSettings(MultivaluedMap queryParameters) { + public Query createSettings(Map> queryParameters) { log.debug("Initial query parameters: " + queryParameters); Query query = responseObjectFactory.getQueryImpl(); if (queryParameters != null) { @@ -439,7 +448,11 @@ public Query createSettings(MultivaluedMap queryParameters) { if (defaultOptionalParams != null) { expandedQueryParameters.putAll(defaultOptionalParams); } - String delimitedParams = queryParameters.getFirst(QueryParameters.QUERY_PARAMS); + List params = queryParameters.get(QueryParameters.QUERY_PARAMS); + String 
delimitedParams = null; + if (params != null && !params.isEmpty()) { + delimitedParams = params.get(0); + } if (delimitedParams != null) { for (QueryImpl.Parameter pm : QueryUtil.parseParameters(delimitedParams)) { expandedQueryParameters.putSingle(pm.getParameterName(), pm.getParameterValue()); @@ -457,21 +470,28 @@ public Query createSettings(MultivaluedMap queryParameters) { return query; } - public String getAuths(String logicName, MultivaluedMap queryParameters, String queryAuths, Principal principal) { + public String getAuths(String logicName, Map> queryParameters, String queryAuths, Principal principal) { String userAuths; try { - QueryLogic logic = queryLogicFactory.getQueryLogic(logicName, principal); + QueryLogic logic = queryLogicFactory.getQueryLogic(logicName, (DatawavePrincipal) principal); Query settings = createSettings(queryParameters); if (queryAuths == null) { logic.preInitialize(settings, WSAuthorizationsUtil.buildAuthorizations(((DatawavePrincipal) principal).getAuthorizations())); } else { logic.preInitialize(settings, WSAuthorizationsUtil.buildAuthorizations(Collections.singleton(WSAuthorizationsUtil.splitAuths(queryAuths)))); } + // the query principal is our local principal unless the query logic has a different user operations DatawavePrincipal queryPrincipal = (logic.getUserOperations() == null) ? (DatawavePrincipal) principal : logic.getUserOperations().getRemoteUser((DatawavePrincipal) principal); // the overall principal (the one with combined auths across remote user operations) is our own user operations (probably the UserOperationsBean) - DatawavePrincipal overallPrincipal = (userOperations == null) ? (DatawavePrincipal) principal + // don't call remote user operations if it's asked not to + String includeRemoteServices = "true"; + if (queryParameters.get(RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES) != null + && !queryParameters.get(RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES).isEmpty()) { + includeRemoteServices = queryParameters.get(RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES).get(0); + } + DatawavePrincipal overallPrincipal = (userOperations == null || "false".equalsIgnoreCase(includeRemoteServices)) ? (DatawavePrincipal) principal : userOperations.getRemoteUser((DatawavePrincipal) principal); if (queryAuths != null) { userAuths = WSAuthorizationsUtil.downgradeUserAuths(queryAuths, overallPrincipal, queryPrincipal); @@ -486,21 +506,92 @@ public String getAuths(String logicName, MultivaluedMap queryPara } /** - * Returns a UUIDType implementation, if any, matching the specified field name + * Add the specified uuid type to the internal uuidtypes map if the type is not null. + * + * @param uuidType + * the uuid type name / field + * @param type + * the uuid type. + */ + private void addUUIDType(final String uuidType, final UUIDType type) { + if (type != null) { + final String key = buildUUIDTypeKey(uuidType); + this.uuidTypes.put(key.toUpperCase(), type); + } + } + + /** + * Build the string key used to store uuid types in the internal uuidtype map. There can be multiple lists of uuid types, referred to as contexts - this + * allows multiple logicNames to be associated with the same field. + * + * @param uuidType + * the name/field of the specified UUIDType. + * @return the key used to store/retrieve the UUID type from the uuidType map. 
+ */ + private String buildUUIDTypeKey(String uuidType) { + if (uuidType == null) + return null; + return uuidType.toUpperCase(); + } + + /** + * Returns a UUIDType implementation, if any, matching the specified field name. * * @param uuidType * the field name of the desired UUIDType - * @return a UUIDType implementation, if any, matching the specified field name + * @return a UUIDType implementation, if any, matching the specified field name, or null if one does not exist. */ public UUIDType getUUIDType(final String uuidType) { - final UUIDType type; - if (null != uuidType) { - type = this.uuidTypes.get(uuidType.toUpperCase()); + String uuidTypeKey = buildUUIDTypeKey(uuidType); + return (uuidTypeKey == null) ? null : uuidTypes.get(uuidTypeKey); + } + + /** + * Check whether the query logic name is one of the configured content lookup type logics. + * + * @param queryLogicName + * the query logic name to check + * @return true if the logic name matches a configured content lookup type logic + */ + public boolean isContentLookup(String queryLogicName) { + if (queryLogicName == null) + return false; + + for (Map.Entry typeEntry : contentLookupTypes.entrySet()) { + if (queryLogicName.equals(typeEntry.getValue())) { + return true; + } + } + return false; + } + + /** + * Find the appropriate content lookup type logic for the specified lookup context. + * + * @param lookupContext + * the lookupContext for the current request. + * @return the name of the content lookup type logic configured for the specified context, or the default content lookup type logic name, if none is + * configured for the context specified. + */ + public String getLookupType(String lookupContext) { + String lookupType = contentLookupTypes.get(lookupContext); + if (StringUtils.isEmpty(lookupType)) { + log.info("Request contained lookupContext '" + lookupContext + "', which was not configured, retrieving default lookup type."); } else { - type = null; + + if (log.isDebugEnabled()) { + log.debug("Using lookupContext " + lookupContext); + } + return lookupType; } - return type; + lookupType = contentLookupTypes.get(DEFAULT_CONTENT_LOOKUP_TYPE); + if (StringUtils.isEmpty(lookupType)) { + log.warn("Request contained lookupContext '" + lookupContext + + "', which was not configured, yet no default is configured either, returning hard-coded default: " + CONTENT_QUERY); + lookupType = CONTENT_QUERY; + } + + return lookupType; } /** @@ -538,7 +629,7 @@ public T lookupContentByNextResponse(final AbstractUUIDLookupCriteria valida // be evaluated since the getLogicName() expression would always evaluate as false for an instance // of AllEventMockResponse.
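The getLookupType resolution above falls back from the requested context, to the configured default entry, to the hard-coded CONTENT_QUERY logic. A self-contained sketch of that fallback chain; the map contents and context names here are hypothetical, and the real mappings come from the LookupUUIDConfiguration content lookup types:

import java.util.HashMap;
import java.util.Map;

public class ContentLookupTypeSketch {
    static final String DEFAULT_CONTENT_LOOKUP_TYPE = "default";
    static final String CONTENT_QUERY = "ContentQuery"; // stand-in value for the real constant

    static String getLookupType(Map<String,String> contentLookupTypes, String lookupContext) {
        String lookupType = contentLookupTypes.get(lookupContext);
        if (lookupType != null && !lookupType.isEmpty()) {
            return lookupType; // the context is explicitly configured
        }
        lookupType = contentLookupTypes.get(DEFAULT_CONTENT_LOOKUP_TYPE);
        if (lookupType == null || lookupType.isEmpty()) {
            return CONTENT_QUERY; // hard-coded fallback, as in the diff above
        }
        return lookupType; // configured default
    }

    public static void main(String[] args) {
        Map<String,String> types = new HashMap<>();
        types.put("default", "ContentQuery");
        types.put("plaintext", "PlaintextContentQuery"); // hypothetical context mapping
        System.out.println(getLookupType(types, "plaintext")); // PlaintextContentQuery
        System.out.println(getLookupType(types, "unknown"));   // ContentQuery
    }
}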
// - if (CONTENT_QUERY.equals(nextQueryResponse.getLogicName()) && !(nextQueryResponse instanceof AllEventMockResponse)) { + if (isContentLookup(nextQueryResponse.getLogicName()) && !(nextQueryResponse instanceof AllEventMockResponse)) { contentQueryResponse = (T) nextQueryResponse; } // Handle the case where /next has returned results of a UUID lookup, and a secondary content @@ -671,18 +762,18 @@ private EventQueryResponseBase lookupPagedContent(final String queryName, final queryParameters.putSingle(QueryParameters.QUERY_NAME, queryName); queryParameters.putSingle(QueryParameters.QUERY_STRING, contentQuery.toString()); try { - queryParameters.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(this.beginAsDate)); + queryParameters.putSingle(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(this.beginAsDate)); } catch (ParseException e1) { throw new RuntimeException("Error formatting begin date: " + this.beginAsDate); } try { - queryParameters.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); + queryParameters.putSingle(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); } catch (ParseException e1) { throw new RuntimeException("Error formatting end date: " + endDate); } queryParameters.putSingle(QueryParameters.QUERY_AUTHORIZATIONS, userAuths); try { - queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expireDate)); + queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expireDate)); } catch (ParseException e1) { throw new RuntimeException("Error formatting expr date: " + expireDate); } @@ -695,7 +786,8 @@ private EventQueryResponseBase lookupPagedContent(final String queryName, final } } - final GenericResponse createResponse = this.queryExecutor.createQuery(CONTENT_QUERY, queryParameters); + final String contentLookupType = getLookupType(validatedCriteria.getUUIDTypeContext()); + final GenericResponse createResponse = this.queryExecutor.createQuery(contentLookupType, queryParameters); final String contentQueryId = createResponse.getResult(); boolean preventCloseOfMergedQueryId = ((null == mergedContentQueryResponse) && allEventMockResponse); try { @@ -785,18 +877,18 @@ private StreamingOutput lookupStreamedContent(final String queryName, final Abst queryParameters.putSingle(QueryParameters.QUERY_NAME, queryName); queryParameters.putSingle(QueryParameters.QUERY_STRING, contentQuery.toString()); try { - queryParameters.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(this.beginAsDate)); + queryParameters.putSingle(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(this.beginAsDate)); } catch (ParseException e1) { throw new RuntimeException("Error formatting begin date: " + this.beginAsDate); } try { - queryParameters.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); + queryParameters.putSingle(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); } catch (ParseException e1) { throw new RuntimeException("Error formatting end date: " + endDate); } queryParameters.putSingle(QueryParameters.QUERY_AUTHORIZATIONS, userAuths); try { - queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expireDate)); + queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expireDate)); } catch (ParseException e1) { throw new RuntimeException("Error formatting expr date: " + expireDate); } @@ -811,7 
+903,8 @@ private StreamingOutput lookupStreamedContent(final String queryName, final Abst // Call the ContentQuery for one or more events final HttpHeaders headers = validatedCriteria.getStreamingOutputHeaders(); - return this.queryExecutor.execute(CONTENT_QUERY, queryParameters, headers); + final String contentLookupType = getLookupType(validatedCriteria.getUUIDTypeContext()); + return this.queryExecutor.execute(contentLookupType, queryParameters, headers); } private void mergeNextUUIDLookups(final EventQueryResponseBase mergedResponse) { @@ -899,11 +992,11 @@ private AbstractUUIDLookupCriteria validateLookupCriteria(final AbstractUUIDLook // Validate the "potential" UUID term. It's potential because it could be an OR operator // or some other query syntax that would be validated with more scrutiny once the query // executor is invoked. - final UUIDType uuidType = this.validateUUIDTerm(potentialUUIDTerm.trim(), logicName); + final UUIDType uuidType = this.validateUUIDTerm(criteria.getUUIDTypeContext(), potentialUUIDTerm.trim(), logicName); if (null != uuidType) { // Assign the query logic name if undefined if (null == logicName) { - logicName = uuidType.getDefinedView(); + logicName = uuidType.getQueryLogic(criteria.getUUIDTypeContext()); } // Increment the UUID type/value count @@ -963,7 +1056,7 @@ private AbstractUUIDLookupCriteria validateLookupCriteria(final AbstractUUIDLook params = params + ';' + PARAM_HIT_LIST + ':' + true; } - criteria.getQueryParameters().putSingle(QueryParameters.QUERY_PARAMS, params); + criteria.getQueryParameters().set(QueryParameters.QUERY_PARAMS, params); // All is well, so return the validated criteria return criteria; @@ -987,13 +1080,15 @@ private EventQueryResponseBase validatePagedResponse(final BaseQueryResponse res * Validate the specified token as a UUID lookup term, either as a LUCENE-formatted field/value or a UIDQuery field/value. Tokens missing the appropriate * delimiter are ignored and return with a null UUIDType. * - * @param uuidTypeValueTerm A token to evaluate as a possible UUID field/value term + * @param lookupContext additional information about the lookup purpose or type. 
+ * + * @param possibleUUIDTerm A token to evaluate as a possible UUID field/value term * * @param logicName The existing assigned query logic name, if any * * @return A valid UUIDType, or null if the specified token is obviously not a UUID field/value term */ - private UUIDType validateUUIDTerm(final String possibleUUIDTerm, final String logicName) { + private UUIDType validateUUIDTerm(final String lookupContext, final String possibleUUIDTerm, final String logicName) { // Declare the return value final UUIDType matchingUuidType; @@ -1026,8 +1121,8 @@ else if ((null == uuid) || uuid.isEmpty()) { throw new DatawaveWebApplicationException(new IllegalArgumentException(message), errorReponse); } // Reject conflicting logic name - else if ((null != logicName) && !logicName.equals(matchingUuidType.getDefinedView())) { - final String message = "Multiple UUID types '" + logicName + "' and '" + matchingUuidType.getDefinedView() + "' not " + else if ((null != logicName) && !logicName.equals(matchingUuidType.getQueryLogic(lookupContext))) { + final String message = "Multiple UUID types '" + logicName + "' and '" + matchingUuidType.getQueryLogic(lookupContext) + "' not " + " supported within the same lookup request"; final GenericResponse errorReponse = new GenericResponse<>(); errorReponse.addMessage(message); diff --git a/web-services/query/src/main/java/datawave/webservice/query/util/MapUtils.java b/web-services/query/src/main/java/datawave/webservice/query/util/MapUtils.java new file mode 100644 index 00000000000..da2da2bc963 --- /dev/null +++ b/web-services/query/src/main/java/datawave/webservice/query/util/MapUtils.java @@ -0,0 +1,31 @@ +package datawave.webservice.query.util; + +import java.util.List; +import java.util.Map; + +import javax.ws.rs.core.MultivaluedMap; + +import org.jboss.resteasy.specimpl.MultivaluedMapImpl; +import org.springframework.util.LinkedMultiValueMap; +import org.springframework.util.MultiValueMap; + +public class MapUtils { + + public static MultiValueMap toMultiValueMap(MultivaluedMap multivaluedMap) { + MultiValueMap multiValueMap = null; + if (multivaluedMap != null) { + multiValueMap = new LinkedMultiValueMap<>(); + multivaluedMap.forEach(multiValueMap::put); + } + return multiValueMap; + } + + public static MultivaluedMap toMultivaluedMap(Map> multiValueMap) { + MultivaluedMap multivaluedMap = null; + if (multiValueMap != null) { + multivaluedMap = new MultivaluedMapImpl<>(); + multiValueMap.forEach(multivaluedMap::put); + } + return multivaluedMap; + } +} diff --git a/web-services/query/src/main/java/datawave/webservice/query/util/NextContentCriteria.java b/web-services/query/src/main/java/datawave/webservice/query/util/NextContentCriteria.java index 1166bf62e2c..086aca6022a 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/util/NextContentCriteria.java +++ b/web-services/query/src/main/java/datawave/webservice/query/util/NextContentCriteria.java @@ -1,8 +1,8 @@ package datawave.webservice.query.util; -import javax.ws.rs.core.MultivaluedMap; +import org.springframework.util.MultiValueMap; -import datawave.webservice.query.Query; +import datawave.microservice.query.Query; /** * Lookup criteria for paging through content results @@ -10,7 +10,7 @@ public class NextContentCriteria extends AbstractUUIDLookupCriteria { private final String queryId; - public NextContentCriteria(final String queryId, MultivaluedMap queryParameters) { + public NextContentCriteria(final String queryId, MultiValueMap queryParameters) { super(queryParameters); 
this.queryId = queryId; } diff --git a/web-services/query/src/main/java/datawave/webservice/query/util/PostUUIDCriteria.java b/web-services/query/src/main/java/datawave/webservice/query/util/PostUUIDCriteria.java index 5e369481a7f..32e3e1ea0d2 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/util/PostUUIDCriteria.java +++ b/web-services/query/src/main/java/datawave/webservice/query/util/PostUUIDCriteria.java @@ -1,6 +1,6 @@ package datawave.webservice.query.util; -import javax.ws.rs.core.MultivaluedMap; +import org.springframework.util.MultiValueMap; /** * Lookup criteria for one or more UUIDs @@ -8,7 +8,7 @@ public class PostUUIDCriteria extends AbstractUUIDLookupCriteria { private final String uuidPairs; - public PostUUIDCriteria(final String uuidPairs, MultivaluedMap<String,String> queryParameters) { + public PostUUIDCriteria(final String uuidPairs, MultiValueMap<String,String> queryParameters) { super(queryParameters); this.uuidPairs = uuidPairs; } diff --git a/web-services/query/src/main/java/datawave/webservice/query/util/UIDQueryCriteria.java b/web-services/query/src/main/java/datawave/webservice/query/util/UIDQueryCriteria.java index e5109f734db..d2ee65cc689 100644 --- a/web-services/query/src/main/java/datawave/webservice/query/util/UIDQueryCriteria.java +++ b/web-services/query/src/main/java/datawave/webservice/query/util/UIDQueryCriteria.java @@ -1,12 +1,12 @@ package datawave.webservice.query.util; -import javax.ws.rs.core.MultivaluedMap; +import org.springframework.util.MultiValueMap; /** * Criteria for one and only one UIDQuery-based lookup */ public class UIDQueryCriteria extends GetUUIDCriteria { - public UIDQueryCriteria(final String uuid, final String uuidType, MultivaluedMap<String,String> queryParameters) { + public UIDQueryCriteria(final String uuid, final String uuidType, MultiValueMap<String,String> queryParameters) { super(uuid, uuidType, queryParameters); } diff --git a/web-services/query/src/test/java/datawave/webservice/query/cache/CreatedQueryLogicCacheBeanTest.java b/web-services/query/src/test/java/datawave/webservice/query/cache/CreatedQueryLogicCacheBeanTest.java index 0de451eb3ec..cd1963d5655 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/cache/CreatedQueryLogicCacheBeanTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/cache/CreatedQueryLogicCacheBeanTest.java @@ -17,8 +17,8 @@ import com.google.common.collect.Sets; +import datawave.core.query.logic.QueryLogic; import datawave.webservice.query.cache.CreatedQueryLogicCacheBean.Triple; -import datawave.webservice.query.logic.QueryLogic; /** * diff --git a/web-services/query/src/test/java/datawave/webservice/query/cache/QueryCacheBeanTest.java b/web-services/query/src/test/java/datawave/webservice/query/cache/QueryCacheBeanTest.java index 6d7b4565a7d..bf35cd3316f 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/cache/QueryCacheBeanTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/cache/QueryCacheBeanTest.java @@ -20,11 +20,11 @@ import org.powermock.api.easymock.annotation.Mock; import org.powermock.modules.junit4.PowerMockRunner; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.query.logic.QueryLogic; +import datawave.microservice.authorization.util.AuthorizationsUtil; +import datawave.microservice.query.QueryImpl; import datawave.microservice.querymetric.QueryMetricFactoryImpl; -import datawave.security.util.WSAuthorizationsUtil; -import datawave.webservice.common.connection.AccumuloConnectionFactory; -import
datawave.webservice.query.QueryImpl; -import datawave.webservice.query.logic.QueryLogic; import datawave.webservice.query.runner.RunningQuery; @RunWith(PowerMockRunner.class) @@ -126,7 +126,7 @@ public void testGetRunningQueries() throws Exception { expect(logic.isLongRunningQuery()).andReturn(false); expect(logic.getResultLimit(q)).andReturn(-1L); expect(logic.getMaxResults()).andReturn(-1L); - logic.preInitialize(q, WSAuthorizationsUtil.buildAuthorizations(null)); + logic.preInitialize(q, AuthorizationsUtil.buildAuthorizations(null)); expect(logic.getUserOperations()).andReturn(null); PowerMock.replayAll(); diff --git a/web-services/query/src/test/java/datawave/webservice/query/cache/QueryExpirationBeanTest.java b/web-services/query/src/test/java/datawave/webservice/query/cache/QueryExpirationBeanTest.java index 0b47891b7fe..899c7638538 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/cache/QueryExpirationBeanTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/cache/QueryExpirationBeanTest.java @@ -8,19 +8,16 @@ import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; -import org.junit.runner.RunWith; -import org.powermock.api.easymock.PowerMock; -import org.powermock.modules.junit4.PowerMockRunner; -import org.powermock.reflect.Whitebox; +import org.mockito.Mockito; import com.google.common.cache.Cache; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.microservice.query.QueryImpl; +import datawave.microservice.query.config.QueryExpirationProperties; import datawave.microservice.querymetric.QueryMetricFactoryImpl; -import datawave.webservice.common.connection.AccumuloConnectionFactory; -import datawave.webservice.query.QueryImpl; import datawave.webservice.query.runner.RunningQuery; -@RunWith(PowerMockRunner.class) public class QueryExpirationBeanTest { private static CreatedQueryLogicCacheBean qlCache; @@ -32,7 +29,7 @@ public static void setup() throws IllegalArgumentException, IllegalAccessExcepti queryCache = new QueryCache(); queryCache.init(); qlCache = new CreatedQueryLogicCacheBean(); - connFactory = PowerMock.createMock(AccumuloConnectionFactory.class); + connFactory = Mockito.mock(AccumuloConnectionFactory.class); } @Test @@ -51,18 +48,21 @@ public void testRemoveIdleOrExpired() throws Exception { Assert.assertFalse("Query Cache still contains query", queryCache.containsKey(qid)); Assert.assertFalse("Query Logic Cache still contains query logic", qlCache.snapshot().containsKey(qid)); + Cache queryCacheBuild = queryCache.buildCache(); for (int i = 0; i < 5; i++) { RunningQuery runningQuery = createRunningQuery(); String key = runningQuery.getSettings().getId().toString(); - queryCache.put(key, runningQuery); + queryCacheBuild.put(key, runningQuery); qlCache.add(key, key, runningQuery.getLogic(), null); } - int queryCacheSize = Whitebox.getInternalState(queryCache, Cache.class).asMap().size(); + int queryCacheSize = queryCacheBuild.asMap().size(); Assert.assertEquals(5, queryCacheSize); Assert.assertEquals(5, qlCache.snapshot().size()); bean.close(); qlCache.shutdown(); - queryCacheSize = Whitebox.getInternalState(queryCache, Cache.class).asMap().size(); + + queryCacheBuild = queryCache.buildCache(); + queryCacheSize = queryCacheBuild.asMap().size(); Assert.assertEquals("Query Cache is not empty: " + queryCacheSize, 0, queryCacheSize); Assert.assertEquals("Query Logic Cache is not empty: " + qlCache.snapshot().size(), 0, qlCache.snapshot().size()); } @@ -70,14 +70,14 @@ public 
void testRemoveIdleOrExpired() throws Exception { private QueryExpirationBean createBean(int expireTime) throws IllegalArgumentException, IllegalAccessException { QueryExpirationBean bean = new QueryExpirationBean(); - QueryExpirationConfiguration expirationConfiguration = new QueryExpirationConfiguration(); - setInternalState(expirationConfiguration, "idleTimeMinutes", expireTime); - setInternalState(expirationConfiguration, "callTimeMinutes", expireTime); + QueryExpirationProperties expirationConfiguration = new QueryExpirationProperties(); + expirationConfiguration.setIdleTimeout(expireTime); + expirationConfiguration.setCallTimeout(expireTime); - setInternalState(bean, QueryExpirationConfiguration.class, expirationConfiguration); - setInternalState(bean, QueryCache.class, queryCache); - setInternalState(bean, CreatedQueryLogicCacheBean.class, qlCache); - setInternalState(bean, AccumuloConnectionFactory.class, connFactory); + bean.conf = expirationConfiguration; + bean.cache = queryCache; + bean.qlCache = qlCache; + bean.connectionFactory = connFactory; return bean; } diff --git a/web-services/query/src/test/java/datawave/webservice/query/cache/RunningQueryTimingImplTest.java b/web-services/query/src/test/java/datawave/webservice/query/cache/RunningQueryTimingImplTest.java index 6de7b91f018..fc42b757d02 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/cache/RunningQueryTimingImplTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/cache/RunningQueryTimingImplTest.java @@ -8,6 +8,8 @@ import org.junit.Before; import org.junit.Test; +import datawave.microservice.query.config.QueryExpirationProperties; + /** * */ @@ -26,8 +28,8 @@ public void setUp() throws Exception {} public void tearDown() throws Exception {} @Test - public void testQueryExpirationConfigurationDefaults() { - QueryExpirationConfiguration conf = new QueryExpirationConfiguration(); + public void testQueryExpirationPropertiesDefaults() { + QueryExpirationProperties conf = new QueryExpirationProperties(); RunningQueryTimingImpl timing = new RunningQueryTimingImpl(conf, -1); assertEquals(60 * 60 * 1000, timing.getMaxCallMs()); @@ -36,11 +38,11 @@ public void testQueryExpirationConfigurationDefaults() { } @Test - public void testQueryExpirationConfiguration() { - QueryExpirationConfiguration conf = new QueryExpirationConfiguration(); - conf.setCallTime(10); - conf.setPageShortCircuitTimeout(9); - conf.setPageSizeShortCircuitCheckTime(5); + public void testQueryExpirationProperties() { + QueryExpirationProperties conf = new QueryExpirationProperties(); + conf.setCallTimeout(10); + conf.setShortCircuitTimeout(9); + conf.setShortCircuitCheckTime(5); RunningQueryTimingImpl timing = new RunningQueryTimingImpl(conf, -1); assertEquals(10 * 60 * 1000, timing.getMaxCallMs()); @@ -49,8 +51,8 @@ public void testQueryExpirationConfiguration() { } @Test - public void testQueryExpirationConfigurationWithTimeout() { - QueryExpirationConfiguration conf = new QueryExpirationConfiguration(); + public void testQueryExpirationPropertiesWithTimeout() { + QueryExpirationProperties conf = new QueryExpirationProperties(); RunningQueryTimingImpl timing = new RunningQueryTimingImpl(conf, 20); assertEquals(20 * 60 * 1000, timing.getMaxCallMs()); diff --git a/web-services/query/src/test/java/datawave/webservice/query/cache/TestQueryLogic.java b/web-services/query/src/test/java/datawave/webservice/query/cache/TestQueryLogic.java index 2accc32ff35..15c72cbac69 100644 --- 
a/web-services/query/src/test/java/datawave/webservice/query/cache/TestQueryLogic.java +++ b/web-services/query/src/test/java/datawave/webservice/query/cache/TestQueryLogic.java @@ -5,11 +5,11 @@ import org.apache.accumulo.core.client.AccumuloClient; import org.apache.accumulo.core.security.Authorizations; -import datawave.webservice.common.connection.AccumuloConnectionFactory.Priority; -import datawave.webservice.query.Query; -import datawave.webservice.query.configuration.GenericQueryConfiguration; -import datawave.webservice.query.logic.BaseQueryLogic; -import datawave.webservice.query.logic.QueryLogicTransformer; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.core.query.logic.BaseQueryLogic; +import datawave.core.query.logic.QueryLogicTransformer; +import datawave.microservice.query.Query; public class TestQueryLogic extends BaseQueryLogic { @@ -28,8 +28,8 @@ public String getPlan(AccumuloClient client, Query settings, Set } @Override - public Priority getConnectionPriority() { - return Priority.NORMAL; + public AccumuloConnectionFactory.Priority getConnectionPriority() { + return AccumuloConnectionFactory.Priority.NORMAL; } @Override diff --git a/web-services/query/src/test/java/datawave/webservice/query/configuration/GenericQueryConfigurationMockTest.java b/web-services/query/src/test/java/datawave/webservice/query/configuration/GenericQueryConfigurationMockTest.java index 528e6c38a60..e6e46656383 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/configuration/GenericQueryConfigurationMockTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/configuration/GenericQueryConfigurationMockTest.java @@ -19,7 +19,9 @@ import org.powermock.api.easymock.annotation.Mock; import org.powermock.modules.junit4.PowerMockRunner; -import datawave.webservice.query.logic.BaseQueryLogic; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.core.query.configuration.QueryData; +import datawave.core.query.logic.BaseQueryLogic; @RunWith(PowerMockRunner.class) public class GenericQueryConfigurationMockTest { @@ -40,8 +42,8 @@ public class GenericQueryConfigurationMockTest { public void setup() { this.config = new GenericQueryConfiguration() { @Override - public Iterator getQueries() { - return super.getQueries(); + public Iterator getQueriesIter() { + return super.getQueriesIter(); } }; } @@ -68,6 +70,8 @@ public void testConstructor_WithConfiguredLogic() { @Test public void testCanRunQuery_HappyPath() { + expect(this.authorizations.getAuthorizations()).andReturn(Collections.emptyList()); + // Run the test PowerMock.replayAll(); GenericQueryConfiguration subject = new GenericQueryConfiguration() {}; diff --git a/web-services/query/src/test/java/datawave/webservice/query/configuration/GenericQueryConfigurationTest.java b/web-services/query/src/test/java/datawave/webservice/query/configuration/GenericQueryConfigurationTest.java new file mode 100644 index 00000000000..1e9afaaa5e3 --- /dev/null +++ b/web-services/query/src/test/java/datawave/webservice/query/configuration/GenericQueryConfigurationTest.java @@ -0,0 +1,42 @@ +package datawave.webservice.query.configuration; + +import static org.junit.Assert.assertEquals; + +import java.util.Collections; +import java.util.Date; + +import org.apache.accumulo.core.security.Authorizations; +import org.junit.Test; + +import datawave.core.query.configuration.GenericQueryConfiguration; 
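The new GenericQueryConfigurationTest beginning here checks that the copy constructor carries every field across. A simplified sketch of the pattern under test, with far less state than the real datawave.core.query.configuration.GenericQueryConfiguration:

```java
import java.util.Date;

// Simplified stand-in: the real class also carries authorizations, query data,
// table hints, and more. Only the copy-everything pattern is illustrated.
public class QueryConfigSketch {
    private String queryString;
    private Date beginDate;
    private Date endDate;
    private long maxWork = -1L;
    private int baseIteratorPriority = 100;
    private String tableName = "shard";
    private boolean bypassAccumulo = false;
    private String accumuloPassword = "";

    public QueryConfigSketch() {}

    // Copy constructor: every field is transferred so the copy can be mutated
    // without affecting the original, which is what the surrounding test asserts
    // getter by getter.
    public QueryConfigSketch(QueryConfigSketch other) {
        this.queryString = other.queryString;
        this.beginDate = other.beginDate;
        this.endDate = other.endDate;
        this.maxWork = other.maxWork;
        this.baseIteratorPriority = other.baseIteratorPriority;
        this.tableName = other.tableName;
        this.bypassAccumulo = other.bypassAccumulo;
        this.accumuloPassword = other.accumuloPassword;
    }
}
```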
+import datawave.util.TableName; + +public class GenericQueryConfigurationTest { + + @Test + public void testCopyConstructor() { + GenericQueryConfiguration config = new GenericQueryConfiguration(); + config.setAuthorizations(Collections.singleton(new Authorizations("AUTH1,AUTH2"))); + config.setQueryString("FOO == 'bar'"); + config.setBeginDate(new Date(0)); + config.setEndDate(new Date(System.currentTimeMillis())); + config.setMaxWork(Long.MAX_VALUE); + config.setBaseIteratorPriority(17); + config.setTableName(TableName.SHARD_INDEX); // non-default value + // skip query data iterator, empty iterator doesn't matter + config.setBypassAccumulo(true); + config.setAccumuloPassword("env:PASS"); + + GenericQueryConfiguration copy = new GenericQueryConfiguration(config); + assertEquals(config.getAuthorizations(), copy.getAuthorizations()); + assertEquals(config.getQueryString(), copy.getQueryString()); + assertEquals(config.getBeginDate(), copy.getBeginDate()); + assertEquals(config.getEndDate(), copy.getEndDate()); + assertEquals(config.getMaxWork(), copy.getMaxWork()); + assertEquals(config.getBaseIteratorPriority(), copy.getBaseIteratorPriority()); + assertEquals(config.getTableName(), copy.getTableName()); + assertEquals(config.getBypassAccumulo(), copy.getBypassAccumulo()); + assertEquals(config.getAccumuloPassword(), copy.getAccumuloPassword()); + } + +} diff --git a/web-services/query/src/test/java/datawave/webservice/query/configuration/QueryDataTest.java b/web-services/query/src/test/java/datawave/webservice/query/configuration/QueryDataTest.java index af565eff0fa..8e42387ebf2 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/configuration/QueryDataTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/configuration/QueryDataTest.java @@ -1,167 +1,104 @@ package datawave.webservice.query.configuration; -import static org.easymock.EasyMock.expect; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import java.util.Arrays; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; import org.apache.accumulo.core.client.IteratorSetting; +import org.apache.accumulo.core.data.Key; import org.apache.accumulo.core.data.Range; -import org.junit.Assert; import org.junit.Test; -import org.junit.runner.RunWith; -import org.powermock.api.easymock.PowerMock; -import org.powermock.api.easymock.annotation.Mock; -import org.powermock.modules.junit4.PowerMockRunner; -import com.google.common.collect.Lists; +import datawave.core.query.configuration.QueryData; -/** - * - */ -@RunWith(PowerMockRunner.class) public class QueryDataTest { - @Mock - QueryData copy; - - @Mock - IteratorSetting setting; - - @Mock - Range range; @Test public void testCopyConstructor() { - // Set expectations - expect(this.copy.getQuery()).andReturn("TEST"); - expect(this.copy.getRanges()).andReturn(Arrays.asList(this.range)); - expect(this.copy.getSettings()).andReturn(Arrays.asList(this.setting)); - - // Run the test - PowerMock.replayAll(); - QueryData subject = new QueryData(this.copy); - String result1 = subject.getQuery(); - Collection result2 = subject.getRanges(); - subject.addIterator(this.setting); - Collection result3 = subject.getSettings(); - String result4 = subject.toString(); - PowerMock.verifyAll(); - - // Verify results - assertNotNull("Query should not be null", result1); - assertNotNull("Ranges should not be null", result2); - 
assertTrue("Ranges should not be empty", !result2.isEmpty()); - assertNotNull("Settings should not be null", result3); - assertTrue("Settings should not be empty", !result3.isEmpty()); - assertEquals("Settings should have a size of 2", 2, result3.size()); - assertNotNull("toString should not be null", result4); + String tableName = "SHARD"; + String query = "FOO == 'bar'"; + Collection ranges = Collections.singleton(new Range(new Key("row"), true, new Key("row\0"), false)); + Collection columnFamilies = Collections.singleton("FOO"); + List settings = new ArrayList<>(); + settings.add(new IteratorSetting(20, "iterator", "QueryIterator.class")); + + QueryData original = new QueryData(tableName, query, ranges, columnFamilies, settings); + QueryData copy = new QueryData(original); + + assertEquals(original.getTableName(), copy.getTableName()); + assertEquals(original.getQuery(), copy.getQuery()); + assertEquals(original.getRanges(), copy.getRanges()); + assertEquals(original.getColumnFamilies(), copy.getColumnFamilies()); + assertEquals(original.getSettings(), copy.getSettings()); } @Test public void testCorrectReuse() { - List queries = Lists.newArrayList(); - - QueryData query1 = new QueryData(); - query1.setQuery("FOO == 'bar'"); - query1.setSettings(Lists.newArrayList(new IteratorSetting(20, "iter1", "iter1.class"))); - query1.setRanges(Collections.singleton(Range.prefix("1"))); - - queries.add(query1); - queries.add(new QueryData(query1, Collections.singleton(Range.prefix("2")))); - queries.add(new QueryData(query1, Collections.singleton(Range.prefix("3")))); - - Integer count = 1; - List prevSettings = null; - String prevQuery = null; + String query = "FOO == 'bar'"; + List settings = new ArrayList<>(); + settings.add(new IteratorSetting(20, "iter1", "iter1.class")); + + QueryData original = new QueryData(); + original.setQuery(query); + original.setSettings(settings); + original.setColumnFamilies(Collections.emptySet()); + original.setRanges(Collections.singleton(Range.prefix("1"))); + + List queries = new ArrayList<>(); + queries.add(original); + queries.add(new QueryData(original).withRanges(Collections.singleton(Range.prefix("2")))); + queries.add(new QueryData(original).withRanges(Collections.singleton(Range.prefix("3")))); + + int count = 1; for (QueryData qd : queries) { - if (null == prevSettings) { - prevSettings = qd.getSettings(); - } else { - Assert.assertEquals(prevSettings, qd.getSettings()); - } - - if (null == prevQuery) { - prevQuery = qd.getQuery(); - } else { - Assert.assertEquals(prevQuery, qd.getQuery()); - } - - Assert.assertEquals(1, qd.getRanges().size()); - - Range r = qd.getRanges().iterator().next(); - - Assert.assertEquals(count.toString(), r.getStartKey().getRow().toString()); - + assertEquals(query, qd.getQuery()); + assertRange(Integer.toString(count), qd.getRanges()); + assertEquals(0, qd.getColumnFamilies().size()); + assertEquals(settings, qd.getSettings()); count++; } } @Test public void testCorrectDownstreamReuse() { - List queries = Lists.newArrayList(); + String query = "FOO == 'bar'"; + List settings = new ArrayList<>(); + settings.add(new IteratorSetting(20, "iter1", "iter1.class")); - QueryData query1 = new QueryData(); - query1.setQuery("FOO == 'bar'"); - query1.setSettings(Lists.newArrayList(new IteratorSetting(20, "iter1", "iter1.class"))); - query1.setRanges(Collections.singleton(Range.prefix("1"))); + QueryData original = new QueryData(); + original.setQuery(query); + original.setSettings(settings); + 
original.setRanges(Collections.singleton(Range.prefix("1"))); - queries.add(query1); - queries.add(new QueryData(query1, Collections.singleton(Range.prefix("2")))); - queries.add(new QueryData(query1, Collections.singleton(Range.prefix("3")))); + List queries = new ArrayList<>(); + queries.add(original); + queries.add(new QueryData(original).withRanges(Collections.singleton(Range.prefix("2")))); + queries.add(new QueryData(original).withRanges(Collections.singleton(Range.prefix("3")))); for (QueryData qd : queries) { qd.getSettings().add(new IteratorSetting(21, "iter2", "iter2.class")); } - Integer count = 1; - List prevSettings = null; - String prevQuery = null; - for (QueryData qd : queries) { - if (null == prevSettings) { - prevSettings = qd.getSettings(); - } else { - Assert.assertTrue(equals(prevSettings, qd.getSettings())); - } - - if (null == prevQuery) { - prevQuery = qd.getQuery(); - } else { - Assert.assertEquals(prevQuery, qd.getQuery()); - } - - Assert.assertEquals(1, qd.getRanges().size()); - - Range r = qd.getRanges().iterator().next(); - - Assert.assertEquals(count.toString(), r.getStartKey().getRow().toString()); + List expectedSettings = new ArrayList<>(); + expectedSettings.add(new IteratorSetting(20, "iter1", "iter1.class")); + expectedSettings.add(new IteratorSetting(21, "iter2", "iter2.class")); + int count = 1; + for (QueryData qd : queries) { + assertEquals(query, qd.getQuery()); + assertRange(Integer.toString(count), qd.getRanges()); + assertEquals(0, qd.getColumnFamilies().size()); + assertEquals(expectedSettings, qd.getSettings()); count++; } } - protected boolean equals(List settings1, List settings2) { - if ((null == settings1 && null != settings2) || (null != settings1 && null == settings2)) { - return false; - } - - if (settings1.size() != settings2.size()) { - return false; - } - - for (int i = 0; i < settings1.size(); i++) { - IteratorSetting s1 = settings1.get(i), s2 = settings2.get(i); - if (!(s1.getIteratorClass().equals(s2.getIteratorClass()) && s1.getName().equals(s2.getName()) && s1.getPriority() == s2.getPriority() - && s1.getOptions().equals(s2.getOptions()))) { - return false; - } - } - - return true; + private void assertRange(String expectedRow, Collection ranges) { + assertEquals(1, ranges.size()); + Range range = ranges.iterator().next(); + assertEquals(expectedRow, range.getStartKey().getRow().toString()); } - } diff --git a/web-services/query/src/test/java/datawave/webservice/query/configuration/TestBaseQueryLogic.java b/web-services/query/src/test/java/datawave/webservice/query/configuration/TestBaseQueryLogic.java index 0e550f02715..3ca745aec3d 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/configuration/TestBaseQueryLogic.java +++ b/web-services/query/src/test/java/datawave/webservice/query/configuration/TestBaseQueryLogic.java @@ -24,14 +24,15 @@ import com.google.common.collect.Sets; +import datawave.core.common.connection.AccumuloConnectionFactory.Priority; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.core.query.logic.BaseQueryLogic; +import datawave.core.query.logic.QueryLogicTransformer; +import datawave.microservice.query.Query; +import datawave.microservice.query.QueryImpl; +import datawave.security.authorization.DatawavePrincipal; +import datawave.security.authorization.ProxiedUserDetails; import datawave.webservice.common.audit.Auditor; -import datawave.webservice.common.connection.AccumuloConnectionFactory.Priority; -import datawave.webservice.query.Query; -import 
datawave.webservice.query.QueryImpl; -import datawave.webservice.query.logic.BaseQueryLogic; -import datawave.webservice.query.logic.EasyRoleManager; -import datawave.webservice.query.logic.QueryLogicTransformer; -import datawave.webservice.query.logic.RoleManager; @RunWith(PowerMockRunner.class) public class TestBaseQueryLogic { @@ -42,6 +43,9 @@ public class TestBaseQueryLogic { @Mock Query query; + @Mock + GenericQueryConfiguration config; + @Test public void testConstructor_Copy() throws Exception { // Set expectations @@ -50,21 +54,37 @@ public void testConstructor_Copy() throws Exception { expect(this.copy.getLogicName()).andReturn("logicName"); expect(this.copy.getLogicDescription()).andReturn("logicDescription"); expect(this.copy.getAuditType(null)).andReturn(Auditor.AuditType.ACTIVE); - expect(this.copy.getTableName()).andReturn("tableName"); - expect(this.copy.getMaxResults()).andReturn(Long.MAX_VALUE); - expect(this.copy.getMaxWork()).andReturn(10L); expect(this.copy.getMaxPageSize()).andReturn(25); expect(this.copy.getPageByteTrigger()).andReturn(1024L); expect(this.copy.getCollectQueryMetrics()).andReturn(false); - expect(this.copy.getConnPoolName()).andReturn("connPool1"); - expect(this.copy.getBaseIteratorPriority()).andReturn(100); - expect(this.copy.getPrincipal()).andReturn(null); - RoleManager roleManager = new EasyRoleManager(); - expect(this.copy.getRoleManager()).andReturn(roleManager); + expect(this.copy.getRequiredRoles()).andReturn(null); expect(this.copy.getSelectorExtractor()).andReturn(null); - expect(this.copy.getBypassAccumulo()).andReturn(false); - expect(this.copy.getAccumuloPassword()).andReturn(""); + expect(this.copy.getCurrentUser()).andReturn(null); + expect(this.copy.getServerUser()).andReturn(null); expect(this.copy.getResponseEnricherBuilder()).andReturn(null); + ProxiedUserDetails principal = new DatawavePrincipal(); + expect(this.copy.getCurrentUser()).andReturn(principal).anyTimes(); + + // setup expectations for GenericQueryConfig + expect(config.getQuery()).andReturn(new QueryImpl()); + expect(config.isCheckpointable()).andReturn(false); + expect(config.getAuthorizations()).andReturn(null).anyTimes(); + expect(config.getQueryString()).andReturn("FOO == 'bar'").anyTimes(); + expect(config.getBeginDate()).andReturn(null).anyTimes(); + expect(config.getEndDate()).andReturn(null).anyTimes(); + expect(config.getMaxWork()).andReturn(1L).anyTimes(); + expect(config.getBaseIteratorPriority()).andReturn(100).anyTimes(); + expect(config.getTableName()).andReturn("tableName").anyTimes(); + expect(config.getBypassAccumulo()).andReturn(false).anyTimes(); + expect(config.getAccumuloPassword()).andReturn("env:PASS").anyTimes(); + expect(config.isReduceResults()).andReturn(false).anyTimes(); + expect(config.getClient()).andReturn(null).anyTimes(); + expect(config.getQueries()).andReturn(Collections.emptyList()).anyTimes(); + expect(config.getQueriesIter()).andReturn(Collections.emptyIterator()).anyTimes(); + expect(config.getTableConsistencyLevels()).andReturn(Collections.emptyMap()).anyTimes(); + expect(config.getTableHints()).andReturn(Collections.emptyMap()).anyTimes(); + expect(config.getConnPoolName()).andReturn("connPool1"); + expect(this.copy.getConfig()).andReturn(config).anyTimes(); // Run the test PowerMock.replayAll(); diff --git a/web-services/query/src/test/java/datawave/webservice/query/interceptor/QueryMetricsEnrichmentInterceptorTest.java 
b/web-services/query/src/test/java/datawave/webservice/query/interceptor/QueryMetricsEnrichmentInterceptorTest.java index dcde8af0c33..1f8646b757c 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/interceptor/QueryMetricsEnrichmentInterceptorTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/interceptor/QueryMetricsEnrichmentInterceptorTest.java @@ -45,13 +45,13 @@ import com.google.common.io.CountingOutputStream; +import datawave.core.query.logic.BaseQueryLogic; import datawave.microservice.querymetric.BaseQueryMetric.PageMetric; import datawave.microservice.querymetric.QueryMetric; import datawave.security.util.DnUtils; import datawave.webservice.query.annotation.EnrichQueryMetrics; import datawave.webservice.query.cache.QueryCache; import datawave.webservice.query.interceptor.QueryMetricsEnrichmentInterceptor.QueryCall; -import datawave.webservice.query.logic.BaseQueryLogic; import datawave.webservice.query.metric.QueryMetricsBean; import datawave.webservice.query.runner.RunningQuery; import datawave.webservice.result.BaseQueryResponse; diff --git a/web-services/query/src/test/java/datawave/webservice/query/logic/BaseQueryLogicTest.java b/web-services/query/src/test/java/datawave/webservice/query/logic/BaseQueryLogicTest.java new file mode 100644 index 00000000000..27b0a83d7ef --- /dev/null +++ b/web-services/query/src/test/java/datawave/webservice/query/logic/BaseQueryLogicTest.java @@ -0,0 +1,101 @@ +package datawave.webservice.query.logic; + +import static org.junit.Assert.assertEquals; + +import java.util.Collections; +import java.util.Set; + +import org.apache.accumulo.core.client.AccumuloClient; +import org.apache.accumulo.core.security.Authorizations; +import org.junit.Test; + +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.core.query.logic.BaseQueryLogic; +import datawave.core.query.logic.QueryLogicTransformer; +import datawave.microservice.query.Query; +import datawave.security.authorization.DatawavePrincipal; +import datawave.webservice.common.audit.Auditor; + +public class BaseQueryLogicTest { + + @Test + public void testCopyConstructor() { + BaseQueryLogicImpl original = new BaseQueryLogicImpl(); + original.setLogicName("BaseQueryLogicImpl"); + original.setLogicDescription("Implementation of a BaseQueryLogic"); + original.setAuditType(Auditor.AuditType.PASSIVE); + original.setDnResultLimits(Collections.singletonMap("dn=user", 100L)); + original.setSystemFromResultLimits(Collections.singletonMap("SYSTEM", 100L)); + original.setMaxResults(1000L); + original.setMaxPageSize(100); + original.setPageByteTrigger(123456L); + original.setCollectQueryMetrics(false); + original.setAuthorizedDNs(Collections.singleton("dn=authorized1")); + original.setCurrentUser(new DatawavePrincipal("user")); + + BaseQueryLogicImpl copy = new BaseQueryLogicImpl(original); + assertEquals(original.getLogicName(), copy.getLogicName()); + assertEquals(original.getLogicDescription(), copy.getLogicDescription()); + assertEquals(original.getAuditType(), copy.getAuditType()); + assertEquals(original.getDnResultLimits(), copy.getDnResultLimits()); + assertEquals(original.getSystemFromResultLimits(), copy.getSystemFromResultLimits()); + assertEquals(original.getMaxResults(), copy.getMaxResults()); + assertEquals(original.getMaxPageSize(), copy.getMaxPageSize()); + assertEquals(original.getPageByteTrigger(), copy.getPageByteTrigger()); + 
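Alongside the copy-constructor checks, the reworked TestBaseQueryLogic now mocks a GenericQueryConfiguration because per-query state (table name, iterator priority, connection pool name) moved off the logic and onto its configuration. An illustrative sketch of that delegation, with deliberately simplified types:

```java
// Sketch of the delegation the reworked test mocks out: per-query state lives on
// the configuration, and the logic's getters forward to getConfig().
class ConfigSketch {
    String tableName;
    int baseIteratorPriority;
    String connPoolName;
}

abstract class LogicSketch {
    abstract ConfigSketch getConfig();

    // Formerly fields on the logic itself, now derived from the active config.
    public String getTableName() {
        return getConfig().tableName;
    }

    public int getBaseIteratorPriority() {
        return getConfig().baseIteratorPriority;
    }

    public String getConnPoolName() {
        return getConfig().connPoolName;
    }
}
```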
assertEquals(original.getCollectQueryMetrics(), copy.getCollectQueryMetrics()); + assertEquals(original.getAuthorizedDNs(), copy.getAuthorizedDNs()); + assertEquals(original.getCurrentUser(), copy.getCurrentUser()); + } + + class BaseQueryLogicImpl extends BaseQueryLogic { + + public BaseQueryLogicImpl() { + super(); + } + + public BaseQueryLogicImpl(BaseQueryLogicImpl other) { + super(other); + } + + @Override + public GenericQueryConfiguration initialize(AccumuloClient client, Query settings, Set runtimeQueryAuthorizations) throws Exception { + return null; + } + + @Override + public void setupQuery(GenericQueryConfiguration configuration) throws Exception { + + } + + @Override + public Object clone() throws CloneNotSupportedException { + return null; + } + + @Override + public AccumuloConnectionFactory.Priority getConnectionPriority() { + return null; + } + + @Override + public QueryLogicTransformer getTransformer(Query settings) { + return null; + } + + @Override + public Set getOptionalQueryParameters() { + return null; + } + + @Override + public Set getRequiredQueryParameters() { + return null; + } + + @Override + public Set getExampleQueries() { + return null; + } + } +} diff --git a/web-services/query/src/test/java/datawave/webservice/query/logic/ConfiguredQueryLogicFactoryBeanTest.java b/web-services/query/src/test/java/datawave/webservice/query/logic/ConfiguredQueryLogicFactoryBeanTest.java index 1215653bfeb..f50d1276a7a 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/logic/ConfiguredQueryLogicFactoryBeanTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/logic/ConfiguredQueryLogicFactoryBeanTest.java @@ -11,6 +11,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.TreeMap; import javax.ejb.EJBContext; @@ -28,6 +29,8 @@ import org.springframework.beans.factory.xml.XmlBeanDefinitionReader; import org.springframework.context.support.ClassPathXmlApplicationContext; +import datawave.core.query.logic.BaseQueryLogic; +import datawave.core.query.logic.QueryLogic; import datawave.security.authorization.DatawavePrincipal; import datawave.security.authorization.DatawaveUser; import datawave.security.authorization.DatawaveUser.UserType; @@ -102,8 +105,10 @@ public void testGetQueryLogic_HasRequiredRoles() throws Exception { QueryLogicFactoryConfiguration qlfc = new QueryLogicFactoryConfiguration(); qlfc.setMaxPageSize(25); qlfc.setPageByteTrigger(1024L); - this.logic.setPrincipal(altPrincipal); + this.logic.setServerUser(altPrincipal); this.logic.setLogicName(queryName); + expect(altPrincipal.getPrimaryUser()).andReturn( + new DatawaveUser(SubjectIssuerDNPair.of("CN=Poe Edgar Allan eapoe, OU=acme", ""), UserType.USER, null, null, null, 0L)); expect(this.logic.getMaxPageSize()).andReturn(25); expect(this.logic.getPageByteTrigger()).andReturn(1024L); expect(this.applicationContext.getBean(mappedQueryName)).andReturn(this.logic); @@ -134,8 +139,10 @@ public void testGetQueryLogic_propertyOverride() throws Exception { Map> rolesMap = new HashMap<>(); rolesMap.put(queryName, roles); - this.logic.setPrincipal(altPrincipal); + this.logic.setServerUser(altPrincipal); this.logic.setLogicName(queryName); + expect(altPrincipal.getPrimaryUser()).andReturn( + new DatawaveUser(SubjectIssuerDNPair.of("CN=Poe Edgar Allan eapoe, OU=acme", ""), UserType.USER, null, null, null, 0L)); expect(this.logic.getMaxPageSize()).andReturn(0); expect(this.logic.getPageByteTrigger()).andReturn(0L); this.logic.setMaxPageSize(25); diff 
--git a/web-services/query/src/test/java/datawave/webservice/query/logic/DatawaveRoleManagerTest.java b/web-services/query/src/test/java/datawave/webservice/query/logic/DatawaveRoleManagerTest.java deleted file mode 100644 index 4f35d637577..00000000000 --- a/web-services/query/src/test/java/datawave/webservice/query/logic/DatawaveRoleManagerTest.java +++ /dev/null @@ -1,139 +0,0 @@ -package datawave.webservice.query.logic; - -import java.security.Principal; -import java.util.Collection; -import java.util.HashSet; -import java.util.Set; - -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import com.google.common.collect.Lists; - -import datawave.security.authorization.DatawavePrincipal; -import datawave.security.authorization.DatawaveUser; -import datawave.security.authorization.DatawaveUser.UserType; -import datawave.security.authorization.SubjectIssuerDNPair; -import datawave.security.util.DnUtils; - -public class DatawaveRoleManagerTest { - - private DatawaveRoleManager drm; - private DatawavePrincipal datawavePrincipal; - private Principal p; - - @Before - public void beforeEachTest() { - System.setProperty(DnUtils.NPE_OU_PROPERTY, "iamnotaperson"); - System.setProperty("dw.metadatahelper.all.auths", "A,B,C,D"); - createAndSetWithSingleRole(); - } - - private void createAndSetWithSingleRole() { - - String dn = "dn1"; - String issuerDN = "idn"; - SubjectIssuerDNPair combinedDN = SubjectIssuerDNPair.of(dn, issuerDN); - Collection roles = Lists.newArrayList("REQ_ROLE_1"); - - DatawaveUser user = new DatawaveUser(combinedDN, UserType.USER, null, roles, null, System.currentTimeMillis()); - datawavePrincipal = new DatawavePrincipal(Lists.newArrayList(user)); - } - - private void createAndSetWithTwoRoles() { - - String dn = "dn1"; - String issuerDN = "idn"; - SubjectIssuerDNPair combinedDn1 = SubjectIssuerDNPair.of(dn, issuerDN); - String combinedDN = dn + "<" + issuerDN + ">"; - String dn2 = "dn2"; - String combinedDN2 = dn2 + "<" + issuerDN + ">"; - SubjectIssuerDNPair combinedDn2 = SubjectIssuerDNPair.of(dn2, issuerDN); - - DatawaveUser u1 = new DatawaveUser(combinedDn1, UserType.USER, null, getFirstRole(), null, System.currentTimeMillis()); - DatawaveUser u2 = new DatawaveUser(combinedDn2, UserType.SERVER, null, getSecondRole(), null, System.currentTimeMillis()); - - datawavePrincipal = new DatawavePrincipal(Lists.newArrayList(u1, u2)); - } - - public Set getFirstRole() { - Set datawaveRoles = new HashSet<>(); - datawaveRoles.add("REQ_ROLE_1"); - return datawaveRoles; - } - - public Set getSecondRole() { - Set datawaveRoles = new HashSet<>(); - datawaveRoles.add("REQ_ROLE_2"); - return datawaveRoles; - } - - public Set getAllRoles() { - Set datawaveRoles = new HashSet<>(); - datawaveRoles.add("REQ_ROLE_1"); - datawaveRoles.add("REQ_ROLE_2"); - return datawaveRoles; - } - - @Test - public void testEmptyConstructor() { - - drm = new DatawaveRoleManager(); - - Set gottenRoles = drm.getRequiredRoles(); - Assert.assertNull(gottenRoles); - - drm.setRequiredRoles(getFirstRole()); - gottenRoles = drm.getRequiredRoles(); - - Assert.assertTrue(gottenRoles.contains("REQ_ROLE_1")); - Assert.assertFalse(gottenRoles.contains("REQ_ROLE_2")); - } - - @Test - public void testBasicsLoadedConstructor() { - - drm = new DatawaveRoleManager(getFirstRole()); - - Set gottenRoles = drm.getRequiredRoles(); - Assert.assertTrue(gottenRoles.contains("REQ_ROLE_1")); - Assert.assertFalse(gottenRoles.contains("REQ_ROLE_2")); - } - - @Test - public void testCanRunQuery() { - - drm = new 
DatawaveRoleManager(getFirstRole()); - - // Expect false when passing in a null Principal object - boolean canRun = drm.canRunQuery(null, null); - Assert.assertFalse(canRun); - - // Modify the principal and set the required roles to null - p = datawavePrincipal; - Assert.assertNotEquals(null, p); - drm.setRequiredRoles(null); - - // This test should pass when setting requiredRoles to null - canRun = drm.canRunQuery(null, p); - Assert.assertTrue(canRun); - - // Now set up a test that requires roles to run - drm.setRequiredRoles(getFirstRole()); - canRun = drm.canRunQuery(null, p); - Assert.assertTrue(canRun); - - // Now add a second required role check - drm.setRequiredRoles(getAllRoles()); - canRun = drm.canRunQuery(null, p); - Assert.assertFalse(canRun); - - // Recreate the principal with two roles and check - createAndSetWithTwoRoles(); - p = datawavePrincipal; - drm.setRequiredRoles(getFirstRole()); - canRun = drm.canRunQuery(null, p); - Assert.assertTrue(canRun); - } -} diff --git a/web-services/query/src/test/java/datawave/webservice/query/logic/QueryLogicFactoryBeanTest.java b/web-services/query/src/test/java/datawave/webservice/query/logic/QueryLogicFactoryBeanTest.java index 06867401594..757523b6ab3 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/logic/QueryLogicFactoryBeanTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/logic/QueryLogicFactoryBeanTest.java @@ -30,6 +30,8 @@ import org.springframework.beans.factory.xml.XmlBeanDefinitionReader; import org.springframework.context.support.ClassPathXmlApplicationContext; +import datawave.core.query.logic.BaseQueryLogic; +import datawave.core.query.logic.QueryLogic; import datawave.security.authorization.DatawavePrincipal; import datawave.security.authorization.DatawaveUser; import datawave.security.authorization.DatawaveUser.UserType; @@ -103,8 +105,10 @@ public void testGetQueryLogic_HasRequiredRoles() throws Exception { QueryLogicFactoryConfiguration qlfc = new QueryLogicFactoryConfiguration(); qlfc.setMaxPageSize(25); qlfc.setPageByteTrigger(1024L); - this.logic.setPrincipal(altPrincipal); + this.logic.setCurrentUser(altPrincipal); this.logic.setLogicName(queryName); + expect(altPrincipal.getPrimaryUser()).andReturn( + new DatawaveUser(SubjectIssuerDNPair.of("CN=Poe Edgar Allan eapoe, OU=acme", ""), UserType.USER, null, null, null, 0L)); expect(this.logic.getMaxPageSize()).andReturn(25); expect(this.logic.getPageByteTrigger()).andReturn(1024L); expect(this.applicationContext.getBean(queryName)).andReturn(this.logic); @@ -135,8 +139,10 @@ public void testGetQueryLogic_propertyOverride() throws Exception { Map> rolesMap = new HashMap<>(); rolesMap.put(queryName, roles); - this.logic.setPrincipal(altPrincipal); + this.logic.setServerUser(altPrincipal); this.logic.setLogicName(queryName); + expect(altPrincipal.getPrimaryUser()).andReturn( + new DatawaveUser(SubjectIssuerDNPair.of("CN=Poe Edgar Allan eapoe, OU=acme", ""), UserType.USER, null, null, null, 0L)); expect(this.logic.getMaxPageSize()).andReturn(0); expect(this.logic.getPageByteTrigger()).andReturn(0L); this.logic.setMaxPageSize(25); diff --git a/web-services/query/src/test/java/datawave/webservice/query/logic/TestLegacyBaseQueryLogicTransformer.java b/web-services/query/src/test/java/datawave/webservice/query/logic/TestLegacyBaseQueryLogicTransformer.java index 97a648c225a..0a8a5353a79 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/logic/TestLegacyBaseQueryLogicTransformer.java +++ 
b/web-services/query/src/test/java/datawave/webservice/query/logic/TestLegacyBaseQueryLogicTransformer.java @@ -15,8 +15,9 @@ import org.powermock.api.easymock.annotation.Mock; import org.powermock.modules.junit4.PowerMockRunner; +import datawave.core.query.cache.ResultsPage; +import datawave.core.query.logic.BaseQueryLogicTransformer; import datawave.marking.MarkingFunctions; -import datawave.webservice.query.cache.ResultsPage; import datawave.webservice.query.result.event.EventBase; import datawave.webservice.result.BaseQueryResponse; diff --git a/web-services/query/src/test/java/datawave/webservice/query/logic/TestQueryLogic.java b/web-services/query/src/test/java/datawave/webservice/query/logic/TestQueryLogic.java index 9255f0c30bf..0869d891fa9 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/logic/TestQueryLogic.java +++ b/web-services/query/src/test/java/datawave/webservice/query/logic/TestQueryLogic.java @@ -6,9 +6,11 @@ import org.apache.accumulo.core.security.Authorizations; import org.junit.Ignore; -import datawave.webservice.common.connection.AccumuloConnectionFactory; -import datawave.webservice.query.Query; -import datawave.webservice.query.configuration.GenericQueryConfiguration; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.core.query.logic.BaseQueryLogic; +import datawave.core.query.logic.QueryLogicTransformer; +import datawave.microservice.query.Query; @Ignore public class TestQueryLogic extends BaseQueryLogic { diff --git a/web-services/query/src/test/java/datawave/webservice/query/logic/composite/CompositeQueryLogicTest.java b/web-services/query/src/test/java/datawave/webservice/query/logic/composite/CompositeQueryLogicTest.java index 805d4190fe4..e1a773b6dfe 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/logic/composite/CompositeQueryLogicTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/logic/composite/CompositeQueryLogicTest.java @@ -27,30 +27,32 @@ import com.google.common.collect.HashMultimap; +import datawave.core.common.connection.AccumuloConnectionFactory.Priority; +import datawave.core.query.cache.ResultsPage; +import datawave.core.query.cache.ResultsPage.Status; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.core.query.exception.EmptyObjectException; +import datawave.core.query.logic.BaseQueryLogic; +import datawave.core.query.logic.BaseQueryLogicTransformer; +import datawave.core.query.logic.QueryLogic; +import datawave.core.query.logic.QueryLogicTransformer; +import datawave.core.query.logic.composite.CompositeLogicException; +import datawave.core.query.logic.composite.CompositeQueryLogic; +import datawave.core.query.logic.filtered.FilteredQueryLogic; import datawave.marking.MarkingFunctions; +import datawave.microservice.query.Query; +import datawave.microservice.query.QueryImpl; import datawave.security.authorization.AuthorizationException; import datawave.security.authorization.DatawavePrincipal; import datawave.security.authorization.DatawaveUser; import datawave.security.authorization.DatawaveUser.UserType; +import datawave.security.authorization.ProxiedUserDetails; import datawave.security.authorization.SubjectIssuerDNPair; import datawave.security.authorization.UserOperations; import datawave.security.util.DnUtils; import datawave.user.AuthorizationsListBase; import datawave.user.DefaultAuthorizationsList; -import 
datawave.webservice.common.connection.AccumuloConnectionFactory.Priority; -import datawave.webservice.query.Query; -import datawave.webservice.query.QueryImpl; -import datawave.webservice.query.cache.ResultsPage; -import datawave.webservice.query.cache.ResultsPage.Status; -import datawave.webservice.query.configuration.GenericQueryConfiguration; -import datawave.webservice.query.exception.EmptyObjectException; import datawave.webservice.query.exception.QueryException; -import datawave.webservice.query.logic.BaseQueryLogic; -import datawave.webservice.query.logic.BaseQueryLogicTransformer; -import datawave.webservice.query.logic.DatawaveRoleManager; -import datawave.webservice.query.logic.EasyRoleManager; -import datawave.webservice.query.logic.QueryLogic; -import datawave.webservice.query.logic.QueryLogicTransformer; import datawave.webservice.query.result.EdgeQueryResponseBase; import datawave.webservice.query.result.edge.EdgeBase; import datawave.webservice.result.BaseQueryResponse; @@ -346,7 +348,7 @@ public Set getExampleQueries() { public static class TestUserOperations implements UserOperations { @Override - public AuthorizationsListBase listEffectiveAuthorizations(Object callerObject) throws AuthorizationException { + public AuthorizationsListBase listEffectiveAuthorizations(ProxiedUserDetails callerObject) throws AuthorizationException { DatawavePrincipal p = (DatawavePrincipal) callerObject; DefaultAuthorizationsList authList = new DefaultAuthorizationsList(); DatawaveUser primaryUser = p.getPrimaryUser(); @@ -361,7 +363,7 @@ public AuthorizationsListBase listEffectiveAuthorizations(Object callerObject) t } @Override - public GenericResponse flushCachedCredentials(Object callerObject) { + public GenericResponse flushCachedCredentials(ProxiedUserDetails callerObject) { return new GenericResponse<>(); } } @@ -454,6 +456,21 @@ public Set getExampleQueries() { } + public static class TestFilteredQueryLogic extends FilteredQueryLogic { + private boolean filtered; + + public TestFilteredQueryLogic(boolean filtered) { + QueryLogic delegate = new TestQueryLogic(); + setDelegate(delegate); + this.filtered = filtered; + } + + @Override + public boolean isFiltered() { + return filtered; + } + } + @Before public void setup() { System.setProperty(DnUtils.NPE_OU_PROPERTY, "iamnotaperson"); @@ -477,7 +494,7 @@ public void testClone() throws Exception { c.setQueryLogics(logics); c = (CompositeQueryLogic) c.clone(); - c.setPrincipal(principal); + c.setCurrentUser(principal); c.initialize(null, settings, Collections.singleton(auths)); c.getTransformer(settings); @@ -501,7 +518,7 @@ public void testInitializeOKWithSameQueryLogicAndTableNames() throws Exception { CompositeQueryLogic c = new CompositeQueryLogic(); c.setQueryLogics(logics); - c.setPrincipal(principal); + c.setCurrentUser(principal); c.initialize(null, settings, Collections.singleton(auths)); c.getTransformer(settings); @@ -535,7 +552,7 @@ public String getTableName() { CompositeQueryLogic c = new CompositeQueryLogic(); c.setQueryLogics(logics); - c.setPrincipal(principal); + c.setCurrentUser(principal); c.initialize(null, settings, Collections.singleton(auths)); c.getTransformer(settings); @@ -560,7 +577,7 @@ public void testInitialize() throws Exception { CompositeQueryLogic c = new CompositeQueryLogic(); c.setQueryLogics(logics); - c.setPrincipal(principal); + c.setCurrentUser(principal); c.initialize(null, settings, Collections.singleton(auths)); c.getTransformer(settings); @@ -591,12 +608,67 @@ public GenericQueryConfiguration 
initialize(AccumuloClient connection, Query set CompositeQueryLogic c = new CompositeQueryLogic(); c.setQueryLogics(logics); - c.setPrincipal(principal); + c.setCurrentUser(principal); c.initialize(null, settings, Collections.singleton(auths)); Assert.assertEquals(1, c.getInitializedLogics().size()); } + @Test + public void testInitializeOKWithFilter() throws Exception { + + Map> logics = new HashMap<>(); + logics.put("TestQueryLogic", new TestQueryLogic()); + logics.put("TestQueryLogic2", new TestFilteredQueryLogic(true)); + + QueryImpl settings = new QueryImpl(); + settings.setPagesize(100); + settings.setQueryAuthorizations(auths.toString()); + settings.setQuery("FOO == 'BAR'"); + settings.setParameters(new HashSet<>()); + settings.setId(UUID.randomUUID()); + + CompositeQueryLogic c = new CompositeQueryLogic(); + c.setQueryLogics(logics); + + c.setCurrentUser(principal); + c.initialize(null, settings, Collections.singleton(auths)); + + Assert.assertEquals(1, c.getInitializedLogics().size()); + // ensure the filtered query logic is actually dropped + Assert.assertEquals(0, c.getUninitializedLogics().size()); + } + + @Test(expected = CompositeLogicException.class) + public void testInitializeNotOKWithFilter() throws Exception { + + Map> logics = new HashMap<>(); + logics.put("TestQueryLogic", new TestQueryLogic() { + @Override + public GenericQueryConfiguration initialize(AccumuloClient connection, Query settings, Set runtimeQueryAuthorizations) + throws Exception { + throw new Exception("initialize failed"); + } + }); + logics.put("TestQueryLogic2", new TestFilteredQueryLogic(true)); + + QueryImpl settings = new QueryImpl(); + settings.setPagesize(100); + settings.setQueryAuthorizations(auths.toString()); + settings.setQuery("FOO == 'BAR'"); + settings.setParameters(new HashSet<>()); + settings.setId(UUID.randomUUID()); + + CompositeQueryLogic c = new CompositeQueryLogic(); + c.setQueryLogics(logics); + + // testing that we fail despite allMustInitialize to false because the filtered logic does not count + c.setAllMustInitialize(false); + + c.setCurrentUser(principal); + c.initialize(null, settings, Collections.singleton(auths)); + } + @Test(expected = CompositeLogicException.class) public void testInitializeNotOKWithFailure() throws Exception { @@ -621,7 +693,7 @@ public GenericQueryConfiguration initialize(AccumuloClient connection, Query set c.setAllMustInitialize(true); c.setQueryLogics(logics); - c.setPrincipal(principal); + c.setCurrentUser(principal); c.initialize(null, settings, Collections.singleton(auths)); } @@ -655,7 +727,7 @@ public GenericQueryConfiguration initialize(AccumuloClient connection, Query set CompositeQueryLogic c = new CompositeQueryLogic(); c.setQueryLogics(logics); - c.setPrincipal(principal); + c.setCurrentUser(principal); c.initialize(null, settings, Collections.singleton(auths)); } @@ -690,7 +762,7 @@ public GenericQueryConfiguration initialize(AccumuloClient connection, Query set c.setAllMustInitialize(true); c.setQueryLogics(logics); - c.setPrincipal(principal); + c.setCurrentUser(principal); c.initialize(null, settings, Collections.singleton(auths)); c.getTransformer(settings); @@ -727,13 +799,13 @@ public GenericQueryConfiguration initialize(AccumuloClient connection, Query set c.setAllMustInitialize(true); c.setQueryLogics(logics); - c.setPrincipal(principal); + c.setCurrentUser(principal); try { c.initialize(null, settings, Collections.singleton(auths)); c.getTransformer(settings); } catch (CompositeLogicException e) { - Assert.assertEquals("query 
initialize failed", e.getCause().getCause().getMessage()); + Assert.assertEquals("datawave.webservice.query.exception.QueryException: query initialize failed", e.getCause().getCause().getMessage()); } } @@ -754,7 +826,7 @@ public void testInitializeWithDifferentResponseTypes() throws Exception { CompositeQueryLogic c = new CompositeQueryLogic(); c.setQueryLogics(logics); - c.setPrincipal(principal); + c.setCurrentUser(principal); c.initialize(null, settings, Collections.singleton(auths)); c.getTransformer(settings); @@ -777,7 +849,7 @@ public void testCloseWithNoSetup() throws Exception { CompositeQueryLogic c = new CompositeQueryLogic(); c.setQueryLogics(logics); - c.setPrincipal(principal); + c.setCurrentUser(principal); c.initialize(null, settings, Collections.singleton(auths)); c.getTransformer(settings); @@ -817,7 +889,7 @@ public void testQueryLogic() throws Exception { * RunningQuery.setupConnection() */ c.setQueryLogics(logics); - c.setPrincipal(principal); + c.setCurrentUser(principal); c.initialize(null, settings, Collections.singleton(auths)); c.setupQuery(null); TransformIterator iter = c.getTransformIterator(settings); @@ -881,7 +953,7 @@ public void testQueryLogicWithEmptyEvent() throws Exception { * RunningQuery.setupConnection() */ c.setQueryLogics(logics); - c.setPrincipal(principal); + c.setCurrentUser(principal); c.initialize((AccumuloClient) null, (Query) settings, Collections.singleton(auths)); c.setupQuery(null); TransformIterator iter = c.getTransformIterator((Query) settings); @@ -945,7 +1017,7 @@ public void testQueryLogicShortCircuitExecution() throws Exception { * RunningQuery.setupConnection() */ c.setQueryLogics(logics); - c.setPrincipal(principal); + c.setCurrentUser(principal); c.setShortCircuitExecution(true); c.initialize((AccumuloClient) null, (Query) settings, Collections.singleton(auths)); c.setupQuery(null); @@ -1013,7 +1085,7 @@ public void testQueryLogicShortCircuitExecutionWithEmptyEvent() throws Exception * RunningQuery.setupConnection() */ c.setQueryLogics(logics); - c.setPrincipal(principal); + c.setCurrentUser(principal); c.setShortCircuitExecution(true); c.initialize((AccumuloClient) null, (Query) settings, Collections.singleton(auths)); c.setupQuery(null); @@ -1078,7 +1150,7 @@ public void testQueryLogicShortCircuitExecutionHitsSecondLogic() throws Exceptio * RunningQuery.setupConnection() */ c.setQueryLogics(logics); - c.setPrincipal(principal); + c.setCurrentUser(principal); c.setShortCircuitExecution(true); c.initialize((AccumuloClient) null, (Query) settings, Collections.singleton(auths)); c.setupQuery(null); @@ -1147,7 +1219,7 @@ public void testQueryLogicWithNextFailure() throws Exception { * RunningQuery.setupConnection() */ c.setQueryLogics(logics); - c.setPrincipal(principal); + c.setCurrentUser(principal); c.initialize(null, settings, Collections.singleton(auths)); c.setupQuery(null); TransformIterator iter = c.getTransformIterator(settings); @@ -1198,7 +1270,152 @@ public void testQueryLogicWithMaxResultsOverride() throws Exception { * RunningQuery.setupConnection() */ c.setQueryLogics(logics); - c.setPrincipal(principal); + c.setCurrentUser(principal); + c.initialize(null, settings, Collections.singleton(auths)); + c.setupQuery(null); + TransformIterator iter = c.getTransformIterator(settings); + + /** + * RunningQuery.next() - iterate over results coming from tablet server through the TransformIterator to turn them into the objects. 
+ */ + List<Object> results = new ArrayList<>(); + while (iter.hasNext()) { + Object o = iter.next(); + if (null == o) + break; + Assert.assertTrue(o instanceof TestQueryResponse); + results.add(o); + } + Assert.assertEquals(5, results.size()); + ResultsPage page = new ResultsPage(results, Status.COMPLETE); + + /** + * QueryExecutorBean.next() - transform list of objects into JAXB response + */ + TestQueryResponseList response = (TestQueryResponseList) c.getEnrichedTransformer((Query) settings).createResponse(page); + Assert.assertEquals(5, response.getResponses().size()); + for (TestQueryResponse r : response.getResponses()) { + Assert.assertNotNull(r); + } + + c.close(); + + } + + @Test + // testQueryLogic with max.results.override and DN result limits set + public void testQueryLogicWithMaxResultsOverrideWithDNOverride() throws Exception { + Map<String,QueryLogic<?>> logics = new HashMap<>(); + TestQueryLogic logic1 = new TestQueryLogic(); + TestQueryLogic2 logic2 = new TestQueryLogic2(); + logics.put("TestQueryLogic", logic1); + logics.put("TestQueryLogic2", logic2); + + logic1.getData().put(key1, value1); + logic1.getData().put(key2, value2); + logic2.getData().put(key3, value3); + logic2.getData().put(key4, value4); + logic1.getData().put(key5, value5); + logic1.getData().put(key6, value6); + logic2.getData().put(key7, value7); + logic2.getData().put(key8, value8); + + QueryImpl settings = new QueryImpl(); + settings.setPagesize(100); + settings.setQueryAuthorizations(auths.toString()); + settings.setQuery("FOO == 'BAR'"); + settings.setParameters(new HashSet<>()); + settings.setId(UUID.randomUUID()); + settings.setDnList(Arrays.asList(principal.getUserDN().subjectDN())); + + CompositeQueryLogic c = new CompositeQueryLogic(); + // max.results.override is set to -1 when it is not passed in as it is an optional parameter + logic1.setMaxResults(2); // it can return 4, so this will cap it at 3 (1 more than max) + logic2.setMaxResults(1); // it can return 4, so this will cap it at 2 (1 more than max) + + // just FYI, setting up DNResultLimits for the composite query logic doesn't do anything + // c.setDnResultLimits(Map.of(principal.getUserDN().subjectDN(), 3L)); + + // setting the DNResultLimits for each logic configured for the composite + logic1.setDnResultLimits(Map.of(principal.getUserDN().subjectDN(), 2L)); + logic2.setDnResultLimits(Map.of(principal.getUserDN().subjectDN(), 3L)); + /** + * RunningQuery.setupConnection() + */ + c.setQueryLogics(logics); + c.setCurrentUser(principal); + c.initialize(null, settings, Collections.singleton(auths)); + c.setupQuery(null); + TransformIterator iter = c.getTransformIterator(settings); + + /** + * RunningQuery.next() - iterate over results coming from tablet server through the TransformIterator to turn them into the objects.
+ */ + List<Object> results = new ArrayList<>(); + while (iter.hasNext()) { + Object o = iter.next(); + if (null == o) + break; + Assert.assertTrue(o instanceof TestQueryResponse); + results.add(o); + } + Assert.assertEquals(7, results.size()); + ResultsPage page = new ResultsPage(results, Status.COMPLETE); + + /** + * QueryExecutorBean.next() - transform list of objects into JAXB response + */ + TestQueryResponseList response = (TestQueryResponseList) c.getEnrichedTransformer((Query) settings).createResponse(page); + Assert.assertEquals(7, response.getResponses().size()); + for (TestQueryResponse r : response.getResponses()) { + Assert.assertNotNull(r); + } + + c.close(); + + } + + @Test + // testQueryLogic with max.results.override set and non-matching per-DN overrides + public void testQueryLogicWithMaxResultsOverrideWithDNOverrideNonMatchingDN() throws Exception { + Map<String,QueryLogic<?>> logics = new HashMap<>(); + TestQueryLogic logic1 = new TestQueryLogic(); + TestQueryLogic2 logic2 = new TestQueryLogic2(); + logics.put("TestQueryLogic", logic1); + logics.put("TestQueryLogic2", logic2); + + logic1.getData().put(key1, value1); + logic1.getData().put(key2, value2); + logic2.getData().put(key3, value3); + logic2.getData().put(key4, value4); + logic1.getData().put(key5, value5); + logic1.getData().put(key6, value6); + logic2.getData().put(key7, value7); + logic2.getData().put(key8, value8); + + QueryImpl settings = new QueryImpl(); + settings.setPagesize(100); + settings.setQueryAuthorizations(auths.toString()); + settings.setQuery("FOO == 'BAR'"); + settings.setParameters(new HashSet<>()); + settings.setId(UUID.randomUUID()); + settings.setDnList(Arrays.asList(principal.getUserDN().subjectDN())); + + CompositeQueryLogic c = new CompositeQueryLogic(); + // max.results.override is set to -1 when it is not passed in, as it is an optional parameter + logic1.setMaxResults(2); // it can return 4, so this will cap it at 3 (1 more than max) + logic2.setMaxResults(1); // it can return 4, so this will cap it at 2 (1 more than max) + + // setting DnResultLimits on the composite itself would do nothing; the per-logic limits below use DNs that do not match the query's DN list, so the plain maxResults caps apply + // c.setDnResultLimits(Map.of(principal.getUserDN().toString(), 3L)); + + logic1.setDnResultLimits(Map.of(principal.getUserDN().subjectDN() + "foo", 2L)); + logic2.setDnResultLimits(Map.of(principal.getUserDN().subjectDN() + "bar", 3L)); + /** + * RunningQuery.setupConnection() + */ + c.setQueryLogics(logics); + c.setCurrentUser(principal); c.initialize(null, settings, Collections.singleton(auths)); c.setupQuery(null); TransformIterator iter = 
c.getTransformIterator(settings); @@ -1405,20 +1622,16 @@ public void testCanRunQueryLogic() throws Exception { TestQueryLogic logic1 = new TestQueryLogic(); HashSet roles = new HashSet<>(); roles.add("TESTROLE"); - logic1.setRoleManager(new DatawaveRoleManager(roles)); + logic1.setRequiredRoles(roles); TestQueryLogic2 logic2 = new TestQueryLogic2(); - logic2.setRoleManager(new EasyRoleManager()); + logic2.setRequiredRoles(Collections.emptySet()); logics.put("TestQueryLogic", logic1); logics.put("TestQueryLogic2", logic2); CompositeQueryLogic c = new CompositeQueryLogic(); c.setQueryLogics(logics); - DatawaveUser u = new DatawaveUser(SubjectIssuerDNPair.of("CN=Other User Name ouser, OU=acme", "CN=ca, OU=acme"), UserType.USER, null, - Collections.singleton("TESTROLE"), null, 0L); - DatawavePrincipal p = new DatawavePrincipal(Collections.singletonList(u)); - - Assert.assertTrue(c.canRunQuery(p)); + Assert.assertTrue(c.canRunQuery(Collections.singleton("TESTROLE"))); Assert.assertEquals(2, c.getQueryLogics().size()); } @@ -1428,22 +1641,18 @@ public void testCanRunQueryLogic2() throws Exception { TestQueryLogic logic1 = new TestQueryLogic(); HashSet roles = new HashSet<>(); roles.add("TESTROLE"); - logic1.setRoleManager(new DatawaveRoleManager(roles)); + logic1.setRequiredRoles(roles); TestQueryLogic2 logic2 = new TestQueryLogic2(); HashSet roles2 = new HashSet<>(); roles2.add("NONTESTROLE"); - logic2.setRoleManager(new DatawaveRoleManager(roles2)); + logic2.setRequiredRoles(roles2); logics.put("TestQueryLogic", logic1); logics.put("TestQueryLogic2", logic2); CompositeQueryLogic c = new CompositeQueryLogic(); c.setQueryLogics(logics); - DatawaveUser u = new DatawaveUser(SubjectIssuerDNPair.of("CN=Other User Name ouser, OU=acme", "CN=ca, OU=acme"), UserType.USER, null, - Collections.singleton("TESTROLE"), null, 0L); - DatawavePrincipal p = new DatawavePrincipal(Collections.singletonList(u)); - - Assert.assertTrue(c.canRunQuery(p)); + Assert.assertTrue(c.canRunQuery(Collections.singleton("TESTROLE"))); Assert.assertEquals(1, c.getQueryLogics().size()); } @@ -1453,22 +1662,18 @@ public void testCannotRunQueryLogic2() throws Exception { TestQueryLogic logic1 = new TestQueryLogic(); HashSet roles = new HashSet<>(); roles.add("NONTESTROLE"); - logic1.setRoleManager(new DatawaveRoleManager(roles)); + logic1.setRequiredRoles(roles); TestQueryLogic2 logic2 = new TestQueryLogic2(); HashSet roles2 = new HashSet<>(); roles2.add("NONTESTROLE"); - logic2.setRoleManager(new DatawaveRoleManager(roles2)); + logic2.setRequiredRoles(roles2); logics.put("TestQueryLogic", logic1); logics.put("TestQueryLogic2", logic2); CompositeQueryLogic c = new CompositeQueryLogic(); c.setQueryLogics(logics); - DatawaveUser u = new DatawaveUser(SubjectIssuerDNPair.of("CN=Other User Name ouser, OU=acme", "CN=ca, OU=acme"), UserType.USER, null, - Collections.singleton("TESTROLE"), null, 0L); - DatawavePrincipal p = new DatawavePrincipal(Collections.singletonList(u)); - - Assert.assertFalse(c.canRunQuery(p)); + Assert.assertFalse(c.canRunQuery(Collections.singleton("TESTROLE"))); Assert.assertEquals(0, c.getQueryLogics().size()); } @@ -1531,7 +1736,7 @@ public void testAuthorizationsUpdate() throws Exception { * RunningQuery.setupConnection() */ c.setQueryLogics(logics); - c.setPrincipal(principal); + c.setCurrentUser(principal); c.initialize(null, settings, Collections.singleton(auths)); c.setupQuery(null); TransformIterator iter = c.getTransformIterator(settings); diff --git 
a/web-services/query/src/test/java/datawave/webservice/query/logic/filtered/FilteredQueryLogicTest.java b/web-services/query/src/test/java/datawave/webservice/query/logic/filtered/FilteredQueryLogicTest.java index e58770424d3..98ffa3d7385 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/logic/filtered/FilteredQueryLogicTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/logic/filtered/FilteredQueryLogicTest.java @@ -5,17 +5,18 @@ import java.util.Set; import org.apache.accumulo.core.security.Authorizations; -import org.easymock.EasyMock; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import org.powermock.api.easymock.PowerMock; +import org.mockito.Mockito; -import datawave.webservice.query.Query; -import datawave.webservice.query.QueryImpl; -import datawave.webservice.query.configuration.GenericQueryConfiguration; -import datawave.webservice.query.logic.QueryLogic; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.core.query.logic.QueryLogic; +import datawave.core.query.logic.filtered.FilteredQueryLogic; +import datawave.core.query.logic.filtered.QueryLogicFilterByAuth; +import datawave.microservice.query.Query; +import datawave.microservice.query.QueryImpl; public class FilteredQueryLogicTest { @@ -24,7 +25,7 @@ public class FilteredQueryLogicTest { @Before public void setup() { - delegate = PowerMock.createMock(QueryLogic.class); + delegate = Mockito.mock(QueryLogic.class); logic = new FilteredQueryLogic(); logic.setDelegate(delegate); logic.setFilter(new QueryLogicFilterByAuth("FOO|BAR")); @@ -32,7 +33,7 @@ public void setup() { @After public void cleanup() { - PowerMock.resetAll(); + Mockito.reset(); } @Test @@ -40,14 +41,12 @@ public void testFiltered() throws Exception { Query settings = new QueryImpl(); Set auths = Collections.singleton(new Authorizations("FILTERME")); - PowerMock.replayAll(); GenericQueryConfiguration config = logic.initialize(null, settings, auths); logic.setupQuery(config); Iterator it = logic.iterator(); Assert.assertFalse(it.hasNext()); String plan = logic.getPlan(null, settings, auths, true, true); Assert.assertEquals("", plan); - PowerMock.verifyAll(); } @Test @@ -56,12 +55,11 @@ public void testNotFiltered() throws Exception { Set auths = Collections.singleton(new Authorizations("FOO")); GenericQueryConfiguration config = new GenericQueryConfiguration() {}; - EasyMock.expect(delegate.initialize(null, settings, auths)).andReturn(config); + Mockito.when(delegate.initialize(null, settings, auths)).thenReturn(config); delegate.setupQuery(config); - EasyMock.expect(delegate.iterator()).andReturn(Collections.singleton(new Object()).iterator()); - EasyMock.expect(delegate.getPlan(null, settings, auths, true, true)).andReturn("a plan"); + Mockito.when(delegate.iterator()).thenReturn(Collections.singleton(new Object()).iterator()); + Mockito.when(delegate.getPlan(null, settings, auths, true, true)).thenReturn("a plan"); - PowerMock.replayAll(); logic.initialize(null, new QueryImpl(), Collections.singleton(new Authorizations("FOO"))); logic.setupQuery(config); Iterator it = logic.iterator(); @@ -70,6 +68,5 @@ public void testNotFiltered() throws Exception { Assert.assertFalse(it.hasNext()); String plan = logic.getPlan(null, settings, auths, true, true); Assert.assertEquals("a plan", plan); - PowerMock.verifyAll(); } } diff --git a/web-services/query/src/test/java/datawave/webservice/query/logic/filtered/QueryLogicFilterByAuthTest.java 
b/web-services/query/src/test/java/datawave/webservice/query/logic/filtered/QueryLogicFilterByAuthTest.java index 80f4305191e..47ce96a8b5b 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/logic/filtered/QueryLogicFilterByAuthTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/logic/filtered/QueryLogicFilterByAuthTest.java @@ -9,6 +9,8 @@ import org.apache.accumulo.core.security.Authorizations; import org.junit.Test; +import datawave.core.query.logic.filtered.QueryLogicFilterByAuth; + public class QueryLogicFilterByAuthTest { @Test public void testDefaults() { diff --git a/web-services/query/src/test/java/datawave/webservice/query/logic/filtered/QueryLogicFilterByParameterTest.java b/web-services/query/src/test/java/datawave/webservice/query/logic/filtered/QueryLogicFilterByParameterTest.java index 33da980f9f9..53f86d087f2 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/logic/filtered/QueryLogicFilterByParameterTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/logic/filtered/QueryLogicFilterByParameterTest.java @@ -5,7 +5,8 @@ import org.junit.Test; -import datawave.webservice.query.QueryImpl; +import datawave.core.query.logic.filtered.QueryLogicFilterByParameter; +import datawave.microservice.query.QueryImpl; public class QueryLogicFilterByParameterTest { @Test diff --git a/web-services/query/src/test/java/datawave/webservice/query/runner/ExtendedQueryExecutorBeanTest.java b/web-services/query/src/test/java/datawave/webservice/query/runner/ExtendedQueryExecutorBeanTest.java index 8641025e46c..7782e671a5a 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/runner/ExtendedQueryExecutorBeanTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/runner/ExtendedQueryExecutorBeanTest.java @@ -1,5 +1,6 @@ package datawave.webservice.query.runner; +import static org.easymock.EasyMock.anyObject; import static org.easymock.EasyMock.eq; import static org.easymock.EasyMock.expect; import static org.easymock.EasyMock.expectLastCall; @@ -20,6 +21,7 @@ import java.util.Collections; import java.util.Date; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -64,8 +66,23 @@ import com.google.common.collect.Multimap; import com.google.common.collect.Sets; +import datawave.core.common.audit.PrivateAuditConstants; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.common.connection.AccumuloConnectionFactory.Priority; +import datawave.core.query.cache.ResultsPage; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.core.query.logic.BaseQueryLogic; +import datawave.core.query.logic.QueryLogic; +import datawave.core.query.logic.QueryLogicFactory; +import datawave.core.query.logic.QueryLogicTransformer; import datawave.marking.ColumnVisibilitySecurityMarking; import datawave.marking.SecurityMarking; +import datawave.microservice.query.DefaultQueryParameters; +import datawave.microservice.query.Query; +import datawave.microservice.query.QueryImpl; +import datawave.microservice.query.QueryParameters; +import datawave.microservice.query.QueryPersistence; +import datawave.microservice.query.config.QueryExpirationProperties; import datawave.microservice.querymetric.QueryMetric; import datawave.microservice.querymetric.QueryMetricFactory; import datawave.microservice.querymetric.QueryMetricFactoryImpl; @@ -73,50 +90,33 @@ import 
datawave.security.authorization.DatawavePrincipal; import datawave.security.authorization.DatawaveUser; import datawave.security.authorization.SubjectIssuerDNPair; -import datawave.security.authorization.UserOperations; +import datawave.security.authorization.remote.RemoteUserOperationsImpl; import datawave.security.user.UserOperationsBean; import datawave.security.util.WSAuthorizationsUtil; import datawave.webservice.common.audit.AuditBean; import datawave.webservice.common.audit.AuditParameters; import datawave.webservice.common.audit.Auditor.AuditType; -import datawave.webservice.common.audit.PrivateAuditConstants; -import datawave.webservice.common.connection.AccumuloConnectionFactory; -import datawave.webservice.common.connection.AccumuloConnectionFactory.Priority; import datawave.webservice.common.exception.BadRequestException; import datawave.webservice.common.exception.DatawaveWebApplicationException; import datawave.webservice.common.exception.NoResultsException; -import datawave.webservice.query.Query; -import datawave.webservice.query.QueryImpl; -import datawave.webservice.query.QueryParameters; -import datawave.webservice.query.QueryParametersImpl; -import datawave.webservice.query.QueryPersistence; import datawave.webservice.query.cache.ClosedQueryCache; import datawave.webservice.query.cache.CreatedQueryLogicCacheBean; import datawave.webservice.query.cache.QueryCache; -import datawave.webservice.query.cache.QueryExpirationConfiguration; import datawave.webservice.query.cache.QueryTraceCache; import datawave.webservice.query.cache.QueryTraceCache.CacheListener; import datawave.webservice.query.cache.QueryTraceCache.PatternWrapper; -import datawave.webservice.query.cache.ResultsPage; -import datawave.webservice.query.configuration.GenericQueryConfiguration; import datawave.webservice.query.configuration.LookupUUIDConfiguration; import datawave.webservice.query.exception.BadRequestQueryException; import datawave.webservice.query.exception.DatawaveErrorCode; import datawave.webservice.query.exception.NoResultsQueryException; import datawave.webservice.query.exception.QueryException; import datawave.webservice.query.factory.Persister; -import datawave.webservice.query.logic.BaseQueryLogic; -import datawave.webservice.query.logic.DatawaveRoleManager; -import datawave.webservice.query.logic.EasyRoleManager; -import datawave.webservice.query.logic.QueryLogic; -import datawave.webservice.query.logic.QueryLogicFactory; import datawave.webservice.query.logic.QueryLogicFactoryImpl; -import datawave.webservice.query.logic.QueryLogicTransformer; -import datawave.webservice.query.logic.RoleManager; import datawave.webservice.query.metric.QueryMetricsBean; import datawave.webservice.query.result.event.ResponseObjectFactory; import datawave.webservice.query.util.GetUUIDCriteria; import datawave.webservice.query.util.LookupUUIDUtil; +import datawave.webservice.query.util.MapUtils; import datawave.webservice.query.util.QueryUncaughtExceptionHandler; import datawave.webservice.result.BaseQueryResponse; import datawave.webservice.result.DefaultEventQueryResponse; @@ -230,7 +230,7 @@ public class ExtendedQueryExecutorBeanTest { @Mock UserOperationsBean userOperations; - QueryExpirationConfiguration queryExpirationConf; + QueryExpirationProperties queryExpirationConf; @BeforeClass public static void setup() throws Exception {} @@ -240,10 +240,10 @@ public void setupBefore() throws Exception { queryLogic2 = PowerMock.createMock(QuerySyntaxParserQueryLogic.class); - queryExpirationConf = new 
QueryExpirationConfiguration(); - queryExpirationConf.setPageSizeShortCircuitCheckTime(45); - queryExpirationConf.setPageShortCircuitTimeout(58); - queryExpirationConf.setCallTime(60); + queryExpirationConf = new QueryExpirationProperties(); + queryExpirationConf.setShortCircuitCheckTime(45); + queryExpirationConf.setShortCircuitTimeout(58); + queryExpirationConf.setIdleTimeout(60); } @SuppressWarnings({"rawtypes", "unchecked"}) @@ -371,6 +371,8 @@ public void testAdminCancel_LookupAccumuloQuery() throws Exception { expect(this.query.getPageTimeout()).andReturn(-1).anyTimes(); expect(this.query.getExpirationDate()).andReturn(null).anyTimes(); expect(this.query.getParameters()).andReturn((Set) Collections.emptySet()).anyTimes(); + expect(this.query.findParameter(RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES)) + .andReturn(new QueryImpl.Parameter(RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES, "true")).anyTimes(); expect(context.getCallerPrincipal()).andReturn(principal); expect(this.queryLogicFactory.getQueryLogic("ql1", principal)).andReturn((QueryLogic) this.queryLogic1); expect(this.queryLogic1.getConnectionPriority()).andReturn(Priority.NORMAL); @@ -394,8 +396,8 @@ public void testAdminCancel_LookupAccumuloQuery() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); setInternalState(connectionRequestBean, EJBContext.class, context); setInternalState(subject, AccumuloConnectionRequestBean.class, connectionRequestBean); @@ -472,13 +474,18 @@ public void testAdminClose_NullTupleReturnedAndQueryExceptionThrown() throws Exc public void testCancel_HappyPath() throws Exception { // Set local test input String userName = "userName"; + String userSid = "userSid"; UUID queryId = UUID.randomUUID(); // Set expectations of the create logic expect(this.connectionRequestBean.cancelConnectionRequest(queryId.toString())).andReturn(false); expect(this.context.getCallerPrincipal()).andReturn(this.principal).anyTimes(); expect(this.principal.getName()).andReturn(userName); - expect(this.qlCache.pollIfOwnedBy(queryId.toString(), userName)).andReturn(this.tuple); + expect(this.principal.getUserDN()).andReturn(SubjectIssuerDNPair.of(userName)); + expect(this.principal.getDNs()).andReturn(new String[] {userName}); + expect(this.principal.getShortName()).andReturn(userSid); + expect(this.principal.getProxyServers()).andReturn(new ArrayList<>(0)).anyTimes(); + expect(this.qlCache.pollIfOwnedBy(queryId.toString(), userSid)).andReturn(this.tuple); this.closedCache.remove(queryId.toString()); expect(this.tuple.getFirst()).andReturn((QueryLogic) this.queryLogic1); this.queryLogic1.close(); @@ -516,7 +523,10 @@ public void testCancel_NullTupleReturnedAndQueryExceptionThrown() throws Excepti expect(this.connectionRequestBean.cancelConnectionRequest(queryId.toString())).andReturn(false); expect(this.context.getCallerPrincipal()).andReturn(this.principal).times(2); expect(this.principal.getName()).andReturn(userName); - 
expect(this.qlCache.pollIfOwnedBy(queryId.toString(), userName)).andReturn(null); + expect(this.principal.getUserDN()).andReturn(SubjectIssuerDNPair.of(userName)); + expect(this.principal.getDNs()).andReturn(new String[] {userName}); + expect(this.principal.getShortName()).andReturn(userSid); + expect(this.qlCache.pollIfOwnedBy(queryId.toString(), userSid)).andReturn(null); expect(this.closedCache.exists(queryId.toString())).andReturn(false); expect(this.principal.getName()).andReturn(userName); expect(this.principal.getShortName()).andReturn(userSid); @@ -559,9 +569,11 @@ public void testCancel_RunningQueryFoundInCache() throws Exception { expect(this.connectionRequestBean.cancelConnectionRequest(queryId.toString())).andReturn(false); expect(this.context.getCallerPrincipal()).andReturn(this.principal).times(2); expect(this.principal.getName()).andReturn(userName); - expect(this.qlCache.pollIfOwnedBy(queryId.toString(), userName)).andReturn(null); + expect(this.qlCache.pollIfOwnedBy(queryId.toString(), userSid)).andReturn(null); expect(this.principal.getName()).andReturn(userName); - expect(this.principal.getShortName()).andReturn(userSid); + expect(this.principal.getUserDN()).andReturn(SubjectIssuerDNPair.of(userName)); + expect(this.principal.getDNs()).andReturn(new String[] {userName}); + expect(this.principal.getShortName()).andReturn(userSid).anyTimes(); expect(this.principal.getProxyServers()).andReturn(new ArrayList<>(0)).anyTimes(); expect(this.principal.getAuthorizations()).andReturn((Collection) Arrays.asList(Arrays.asList(queryAuthorizations))); expect(this.cache.get(queryId.toString())).andReturn(this.runningQuery); @@ -602,10 +614,12 @@ public void testClose_NullTupleReturnedFromQueryLogicCache() throws Exception { String queryAuthorizations = "AUTH_1"; // Set expectations - expect(this.connectionRequestBean.cancelConnectionRequest(queryId.toString(), this.principal)).andReturn(false); + expect(this.connectionRequestBean.cancelConnectionRequest(queryId.toString(), userName.toLowerCase())).andReturn(false); expect(this.context.getCallerPrincipal()).andReturn(this.principal).anyTimes(); - expect(this.principal.getName()).andReturn(userName); - expect(this.principal.getShortName()).andReturn(userSid).times(2); + expect(this.principal.getName()).andReturn(userName).anyTimes(); + expect(this.principal.getShortName()).andReturn(userSid).anyTimes(); + expect(this.principal.getDNs()).andReturn(new String[] {userName}); + expect(this.principal.getUserDN()).andReturn(SubjectIssuerDNPair.of(userName)); expect(this.principal.getProxyServers()).andReturn(new ArrayList<>(0)).anyTimes(); expect(this.principal.getAuthorizations()).andReturn((Collection) Arrays.asList(Arrays.asList(queryAuthorizations))); expect(this.qlCache.pollIfOwnedBy(queryId.toString(), userSid)).andReturn(null); @@ -637,14 +651,18 @@ public void testClose_NullTupleReturnedFromQueryLogicCache() throws Exception { public void testClose_UncheckedException() throws Exception { // Set local test input String userSid = "userSid"; + String userName = "userName"; UUID queryId = UUID.randomUUID(); // Set expectations - expect(this.connectionRequestBean.cancelConnectionRequest(queryId.toString(), this.principal)).andReturn(false); + expect(this.connectionRequestBean.cancelConnectionRequest(queryId.toString(), userName.toLowerCase())).andReturn(false); expect(this.context.getCallerPrincipal()).andReturn(this.principal).anyTimes(); + expect(this.principal.getName()).andReturn(userName); 
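// The reworked expectations above reflect that query ownership and connection cancellation
// are now keyed by the caller's short name (SID) and lowercased name rather than by the
// Principal object itself. A minimal sketch of the caller side, assuming the same
// principal/bean collaborators these tests mock (names illustrative, not the bean's exact code):
String sid = principal.getShortName();
connectionRequestBean.cancelConnectionRequest(queryId.toString(), principal.getName().toLowerCase());
Object owned = qlCache.pollIfOwnedBy(queryId.toString(), sid); // null when the caller does not own the query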
expect(this.principal.getShortName()).andReturn(userSid); + expect(this.principal.getDNs()).andReturn(new String[] {userName}); expect(this.principal.getProxyServers()).andReturn(new ArrayList<>(0)).anyTimes(); expect(this.qlCache.pollIfOwnedBy(queryId.toString(), userSid)).andReturn(this.tuple); + expect(this.principal.getUserDN()).andReturn(SubjectIssuerDNPair.of(userName)); expect(this.tuple.getFirst()).andReturn((QueryLogic) this.queryLogic1); this.queryLogic1.close(); PowerMock.expectLastCall().andThrow(ILLEGAL_STATE_EXCEPTION); @@ -688,7 +706,7 @@ public void testCreateQueryAndNext_HappyPath() throws Exception { boolean trace = false; String userName = "userName"; String userSid = "userSid"; - String userDN = "userdn"; + String userDN = "userDN"; SubjectIssuerDNPair userDNpair = SubjectIssuerDNPair.of(userDN); List dnList = Collections.singletonList(userDN); UUID queryId = UUID.randomUUID(); @@ -697,33 +715,31 @@ public void testCreateQueryAndNext_HappyPath() throws Exception { HashMap> authsMap = new HashMap<>(); authsMap.put("userdn", Arrays.asList(queryAuthorizations)); - MultivaluedMap queryParameters = new MultivaluedMapImpl<>(); - queryParameters.putSingle(QueryParameters.QUERY_STRING, query); - queryParameters.putSingle(QueryParameters.QUERY_NAME, queryName); - queryParameters.putSingle(QueryParameters.QUERY_LOGIC_NAME, queryLogicName); - queryParameters.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate)); - queryParameters.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); - queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expirationDate)); - queryParameters.putSingle(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); - queryParameters.putSingle(QueryParameters.QUERY_PAGESIZE, String.valueOf(pagesize)); - queryParameters.putSingle(QueryParameters.QUERY_PAGETIMEOUT, String.valueOf(pageTimeout)); - queryParameters.putSingle(QueryParameters.QUERY_PERSISTENCE, persistenceMode.name()); - queryParameters.putSingle(QueryParameters.QUERY_TRACE, String.valueOf(trace)); - queryParameters.putSingle(ColumnVisibilitySecurityMarking.VISIBILITY_MARKING, queryVisibility); - queryParameters.putSingle("valid", "param"); + MultiValueMap queryParameters = new LinkedMultiValueMap<>(); + queryParameters.add(QueryParameters.QUERY_STRING, query); + queryParameters.add(QueryParameters.QUERY_NAME, queryName); + queryParameters.add(QueryParameters.QUERY_LOGIC_NAME, queryLogicName); + queryParameters.add(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate)); + queryParameters.add(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); + queryParameters.add(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expirationDate)); + queryParameters.add(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); + queryParameters.add(QueryParameters.QUERY_PAGESIZE, String.valueOf(pagesize)); + queryParameters.add(QueryParameters.QUERY_PAGETIMEOUT, String.valueOf(pageTimeout)); + queryParameters.add(QueryParameters.QUERY_PERSISTENCE, persistenceMode.name()); + queryParameters.add(QueryParameters.QUERY_TRACE, String.valueOf(trace)); + queryParameters.add(ColumnVisibilitySecurityMarking.VISIBILITY_MARKING, queryVisibility); + queryParameters.add("valid", "param"); ColumnVisibilitySecurityMarking marking = new ColumnVisibilitySecurityMarking(); marking.validate(queryParameters); - QueryParameters qp = new QueryParametersImpl(); + QueryParameters qp = new 
DefaultQueryParameters(); qp.validate(queryParameters); - MultivaluedMap op = new MultivaluedMapImpl<>(); - op.putAll(qp.getUnknownParameters(queryParameters)); - // op.putSingle(PrivateAuditConstants.AUDIT_TYPE, AuditType.NONE.name()); - op.putSingle(PrivateAuditConstants.LOGIC_CLASS, queryLogicName); - op.putSingle(PrivateAuditConstants.COLUMN_VISIBILITY, queryVisibility); - op.putSingle(PrivateAuditConstants.USER_DN, userDNpair.subjectDN()); + MultiValueMap op = qp.getUnknownParameters(queryParameters); + op.add(PrivateAuditConstants.LOGIC_CLASS, queryLogicName); + op.add(PrivateAuditConstants.COLUMN_VISIBILITY, queryVisibility); + op.add(PrivateAuditConstants.USER_DN, userDNpair.subjectDN()); // Set expectations of the create logic queryLogic1.validate(queryParameters); @@ -738,15 +754,16 @@ public void testCreateQueryAndNext_HappyPath() throws Exception { expect(this.queryLogic1.containsDNWithAccess(Collections.singletonList(userDN))).andReturn(true); expect(this.queryLogic1.getAuditType(null)).andReturn(AuditType.NONE); expect(this.principal.getAuthorizations()).andReturn((Collection) Arrays.asList(Arrays.asList(queryAuthorizations))); - expect(persister.create(eq(userDNpair.subjectDN()), eq(dnList), eq(marking), eq(queryLogicName), eq(qp), eq(op))).andReturn(this.query); + expect(persister.create(eq(userDNpair.subjectDN()), eq(dnList), eq(marking), eq(queryLogicName), eq(qp), eq(MapUtils.toMultivaluedMap(op)))) + .andReturn(this.query); expect(this.queryLogic1.getAuditType(this.query)).andReturn(AuditType.NONE); expect(this.queryLogic1.getConnectionPriority()).andReturn(Priority.NORMAL); expect(this.queryLogic1.getConnPoolName()).andReturn("connPool1"); expect(this.queryLogic1.isLongRunningQuery()).andReturn(false); expect(this.connectionFactory.getTrackingMap(isA(StackTraceElement[].class))).andReturn(null); this.query.populateTrackingMap(null); - this.connectionRequestBean.requestBegin(queryId.toString()); - expect(this.connectionFactory.getClient("connPool1", Priority.NORMAL, null)).andReturn(this.client); + this.connectionRequestBean.requestBegin(queryId.toString(), userDN.toLowerCase(), null); + expect(this.connectionFactory.getClient(userDN.toLowerCase(), new ArrayList<>(0), "connPool1", Priority.NORMAL, null)).andReturn(this.client); this.connectionRequestBean.requestEnd(queryId.toString()); expect(this.traceInfos.get(userSid)).andReturn(new ArrayList<>(0)); expect(this.traceInfos.get(null)).andReturn(Arrays.asList(PatternWrapper.wrap("NONMATCHING_REGEX"))); @@ -769,6 +786,8 @@ public void testCreateQueryAndNext_HappyPath() throws Exception { expect(this.query.getExpirationDate()).andReturn(null).anyTimes(); expect(this.query.getParameters()).andReturn((Set) Collections.emptySet()).anyTimes(); expect(this.query.getUncaughtExceptionHandler()).andReturn(new QueryUncaughtExceptionHandler()).anyTimes(); + expect(this.query.findParameter(RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES)) + .andReturn(new QueryImpl.Parameter(RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES, "true")).anyTimes(); this.metrics.updateMetric(isA(QueryMetric.class)); PowerMock.expectLastCall().times(2); expect(this.query.getUserDN()).andReturn(userDN).anyTimes(); @@ -806,6 +825,8 @@ public void testCreateQueryAndNext_HappyPath() throws Exception { expect(this.queryLogic1.getEnrichedTransformer(this.query)).andReturn(this.transformer); expect(this.transformer.createResponse(this.resultsPage)).andReturn(this.baseResponse); 
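// The new getResultLimit expectation added in these tests stands in for per-query result-limit
// resolution: -1 means unlimited and, per the CompositeQueryLogicTest changes above, a DN limit
// matching the query's DN list takes precedence over the logic's plain maxResults. Illustrative
// sketch reusing the CompositeQueryLogicTest helper class (the DN string and limit are hypothetical):
TestQueryLogic limitedLogic = new TestQueryLogic();
limitedLogic.setMaxResults(2);
limitedLogic.setDnResultLimits(Map.of("cn=example user", 5L));
long effectiveLimit = limitedLogic.getResultLimit(settings); // 5 if settings.getDnList() contains "cn=example user", else 2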
expect(this.resultsPage.getStatus()).andReturn(ResultsPage.Status.COMPLETE).times(2); + expect(this.responseObjectFactory.getQueryImpl()).andReturn(new QueryImpl()); + expect(queryLogic1.getResultLimit(anyObject(QueryImpl.class))).andReturn(-1L); this.baseResponse.setHasResults(true); this.baseResponse.setPageNumber(pageNumber); expect(this.queryLogic1.getLogicName()).andReturn(queryLogicName); @@ -832,16 +853,16 @@ public void testCreateQueryAndNext_HappyPath() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactoryImpl.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricsBean.class, metrics); setInternalState(subject, Multimap.class, traceInfos); setInternalState(subject, SecurityMarking.class, new ColumnVisibilitySecurityMarking()); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); setInternalState(connectionRequestBean, EJBContext.class, context); setInternalState(subject, AccumuloConnectionRequestBean.class, connectionRequestBean); - BaseQueryResponse result1 = subject.createQueryAndNext(queryLogicName, queryParameters); + BaseQueryResponse result1 = subject.createQueryAndNext(queryLogicName, MapUtils.toMultivaluedMap(queryParameters)); PowerMock.verifyAll(); // Verify results @@ -879,9 +900,9 @@ public void testCreateQueryAndNext_BadID() throws Exception { queryParameters.putSingle(QueryParameters.QUERY_STRING, query); queryParameters.putSingle(QueryParameters.QUERY_NAME, queryName); queryParameters.putSingle(QueryParameters.QUERY_LOGIC_NAME, queryLogicName); - queryParameters.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate)); - queryParameters.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); - queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expirationDate)); + queryParameters.putSingle(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate)); + queryParameters.putSingle(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); + queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expirationDate)); queryParameters.putSingle(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); queryParameters.putSingle(QueryParameters.QUERY_PAGESIZE, String.valueOf(pagesize)); queryParameters.putSingle(QueryParameters.QUERY_PAGETIMEOUT, String.valueOf(pageTimeout)); @@ -893,7 +914,7 @@ public void testCreateQueryAndNext_BadID() throws Exception { ColumnVisibilitySecurityMarking marking = new ColumnVisibilitySecurityMarking(); marking.validate(queryParameters); - QueryParameters qp = new QueryParametersImpl(); + QueryParameters qp = new DefaultQueryParameters(); qp.validate(queryParameters); MultivaluedMap op = new MultivaluedMapImpl<>(); @@ -922,8 +943,8 @@ public void testCreateQueryAndNext_BadID() throws Exception { expect(this.queryLogic1.isLongRunningQuery()).andReturn(false); expect(this.connectionFactory.getTrackingMap(isA(StackTraceElement[].class))).andReturn(null); 
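// Connection checkout now identifies the end user: requestBegin and getClient take the lowercased
// user DN and the proxy chain in addition to the pool name, as the updated expectations in this
// test show. A sketch of the call sequence, assuming the variables used here (the tests pass a
// null tracking map; local names are illustrative):
Map<String,String> trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace());
connectionRequestBean.requestBegin(queryId.toString(), userDN.toLowerCase(), trackingMap);
try {
    AccumuloClient client = connectionFactory.getClient(userDN.toLowerCase(), principal.getProxyServers(), "connPool1", Priority.NORMAL, trackingMap);
} finally {
    connectionRequestBean.requestEnd(queryId.toString());
}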
this.query.populateTrackingMap(null); - this.connectionRequestBean.requestBegin(queryId.toString()); - expect(this.connectionFactory.getClient("connPool1", Priority.NORMAL, null)).andReturn(this.client); + this.connectionRequestBean.requestBegin(queryId.toString(), userDN.toLowerCase(), null); + expect(this.connectionFactory.getClient(userDN.toLowerCase(), new ArrayList<>(0), "connPool1", Priority.NORMAL, null)).andReturn(this.client); this.connectionRequestBean.requestEnd(queryId.toString()); expect(this.traceInfos.get(userSid)).andReturn(new ArrayList<>(0)); expect(this.traceInfos.get(null)).andReturn(Arrays.asList(PatternWrapper.wrap("NONMATCHING_REGEX"))); @@ -946,6 +967,8 @@ public void testCreateQueryAndNext_BadID() throws Exception { expect(this.query.getExpirationDate()).andReturn(null).anyTimes(); expect(this.query.getParameters()).andReturn((Set) Collections.emptySet()).anyTimes(); expect(this.query.getUncaughtExceptionHandler()).andReturn(new QueryUncaughtExceptionHandler()).anyTimes(); + expect(this.query.findParameter(RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES)) + .andReturn(new QueryImpl.Parameter(RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES, "true")).anyTimes(); this.metrics.updateMetric(isA(QueryMetric.class)); PowerMock.expectLastCall().times(2); expect(this.query.getUserDN()).andReturn(userDN).anyTimes(); @@ -960,6 +983,8 @@ public void testCreateQueryAndNext_BadID() throws Exception { cache.put(eq(queryId.toString()), isA(RunningQuery.class)); expect(this.genericConfiguration.getQueryString()).andReturn(queryName).once(); expect(this.qlCache.poll(queryId.toString())).andReturn(null); + expect(this.responseObjectFactory.getQueryImpl()).andReturn(new QueryImpl()); + expect(queryLogic1.getResultLimit(anyObject(QueryImpl.class))).andReturn(-1L); // Set expectations of the next logic expect(this.principal.getName()).andReturn(userName); @@ -987,12 +1012,12 @@ public void testCreateQueryAndNext_BadID() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactoryImpl.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricsBean.class, metrics); setInternalState(subject, Multimap.class, traceInfos); setInternalState(subject, SecurityMarking.class, new ColumnVisibilitySecurityMarking()); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); setInternalState(connectionRequestBean, EJBContext.class, context); setInternalState(subject, AccumuloConnectionRequestBean.class, connectionRequestBean); @@ -1042,9 +1067,9 @@ public void testCreateQueryAndNext_PageSizeParam() throws Exception { queryParameters.putSingle(QueryParameters.QUERY_STRING, query); queryParameters.putSingle(QueryParameters.QUERY_NAME, queryName); queryParameters.putSingle(QueryParameters.QUERY_LOGIC_NAME, queryLogicName); - queryParameters.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate)); - queryParameters.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); - 
queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expirationDate)); + queryParameters.putSingle(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate)); + queryParameters.putSingle(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); + queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expirationDate)); queryParameters.putSingle(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); queryParameters.putSingle(QueryParameters.QUERY_PAGESIZE, String.valueOf(pagesize)); queryParameters.putSingle(QueryParameters.QUERY_PAGETIMEOUT, String.valueOf(pageTimeout)); @@ -1057,7 +1082,7 @@ public void testCreateQueryAndNext_PageSizeParam() throws Exception { ColumnVisibilitySecurityMarking marking = new ColumnVisibilitySecurityMarking(); marking.validate(queryParameters); - QueryParameters qp = new QueryParametersImpl(); + QueryParameters qp = new DefaultQueryParameters(); qp.validate(queryParameters); MultivaluedMap op = new MultivaluedMapImpl<>(); @@ -1085,8 +1110,8 @@ public void testCreateQueryAndNext_PageSizeParam() throws Exception { expect(this.queryLogic1.getConnPoolName()).andReturn("connPool1"); expect(this.queryLogic1.isLongRunningQuery()).andReturn(false); expect(this.connectionFactory.getTrackingMap(isA(StackTraceElement[].class))).andReturn(null); - this.connectionRequestBean.requestBegin(queryId.toString()); - expect(this.connectionFactory.getClient("connPool1", Priority.NORMAL, null)).andReturn(this.client); + this.connectionRequestBean.requestBegin(queryId.toString(), userDN.toLowerCase(), null); + expect(this.connectionFactory.getClient("connPool1", new ArrayList<>(), Priority.NORMAL, null)).andReturn(this.client); this.connectionRequestBean.requestEnd(queryId.toString()); expect(this.traceInfos.get(userSid)).andReturn(new ArrayList<>(0)); expect(this.traceInfos.get(null)).andReturn(Arrays.asList(PatternWrapper.wrap("NONMATCHING_REGEX"))); @@ -1146,12 +1171,12 @@ public void testCreateQueryAndNext_PageSizeParam() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactoryImpl.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricsBean.class, metrics); setInternalState(subject, Multimap.class, traceInfos); setInternalState(subject, SecurityMarking.class, new ColumnVisibilitySecurityMarking()); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); setInternalState(connectionRequestBean, EJBContext.class, context); setInternalState(subject, AccumuloConnectionRequestBean.class, connectionRequestBean); @@ -1201,9 +1226,9 @@ public void testCreateQueryAndNext_PageSizeParamTwo() throws Exception { queryParameters.putSingle(QueryParameters.QUERY_STRING, query); queryParameters.putSingle(QueryParameters.QUERY_NAME, queryName); queryParameters.putSingle(QueryParameters.QUERY_LOGIC_NAME, queryLogicName); - queryParameters.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate)); - 
queryParameters.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); - queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expirationDate)); + queryParameters.putSingle(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate)); + queryParameters.putSingle(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); + queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expirationDate)); queryParameters.putSingle(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); queryParameters.putSingle(QueryParameters.QUERY_PAGESIZE, String.valueOf(pagesize)); // If the wrong page size parameter is added here, it should be dropped automatically by the QueryImpl @@ -1217,7 +1242,7 @@ public void testCreateQueryAndNext_PageSizeParamTwo() throws Exception { ColumnVisibilitySecurityMarking marking = new ColumnVisibilitySecurityMarking(); marking.validate(queryParameters); - QueryParameters qp = new QueryParametersImpl(); + QueryParameters qp = new DefaultQueryParameters(); qp.validate(queryParameters); MultivaluedMap op = new MultivaluedMapImpl<>(); @@ -1246,8 +1271,8 @@ public void testCreateQueryAndNext_PageSizeParamTwo() throws Exception { expect(this.queryLogic1.getConnPoolName()).andReturn("connPool1"); expect(this.queryLogic1.isLongRunningQuery()).andReturn(false); expect(this.connectionFactory.getTrackingMap(isA(StackTraceElement[].class))).andReturn(null); - this.connectionRequestBean.requestBegin(queryId.toString()); - expect(this.connectionFactory.getClient("connPool1", Priority.NORMAL, null)).andReturn(this.client); + this.connectionRequestBean.requestBegin(queryId.toString(), userDN.toLowerCase(), null); + expect(this.connectionFactory.getClient("connPool1", new ArrayList<>(), Priority.NORMAL, null)).andReturn(this.client); this.connectionRequestBean.requestEnd(queryId.toString()); expect(this.traceInfos.get(userSid)).andReturn(new ArrayList<>(0)); expect(this.traceInfos.get(null)).andReturn(Arrays.asList(PatternWrapper.wrap("NONMATCHING_REGEX"))); @@ -1328,12 +1353,12 @@ public void testCreateQueryAndNext_PageSizeParamTwo() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactoryImpl.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricsBean.class, metrics); setInternalState(subject, Multimap.class, traceInfos); setInternalState(subject, SecurityMarking.class, new ColumnVisibilitySecurityMarking()); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); setInternalState(connectionRequestBean, EJBContext.class, context); setInternalState(subject, AccumuloConnectionRequestBean.class, connectionRequestBean); @@ -1385,9 +1410,9 @@ public void testCreateQueryAndNext_DoubleAuditValues() throws Exception { queryParameters.putSingle(QueryParameters.QUERY_STRING, query); queryParameters.putSingle(QueryParameters.QUERY_NAME, queryName); queryParameters.putSingle(QueryParameters.QUERY_LOGIC_NAME, queryLogicName); - 
queryParameters.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate)); - queryParameters.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); - queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expirationDate)); + queryParameters.putSingle(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate)); + queryParameters.putSingle(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); + queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expirationDate)); queryParameters.putSingle(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); queryParameters.putSingle(QueryParameters.QUERY_PAGESIZE, String.valueOf(pagesize)); queryParameters.putSingle(QueryParameters.QUERY_PAGETIMEOUT, String.valueOf(pageTimeout)); @@ -1402,11 +1427,10 @@ public void testCreateQueryAndNext_DoubleAuditValues() throws Exception { ColumnVisibilitySecurityMarking marking = new ColumnVisibilitySecurityMarking(); marking.validate(queryParameters); - QueryParameters qp = new QueryParametersImpl(); + QueryParameters qp = new DefaultQueryParameters(); qp.validate(queryParameters); - MultivaluedMap op = new MultivaluedMapImpl<>(); - op.putAll(qp.getUnknownParameters(queryParameters)); + MultivaluedMap op = MapUtils.toMultivaluedMap(qp.getUnknownParameters(MapUtils.toMultiValueMap(queryParameters))); // op.putSingle(PrivateAuditConstants.AUDIT_TYPE, AuditType.NONE.name()); op.putSingle(PrivateAuditConstants.LOGIC_CLASS, queryLogicName); op.putSingle(PrivateAuditConstants.COLUMN_VISIBILITY, queryVisibility); @@ -1435,7 +1459,7 @@ public void testCreateQueryAndNext_DoubleAuditValues() throws Exception { expect(this.queryLogic1.getConnPoolName()).andReturn("connPool1"); expect(this.connectionFactory.getTrackingMap(isA(StackTraceElement[].class))).andReturn(null); this.query.populateTrackingMap(null); - expect(this.connectionFactory.getClient("connPool1", Priority.NORMAL, null)).andReturn(this.client); + expect(this.connectionFactory.getClient(userDN.toLowerCase(), new ArrayList<>(0), "connPool1", Priority.NORMAL, null)).andReturn(this.client); expect(this.traceInfos.get(userSid)).andReturn(new ArrayList<>(0)); expect(this.traceInfos.get(null)).andReturn(Arrays.asList(PatternWrapper.wrap("NONMATCHING_REGEX"))); expect(this.qlCache.add(queryId.toString(), userSid, this.queryLogic1, this.client)).andReturn(true); @@ -1454,6 +1478,8 @@ public void testCreateQueryAndNext_DoubleAuditValues() throws Exception { expect(this.query.getExpirationDate()).andReturn(null).anyTimes(); expect(this.query.getParameters()).andReturn((Set) Collections.emptySet()).anyTimes(); expect(this.query.getUncaughtExceptionHandler()).andReturn(new QueryUncaughtExceptionHandler()).anyTimes(); + expect(this.query.findParameter(RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES)) + .andReturn(new QueryImpl.Parameter(RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES, "true")).anyTimes(); this.metrics.updateMetric(isA(QueryMetric.class)); PowerMock.expectLastCall().times(2); expect(this.query.getUserDN()).andReturn(userDN).anyTimes(); @@ -1479,7 +1505,7 @@ public void testCreateQueryAndNext_DoubleAuditValues() throws Exception { expect(this.cache.get(queryId.toString())).andReturn(this.runningQuery); expect(cache.lock(queryId.toString())).andReturn(true); expect(this.runningQuery.getSettings()).andReturn(this.query); - this.connectionRequestBean.requestBegin(queryId.toString()); + 
this.connectionRequestBean.requestBegin(queryId.toString(), userDN.toLowerCase(), null); expect(this.runningQuery.getClient()).andReturn(this.client); this.connectionRequestBean.requestEnd(queryId.toString()); @@ -1509,6 +1535,9 @@ public void testCreateQueryAndNext_DoubleAuditValues() throws Exception { expect(this.transaction.getStatus()).andReturn(Status.STATUS_ACTIVE).anyTimes(); this.transaction.commit(); + expect(this.responseObjectFactory.getQueryImpl()).andReturn(new QueryImpl()); + expect(queryLogic1.getResultLimit(anyObject(QueryImpl.class))).andReturn(-1L); + // Run the test PowerMock.replayAll(); QueryExecutorBean subject = new QueryExecutorBean(); @@ -1521,12 +1550,12 @@ public void testCreateQueryAndNext_DoubleAuditValues() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactoryImpl.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricsBean.class, metrics); setInternalState(subject, Multimap.class, traceInfos); setInternalState(subject, SecurityMarking.class, new ColumnVisibilitySecurityMarking()); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); setInternalState(connectionRequestBean, EJBContext.class, context); setInternalState(subject, AccumuloConnectionRequestBean.class, connectionRequestBean); @@ -1561,29 +1590,28 @@ public void testCreateQueryAndNext_AddToCacheException() throws Exception { List dnList = Collections.singletonList(userDN); UUID queryId = UUID.randomUUID(); - MultivaluedMap queryParameters = new MultivaluedMapImpl<>(); - queryParameters.putSingle(QueryParameters.QUERY_STRING, query); - queryParameters.putSingle(QueryParameters.QUERY_NAME, queryName); - queryParameters.putSingle(QueryParameters.QUERY_LOGIC_NAME, queryLogicName); - queryParameters.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate)); - queryParameters.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); - queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expirationDate)); - queryParameters.putSingle(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); - queryParameters.putSingle(QueryParameters.QUERY_PAGESIZE, String.valueOf(pagesize)); - queryParameters.putSingle(QueryParameters.QUERY_PAGETIMEOUT, String.valueOf(pageTimeout)); - queryParameters.putSingle(QueryParameters.QUERY_PERSISTENCE, persistenceMode.name()); - queryParameters.putSingle(QueryParameters.QUERY_TRACE, String.valueOf(trace)); - queryParameters.putSingle("valid", "param"); - queryParameters.putSingle(ColumnVisibilitySecurityMarking.VISIBILITY_MARKING, queryVisibility); + MultiValueMap queryParameters = new LinkedMultiValueMap<>(); + queryParameters.set(QueryParameters.QUERY_STRING, query); + queryParameters.set(QueryParameters.QUERY_NAME, queryName); + queryParameters.set(QueryParameters.QUERY_LOGIC_NAME, queryLogicName); + queryParameters.set(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate)); + queryParameters.set(QueryParameters.QUERY_END, 
DefaultQueryParameters.formatDate(endDate)); + queryParameters.set(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expirationDate)); + queryParameters.set(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); + queryParameters.set(QueryParameters.QUERY_PAGESIZE, String.valueOf(pagesize)); + queryParameters.set(QueryParameters.QUERY_PAGETIMEOUT, String.valueOf(pageTimeout)); + queryParameters.set(QueryParameters.QUERY_PERSISTENCE, persistenceMode.name()); + queryParameters.set(QueryParameters.QUERY_TRACE, String.valueOf(trace)); + queryParameters.set("valid", "param"); + queryParameters.set(ColumnVisibilitySecurityMarking.VISIBILITY_MARKING, queryVisibility); ColumnVisibilitySecurityMarking marking = new ColumnVisibilitySecurityMarking(); marking.validate(queryParameters); - QueryParameters qp = new QueryParametersImpl(); + QueryParameters qp = new DefaultQueryParameters(); qp.validate(queryParameters); - MultivaluedMap op = new MultivaluedMapImpl<>(); - op.putAll(qp.getUnknownParameters(queryParameters)); + MultivaluedMap op = MapUtils.toMultivaluedMap(qp.getUnknownParameters(queryParameters)); // op.putSingle(PrivateAuditConstants.AUDIT_TYPE, AuditType.ACTIVE.name()); op.putSingle(PrivateAuditConstants.LOGIC_CLASS, queryLogicName); op.putSingle(PrivateAuditConstants.COLUMN_VISIBILITY, queryVisibility); @@ -1606,13 +1634,13 @@ public void testCreateQueryAndNext_AddToCacheException() throws Exception { expect(persister.create(eq(userDNpair.subjectDN()), eq(dnList), eq(marking), eq(queryLogicName), eq(qp), eq(op))).andReturn(this.query); expect(this.queryLogic1.getAuditType(this.query)).andReturn(AuditType.ACTIVE); expect(this.queryLogic1.getSelectors(this.query)).andReturn(null); - expect(auditor.audit(eq(queryParameters))).andReturn(null); + expect(auditor.audit(anyObject())).andReturn(null); expect(this.queryLogic1.getConnectionPriority()).andReturn(Priority.NORMAL); expect(this.queryLogic1.getConnPoolName()).andReturn("connPool1"); expect(this.connectionFactory.getTrackingMap(isA(StackTraceElement[].class))).andReturn(null); this.query.populateTrackingMap(null); - this.connectionRequestBean.requestBegin(queryId.toString()); - expect(this.connectionFactory.getClient("connPool1", Priority.NORMAL, null)).andReturn(this.client); + this.connectionRequestBean.requestBegin(queryId.toString(), userDN.toLowerCase(), null); + expect(this.connectionFactory.getClient(userDN.toLowerCase(), new ArrayList<>(0), "connPool1", Priority.NORMAL, null)).andReturn(this.client); this.connectionRequestBean.requestEnd(queryId.toString()); expect(this.traceInfos.get(userSid)).andReturn(Arrays.asList(PatternWrapper.wrap(query))); expect(this.qlCache.add(queryId.toString(), userSid, this.queryLogic1, this.client)) @@ -1625,17 +1653,20 @@ public void testCreateQueryAndNext_AddToCacheException() throws Exception { expect(this.query.getId()).andReturn(queryId).anyTimes(); expect(this.qlCache.poll(queryId.toString())).andReturn(null); + expect(this.responseObjectFactory.getQueryImpl()).andReturn(new QueryImpl()); + expect(queryLogic1.getResultLimit(anyObject(QueryImpl.class))).andReturn(-1L); + // Run the test PowerMock.replayAll(); QueryExecutorBean subject = new QueryExecutorBean(); setInternalState(subject, EJBContext.class, context); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, AccumuloConnectionFactory.class, connectionFactory); 
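// The bean entry points still take a JAX-RS MultivaluedMap, so the reworked tests build a Spring
// MultiValueMap and convert at the boundary via the MapUtils helper imported above. Minimal
// sketch (parameter values illustrative):
MultiValueMap<String,String> params = new LinkedMultiValueMap<>();
params.add(QueryParameters.QUERY_STRING, "FOO == 'BAR'");
params.add(QueryParameters.QUERY_LOGIC_NAME, queryLogicName);
MultivaluedMap<String,String> jaxrsParams = MapUtils.toMultivaluedMap(params);
BaseQueryResponse response = subject.createQueryAndNext(queryLogicName, jaxrsParams);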
setInternalState(subject, ResponseObjectFactory.class, responseObjectFactory); setInternalState(subject, CreatedQueryLogicCacheBean.class, qlCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, Multimap.class, traceInfos); setInternalState(subject, SecurityMarking.class, marking); @@ -1644,7 +1675,7 @@ public void testCreateQueryAndNext_AddToCacheException() throws Exception { setInternalState(subject, AccumuloConnectionRequestBean.class, connectionRequestBean); Throwable result1 = null; try { - subject.createQueryAndNext(queryLogicName, queryParameters); + subject.createQueryAndNext(queryLogicName, MapUtils.toMultivaluedMap(queryParameters)); } catch (DatawaveWebApplicationException e) { result1 = e.getCause(); } @@ -1674,7 +1705,7 @@ public void testCreateQueryAndNext_ButNoResults() throws Exception { boolean trace = false; String userName = "userName"; String userSid = "userSid"; - String userDN = "userdn"; + String userDN = "userDN"; SubjectIssuerDNPair userDNpair = SubjectIssuerDNPair.of(userDN); List dnList = Collections.singletonList(userDN); UUID queryId = UUID.randomUUID(); @@ -1687,9 +1718,9 @@ public void testCreateQueryAndNext_ButNoResults() throws Exception { queryParameters.putSingle(QueryParameters.QUERY_STRING, query); queryParameters.putSingle(QueryParameters.QUERY_NAME, queryName); queryParameters.putSingle(QueryParameters.QUERY_LOGIC_NAME, queryLogicName); - queryParameters.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate)); - queryParameters.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); - queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expirationDate)); + queryParameters.putSingle(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate)); + queryParameters.putSingle(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); + queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expirationDate)); queryParameters.putSingle(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); queryParameters.putSingle(QueryParameters.QUERY_PAGESIZE, String.valueOf(pagesize)); queryParameters.putSingle(QueryParameters.QUERY_PAGETIMEOUT, String.valueOf(pageTimeout)); @@ -1701,11 +1732,10 @@ public void testCreateQueryAndNext_ButNoResults() throws Exception { ColumnVisibilitySecurityMarking marking = new ColumnVisibilitySecurityMarking(); marking.validate(queryParameters); - QueryParameters qp = new QueryParametersImpl(); + QueryParameters qp = new DefaultQueryParameters(); qp.validate(queryParameters); - MultivaluedMap op = new MultivaluedMapImpl<>(); - op.putAll(qp.getUnknownParameters(queryParameters)); + MultivaluedMap op = MapUtils.toMultivaluedMap(qp.getUnknownParameters(MapUtils.toMultiValueMap(queryParameters))); op.putSingle(PrivateAuditConstants.LOGIC_CLASS, queryLogicName); op.putSingle(PrivateAuditConstants.COLUMN_VISIBILITY, queryVisibility); op.putSingle(PrivateAuditConstants.USER_DN, userDNpair.subjectDN()); @@ -1751,6 +1781,8 @@ public void testCreateQueryAndNext_ButNoResults() throws Exception { expect(this.query.getExpirationDate()).andReturn(null).anyTimes(); 
expect(this.query.getParameters()).andReturn((Set) Collections.emptySet()).anyTimes(); expect(this.query.getUncaughtExceptionHandler()).andReturn(new QueryUncaughtExceptionHandler()).anyTimes(); + expect(this.query.findParameter(RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES)) + .andReturn(new QueryImpl.Parameter(RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES, "true")).anyTimes(); this.metrics.updateMetric(isA(QueryMetric.class)); PowerMock.expectLastCall().times(2); expect(this.query.getUserDN()).andReturn(userDN).anyTimes(); @@ -1773,7 +1805,7 @@ public void testCreateQueryAndNext_ButNoResults() throws Exception { expect(this.cache.get(queryId.toString())).andReturn(this.runningQuery); expect(cache.lock(queryId.toString())).andReturn(true); expect(this.runningQuery.getSettings()).andReturn(this.query).anyTimes(); - this.connectionRequestBean.requestBegin(queryId.toString()); + this.connectionRequestBean.requestBegin(queryId.toString(), userDN.toLowerCase(), null); expect(this.runningQuery.getClient()).andReturn(this.client); this.connectionRequestBean.requestEnd(queryId.toString()); this.runningQuery.setActiveCall(true); @@ -1809,13 +1841,15 @@ public void testCreateQueryAndNext_ButNoResults() throws Exception { // Set expectations expect(this.context.getCallerPrincipal()).andReturn(this.principal).anyTimes(); - expect(this.principal.getName()).andReturn(userName); - expect(this.principal.getShortName()).andReturn(userSid).times(2); - expect(this.principal.getAuthorizations()).andReturn((Collection) Arrays.asList(Arrays.asList(queryAuthorizations))); - expect(this.connectionRequestBean.cancelConnectionRequest(queryId.toString(), this.principal)).andReturn(false); + expect(this.principal.getName()).andReturn(userName).anyTimes(); + expect(this.principal.getShortName()).andReturn(userSid).anyTimes(); + expect(this.principal.getUserDN()).andReturn(SubjectIssuerDNPair.of(userDN)); + expect(this.principal.getDNs()).andReturn(new String[] {userDN}); + expect(this.principal.getAuthorizations()).andReturn((Collection) Arrays.asList(Arrays.asList(queryAuthorizations))).anyTimes(); + expect(this.connectionRequestBean.cancelConnectionRequest(queryId.toString(), userDN.toLowerCase())).andReturn(false); expect(this.qlCache.pollIfOwnedBy(queryId.toString(), userSid)).andReturn(null); expect(this.cache.get(queryId.toString())).andReturn(this.runningQuery); - expect(this.connectionFactory.getClient("connPool1", Priority.NORMAL, null)).andReturn(this.client); + expect(this.connectionFactory.getClient(userDN.toLowerCase(), new ArrayList<>(0), "connPool1", Priority.NORMAL, null)).andReturn(this.client); this.runningQuery.closeConnection(this.connectionFactory); this.cache.remove(queryId.toString()); this.closedCache.add(queryId.toString()); @@ -1823,6 +1857,9 @@ public void testCreateQueryAndNext_ButNoResults() throws Exception { // expect(this.runningQuery.getTraceInfo()).andReturn(null); expect(this.responseObjectFactory.getEventQueryResponse()).andReturn(new DefaultEventQueryResponse()); + expect(this.responseObjectFactory.getQueryImpl()).andReturn(new QueryImpl()); + expect(queryLogic1.getResultLimit(anyObject(QueryImpl.class))).andReturn(-1L); + // Run the test PowerMock.replayAll(); try { @@ -1836,12 +1873,12 @@ public void testCreateQueryAndNext_ButNoResults() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, 
QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricsBean.class, metrics); setInternalState(subject, Multimap.class, traceInfos); setInternalState(subject, SecurityMarking.class, new ColumnVisibilitySecurityMarking()); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); setInternalState(connectionRequestBean, EJBContext.class, context); setInternalState(subject, AccumuloConnectionRequestBean.class, connectionRequestBean); @@ -1872,14 +1909,14 @@ public void testCreateQueryAndNext_InvalidExpirationDate() throws Exception { String parameters = null; boolean trace = false; - MultivaluedMap p = new MultivaluedMapImpl<>(); - p.putAll(QueryParametersImpl.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, expirationDate, - pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); + MultivaluedMap p = MapUtils.toMultivaluedMap( + DefaultQueryParameters.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, + expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); // Run the test PowerMock.replayAll(); QueryExecutorBean subject = new QueryExecutorBean(); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); Throwable result1 = null; try { @@ -1916,14 +1953,14 @@ public void testCreateQueryAndNext_InvalidPageSize() throws Exception { String parameters = null; boolean trace = false; - MultivaluedMap p = new MultivaluedMapImpl<>(); - p.putAll(QueryParametersImpl.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, expirationDate, - pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); + MultivaluedMap p = MapUtils.toMultivaluedMap( + DefaultQueryParameters.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, + expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); // Run the test PowerMock.replayAll(); QueryExecutorBean subject = new QueryExecutorBean(); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); Throwable result1 = null; try { @@ -1961,18 +1998,17 @@ public void testCreateQueryAndNext_PageSizeExceedsConfiguredMax() throws Excepti boolean trace = false; // Set expectations - MultivaluedMap queryParameters = new MultivaluedMapImpl<>(); - queryParameters.putAll(QueryParametersImpl.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, - expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); + MultivaluedMap queryParameters = MapUtils.toMultivaluedMap( + 
DefaultQueryParameters.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, + expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); ColumnVisibilitySecurityMarking marking = new ColumnVisibilitySecurityMarking(); marking.validate(queryParameters); - QueryParameters qp = new QueryParametersImpl(); + QueryParameters qp = new DefaultQueryParameters(); qp.validate(queryParameters); - MultivaluedMap op = new MultivaluedMapImpl<>(); - op.putAll(qp.getUnknownParameters(queryParameters)); + MultivaluedMap op = MapUtils.toMultivaluedMap(qp.getUnknownParameters(MapUtils.toMultiValueMap(queryParameters))); op.putSingle(PrivateAuditConstants.LOGIC_CLASS, queryLogicName); op.putSingle(PrivateAuditConstants.COLUMN_VISIBILITY, queryVisibility); expect(this.queryLogicFactory.getQueryLogic(queryLogicName, this.principal)).andReturn((QueryLogic) this.queryLogic1); @@ -1993,9 +2029,9 @@ public void testCreateQueryAndNext_PageSizeExceedsConfiguredMax() throws Excepti QueryExecutorBean subject = new QueryExecutorBean(); setInternalState(subject, EJBContext.class, context); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, SecurityMarking.class, new ColumnVisibilitySecurityMarking()); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); Throwable result1 = null; try { @@ -2040,14 +2076,14 @@ public void testCreateQueryAndNext_UndefinedQueryLogic() throws Exception { QueryExecutorBean subject = new QueryExecutorBean(); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); setInternalState(subject, EJBContext.class, context); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); Throwable result1 = null; try { - MultivaluedMap queryParameters = new MultivaluedMapImpl<>(); - queryParameters.putAll(QueryParametersImpl.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, - expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); + MultivaluedMap queryParameters = MapUtils.toMultivaluedMap( + DefaultQueryParameters.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, + expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); subject.createQueryAndNext(queryLogicName, queryParameters); @@ -2085,13 +2121,13 @@ public void testDefineQuery_InvalidExpirationDate() throws Exception { // Run the test PowerMock.replayAll(); QueryExecutorBean subject = new QueryExecutorBean(); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); 
setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); Throwable result1 = null; try { - MultivaluedMap queryParameters = new MultivaluedMapImpl<>(); - queryParameters.putAll(QueryParametersImpl.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, - expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); + MultivaluedMap queryParameters = MapUtils.toMultivaluedMap( + DefaultQueryParameters.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, + expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); subject.defineQuery(queryLogicName, queryParameters); @@ -2129,13 +2165,13 @@ public void testDefineQuery_InvalidPageSize() throws Exception { // Run the test PowerMock.replayAll(); QueryExecutorBean subject = new QueryExecutorBean(); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); Throwable result1 = null; try { - MultivaluedMap queryParameters = new MultivaluedMapImpl<>(); - queryParameters.putAll(QueryParametersImpl.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, - expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); + MultivaluedMap queryParameters = MapUtils.toMultivaluedMap( + DefaultQueryParameters.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, + expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); subject.defineQuery(queryLogicName, queryParameters); } catch (DatawaveWebApplicationException e) { @@ -2167,25 +2203,24 @@ public void testDefineQuery_UncheckedException() throws Exception { QueryPersistence persistenceMode = QueryPersistence.PERSISTENT; String userName = "userName"; String userSid = "userSid"; - String userDN = "userdn"; + String userDN = "userDN"; SubjectIssuerDNPair userDNpair = SubjectIssuerDNPair.of(userDN); boolean trace = false; List dnList = Collections.singletonList(userDN); UUID queryId = UUID.randomUUID(); - MultivaluedMap queryParameters = new MultivaluedMapImpl<>(); - queryParameters.putAll(QueryParametersImpl.paramsToMap(null, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, expirationDate, - pagesize, pageTimeout, maxResultsOverride, persistenceMode, null, null, trace)); + MultivaluedMap queryParameters = MapUtils + .toMultivaluedMap(DefaultQueryParameters.paramsToMap(null, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, + expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, null, null, trace)); queryParameters.putSingle("valid", "param"); ColumnVisibilitySecurityMarking marking = new ColumnVisibilitySecurityMarking(); marking.validate(queryParameters); - QueryParameters qp = new QueryParametersImpl(); + QueryParameters qp = new DefaultQueryParameters(); qp.validate(queryParameters); - MultivaluedMap op = new MultivaluedMapImpl<>(); - op.putAll(qp.getUnknownParameters(queryParameters)); + MultivaluedMap op = MapUtils.toMultivaluedMap(qp.getUnknownParameters(MapUtils.toMultiValueMap(queryParameters))); 
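The MapUtils.toMultivaluedMap / MapUtils.toMultiValueMap calls threaded through these hunks bridge the JAX-RS MultivaluedMap and Spring MultiValueMap types, both of which are just Map<String, List<String>>. The two method names come from the diff itself; the bodies below are an illustrative sketch of such a bridge, not DataWave's implementation:

    import java.util.List;
    import java.util.Map;
    import javax.ws.rs.core.MultivaluedHashMap;
    import javax.ws.rs.core.MultivaluedMap;
    import org.springframework.util.LinkedMultiValueMap;
    import org.springframework.util.MultiValueMap;

    final class MapUtilsSketch {
        // Both target types extend Map<String, List<String>>, so each conversion
        // is a plain copy of the entry set into the other container.
        static MultivaluedMap<String,String> toMultivaluedMap(Map<String,List<String>> in) {
            MultivaluedMap<String,String> out = new MultivaluedHashMap<>();
            in.forEach(out::put);
            return out;
        }

        static MultiValueMap<String,String> toMultiValueMap(Map<String,List<String>> in) {
            MultiValueMap<String,String> out = new LinkedMultiValueMap<>();
            in.forEach(out::put);
            return out;
        }
    }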
op.putSingle(PrivateAuditConstants.LOGIC_CLASS, queryLogicName); op.putSingle(PrivateAuditConstants.COLUMN_VISIBILITY, queryVisibility); op.putSingle(PrivateAuditConstants.USER_DN, userDNpair.subjectDN()); @@ -2214,10 +2249,10 @@ public void testDefineQuery_UncheckedException() throws Exception { setInternalState(subject, EJBContext.class, context); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, Multimap.class, traceInfos); setInternalState(subject, SecurityMarking.class, new ColumnVisibilitySecurityMarking()); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); subject.defineQuery(queryLogicName, queryParameters); } finally { @@ -2311,9 +2346,9 @@ public void testDuplicateQuery_HappyPath() throws Exception { MultivaluedMap queryParameters = new MultivaluedMapImpl<>(); queryParameters.putSingle(QueryParameters.QUERY_STRING, query); queryParameters.putSingle(QueryParameters.QUERY_NAME, newQueryName); - queryParameters.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate)); - queryParameters.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); - queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expirationDate)); + queryParameters.putSingle(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate)); + queryParameters.putSingle(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); + queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expirationDate)); queryParameters.putSingle(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); queryParameters.putSingle(QueryParameters.QUERY_PARAMS, parameters); queryParameters.putSingle(QueryParameters.QUERY_PAGESIZE, String.valueOf(pagesize)); @@ -2325,11 +2360,10 @@ public void testDuplicateQuery_HappyPath() throws Exception { ColumnVisibilitySecurityMarking marking = new ColumnVisibilitySecurityMarking(); marking.validate(queryParameters); - QueryParameters qp = new QueryParametersImpl(); + QueryParameters qp = new DefaultQueryParameters(); qp.validate(queryParameters); - MultivaluedMap op = new MultivaluedMapImpl<>(); - op.putAll(qp.getUnknownParameters(queryParameters)); + MultivaluedMap op = MapUtils.toMultivaluedMap(qp.getUnknownParameters(MapUtils.toMultiValueMap(queryParameters))); op.putSingle(PrivateAuditConstants.LOGIC_CLASS, queryLogic1.getClass().getSimpleName()); op.putSingle(PrivateAuditConstants.COLUMN_VISIBILITY, queryVisibility); op.putSingle(PrivateAuditConstants.USER_DN, userDN); @@ -2367,7 +2401,7 @@ public void testDuplicateQuery_HappyPath() throws Exception { expect(this.queryLogic1.getConnPoolName()).andReturn("connPool1"); expect(this.connectionFactory.getTrackingMap(isA(StackTraceElement[].class))).andReturn(null); this.query.populateTrackingMap(null); - expect(this.connectionFactory.getClient("connPool1", Priority.NORMAL, null)).andReturn(this.client); + expect(this.connectionFactory.getClient(userDN.toLowerCase(), new ArrayList<>(0), "connPool1", Priority.NORMAL, null)).andReturn(this.client); 
expect(this.qlCache.add(newQuery1.getId().toString(), userSid, this.queryLogic1, this.client)).andReturn(true); expect(this.queryLogic1.getCollectQueryMetrics()).andReturn(false); expect(this.queryLogic1.initialize(eq(this.client), isA(Query.class), isA(Set.class))).andReturn(this.genericConfiguration); @@ -2388,10 +2422,10 @@ public void testDuplicateQuery_HappyPath() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, QueryMetricsBean.class, metrics); setInternalState(subject, Multimap.class, traceInfos); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); GenericResponse result1 = subject.duplicateQuery(queryId.toString(), newQueryName, queryLogicName, query, queryVisibility, beginDate, endDate, queryAuthorizations, expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, parameters, trace); @@ -2441,8 +2475,8 @@ public void testDuplicateQuery_FindByIDReturnsNull() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); try { @@ -2517,10 +2551,10 @@ public void testDuplicateQuery_UncheckedExceptionThrownDuringCreateQuery() throw setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, QueryMetricsBean.class, metrics); setInternalState(subject, Multimap.class, traceInfos); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); try { @@ -2745,6 +2779,7 @@ public void testInit() throws Exception { expect(this.lookupUUIDConfiguration.getUuidTypes()).andReturn(null); expect(this.lookupUUIDConfiguration.getBeginDate()).andReturn("not a date"); expect(this.lookupUUIDConfiguration.getBatchLookupUpperLimit()).andReturn(0); + expect(this.lookupUUIDConfiguration.getContentLookupTypes()).andReturn(Collections.emptyMap()); expect(this.context.getCallerPrincipal()).andReturn(this.principal).anyTimes(); LookupUUIDConfiguration tmpCfg = new LookupUUIDConfiguration(); tmpCfg.setColumnVisibility("PUBLIC"); @@ -2757,7 +2792,7 @@ public void testInit() throws Exception { 
setInternalState(subject, QueryTraceCache.class, traceCache); setInternalState(subject, LookupUUIDConfiguration.class, lookupUUIDConfiguration); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); try { @@ -2808,6 +2843,8 @@ public void testList_HappyPath() throws Exception { expect(this.query.getPageTimeout()).andReturn(-1).anyTimes(); expect(this.query.getExpirationDate()).andReturn(null).anyTimes(); expect(this.query.getParameters()).andReturn((Set) Collections.emptySet()).anyTimes(); + expect(this.query.findParameter(RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES)) + .andReturn(new QueryImpl.Parameter(RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES, "true")).anyTimes(); expect(this.query.getDnList()).andReturn(dnList).anyTimes(); expect(this.queryLogic1.getResultLimit(this.query)).andReturn(-1L); expect(this.queryLogic1.getMaxResults()).andReturn(-1L); @@ -2823,8 +2860,8 @@ public void testList_HappyPath() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); setInternalState(subject, UserOperationsBean.class, userOperations); QueryImplListResponse result1 = subject.list(queryName); @@ -2900,7 +2937,6 @@ public void testListQueriesForUser_UncheckedException() throws Exception { @Test public void testListQueryLogic() throws Exception { // Set expectations - RoleManager roleManager = new EasyRoleManager(); expect(this.queryLogicFactory.getQueryLogicList()).andReturn(Arrays.asList(this.queryLogic1, this.queryLogic2)); expect(this.queryLogic1.getLogicName()).andReturn("logic1").times(1); // Begin 1st loop expect(this.queryLogic1.getAuditType(null)).andReturn(AuditType.LOCALONLY); @@ -2908,17 +2944,16 @@ public void testListQueryLogic() throws Exception { expect(this.queryLogic1.getOptionalQueryParameters()).andReturn(new TreeSet<>()); expect(this.queryLogic1.getRequiredQueryParameters()).andReturn(new TreeSet<>()); expect(this.queryLogic1.getExampleQueries()).andReturn(new TreeSet<>()); - expect(this.queryLogic1.getRoleManager()).andReturn(roleManager).anyTimes(); - expect(this.queryLogic1.getResponseClass(EasyMock.anyObject(Query.class))).andThrow(ILLEGAL_STATE_EXCEPTION); + expect(this.queryLogic1.getRequiredRoles()).andReturn(new HashSet<>()).anyTimes(); + expect(this.queryLogic1.getResponseClass(anyObject(Query.class))).andThrow(ILLEGAL_STATE_EXCEPTION); expect(this.queryLogic2.getLogicName()).andReturn("logic2").times(1); // Begin 1st loop expect(this.queryLogic2.getAuditType(null)).andReturn(AuditType.LOCALONLY); expect(this.queryLogic2.getLogicDescription()).andReturn("description2"); expect(this.queryLogic2.getOptionalQueryParameters()).andReturn(new TreeSet<>()); expect(this.queryLogic2.getRequiredQueryParameters()).andReturn(new TreeSet<>()); 
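Several tests in this region (testList_HappyPath above, the earlier no-results test, and testReset below) gain a stub for Query.findParameter on the remote-services flag. A reduced sketch of that stubbing pattern; the Parameter shape mirrors the two-argument QueryImpl.Parameter(name, value) constructor used in the hunks, while the key's literal value here is illustrative only, since the real constant lives in RemoteUserOperationsImpl:

    import static org.easymock.EasyMock.*;

    class FindParameterSketch {
        static final String INCLUDE_REMOTE_SERVICES = "include.remote.services"; // illustrative value only

        static final class Parameter {
            final String name, value;
            Parameter(String name, String value) { this.name = name; this.value = value; }
        }

        interface Query { Parameter findParameter(String name); }

        static void demo() {
            Query query = createMock(Query.class);
            // anyTimes() keeps the stub valid however often the bean consults the flag.
            expect(query.findParameter(INCLUDE_REMOTE_SERVICES))
                            .andReturn(new Parameter(INCLUDE_REMOTE_SERVICES, "true")).anyTimes();
            replay(query);
        }
    }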
expect(this.queryLogic2.getExampleQueries()).andReturn(new TreeSet<>()); - RoleManager roleManager2 = new DatawaveRoleManager(Arrays.asList("ROLE_1", "ROLE_2")); - expect(this.queryLogic2.getRoleManager()).andReturn(roleManager2).times(2); - expect(this.queryLogic2.getResponseClass(EasyMock.anyObject(Query.class))).andReturn(this.baseResponse.getClass().getCanonicalName()); + expect(this.queryLogic2.getRequiredRoles()).andReturn(new HashSet<>(Arrays.asList("ROLE_1", "ROLE_2"))).times(2); + expect(this.queryLogic2.getResponseClass(anyObject(Query.class))).andReturn(this.baseResponse.getClass().getCanonicalName()); expect(this.responseObjectFactory.getQueryImpl()).andReturn(new QueryImpl()); Map parsers = new HashMap<>(); parsers.put("PARSER1", null); @@ -2928,7 +2963,7 @@ public void testListQueryLogic() throws Exception { PowerMock.replayAll(); QueryExecutorBean subject = new QueryExecutorBean(); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); setInternalState(subject, ResponseObjectFactory.class, responseObjectFactory); QueryLogicResponse result1 = subject.listQueryLogic(); @@ -3013,9 +3048,11 @@ public void testNext_QueryExceptionDueToCacheLock() throws Exception { // Set expectations expect(this.context.getCallerPrincipal()).andReturn(this.principal).anyTimes(); - expect(this.principal.getName()).andReturn(userName); - expect(this.principal.getShortName()).andReturn(userSid); - expect(this.principal.getProxyServers()).andReturn(new ArrayList<>(0)); + expect(this.principal.getName()).andReturn(userName).anyTimes(); + expect(this.principal.getShortName()).andReturn(userSid).anyTimes(); + expect(this.principal.getUserDN()).andReturn(SubjectIssuerDNPair.of(userName)); + expect(this.principal.getDNs()).andReturn(new String[] {userName}); + expect(this.principal.getProxyServers()).andReturn(new ArrayList<>(0)).anyTimes(); expect(this.context.getUserTransaction()).andReturn(this.transaction).anyTimes(); this.transaction.begin(); expect(this.cache.get(queryId.toString())).andReturn(this.runningQuery); @@ -3064,9 +3101,11 @@ public void testNext_UncheckedException() throws Exception { // Set expectations expect(this.context.getCallerPrincipal()).andReturn(this.principal).anyTimes(); - expect(this.principal.getName()).andReturn(userName); - expect(this.principal.getShortName()).andReturn(userSid); - expect(this.principal.getProxyServers()).andReturn(new ArrayList<>(0)); + expect(this.principal.getName()).andReturn(userName).anyTimes(); + expect(this.principal.getShortName()).andReturn(userSid).anyTimes(); + expect(this.principal.getUserDN()).andReturn(SubjectIssuerDNPair.of(userName)); + expect(this.principal.getDNs()).andReturn(new String[] {userName}); + expect(this.principal.getProxyServers()).andReturn(new ArrayList<>(0)).anyTimes(); expect(this.context.getUserTransaction()).andReturn(this.transaction).anyTimes(); this.transaction.begin(); expect(this.cache.get(queryId.toString())).andReturn(this.runningQuery); @@ -3110,9 +3149,11 @@ public void testNext_UserNotOwner() throws Exception { // Set expectations expect(this.context.getCallerPrincipal()).andReturn(this.principal).anyTimes(); - expect(this.principal.getName()).andReturn(userName); - expect(this.principal.getShortName()).andReturn(otherSid); - 
expect(this.principal.getProxyServers()).andReturn(new ArrayList<>(0)); + expect(this.principal.getName()).andReturn(userName).anyTimes(); + expect(this.principal.getShortName()).andReturn(otherSid).anyTimes(); + expect(this.principal.getUserDN()).andReturn(SubjectIssuerDNPair.of(userName)); + expect(this.principal.getDNs()).andReturn(new String[] {userName}); + expect(this.principal.getProxyServers()).andReturn(new ArrayList<>(0)).anyTimes(); expect(this.context.getUserTransaction()).andReturn(this.transaction).anyTimes(); this.transaction.begin(); expect(this.cache.get(queryId.toString())).andReturn(this.runningQuery); @@ -3165,9 +3206,11 @@ public void testNext_NullQueryReturnedFromCache() throws Exception { // Set expectations expect(this.context.getCallerPrincipal()).andReturn(this.principal).anyTimes(); - expect(this.principal.getName()).andReturn(userName); - expect(this.principal.getShortName()).andReturn(userSid); - expect(this.principal.getProxyServers()).andReturn(new ArrayList<>(0)); + expect(this.principal.getName()).andReturn(userName).anyTimes(); + expect(this.principal.getShortName()).andReturn(userSid).anyTimes(); + expect(this.principal.getUserDN()).andReturn(SubjectIssuerDNPair.of(userName)); + expect(this.principal.getDNs()).andReturn(new String[] {userName}); + expect(this.principal.getProxyServers()).andReturn(new ArrayList<>(0)).anyTimes(); expect(this.context.getUserTransaction()).andReturn(this.transaction).anyTimes(); this.transaction.begin(); expect(this.cache.get(queryId.toString())).andReturn(null); @@ -3282,16 +3325,19 @@ public void testReset_NoPreexistingRunningQuery() throws Exception { map.set(PrivateAuditConstants.COLUMN_VISIBILITY, authorization); map.set(PrivateAuditConstants.USER_DN, userDN); map.set(AuditParameters.AUDIT_ID, queryName); - MultivaluedMap auditMap = new MultivaluedMapImpl(); + MultiValueMap auditMap = new LinkedMultiValueMap(); auditMap.putAll(map); // Set expectations expect(this.context.getUserTransaction()).andReturn(this.transaction).anyTimes(); this.transaction.begin(); expect(this.transaction.getStatus()).andReturn(Status.STATUS_ACTIVE).anyTimes(); - expect(this.context.getCallerPrincipal()).andReturn(this.principal); - expect(this.principal.getName()).andReturn(userName); - expect(this.principal.getShortName()).andReturn(sid); + expect(this.context.getCallerPrincipal()).andReturn(this.principal).anyTimes(); + expect(this.principal.getName()).andReturn(userName).anyTimes(); + expect(this.principal.getShortName()).andReturn(sid).anyTimes(); + expect(this.principal.getUserDN()).andReturn(SubjectIssuerDNPair.of(userDN)); + expect(this.principal.getDNs()).andReturn(new String[] {userDN}); + expect(this.principal.getProxyServers()).andReturn(new ArrayList<>()); expect(this.principal.getAuthorizations()).andReturn((Collection) Arrays.asList(Arrays.asList(authorization))); expect(this.principal.getPrimaryUser()).andReturn(dwUser).anyTimes(); expect(this.dwUser.getAuths()).andReturn(Collections.singleton(authorization)).anyTimes(); @@ -3309,6 +3355,7 @@ public void testReset_NoPreexistingRunningQuery() throws Exception { expect(this.query.getOwner()).andReturn(sid).anyTimes(); expect(this.query.getId()).andReturn(queryId).anyTimes(); expect(this.query.getQuery()).andReturn(queryName).anyTimes(); + expect(this.query.getQueryName()).andReturn(queryName).anyTimes(); this.cache.put(eq(queryId.toString()), isA(RunningQuery.class)); expect(this.cache.lock(queryName)).andReturn(true); 
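On the audit maps in this reset test (and in the update and audit-exception tests further down), JBoss's MultivaluedMapImpl gives way to Spring's LinkedMultiValueMap, with putSingle(key, value) becoming set(key, value). A minimal sketch of the replacement idiom; the key string is illustrative rather than AuditParameters.AUDIT_ID's real value:

    import org.springframework.util.LinkedMultiValueMap;
    import org.springframework.util.MultiValueMap;

    final class AuditMapSketch {
        static MultiValueMap<String,String> build(String auditId) {
            MultiValueMap<String,String> auditMap = new LinkedMultiValueMap<>();
            // Spring's set(key, value) replaces any existing values for the key,
            // matching the single-valued semantics of the old JAX-RS putSingle(key, value).
            auditMap.set("audit.id", auditId);
            return auditMap;
        }
    }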
expect(this.queryLogic1.getAuditType(this.query)).andReturn(AuditType.PASSIVE); @@ -3323,8 +3370,9 @@ public void testReset_NoPreexistingRunningQuery() throws Exception { expect(this.query.getColumnVisibility()).andReturn(authorization); expect(this.query.getBeginDate()).andReturn(null); expect(this.query.getEndDate()).andReturn(null); - expect(this.query.getQueryName()).andReturn(queryName); expect(this.query.getParameters()).andReturn((Set) Collections.emptySet()); + expect(this.query.findParameter(RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES)) + .andReturn(new QueryImpl.Parameter(RemoteUserOperationsImpl.INCLUDE_REMOTE_SERVICES, "true")).anyTimes(); expect(this.query.getColumnVisibility()).andReturn(authorization); expect(this.queryLogic1.getSelectors(this.query)).andReturn(null); expect(this.auditor.audit(auditMap)).andReturn(null); @@ -3352,8 +3400,9 @@ public void testReset_NoPreexistingRunningQuery() throws Exception { this.query.populateTrackingMap(new HashMap<>()); expect(this.queryLogic1.getConnPoolName()).andReturn("connPool1"); expect(this.queryLogic1.getLogicName()).andReturn(queryLogicName); - connectionRequestBean.requestBegin(queryName); - expect(this.connectionFactory.getClient(eq("connPool1"), eq(Priority.NORMAL), isA(Map.class))).andReturn(this.client); + connectionRequestBean.requestBegin(queryName, userDN.toLowerCase(), new HashMap<>()); + expect(this.connectionFactory.getClient(eq(userDN.toLowerCase()), eq(new ArrayList<>()), eq("connPool1"), eq(Priority.NORMAL), eq(new HashMap<>()))) + .andReturn(this.client); connectionRequestBean.requestEnd(queryName); expect(this.queryLogic1.initialize(eq(this.client), eq(this.query), isA(Set.class))).andReturn(this.genericConfiguration); this.queryLogic1.setupQuery(this.genericConfiguration); @@ -3375,8 +3424,8 @@ public void testReset_NoPreexistingRunningQuery() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricsBean.class, metrics); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); @@ -3428,7 +3477,7 @@ public void testReset_PreexistingRunningQueryWithCloseConnectionException() thro setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricsBean.class, metrics); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); @@ -3482,7 +3531,7 @@ public void testReset_PreexistingRunningQueryWithLockException() throws Exceptio setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - 
setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricsBean.class, metrics); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); @@ -3525,9 +3574,9 @@ public void testUpdateQuery_PersistentMode() throws Exception { MultiValueMap p = new LinkedMultiValueMap<>(); p.set(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); - p.set(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate)); - p.set(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); - p.set(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expirationDate)); + p.set(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate)); + p.set(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); + p.set(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expirationDate)); p.set(QueryParameters.QUERY_NAME, queryName); p.set(QueryParameters.QUERY_PAGESIZE, Integer.toString(pagesize)); p.set(QueryParameters.QUERY_PAGETIMEOUT, Integer.toString(pageTimeout)); @@ -3540,9 +3589,9 @@ public void testUpdateQuery_PersistentMode() throws Exception { p.set(PrivateAuditConstants.LOGIC_CLASS, queryLogicName); p.set(PrivateAuditConstants.COLUMN_VISIBILITY, queryVisibility); p.set(PrivateAuditConstants.USER_DN, userDN); - MultivaluedMap auditMap = new MultivaluedMapImpl(); + MultiValueMap auditMap = new LinkedMultiValueMap(); auditMap.putAll(p); - auditMap.putSingle(AuditParameters.AUDIT_ID, queryId.toString()); + auditMap.set(AuditParameters.AUDIT_ID, queryId.toString()); // Set expectations expect(this.context.getCallerPrincipal()).andReturn(this.principal).times(4); @@ -3591,7 +3640,7 @@ public void testUpdateQuery_PersistentMode() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); GenericResponse result1 = subject.updateQuery(queryId.toString(), queryLogicName, query, queryVisibility, beginDate, endDate, @@ -3640,11 +3689,10 @@ public void testExecute_HappyPath() throws Exception { qp.setPageTimeout(pageTimeout); qp.setColumnVisibility(queryAuthorizations); - MultivaluedMap params = new MultivaluedMapImpl<>(); - params.putAll(qp.toMap()); - params.putSingle(QueryParameters.QUERY_TRACE, Boolean.toString(trace)); - params.putSingle(QueryParameters.QUERY_PERSISTENCE, persistenceMode.name()); - params.putSingle(QueryParameters.QUERY_PARAMS, parameters); + MultiValueMap params = new LinkedMultiValueMap<>(qp.toMap()); + params.set(QueryParameters.QUERY_TRACE, Boolean.toString(trace)); + params.set(QueryParameters.QUERY_PERSISTENCE, persistenceMode.name()); + params.set(QueryParameters.QUERY_PARAMS, parameters); QueryExecutorBean subject = PowerMock.createPartialMock(QueryExecutorBean.class, "createQuery"); @@ -3655,7 +3703,7 @@ public void testExecute_HappyPath() throws Exception { expect(this.queryLogicFactory.getQueryLogic(queryLogicName, 
principal)).andReturn((QueryLogic) this.queryLogic1); expect(this.queryLogic1.getEnrichedTransformer(isA(Query.class))).andReturn(this.transformer); expect(this.transformer.createResponse(isA(ResultsPage.class))).andReturn(this.baseResponse); - expect(subject.createQuery(queryLogicName, params, httpHeaders)).andReturn(createResponse); + expect(subject.createQuery(queryLogicName, MapUtils.toMultivaluedMap(params), httpHeaders)).andReturn(createResponse); expect(this.cache.get(eq(queryId.toString()))).andReturn(this.runningQuery); expect(this.runningQuery.getMetric()).andReturn(this.queryMetric); expect(this.responseObjectFactory.getQueryImpl()).andReturn(new QueryImpl()); @@ -3672,12 +3720,12 @@ public void testExecute_HappyPath() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricsBean.class, metrics); setInternalState(subject, Multimap.class, traceInfos); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); - StreamingOutput result1 = subject.execute(queryLogicName, params, httpHeaders); + StreamingOutput result1 = subject.execute(queryLogicName, MapUtils.toMultivaluedMap(params), httpHeaders); PowerMock.verifyAll(); // Verify results @@ -3729,16 +3777,16 @@ public void testExecute_InvalidMediaType() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricsBean.class, metrics); setInternalState(subject, Multimap.class, traceInfos); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); StreamingOutput result1 = null; try { - MultivaluedMap queryParameters = new MultivaluedMapImpl<>(); - queryParameters.putAll(QueryParametersImpl.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, - expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); + MultivaluedMap queryParameters = MapUtils.toMultivaluedMap( + DefaultQueryParameters.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, + expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); result1 = subject.execute(queryLogicName, queryParameters, httpHeaders); @@ -3762,7 +3810,7 @@ public void testLookupUUID_happyPath() { expect(uriInfo.getQueryParameters()).andReturn(new MultivaluedHashMap<>()); expect(lookupUUIDUtil.getUUIDType("uuidType")).andReturn(uuidType); - expect(uuidType.getDefinedView()).andReturn("abc"); + expect(uuidType.getQueryLogic(null)).andReturn("abc"); expect(lookupUUIDUtil.createUUIDQueryAndNext(isA(GetUUIDCriteria.class))).andReturn(response); expect(response.getQueryId()).andReturn("11111"); expect(context.getCallerPrincipal()).andReturn(principal); 
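Both lookupUUID tests swap the removed UUIDType.getDefinedView() for a one-argument getQueryLogic(context), invoked here with null. A reduced sketch of the stub against a stand-in interface (the one-argument signature and the "abc" return value are taken from the hunks; the real UUIDType is DataWave's):

    import static org.easymock.EasyMock.*;

    class UuidTypeSketch {
        interface UUIDType { String getQueryLogic(String context); } // stand-in for the real type

        static void demo() {
            UUIDType uuidType = createMock(UUIDType.class);
            // The tests pass a null context and expect the logic name back.
            expect(uuidType.getQueryLogic(null)).andReturn("abc");
            replay(uuidType);
            uuidType.getQueryLogic(null);
            verify(uuidType);
        }
    }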
@@ -3776,7 +3824,7 @@ public void testLookupUUID_happyPath() { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); setInternalState(subject, LookupUUIDUtil.class, lookupUUIDUtil); @@ -3796,7 +3844,7 @@ public void testLookupUUID_closeFail() { expect(uriInfo.getQueryParameters()).andReturn(new MultivaluedHashMap<>()); expect(lookupUUIDUtil.getUUIDType("uuidType")).andReturn(uuidType); - expect(uuidType.getDefinedView()).andReturn("abc"); + expect(uuidType.getQueryLogic(null)).andReturn("abc"); expect(lookupUUIDUtil.createUUIDQueryAndNext(isA(GetUUIDCriteria.class))).andReturn(response); expect(response.getQueryId()).andReturn("11111"); expect(context.getCallerPrincipal()).andReturn(principal); @@ -3810,7 +3858,7 @@ public void testLookupUUID_closeFail() { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); setInternalState(subject, LookupUUIDUtil.class, lookupUUIDUtil); @@ -3840,7 +3888,7 @@ public void testPlanQuery() throws Exception { boolean trace = false; String userName = "userName"; String userSid = "userSid"; - String userDN = "userdn"; + String userDN = "userDN"; SubjectIssuerDNPair userDNpair = SubjectIssuerDNPair.of(userDN); List dnList = Collections.singletonList(userDN); UUID queryId = UUID.randomUUID(); @@ -3852,9 +3900,9 @@ public void testPlanQuery() throws Exception { queryParameters.putSingle(QueryParameters.QUERY_LOGIC_NAME, queryLogicName); queryParameters.putSingle(QueryParameters.QUERY_STRING, query); queryParameters.putSingle(QueryParameters.QUERY_NAME, queryName); - queryParameters.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate)); - queryParameters.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); - queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expirationDate)); + queryParameters.putSingle(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate)); + queryParameters.putSingle(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); + queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expirationDate)); queryParameters.putSingle(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); queryParameters.putSingle(QueryParameters.QUERY_PAGESIZE, String.valueOf(pagesize)); queryParameters.putSingle(QueryParameters.QUERY_PAGETIMEOUT, String.valueOf(pageTimeout)); @@ -3866,11 +3914,10 @@ public void testPlanQuery() throws Exception { ColumnVisibilitySecurityMarking marking = new ColumnVisibilitySecurityMarking(); marking.validate(queryParameters); - QueryParameters qp = new QueryParametersImpl(); + QueryParameters qp = new 
DefaultQueryParameters(); qp.validate(queryParameters); - MultivaluedMap op = new MultivaluedMapImpl<>(); - op.putAll(qp.getUnknownParameters(queryParameters)); + MultivaluedMap op = MapUtils.toMultivaluedMap(qp.getUnknownParameters(MapUtils.toMultiValueMap(queryParameters))); op.putSingle(PrivateAuditConstants.LOGIC_CLASS, queryLogicName); op.putSingle(PrivateAuditConstants.COLUMN_VISIBILITY, queryVisibility); op.putSingle(PrivateAuditConstants.USER_DN, userDNpair.subjectDN()); @@ -3894,8 +3941,8 @@ public void testPlanQuery() throws Exception { expect(this.queryLogic1.getConnPoolName()).andReturn("connPool1"); expect(this.connectionFactory.getTrackingMap(isA(StackTraceElement[].class))).andReturn(null); this.query.populateTrackingMap(null); - this.connectionRequestBean.requestBegin(queryId.toString()); - expect(this.connectionFactory.getClient("connPool1", Priority.NORMAL, null)).andReturn(this.client); + this.connectionRequestBean.requestBegin(queryId.toString(), userDN.toLowerCase(), null); + expect(this.connectionFactory.getClient(userDN.toLowerCase(), new ArrayList<>(0), "connPool1", Priority.NORMAL, null)).andReturn(this.client); this.connectionRequestBean.requestEnd(queryId.toString()); expect(this.principal.getPrimaryUser()).andReturn(dwUser).anyTimes(); expect(this.dwUser.getAuths()).andReturn(Collections.singleton(queryAuthorizations)).anyTimes(); @@ -3918,6 +3965,9 @@ public void testPlanQuery() throws Exception { expect(this.query.getUncaughtExceptionHandler()).andReturn(new QueryUncaughtExceptionHandler()).anyTimes(); expect(this.query.getUserDN()).andReturn(userDN).anyTimes(); + expect(this.responseObjectFactory.getQueryImpl()).andReturn(new QueryImpl()); + expect(queryLogic1.getResultLimit(anyObject(QueryImpl.class))).andReturn(-1L); + // Set expectations of the plan Authorizations queryAuths = new Authorizations(queryAuthorizations); expect(this.queryLogic1.getPlan(this.client, this.query, Collections.singleton(queryAuths), true, false)).andReturn("a query plan"); @@ -3940,12 +3990,12 @@ public void testPlanQuery() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactoryImpl.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricsBean.class, metrics); setInternalState(subject, Multimap.class, traceInfos); setInternalState(subject, SecurityMarking.class, new ColumnVisibilitySecurityMarking()); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); setInternalState(connectionRequestBean, EJBContext.class, context); setInternalState(subject, AccumuloConnectionRequestBean.class, connectionRequestBean); @@ -3975,7 +4025,7 @@ public void testPlanQueryWithValues() throws Exception { boolean trace = false; String userName = "userName"; String userSid = "userSid"; - String userDN = "userdn"; + String userDN = "userDN"; SubjectIssuerDNPair userDNpair = SubjectIssuerDNPair.of(userDN); List dnList = Collections.singletonList(userDN); UUID queryId = UUID.randomUUID(); @@ -3983,30 +4033,28 @@ public void testPlanQueryWithValues() throws Exception { 
HashMap<String,Collection<String>> authsMap = new HashMap<>(); authsMap.put("userdn", Arrays.asList(queryAuthorizations)); - MultivaluedMap queryParameters = new MultivaluedMapImpl<>(); - queryParameters.putSingle(QueryParameters.QUERY_LOGIC_NAME, queryLogicName); - queryParameters.putSingle(QueryParameters.QUERY_STRING, query); - queryParameters.putSingle(QueryParameters.QUERY_NAME, queryName); - queryParameters.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate)); - queryParameters.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); - queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expirationDate)); - queryParameters.putSingle(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); - queryParameters.putSingle(QueryParameters.QUERY_PAGESIZE, String.valueOf(pagesize)); - queryParameters.putSingle(QueryParameters.QUERY_PAGETIMEOUT, String.valueOf(pageTimeout)); - queryParameters.putSingle(QueryParameters.QUERY_PERSISTENCE, persistenceMode.name()); - queryParameters.putSingle(QueryParameters.QUERY_TRACE, String.valueOf(trace)); - queryParameters.putSingle(ColumnVisibilitySecurityMarking.VISIBILITY_MARKING, queryVisibility); - queryParameters.putSingle("valid", "param"); - queryParameters.putSingle(QueryExecutorBean.EXPAND_VALUES, "true"); - + MultiValueMap queryParameters = new LinkedMultiValueMap<>(); + queryParameters.set(QueryParameters.QUERY_LOGIC_NAME, queryLogicName); + queryParameters.set(QueryParameters.QUERY_STRING, query); + queryParameters.set(QueryParameters.QUERY_NAME, queryName); + queryParameters.set(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate)); + queryParameters.set(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); + queryParameters.set(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expirationDate)); + queryParameters.set(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); + queryParameters.set(QueryParameters.QUERY_PAGESIZE, String.valueOf(pagesize)); + queryParameters.set(QueryParameters.QUERY_PAGETIMEOUT, String.valueOf(pageTimeout)); + queryParameters.set(QueryParameters.QUERY_PERSISTENCE, persistenceMode.name()); + queryParameters.set(QueryParameters.QUERY_TRACE, String.valueOf(trace)); + queryParameters.set(ColumnVisibilitySecurityMarking.VISIBILITY_MARKING, queryVisibility); + queryParameters.set("valid", "param"); + queryParameters.set(QueryExecutorBean.EXPAND_VALUES, "true"); ColumnVisibilitySecurityMarking marking = new ColumnVisibilitySecurityMarking(); marking.validate(queryParameters); - QueryParameters qp = new QueryParametersImpl(); + QueryParameters qp = new DefaultQueryParameters(); qp.validate(queryParameters); - MultivaluedMap op = new MultivaluedMapImpl<>(); - op.putAll(qp.getUnknownParameters(queryParameters)); + MultivaluedMap op = MapUtils.toMultivaluedMap(qp.getUnknownParameters(queryParameters)); // op.putSingle(PrivateAuditConstants.AUDIT_TYPE, AuditType.NONE.name()); op.putSingle(PrivateAuditConstants.LOGIC_CLASS, queryLogicName); op.putSingle(PrivateAuditConstants.COLUMN_VISIBILITY, queryVisibility); @@ -4014,7 +4062,6 @@ public void testPlanQueryWithValues() throws Exception { // Set expectations of the create logic queryLogic1.validate(queryParameters); - // this.query.populateMetric(isA(QueryMetric.class)); expect(this.queryLogicFactory.getQueryLogic(queryLogicName, this.principal)).andReturn((QueryLogic) this.queryLogic1); expect(this.queryLogic1.getMaxPageSize()).andReturn(1000).times(2);
expect(this.context.getCallerPrincipal()).andReturn(this.principal).anyTimes(); @@ -4026,7 +4073,7 @@ public void testPlanQueryWithValues() throws Exception { expect(this.queryLogic1.containsDNWithAccess(Collections.singletonList(userDN))).andReturn(true); expect(this.queryLogic1.getAuditType(null)).andReturn(AuditType.PASSIVE); expect(this.queryLogic1.getSelectors(this.query)).andReturn(null); - expect(auditor.audit(eq(queryParameters))).andReturn(null); + expect(auditor.audit(anyObject())).andReturn(null); expect(this.principal.getAuthorizations()).andReturn((Collection) Arrays.asList(Arrays.asList(queryAuthorizations))); expect(persister.create(eq(userDNpair.subjectDN()), eq(dnList), eq(marking), eq(queryLogicName), eq(qp), eq(op))).andReturn(this.query); expect(this.queryLogic1.getAuditType(this.query)).andReturn(AuditType.PASSIVE); @@ -4036,8 +4083,8 @@ public void testPlanQueryWithValues() throws Exception { expect(this.queryLogic1.getUserOperations()).andReturn(null); expect(this.connectionFactory.getTrackingMap(isA(StackTraceElement[].class))).andReturn(null); this.query.populateTrackingMap(null); - this.connectionRequestBean.requestBegin(queryId.toString()); - expect(this.connectionFactory.getClient("connPool1", Priority.NORMAL, null)).andReturn(this.client); + this.connectionRequestBean.requestBegin(queryId.toString(), userDN.toLowerCase(), null); + expect(this.connectionFactory.getClient(userDN.toLowerCase(), new ArrayList<>(0), "connPool1", Priority.NORMAL, null)).andReturn(this.client); this.connectionRequestBean.requestEnd(queryId.toString()); expect(this.principal.getPrimaryUser()).andReturn(dwUser).anyTimes(); @@ -4063,6 +4110,9 @@ public void testPlanQueryWithValues() throws Exception { // expect(this.genericConfiguration.getQueryString()).andReturn(queryName).once(); // expect(this.qlCache.poll(queryId.toString())).andReturn(null); + expect(this.responseObjectFactory.getQueryImpl()).andReturn(new QueryImpl()); + expect(queryLogic1.getResultLimit(anyObject(QueryImpl.class))).andReturn(-1L); + // Set expectations of the plan Authorizations queryAuths = new Authorizations(queryAuthorizations); expect(this.queryLogic1.getPlan(this.client, this.query, Collections.singleton(queryAuths), true, true)).andReturn("a query plan"); @@ -4085,16 +4135,16 @@ public void testPlanQueryWithValues() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactoryImpl.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricsBean.class, metrics); setInternalState(subject, Multimap.class, traceInfos); setInternalState(subject, SecurityMarking.class, new ColumnVisibilitySecurityMarking()); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); setInternalState(connectionRequestBean, EJBContext.class, context); setInternalState(subject, AccumuloConnectionRequestBean.class, connectionRequestBean); - GenericResponse result1 = subject.planQuery(queryLogicName, queryParameters); + GenericResponse result1 = subject.planQuery(queryLogicName, 
MapUtils.toMultivaluedMap(queryParameters)); PowerMock.verifyAll(); // Verify results @@ -4126,19 +4176,19 @@ public void testCreateQuery_auditException() throws Exception { HashMap<String,Collection<String>> authsMap = new HashMap<>(); authsMap.put("USERDN", Arrays.asList(queryAuthorizations)); - MultivaluedMap queryParameters = new MultivaluedMapImpl<>(); - queryParameters.putSingle(QueryParameters.QUERY_STRING, query); - queryParameters.putSingle(QueryParameters.QUERY_NAME, queryName); - queryParameters.putSingle(QueryParameters.QUERY_LOGIC_NAME, queryLogicName); - queryParameters.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate)); - queryParameters.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); - queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expirationDate)); - queryParameters.putSingle(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); - queryParameters.putSingle(QueryParameters.QUERY_PAGESIZE, String.valueOf(pagesize)); - queryParameters.putSingle(QueryParameters.QUERY_PERSISTENCE, persistenceMode.name()); - queryParameters.putSingle(QueryParameters.QUERY_TRACE, String.valueOf(trace)); - queryParameters.putSingle(ColumnVisibilitySecurityMarking.VISIBILITY_MARKING, queryVisibility); - queryParameters.putSingle("valid", "param"); + MultiValueMap queryParameters = new LinkedMultiValueMap<>(); + queryParameters.set(QueryParameters.QUERY_STRING, query); + queryParameters.set(QueryParameters.QUERY_NAME, queryName); + queryParameters.set(QueryParameters.QUERY_LOGIC_NAME, queryLogicName); + queryParameters.set(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate)); + queryParameters.set(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); + queryParameters.set(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expirationDate)); + queryParameters.set(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); + queryParameters.set(QueryParameters.QUERY_PAGESIZE, String.valueOf(pagesize)); + queryParameters.set(QueryParameters.QUERY_PERSISTENCE, persistenceMode.name()); + queryParameters.set(QueryParameters.QUERY_TRACE, String.valueOf(trace)); + queryParameters.set(ColumnVisibilitySecurityMarking.VISIBILITY_MARKING, queryVisibility); + queryParameters.set("valid", "param"); expect(context.getCallerPrincipal()).andReturn(principal).anyTimes(); expect(this.queryLogicFactory.getQueryLogic(queryLogicName, this.principal)).andReturn((QueryLogic) this.queryLogic1); @@ -4153,7 +4203,9 @@ public void testCreateQuery_auditException() throws Exception { expect(this.principal.getAuthorizations()).andReturn((Collection) Arrays.asList(Arrays.asList(queryAuthorizations))); expect(this.queryLogic1.getMaxPageSize()).andReturn(10).anyTimes(); expect(queryLogic1.getSelectors(null)).andReturn(null); - expect(auditor.audit(queryParameters)).andThrow(new JMSRuntimeException("EXPECTED TESTING EXCEPTION")); + expect(this.responseObjectFactory.getQueryImpl()).andReturn(new QueryImpl()); + expect(queryLogic1.getResultLimit(anyObject(QueryImpl.class))).andReturn(-1L); + expect(auditor.audit(EasyMock.anyObject())).andThrow(new JMSRuntimeException("EXPECTED TESTING EXCEPTION")); queryLogic1.close(); PowerMock.replayAll(); @@ -4167,17 +4219,17 @@ public void testCreateQuery_auditException() throws Exception { setInternalState(executor, ClosedQueryCache.class, closedCache); setInternalState(executor, Persister.class, persister); setInternalState(executor,
QueryLogicFactoryImpl.class, queryLogicFactory); - setInternalState(executor, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(executor, QueryExpirationProperties.class, queryExpirationConf); setInternalState(executor, AuditBean.class, auditor); setInternalState(executor, QueryMetricsBean.class, metrics); setInternalState(executor, Multimap.class, traceInfos); setInternalState(executor, SecurityMarking.class, new ColumnVisibilitySecurityMarking()); - setInternalState(executor, QueryParameters.class, new QueryParametersImpl()); + setInternalState(executor, QueryParameters.class, new DefaultQueryParameters()); setInternalState(executor, QueryMetricFactory.class, new QueryMetricFactoryImpl()); setInternalState(connectionRequestBean, EJBContext.class, context); setInternalState(executor, AccumuloConnectionRequestBean.class, connectionRequestBean); - executor.createQuery(queryLogicName, queryParameters); + executor.createQuery(queryLogicName, MapUtils.toMultivaluedMap(queryParameters)); PowerMock.verifyAll(); } @@ -4207,13 +4259,13 @@ public void testReset_auditException() throws Exception { qp.setUserDN(userDN); qp.setDnList(Collections.singletonList(userDN)); - MultiValueMap map = new LinkedMultiValueMap<>(); + MultiValueMap map = new LinkedMultiValueMap<>(qp.toMap()); map.set(PrivateAuditConstants.AUDIT_TYPE, AuditType.PASSIVE.name()); map.set(PrivateAuditConstants.LOGIC_CLASS, queryLogicName); map.set(PrivateAuditConstants.COLUMN_VISIBILITY, authorization); map.set(PrivateAuditConstants.USER_DN, userDN); map.set(AuditParameters.AUDIT_ID, queryName); - MultivaluedMap auditMap = new MultivaluedMapImpl(); + MultiValueMap auditMap = new LinkedMultiValueMap(); auditMap.putAll(map); // Set expectations @@ -4261,7 +4313,7 @@ public void testReset_auditException() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricsBean.class, metrics); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); @@ -4298,9 +4350,9 @@ public void testUpdateQuery_auditException() throws Exception { MultiValueMap p = new LinkedMultiValueMap<>(); p.set(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); - p.set(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate)); - p.set(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); - p.set(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expirationDate)); + p.set(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate)); + p.set(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); + p.set(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expirationDate)); p.set(QueryParameters.QUERY_NAME, queryName); p.set(QueryParameters.QUERY_PAGESIZE, Integer.toString(pagesize)); p.set(QueryParameters.QUERY_PAGETIMEOUT, Integer.toString(pageTimeout)); @@ -4314,7 +4366,7 @@ public void testUpdateQuery_auditException() throws Exception { p.set(PrivateAuditConstants.COLUMN_VISIBILITY, queryVisibility); p.set(PrivateAuditConstants.USER_DN, userDN); p.set(AuditParameters.AUDIT_ID, queryId.toString()); - MultivaluedMap 
auditMap = new MultivaluedMapImpl(); + MultiValueMap auditMap = new LinkedMultiValueMap(); auditMap.putAll(p); // Set expectations @@ -4358,7 +4410,7 @@ public void testUpdateQuery_auditException() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); @@ -4388,18 +4440,17 @@ public void testDefineQuery_userNotInAllowedDNs() throws Exception { boolean trace = false; // Set expectations - MultivaluedMap queryParameters = new MultivaluedMapImpl<>(); - queryParameters.putAll(QueryParametersImpl.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, - expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); + MultivaluedMap queryParameters = MapUtils.toMultivaluedMap( + DefaultQueryParameters.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, + expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); ColumnVisibilitySecurityMarking marking = new ColumnVisibilitySecurityMarking(); marking.validate(queryParameters); - QueryParameters qp = new QueryParametersImpl(); + QueryParameters qp = new DefaultQueryParameters(); qp.validate(queryParameters); - MultivaluedMap op = new MultivaluedMapImpl<>(); - op.putAll(qp.getUnknownParameters(queryParameters)); + MultivaluedMap op = MapUtils.toMultivaluedMap(qp.getUnknownParameters(MapUtils.toMultiValueMap(queryParameters))); op.putSingle(PrivateAuditConstants.LOGIC_CLASS, queryLogicName); op.putSingle(PrivateAuditConstants.COLUMN_VISIBILITY, queryVisibility); expect(this.queryLogicFactory.getQueryLogic(queryLogicName, this.principal)).andReturn((QueryLogic) this.queryLogic1); @@ -4418,9 +4469,9 @@ public void testDefineQuery_userNotInAllowedDNs() throws Exception { QueryExecutorBean subject = new QueryExecutorBean(); setInternalState(subject, EJBContext.class, context); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, SecurityMarking.class, new ColumnVisibilitySecurityMarking()); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); Throwable result1 = null; try { @@ -4459,18 +4510,17 @@ public void testCreateQuery_userNotInAllowedDNs() throws Exception { boolean trace = false; // Set expectations - MultivaluedMap queryParameters = new MultivaluedMapImpl<>(); - queryParameters.putAll(QueryParametersImpl.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, - expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); + MultivaluedMap queryParameters = MapUtils.toMultivaluedMap( + 
DefaultQueryParameters.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, + expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); ColumnVisibilitySecurityMarking marking = new ColumnVisibilitySecurityMarking(); marking.validate(queryParameters); - QueryParameters qp = new QueryParametersImpl(); + QueryParameters qp = new DefaultQueryParameters(); qp.validate(queryParameters); - MultivaluedMap op = new MultivaluedMapImpl<>(); - op.putAll(qp.getUnknownParameters(queryParameters)); + MultivaluedMap op = MapUtils.toMultivaluedMap(qp.getUnknownParameters(MapUtils.toMultiValueMap(queryParameters))); op.putSingle(PrivateAuditConstants.LOGIC_CLASS, queryLogicName); op.putSingle(PrivateAuditConstants.COLUMN_VISIBILITY, queryVisibility); expect(this.queryLogicFactory.getQueryLogic(queryLogicName, this.principal)).andReturn((QueryLogic) this.queryLogic1); @@ -4489,9 +4539,9 @@ public void testCreateQuery_userNotInAllowedDNs() throws Exception { QueryExecutorBean subject = new QueryExecutorBean(); setInternalState(subject, EJBContext.class, context); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, SecurityMarking.class, new ColumnVisibilitySecurityMarking()); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); Throwable result1 = null; try { @@ -4530,18 +4580,17 @@ public void testCreateQueryAndNext_userNotInAllowedDNs() throws Exception { boolean trace = false; // Set expectations - MultivaluedMap queryParameters = new MultivaluedMapImpl<>(); - queryParameters.putAll(QueryParametersImpl.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, - expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); + MultivaluedMap queryParameters = MapUtils.toMultivaluedMap( + DefaultQueryParameters.paramsToMap(queryLogicName, query, queryName, queryVisibility, beginDate, endDate, queryAuthorizations, + expirationDate, pagesize, pageTimeout, maxResultsOverride, persistenceMode, systemFrom, parameters, trace)); ColumnVisibilitySecurityMarking marking = new ColumnVisibilitySecurityMarking(); marking.validate(queryParameters); - QueryParameters qp = new QueryParametersImpl(); + QueryParameters qp = new DefaultQueryParameters(); qp.validate(queryParameters); - MultivaluedMap op = new MultivaluedMapImpl<>(); - op.putAll(qp.getUnknownParameters(queryParameters)); + MultivaluedMap op = MapUtils.toMultivaluedMap(qp.getUnknownParameters(MapUtils.toMultiValueMap(queryParameters))); op.putSingle(PrivateAuditConstants.LOGIC_CLASS, queryLogicName); op.putSingle(PrivateAuditConstants.COLUMN_VISIBILITY, queryVisibility); expect(this.queryLogicFactory.getQueryLogic(queryLogicName, this.principal)).andReturn((QueryLogic) this.queryLogic1); @@ -4560,9 +4609,9 @@ public void testCreateQueryAndNext_userNotInAllowedDNs() throws Exception { QueryExecutorBean subject = new QueryExecutorBean(); setInternalState(subject, EJBContext.class, context); setInternalState(subject, QueryLogicFactory.class, queryLogicFactory); - 
setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, SecurityMarking.class, new ColumnVisibilitySecurityMarking()); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); Throwable result1 = null; try { @@ -4597,16 +4646,16 @@ public void testPlanQuery_userNotInAllowedDNs() throws Exception { boolean trace = false; String userName = "userName"; String userSid = "userSid"; - String userDN = "userdn"; + String userDN = "userDN"; SubjectIssuerDNPair userDNpair = SubjectIssuerDNPair.of(userDN); MultivaluedMap queryParameters = new MultivaluedMapImpl<>(); queryParameters.putSingle(QueryParameters.QUERY_LOGIC_NAME, queryLogicName); queryParameters.putSingle(QueryParameters.QUERY_STRING, query); queryParameters.putSingle(QueryParameters.QUERY_NAME, queryName); - queryParameters.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate)); - queryParameters.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); - queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expirationDate)); + queryParameters.putSingle(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate)); + queryParameters.putSingle(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); + queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expirationDate)); queryParameters.putSingle(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); queryParameters.putSingle(QueryParameters.QUERY_PAGESIZE, String.valueOf(pagesize)); queryParameters.putSingle(QueryParameters.QUERY_PAGETIMEOUT, String.valueOf(pageTimeout)); @@ -4618,11 +4667,10 @@ public void testPlanQuery_userNotInAllowedDNs() throws Exception { ColumnVisibilitySecurityMarking marking = new ColumnVisibilitySecurityMarking(); marking.validate(queryParameters); - QueryParameters qp = new QueryParametersImpl(); + QueryParameters qp = new DefaultQueryParameters(); qp.validate(queryParameters); - MultivaluedMap op = new MultivaluedMapImpl<>(); - op.putAll(qp.getUnknownParameters(queryParameters)); + MultivaluedMap op = MapUtils.toMultivaluedMap(qp.getUnknownParameters(MapUtils.toMultiValueMap(queryParameters))); op.putSingle(PrivateAuditConstants.LOGIC_CLASS, queryLogicName); op.putSingle(PrivateAuditConstants.COLUMN_VISIBILITY, queryVisibility); op.putSingle(PrivateAuditConstants.USER_DN, userDNpair.subjectDN()); @@ -4649,12 +4697,12 @@ public void testPlanQuery_userNotInAllowedDNs() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactoryImpl.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricsBean.class, metrics); setInternalState(subject, Multimap.class, traceInfos); setInternalState(subject, SecurityMarking.class, new ColumnVisibilitySecurityMarking()); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + 
setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); setInternalState(connectionRequestBean, EJBContext.class, context); setInternalState(subject, AccumuloConnectionRequestBean.class, connectionRequestBean); @@ -4669,7 +4717,7 @@ public void testPlanQuery_userNotInAllowedDNs() throws Exception { // Verify results assertTrue("QueryException expected to have been thrown", result1 instanceof QueryException); assertEquals("Thrown exception expected to have been due to access denied", "401", ((QueryException) result1).getErrorCode()); - assertEquals("Thrown exception expected to detail reason for access denial", "None of the DNs used have access to this query logic: [userdn]", + assertEquals("Thrown exception expected to detail reason for access denial", "None of the DNs used have access to this query logic: [userDN]", result1.getMessage()); } @@ -4691,16 +4739,16 @@ public void testPredictQuery_userNotInAllowedDNs() throws Exception { boolean trace = false; String userName = "userName"; String userSid = "userSid"; - String userDN = "userdn"; + String userDN = "userDN"; SubjectIssuerDNPair userDNpair = SubjectIssuerDNPair.of(userDN); MultivaluedMap queryParameters = new MultivaluedMapImpl<>(); queryParameters.putSingle(QueryParameters.QUERY_LOGIC_NAME, queryLogicName); queryParameters.putSingle(QueryParameters.QUERY_STRING, query); queryParameters.putSingle(QueryParameters.QUERY_NAME, queryName); - queryParameters.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate)); - queryParameters.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); - queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expirationDate)); + queryParameters.putSingle(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate)); + queryParameters.putSingle(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); + queryParameters.putSingle(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expirationDate)); queryParameters.putSingle(QueryParameters.QUERY_AUTHORIZATIONS, queryAuthorizations); queryParameters.putSingle(QueryParameters.QUERY_PAGESIZE, String.valueOf(pagesize)); queryParameters.putSingle(QueryParameters.QUERY_PAGETIMEOUT, String.valueOf(pageTimeout)); @@ -4712,11 +4760,10 @@ public void testPredictQuery_userNotInAllowedDNs() throws Exception { ColumnVisibilitySecurityMarking marking = new ColumnVisibilitySecurityMarking(); marking.validate(queryParameters); - QueryParameters qp = new QueryParametersImpl(); + QueryParameters qp = new DefaultQueryParameters(); qp.validate(queryParameters); - MultivaluedMap op = new MultivaluedMapImpl<>(); - op.putAll(qp.getUnknownParameters(queryParameters)); + MultivaluedMap op = MapUtils.toMultivaluedMap(qp.getUnknownParameters(MapUtils.toMultiValueMap(queryParameters))); op.putSingle(PrivateAuditConstants.LOGIC_CLASS, queryLogicName); op.putSingle(PrivateAuditConstants.COLUMN_VISIBILITY, queryVisibility); op.putSingle(PrivateAuditConstants.USER_DN, userDNpair.subjectDN()); @@ -4743,12 +4790,12 @@ public void testPredictQuery_userNotInAllowedDNs() throws Exception { setInternalState(subject, ClosedQueryCache.class, closedCache); setInternalState(subject, Persister.class, persister); setInternalState(subject, QueryLogicFactoryImpl.class, queryLogicFactory); - setInternalState(subject, QueryExpirationConfiguration.class, 
queryExpirationConf); + setInternalState(subject, QueryExpirationProperties.class, queryExpirationConf); setInternalState(subject, AuditBean.class, auditor); setInternalState(subject, QueryMetricsBean.class, metrics); setInternalState(subject, Multimap.class, traceInfos); setInternalState(subject, SecurityMarking.class, new ColumnVisibilitySecurityMarking()); - setInternalState(subject, QueryParameters.class, new QueryParametersImpl()); + setInternalState(subject, QueryParameters.class, new DefaultQueryParameters()); setInternalState(subject, QueryMetricFactory.class, new QueryMetricFactoryImpl()); setInternalState(connectionRequestBean, EJBContext.class, context); setInternalState(subject, AccumuloConnectionRequestBean.class, connectionRequestBean); @@ -4763,7 +4810,7 @@ public void testPredictQuery_userNotInAllowedDNs() throws Exception { // Verify results assertTrue("QueryException expected to have been thrown", result1 instanceof QueryException); assertEquals("Thrown exception expected to have been due to access denied", "401", ((QueryException) result1).getErrorCode()); - assertEquals("Thrown exception expected to detail reason for access denial", "None of the DNs used have access to this query logic: [userdn]", + assertEquals("Thrown exception expected to detail reason for access denial", "None of the DNs used have access to this query logic: [userDN]", result1.getMessage()); } @@ -4840,4 +4887,20 @@ public Set getExampleQueries() { return Collections.emptySet(); } } + + public void populateMetric(Query query, QueryMetric qm) { + qm.setQueryType(query.getClass()); + qm.setQueryId(query.getId().toString()); + qm.setUser(query.getOwner()); + qm.setUserDN(query.getUserDN()); + qm.setQuery(query.getQuery()); + qm.setQueryLogic(query.getQueryLogicName()); + qm.setBeginDate(query.getBeginDate()); + qm.setEndDate(query.getEndDate()); + qm.setQueryAuthorizations(query.getQueryAuthorizations()); + qm.setQueryName(query.getQueryName()); + qm.setParameters(query.getParameters()); + qm.setColumnVisibility(query.getColumnVisibility()); + } + } diff --git a/web-services/query/src/test/java/datawave/webservice/query/runner/ExtendedRunningQueryTest.java b/web-services/query/src/test/java/datawave/webservice/query/runner/ExtendedRunningQueryTest.java index 774f5017323..51cbf1e5378 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/runner/ExtendedRunningQueryTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/runner/ExtendedRunningQueryTest.java @@ -33,6 +33,13 @@ import com.google.common.collect.Lists; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.common.connection.AccumuloConnectionFactory.Priority; +import datawave.core.query.cache.ResultsPage; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.core.query.logic.QueryLogic; +import datawave.microservice.authorization.util.AuthorizationsUtil; +import datawave.microservice.query.Query; import datawave.microservice.querymetric.QueryMetric; import datawave.microservice.querymetric.QueryMetricFactoryImpl; import datawave.security.authorization.DatawavePrincipal; @@ -40,13 +47,6 @@ import datawave.security.authorization.DatawaveUser.UserType; import datawave.security.authorization.SubjectIssuerDNPair; import datawave.security.util.DnUtils; -import datawave.security.util.WSAuthorizationsUtil; -import datawave.webservice.common.connection.AccumuloConnectionFactory; -import 
datawave.webservice.common.connection.AccumuloConnectionFactory.Priority; -import datawave.webservice.query.Query; -import datawave.webservice.query.cache.ResultsPage; -import datawave.webservice.query.configuration.GenericQueryConfiguration; -import datawave.webservice.query.logic.QueryLogic; import datawave.webservice.query.metric.QueryMetricsBean; import datawave.webservice.query.util.QueryUncaughtExceptionHandler; @@ -181,7 +181,7 @@ public void testNext_HappyPathUsingDeprecatedConstructor() throws Exception { expect(this.genericConfiguration.getQueryString()).andReturn(query).once(); expect(this.queryLogic.isLongRunningQuery()).andReturn(false); expect(this.queryLogic.getResultLimit(eq(this.query))).andReturn(maxResults); - this.queryLogic.preInitialize(this.query, WSAuthorizationsUtil.buildAuthorizations(Collections.singleton(Collections.singleton("AUTH_1")))); + this.queryLogic.preInitialize(this.query, AuthorizationsUtil.buildAuthorizations(Collections.singleton(Collections.singleton("AUTH_1")))); expect(this.queryLogic.getUserOperations()).andReturn(null); this.queryLogic.setPageProcessingStartTime(anyLong()); @@ -272,7 +272,7 @@ public void testNextMaxResults_HappyPathUsingDeprecatedConstructor() throws Exce expect(this.queryLogic.getPageByteTrigger()).andReturn(pageByteTrigger).anyTimes(); expect(this.queryLogic.getMaxWork()).andReturn(maxWork).anyTimes(); expect(this.queryLogic.getMaxResults()).andReturn(maxResults).anyTimes(); - this.queryLogic.preInitialize(this.query, WSAuthorizationsUtil.buildAuthorizations(Collections.singleton(Collections.singleton("AUTH_1")))); + this.queryLogic.preInitialize(this.query, AuthorizationsUtil.buildAuthorizations(Collections.singleton(Collections.singleton("AUTH_1")))); expect(this.queryLogic.getUserOperations()).andReturn(null); expect(this.genericConfiguration.getQueryString()).andReturn(query).once(); this.queryLogic.setPageProcessingStartTime(anyLong()); @@ -342,7 +342,7 @@ public void testNext_NoResultsAfterCancellationUsingDeprecatedConstructor() thro expect(this.queryLogic.isLongRunningQuery()).andReturn(false); expect(this.queryLogic.getResultLimit(eq(this.query))).andReturn(maxResults); expect(this.queryLogic.getMaxResults()).andReturn(maxResults); - this.queryLogic.preInitialize(this.query, WSAuthorizationsUtil.buildAuthorizations(Collections.singleton(Collections.singleton("AUTH_1")))); + this.queryLogic.preInitialize(this.query, AuthorizationsUtil.buildAuthorizations(Collections.singleton(Collections.singleton("AUTH_1")))); expect(this.queryLogic.getUserOperations()).andReturn(null); this.queryLogic.setPageProcessingStartTime(anyLong()); @@ -397,7 +397,7 @@ public void testCloseConnection_HappyPath() throws Exception { expect(this.queryLogic.isLongRunningQuery()).andReturn(false); expect(this.queryLogic.getResultLimit(eq(this.query))).andReturn(maxResults); expect(this.queryLogic.getMaxResults()).andReturn(maxResults); - this.queryLogic.preInitialize(this.query, WSAuthorizationsUtil.buildAuthorizations(Collections.singleton(Collections.singleton("AUTH_1")))); + this.queryLogic.preInitialize(this.query, AuthorizationsUtil.buildAuthorizations(Collections.singleton(Collections.singleton("AUTH_1")))); expect(this.queryLogic.getUserOperations()).andReturn(null); this.queryLogic.setupQuery(this.genericConfiguration); this.queryMetrics.updateMetric(isA(QueryMetric.class)); @@ -486,7 +486,7 @@ public void testNextWithDnResultLimit_HappyPathUsingDeprecatedConstructor() thro 
expect(this.queryLogic.getPageByteTrigger()).andReturn(pageByteTrigger).anyTimes(); expect(this.queryLogic.getMaxWork()).andReturn(maxWork).anyTimes(); expect(this.queryLogic.getMaxResults()).andReturn(maxResults).anyTimes(); - this.queryLogic.preInitialize(this.query, WSAuthorizationsUtil.buildAuthorizations(Collections.singleton(Collections.singleton("AUTH_1")))); + this.queryLogic.preInitialize(this.query, AuthorizationsUtil.buildAuthorizations(Collections.singleton(Collections.singleton("AUTH_1")))); expect(this.queryLogic.getUserOperations()).andReturn(null); expect(this.genericConfiguration.getQueryString()).andReturn(query).once(); this.queryLogic.setPageProcessingStartTime(anyLong()); diff --git a/web-services/query/src/test/java/datawave/webservice/query/runner/QueryExecutorBeanTest.java b/web-services/query/src/test/java/datawave/webservice/query/runner/QueryExecutorBeanTest.java index ff1bbf87365..29efa06e89c 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/runner/QueryExecutorBeanTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/runner/QueryExecutorBeanTest.java @@ -61,14 +61,26 @@ import org.xml.sax.SAXException; import com.google.common.collect.HashMultimap; -import com.google.common.collect.Maps; import com.google.common.collect.Multimap; import com.google.common.collect.Sets; import datawave.accumulo.inmemory.InMemoryAccumuloClient; import datawave.accumulo.inmemory.InMemoryInstance; +import datawave.core.common.audit.PrivateAuditConstants; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.query.configuration.GenericQueryConfiguration; +import datawave.core.query.logic.BaseQueryLogic; +import datawave.core.query.logic.QueryLogic; +import datawave.core.query.logic.QueryLogicFactory; +import datawave.core.query.predict.QueryPredictor; import datawave.marking.ColumnVisibilitySecurityMarking; import datawave.marking.SecurityMarking; +import datawave.microservice.query.DefaultQueryParameters; +import datawave.microservice.query.Query; +import datawave.microservice.query.QueryImpl; +import datawave.microservice.query.QueryParameters; +import datawave.microservice.query.QueryPersistence; +import datawave.microservice.query.config.QueryExpirationProperties; import datawave.microservice.querymetric.BaseQueryMetric; import datawave.microservice.querymetric.BaseQueryMetric.Lifecycle; import datawave.microservice.querymetric.BaseQueryMetric.Prediction; @@ -87,31 +99,21 @@ import datawave.webservice.common.audit.AuditService; import datawave.webservice.common.audit.Auditor.AuditType; import datawave.webservice.common.audit.DefaultAuditParameterBuilder; -import datawave.webservice.common.audit.PrivateAuditConstants; -import datawave.webservice.common.connection.AccumuloConnectionFactory; import datawave.webservice.common.exception.BadRequestException; import datawave.webservice.common.exception.DatawaveWebApplicationException; -import datawave.webservice.query.Query; -import datawave.webservice.query.QueryImpl; -import datawave.webservice.query.QueryParameters; -import datawave.webservice.query.QueryParametersImpl; -import datawave.webservice.query.QueryPersistence; import datawave.webservice.query.cache.ClosedQueryCache; import datawave.webservice.query.cache.CreatedQueryLogicCacheBean; import datawave.webservice.query.cache.CreatedQueryLogicCacheBean.Triple; import datawave.webservice.query.cache.QueryCache; -import datawave.webservice.query.cache.QueryExpirationConfiguration; import 
datawave.webservice.query.cache.QueryTraceCache; -import datawave.webservice.query.configuration.GenericQueryConfiguration; import datawave.webservice.query.configuration.LookupUUIDConfiguration; import datawave.webservice.query.exception.DatawaveErrorCode; import datawave.webservice.query.exception.QueryException; import datawave.webservice.query.factory.Persister; -import datawave.webservice.query.logic.BaseQueryLogic; -import datawave.webservice.query.logic.QueryLogic; -import datawave.webservice.query.logic.QueryLogicFactory; import datawave.webservice.query.logic.QueryLogicFactoryImpl; import datawave.webservice.query.metric.QueryMetricsBean; +import datawave.webservice.query.result.event.ResponseObjectFactory; +import datawave.webservice.query.util.MapUtils; import datawave.webservice.result.GenericResponse; @RunWith(PowerMockRunner.class) @@ -144,9 +146,10 @@ public class QueryExecutorBeanTest { private AuditService auditService; private QueryMetricsBean metrics; private QueryLogicFactoryImpl queryLogicFactory; - private QueryExpirationConfiguration queryExpirationConf; + private QueryExpirationProperties queryExpirationConf; private Persister persister; private QueryPredictor predictor; + private ResponseObjectFactory responseObjectFactory; private EJBContext ctx; private CreatedQueryLogicCacheBean qlCache; private QueryExecutorBean bean; @@ -177,21 +180,23 @@ public void setup() throws Exception { predictor = createStrictMock(QueryPredictor.class); ctx = createStrictMock(EJBContext.class); qlCache = new CreatedQueryLogicCacheBean(); - queryExpirationConf = new QueryExpirationConfiguration(); - queryExpirationConf.setPageSizeShortCircuitCheckTime(45); - queryExpirationConf.setPageShortCircuitTimeout(58); - queryExpirationConf.setCallTime(60); + queryExpirationConf = new QueryExpirationProperties(); + queryExpirationConf.setShortCircuitCheckTime(45); + queryExpirationConf.setShortCircuitTimeout(58); + queryExpirationConf.setCallTimeout(60); connectionRequestBean = createStrictMock(AccumuloConnectionRequestBean.class); + responseObjectFactory = createStrictMock(ResponseObjectFactory.class); setInternalState(auditor, AuditService.class, auditService); setInternalState(auditor, AuditParameterBuilder.class, new DefaultAuditParameterBuilder()); setInternalState(connectionRequestBean, EJBContext.class, ctx); setInternalState(bean, QueryCache.class, cache); + setInternalState(bean, ResponseObjectFactory.class, responseObjectFactory); setInternalState(bean, ClosedQueryCache.class, closedCache); setInternalState(bean, AccumuloConnectionFactory.class, connectionFactory); setInternalState(bean, AuditBean.class, auditor); setInternalState(bean, QueryMetricsBean.class, metrics); setInternalState(bean, QueryLogicFactory.class, queryLogicFactory); - setInternalState(bean, QueryExpirationConfiguration.class, queryExpirationConf); + setInternalState(bean, QueryExpirationProperties.class, queryExpirationConf); setInternalState(bean, Persister.class, persister); setInternalState(bean, QueryPredictor.class, predictor); setInternalState(bean, EJBContext.class, ctx); @@ -200,7 +205,7 @@ public void setup() throws Exception { setInternalState(bean, Multimap.class, HashMultimap.create()); setInternalState(bean, LookupUUIDConfiguration.class, new LookupUUIDConfiguration()); setInternalState(bean, SecurityMarking.class, new ColumnVisibilitySecurityMarking()); - setInternalState(bean, QueryParameters.class, new QueryParametersImpl()); + setInternalState(bean, QueryParameters.class, new 
DefaultQueryParameters()); setInternalState(bean, QueryMetricFactory.class, new QueryMetricFactoryImpl()); setInternalState(bean, AccumuloConnectionRequestBean.class, connectionRequestBean); @@ -242,9 +247,9 @@ private MultivaluedMap createNewQueryParameterMap() throws Except p.putSingle(QueryParameters.QUERY_STRING, "foo == 'bar'"); p.putSingle(QueryParameters.QUERY_NAME, "query name"); p.putSingle(QueryParameters.QUERY_AUTHORIZATIONS, StringUtils.join(auths, ",")); - p.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate)); - p.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); - p.putSingle(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expirationDate)); + p.putSingle(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate)); + p.putSingle(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); + p.putSingle(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expirationDate)); p.putSingle(QueryParameters.QUERY_NAME, queryName); p.putSingle(QueryParameters.QUERY_PAGESIZE, Integer.toString(pagesize)); p.putSingle(QueryParameters.QUERY_STRING, query); @@ -255,9 +260,8 @@ private MultivaluedMap createNewQueryParameterMap() throws Except } private MultivaluedMap createNewQueryParameters(QueryImpl q, MultivaluedMap p) { - QueryParameters qp = new QueryParametersImpl(); - MultivaluedMap optionalParameters = new MultivaluedMapImpl<>(); - optionalParameters.putAll(qp.getUnknownParameters(p)); + QueryParameters qp = new DefaultQueryParameters(); + MultivaluedMap optionalParameters = MapUtils.toMultivaluedMap(qp.getUnknownParameters(MapUtils.toMultiValueMap(p))); optionalParameters.putSingle(PrivateAuditConstants.USER_DN, userDN.toLowerCase()); optionalParameters.putSingle(PrivateAuditConstants.COLUMN_VISIBILITY, "PRIVATE|PUBLIC"); optionalParameters.putSingle(PrivateAuditConstants.LOGIC_CLASS, q.getQueryLogicName()); @@ -281,10 +285,9 @@ private void defineTestRunner(QueryImpl q, MultivaluedMap p) thro PowerMock.resetAll(); EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal).anyTimes(); - suppress(constructor(QueryParametersImpl.class)); + suppress(constructor(DefaultQueryParameters.class)); EasyMock.expect(persister.create(principal.getUserDN().subjectDN(), dnList, (SecurityMarking) Whitebox.getField(bean.getClass(), "marking").get(bean), queryLogicName, (QueryParameters) Whitebox.getField(bean.getClass(), "qp").get(bean), optionalParameters)).andReturn(q); - EasyMock.expect(queryLogicFactory.getQueryLogic(queryLogicName, principal)).andReturn(logic); EasyMock.expect(logic.getRequiredQueryParameters()).andReturn(Collections.EMPTY_SET); EasyMock.expect(logic.getConnectionPriority()).andReturn(AccumuloConnectionFactory.Priority.NORMAL); @@ -295,6 +298,8 @@ private void defineTestRunner(QueryImpl q, MultivaluedMap p) thro EasyMock.expect(logic.getMaxResults()).andReturn(-1L); logic.preInitialize(q, WSAuthorizationsUtil.buildAuthorizations(Collections.singleton(Sets.newHashSet("PUBLIC", "PRIVATE")))); EasyMock.expect(logic.getUserOperations()).andReturn(null); + EasyMock.expect(responseObjectFactory.getQueryImpl()).andReturn(new QueryImpl()); + EasyMock.expect(logic.getResultLimit(anyObject(QueryImpl.class))).andReturn(-1L); PowerMock.replayAll(); bean.defineQuery(queryLogicName, p); @@ -342,9 +347,9 @@ public void testCreateWithNoSelectedAuths() throws Exception { MultivaluedMap p = new MultivaluedMapImpl<>(); p.putSingle(QueryParameters.QUERY_AUTHORIZATIONS, ""); - 
p.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate)); - p.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(beginDate)); - p.putSingle(QueryParameters.QUERY_EXPIRATION, QueryParametersImpl.formatDate(expirationDate)); + p.putSingle(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate)); + p.putSingle(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(beginDate)); + p.putSingle(QueryParameters.QUERY_EXPIRATION, DefaultQueryParameters.formatDate(expirationDate)); p.putSingle(QueryParameters.QUERY_NAME, queryName); p.putSingle(QueryParameters.QUERY_PAGESIZE, Integer.toString(pagesize)); p.putSingle(QueryParameters.QUERY_STRING, query); @@ -354,9 +359,8 @@ public void testCreateWithNoSelectedAuths() throws Exception { InMemoryInstance instance = new InMemoryInstance(); AccumuloClient client = new InMemoryAccumuloClient("root", instance); - QueryParameters qp = new QueryParametersImpl(); - MultivaluedMap optionalParameters = new MultivaluedMapImpl<>(); - optionalParameters.putAll(qp.getUnknownParameters(p)); + QueryParameters qp = new DefaultQueryParameters(); + MultivaluedMap optionalParameters = MapUtils.toMultivaluedMap(qp.getUnknownParameters(MapUtils.toMultiValueMap(p))); DatawaveUser user = new DatawaveUser(SubjectIssuerDNPair.of(userDN, ""), UserType.USER, Arrays.asList(auths), null, null, 0L); @@ -367,10 +371,13 @@ public void testCreateWithNoSelectedAuths() throws Exception { PowerMock.resetAll(); EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal).anyTimes(); - suppress(constructor(QueryParametersImpl.class)); + suppress(constructor(DefaultQueryParameters.class)); EasyMock.expect(persister.create(userDN, dnList, (SecurityMarking) Whitebox.getField(bean.getClass(), "marking").get(bean), queryLogicName, (QueryParameters) Whitebox.getField(bean.getClass(), "qp").get(bean), optionalParameters)).andReturn(q); + EasyMock.expect(responseObjectFactory.getQueryImpl()).andReturn(new QueryImpl()); + EasyMock.expect(logic.getResultLimit(anyObject(QueryImpl.class))).andReturn(-1L); + EasyMock.expect(queryLogicFactory.getQueryLogic(queryLogicName, principal)).andReturn(logic); EasyMock.expect(logic.getRequiredQueryParameters()).andReturn(Collections.EMPTY_SET); EasyMock.expect(logic.containsDNWithAccess(dnList)).andReturn(true); @@ -425,7 +432,7 @@ public void testPredict() throws Exception { PowerMock.resetAll(); EasyMock.expect(ctx.getCallerPrincipal()).andReturn(principal).anyTimes(); - suppress(constructor(QueryParametersImpl.class)); + suppress(constructor(DefaultQueryParameters.class)); EasyMock.expect(persister.create(principal.getUserDN().subjectDN(), dnList, (SecurityMarking) Whitebox.getField(bean.getClass(), "marking").get(bean), queryLogicName, (QueryParameters) Whitebox.getField(bean.getClass(), "qp").get(bean), optionalParameters)).andReturn(q); @@ -477,6 +484,8 @@ public boolean equals(Object o) { Set predictions = new HashSet<>(); predictions.add(new Prediction("source", 1)); + EasyMock.expect(responseObjectFactory.getQueryImpl()).andReturn(new QueryImpl()); + EasyMock.expect(logic.getResultLimit(anyObject(QueryImpl.class))).andReturn(-1L); EasyMock.expect(predictor.predict(EasyMock.eq(testMetric))).andReturn(predictions); PowerMock.replayAll(); @@ -617,8 +626,8 @@ public void testBeginDateAfterEndDate() throws Exception { final MultivaluedMap queryParameters = createNewQueryParameterMap(); queryParameters.remove(QueryParameters.QUERY_BEGIN); queryParameters.remove(QueryParameters.QUERY_END); - 
queryParameters.putSingle(QueryParameters.QUERY_BEGIN, QueryParametersImpl.formatDate(beginDate)); - queryParameters.putSingle(QueryParameters.QUERY_END, QueryParametersImpl.formatDate(endDate)); + queryParameters.putSingle(QueryParameters.QUERY_BEGIN, DefaultQueryParameters.formatDate(beginDate)); + queryParameters.putSingle(QueryParameters.QUERY_END, DefaultQueryParameters.formatDate(endDate)); try { queryParameters.putSingle(QueryParameters.QUERY_LOGIC_NAME, "EventQueryLogic"); @@ -671,22 +680,23 @@ public void testCloseActuallyCloses() throws Exception { EasyMock.expect(persister.create(principal.getUserDN().subjectDN(), dnList, Whitebox.getInternalState(bean, SecurityMarking.class), queryLogicName, Whitebox.getInternalState(bean, QueryParameters.class), optionalParameters)).andReturn(q); EasyMock.expect(persister.findById(EasyMock.anyString())).andReturn(null).anyTimes(); - EasyMock.expect(connectionFactory.getTrackingMap(anyObject())).andReturn(Maps.newHashMap()).anyTimes(); + EasyMock.expect(responseObjectFactory.getQueryImpl()).andReturn(new QueryImpl()); + EasyMock.expect(logic.getResultLimit(anyObject(QueryImpl.class))).andReturn(-1L); + EasyMock.expect(connectionFactory.getTrackingMap(anyObject())).andReturn(null).anyTimes(); BaseQueryMetric metric = new QueryMetricFactoryImpl().createMetric(); metric.populate(q); EasyMock.expectLastCall(); metric.setQueryType(RunningQuery.class.getSimpleName()); metric.setLifecycle(Lifecycle.DEFINED); - System.out.println(metric); Set predictions = new HashSet<>(); predictions.add(new Prediction("source", 1)); EasyMock.expect(predictor.predict(metric)).andReturn(predictions); - connectionRequestBean.requestBegin(q.getId().toString()); + connectionRequestBean.requestBegin(q.getId().toString(), userDN.toLowerCase(), null); EasyMock.expectLastCall(); - EasyMock.expect(connectionFactory.getClient(eq("connPool1"), anyObject(), anyObject())).andReturn(c).anyTimes(); + EasyMock.expect(connectionFactory.getClient(eq(userDN.toLowerCase()), eq(null), eq("connPool1"), anyObject(), anyObject())).andReturn(c).anyTimes(); connectionRequestBean.requestEnd(q.getId().toString()); EasyMock.expectLastCall(); connectionFactory.returnClient(c); @@ -704,7 +714,7 @@ public void testCloseActuallyCloses() throws Exception { logic.preInitialize(q, WSAuthorizationsUtil.buildAuthorizations(Collections.singleton(Sets.newHashSet("PUBLIC", "PRIVATE")))); EasyMock.expect(logic.getUserOperations()).andReturn(null); - EasyMock.expect(connectionRequestBean.cancelConnectionRequest(q.getId().toString(), principal)).andReturn(false).anyTimes(); + EasyMock.expect(connectionRequestBean.cancelConnectionRequest(q.getId().toString(), userDN.toLowerCase())).andReturn(false).anyTimes(); connectionFactory.returnClient(EasyMock.isA(AccumuloClient.class)); final AtomicBoolean initializeLooping = new AtomicBoolean(false); diff --git a/web-services/query/src/test/java/datawave/webservice/query/runner/RunningQueryTest.java b/web-services/query/src/test/java/datawave/webservice/query/runner/RunningQueryTest.java index b67228d7b55..cb3dc873a16 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/runner/RunningQueryTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/runner/RunningQueryTest.java @@ -32,6 +32,13 @@ import datawave.accumulo.inmemory.InMemoryAccumuloClient; import datawave.accumulo.inmemory.InMemoryInstance; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.query.configuration.GenericQueryConfiguration; 
+import datawave.core.query.logic.BaseQueryLogic; +import datawave.core.query.logic.QueryLogic; +import datawave.core.query.logic.composite.CompositeQueryLogic; +import datawave.microservice.authorization.util.AuthorizationsUtil; +import datawave.microservice.query.QueryImpl; import datawave.microservice.querymetric.QueryMetricFactoryImpl; import datawave.security.authorization.AuthorizationException; import datawave.security.authorization.DatawavePrincipal; @@ -39,15 +46,7 @@ import datawave.security.authorization.DatawaveUser.UserType; import datawave.security.authorization.SubjectIssuerDNPair; import datawave.security.util.DnUtils; -import datawave.security.util.WSAuthorizationsUtil; -import datawave.webservice.common.connection.AccumuloConnectionFactory; -import datawave.webservice.query.QueryImpl; -import datawave.webservice.query.configuration.GenericQueryConfiguration; -import datawave.webservice.query.logic.BaseQueryLogic; -import datawave.webservice.query.logic.DatawaveRoleManager; -import datawave.webservice.query.logic.QueryLogic; import datawave.webservice.query.logic.TestQueryLogic; -import datawave.webservice.query.logic.composite.CompositeQueryLogic; import datawave.webservice.query.logic.composite.CompositeQueryLogicTest; public class RunningQueryTest { @@ -118,7 +117,7 @@ public void testConstructorSetsConnection() throws Exception { expect(logic.isLongRunningQuery()).andReturn(false); expect(logic.getResultLimit(settings)).andReturn(-1L); expect(logic.getMaxResults()).andReturn(-1L); - logic.preInitialize(settings, WSAuthorizationsUtil.buildAuthorizations(null)); + logic.preInitialize(settings, AuthorizationsUtil.buildAuthorizations(null)); expect(logic.getUserOperations()).andReturn(null); replay(logic); @@ -141,7 +140,7 @@ public void testConstructorWithNullConnector() throws Exception { expect(logic.isLongRunningQuery()).andReturn(false); expect(logic.getResultLimit(settings)).andReturn(-1L); expect(logic.getMaxResults()).andReturn(-1L); - logic.preInitialize(settings, WSAuthorizationsUtil.buildAuthorizations(null)); + logic.preInitialize(settings, AuthorizationsUtil.buildAuthorizations(null)); expect(logic.getUserOperations()).andReturn(null); replay(logic); @@ -163,7 +162,7 @@ public void testConstructorShouldNotMergeAuths() throws Exception { Authorizations expected = new Authorizations(auths); expect(logic.getCollectQueryMetrics()).andReturn(false); - logic.preInitialize(settings, WSAuthorizationsUtil.buildAuthorizations(Collections.singleton(Sets.newHashSet("A", "B", "C")))); + logic.preInitialize(settings, AuthorizationsUtil.buildAuthorizations(Collections.singleton(Sets.newHashSet("A", "B", "C")))); expect(logic.getUserOperations()).andReturn(null); replay(logic); @@ -189,21 +188,21 @@ public void testWithCompositeQueryLogic() throws Exception { HashSet roles = new HashSet<>(); roles.add("NONTESTROLE"); logic1.setTableName("thisTable"); - logic1.setRoleManager(new DatawaveRoleManager(roles)); + logic1.setRequiredRoles(roles); CompositeQueryLogicTest.TestQueryLogic2 logic2 = new CompositeQueryLogicTest.TestQueryLogic2(); HashSet roles2 = new HashSet<>(); roles2.add("NONTESTROLE"); logic2.setTableName("thatTable"); - logic2.setRoleManager(new DatawaveRoleManager(roles2)); - logics.put("TestQuery1", logic1); - logics.put("TestQuery2", logic2); + logic2.setRequiredRoles(roles2); + logics.put("TestQueryLogic", logic1); + logics.put("TestQueryLogic2", logic2); CompositeQueryLogic compositeQueryLogic = new CompositeQueryLogic(); 
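[Editor's note — the test rewrites above show the API migrations that recur throughout this PR: connection requests now carry the caller identity (requestBegin and getClient take the user DN plus what appears to be a proxied-DN list), role enforcement moves off the external DatawaveRoleManager onto the query logic itself via setRequiredRoles, and the caller is attached with setCurrentUser(...) instead of setPrincipal(...). The parameter-map churn likewise swaps JAX-RS MultivaluedMapImpl/putSingle for Spring's LinkedMultiValueMap/set, bridged by MapUtils. The MapUtils implementation is not part of this diff, so the following is only a plausible sketch of the two helpers, assuming String keys and values; MapUtilsSketch and its method bodies are this note's inventions, while javax.ws.rs.core.MultivaluedHashMap and org.springframework.util.LinkedMultiValueMap are the standard implementations:

    import javax.ws.rs.core.MultivaluedHashMap;
    import javax.ws.rs.core.MultivaluedMap;
    import org.springframework.util.LinkedMultiValueMap;
    import org.springframework.util.MultiValueMap;

    public final class MapUtilsSketch {
        // Both interfaces extend Map<String,List<String>>, so each direction
        // is a straight copy of the key -> value-list entries.
        public static MultivaluedMap<String,String> toMultivaluedMap(MultiValueMap<String,String> in) {
            MultivaluedMap<String,String> out = new MultivaluedHashMap<>();
            in.forEach(out::put);
            return out;
        }

        public static MultiValueMap<String,String> toMultiValueMap(MultivaluedMap<String,String> in) {
            MultiValueMap<String,String> out = new LinkedMultiValueMap<>();
            in.forEach(out::put);
            return out;
        }
    }
]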
compositeQueryLogic.setQueryLogics(logics); DatawaveUser user = new DatawaveUser(userDN, UserType.USER, Arrays.asList(auths), null, null, 0L); DatawavePrincipal principal = new DatawavePrincipal(Collections.singletonList(user)); - compositeQueryLogic.setPrincipal(principal); + compositeQueryLogic.setCurrentUser(principal); try { RunningQuery query = new RunningQuery(client, connectionPriority, compositeQueryLogic, settings, null, principal, new QueryMetricFactoryImpl()); } catch (NullPointerException npe) { diff --git a/web-services/query/src/test/java/datawave/webservice/query/util/LookupUUIDUtilTest.java b/web-services/query/src/test/java/datawave/webservice/query/util/LookupUUIDUtilTest.java index a1509238b8a..ea6ba2e886a 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/util/LookupUUIDUtilTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/util/LookupUUIDUtilTest.java @@ -17,12 +17,12 @@ import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.junit4.PowerMockRunner; +import datawave.core.query.logic.QueryLogicFactory; +import datawave.microservice.query.Query; +import datawave.microservice.query.QueryImpl; import datawave.query.data.UUIDType; import datawave.security.authorization.UserOperations; -import datawave.webservice.query.Query; -import datawave.webservice.query.QueryImpl; import datawave.webservice.query.configuration.LookupUUIDConfiguration; -import datawave.webservice.query.logic.QueryLogicFactory; import datawave.webservice.query.result.event.ResponseObjectFactory; import datawave.webservice.query.runner.QueryExecutor; @@ -45,6 +45,7 @@ public class LookupUUIDUtilTest { @Test public void testCreateSettings() { + expect(configuration.getContentLookupTypes()).andReturn(Collections.emptyMap()); expect(configuration.getUuidTypes()).andReturn(Collections.singletonList(new UUIDType("ID", "LuceneUUIDEventQuery", 28))); expect(configuration.getBeginDate()).andReturn("20230101"); expect(configuration.getBatchLookupUpperLimit()).andReturn(10); diff --git a/web-services/query/src/test/java/datawave/webservice/query/util/QueryUtilTest.java b/web-services/query/src/test/java/datawave/webservice/query/util/QueryUtilTest.java index 56874dbad22..ed52bad46b6 100644 --- a/web-services/query/src/test/java/datawave/webservice/query/util/QueryUtilTest.java +++ b/web-services/query/src/test/java/datawave/webservice/query/util/QueryUtilTest.java @@ -14,9 +14,10 @@ import com.google.protobuf.InvalidProtocolBufferException; -import datawave.webservice.query.Query; -import datawave.webservice.query.QueryImpl; -import datawave.webservice.query.QueryImpl.Parameter; +import datawave.core.query.util.QueryUtil; +import datawave.microservice.query.Query; +import datawave.microservice.query.QueryImpl; +import datawave.microservice.query.QueryImpl.Parameter; public class QueryUtilTest { diff --git a/web-services/query/src/test/resources/TestConfiguredQueryLogicFactory.xml b/web-services/query/src/test/resources/TestConfiguredQueryLogicFactory.xml index 075857b342f..c042a99cdb6 100644 --- a/web-services/query/src/test/resources/TestConfiguredQueryLogicFactory.xml +++ b/web-services/query/src/test/resources/TestConfiguredQueryLogicFactory.xml @@ -46,7 +46,6 @@ - @@ -59,12 +58,5 @@ - - - - - - - - \ No newline at end of file + diff --git a/web-services/query/src/test/resources/TestQueryLogicFactory.xml b/web-services/query/src/test/resources/TestQueryLogicFactory.xml index f1bab540051..a7391d162e0 100644 --- 
a/web-services/query/src/test/resources/TestQueryLogicFactory.xml +++ b/web-services/query/src/test/resources/TestQueryLogicFactory.xml @@ -21,32 +21,17 @@ - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/web-services/rest-api/pom.xml b/web-services/rest-api/pom.xml index bb193d06c80..3b437cf6512 100644 --- a/web-services/rest-api/pom.xml +++ b/web-services/rest-api/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-rest-api war diff --git a/web-services/rest-api/src/main/webapp/WEB-INF/web.xml b/web-services/rest-api/src/main/webapp/WEB-INF/web.xml index 0279c35a755..e6ee4223324 100644 --- a/web-services/rest-api/src/main/webapp/WEB-INF/web.xml +++ b/web-services/rest-api/src/main/webapp/WEB-INF/web.xml @@ -54,7 +54,7 @@ datawave.configuration.ConfigurationBean, - datawave.webservice.common.cache.AccumuloTableCache, + datawave.webservice.common.cache.AccumuloTableCacheBean, datawave.webservice.common.connection.AccumuloConnectionFactoryBean, datawave.webservice.common.audit.AuditBean, datawave.webservice.common.health.HealthBean, diff --git a/web-services/security/pom.xml b/web-services/security/pom.xml index 26fcb74f391..e40f9d3f842 100644 --- a/web-services/security/pom.xml +++ b/web-services/security/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-security ejb @@ -100,6 +100,10 @@ org.powermock powermock-module-junit4 + + org.powermock + powermock-reflect + com.fasterxml.jackson.module jackson-module-jaxb-annotations @@ -249,11 +253,21 @@ weld-core-impl test + + org.mockito + mockito-core + test + org.springframework spring-expression test + + org.springframework + spring-test + test + ${project.artifactId} @@ -261,9 +275,6 @@ true src/main/resources - - source-templates/** - test-classes @@ -307,45 +318,6 @@ - - maven-resources-plugin - - - copy-templated-sources - - copy-resources - - validate - - ${project.build.directory}/generated-sources/templated-sources - - - src/main/resources/source-templates - true - - - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - add-source - - add-source - - generate-sources - - - target/generated-sources/templated-sources - - - - - diff --git a/web-services/security/src/main/java/datawave/security/auth/DatawaveAuthenticationMechanism.java b/web-services/security/src/main/java/datawave/security/auth/DatawaveAuthenticationMechanism.java index b0d0dfa4e85..c70294be172 100644 --- a/web-services/security/src/main/java/datawave/security/auth/DatawaveAuthenticationMechanism.java +++ b/web-services/security/src/main/java/datawave/security/auth/DatawaveAuthenticationMechanism.java @@ -7,14 +7,21 @@ import java.security.cert.Certificate; import java.security.cert.X509Certificate; import java.util.Arrays; +import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; import javax.net.ssl.SSLPeerUnverifiedException; +import javax.security.auth.login.AccountLockedException; +import javax.security.auth.login.CredentialException; +import javax.security.auth.login.FailedLoginException; +import javax.security.auth.login.LoginException; import org.apache.commons.lang3.StringUtils; +import org.apache.http.HttpStatus; +import org.jboss.security.SecurityContextAssociation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.xnio.SslClientAuthMode; @@ -60,6 +67,7 @@ public class DatawaveAuthenticationMechanism 
implements AuthenticationMechanism private final boolean trustedHeaderAuthentication; private final boolean jwtHeaderAuthentication; private final Set dnsToPrune; + private final Map<Class<? extends LoginException>,Integer> returnCodeMap = new HashMap<>(); @SuppressWarnings("UnusedDeclaration") public DatawaveAuthenticationMechanism() { @@ -90,6 +98,16 @@ public DatawaveAuthenticationMechanism(String mechanismName, boolean forceRenego } SUBJECT_DN_HEADER = System.getProperty("dw.trusted.header.subjectDn", "X-SSL-ClientCert-Subject".toLowerCase()); ISSUER_DN_HEADER = System.getProperty("dw.trusted.header.issuerDn", "X-SSL-ClientCert-Issuer".toLowerCase()); + // These LoginExceptions are thrown from DatawavePrincipalLoginModule and + // caught and saved in the SecurityContext in JBossCachedAuthenticationManager. + + // there was some problem with the credential that prevented evaluation + returnCodeMap.put(CredentialException.class, HttpStatus.SC_UNAUTHORIZED); + // credential was evaluated and rejected + returnCodeMap.put(AccountLockedException.class, HttpStatus.SC_FORBIDDEN); + returnCodeMap.put(FailedLoginException.class, HttpStatus.SC_FORBIDDEN); + // there was a system error that prevented evaluation of the credential + returnCodeMap.put(LoginException.class, HttpStatus.SC_SERVICE_UNAVAILABLE); } @Override @@ -223,7 +241,25 @@ private Certificate[] getPeerCertificates(HttpServerExchange exchange, SSLSessio @Override public ChallengeResult sendChallenge(HttpServerExchange httpServerExchange, SecurityContext securityContext) { - return new ChallengeResult(false); + // FORBIDDEN (403) was the previous default response code returned when an exception happened + // in the DatawavePrincipalLoginModule and this method returned ChallengeResult(false) + int returnCode = HttpStatus.SC_FORBIDDEN; + org.jboss.security.SecurityContext sc = SecurityContextAssociation.getSecurityContext(); + if (sc != null) { + // A LoginException is thrown from DatawavePrincipalLoginModule and caught + // and saved in the SecurityContext in JBossCachedAuthenticationManager.
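[Editor's note — net effect of the returnCodeMap entries above: CredentialException maps to 401, AccountLockedException and FailedLoginException map to 403, and LoginException itself maps to 503. The lookup below matches the exception's exact class, so unmapped LoginException subclasses fall through to the 403 default initialized at the top of sendChallenge.]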
+ Exception e = (Exception) sc.getData().get("org.jboss.security.exception"); + if (e != null) { + if (returnCodeMap.containsKey(e.getClass())) { + returnCode = returnCodeMap.get(e.getClass()); + } + if (logger.isTraceEnabled()) { + logger.trace("exception class: {} returnCode: {}", e.getClass().getCanonicalName(), returnCode); + } + } + } + // The ChallengeResult is evaluated in SecurityContextImpl.transition() + return new ChallengeResult(true, returnCode); } private String getSingleHeader(HeaderMap headers, String headerName) throws MultipleHeaderException { diff --git a/web-services/security/src/main/java/datawave/security/authorization/remote/ConditionalRemoteUserOperations.java b/web-services/security/src/main/java/datawave/security/authorization/remote/ConditionalRemoteUserOperations.java deleted file mode 100644 index 788407c714c..00000000000 --- a/web-services/security/src/main/java/datawave/security/authorization/remote/ConditionalRemoteUserOperations.java +++ /dev/null @@ -1,109 +0,0 @@ -package datawave.security.authorization.remote; - -import java.util.Collections; -import java.util.function.Function; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import datawave.security.authorization.AuthorizationException; -import datawave.security.authorization.DatawavePrincipal; -import datawave.security.authorization.UserOperations; -import datawave.user.AuthorizationsListBase; -import datawave.webservice.query.result.event.ResponseObjectFactory; -import datawave.webservice.result.GenericResponse; - -/** - * A conditional remote user operations will only invoke the delegate remote service base on a specified function of the specified principal. For example we may - * only need to invoke the remote user operations if we know the remote system will have additional auths that this user will need for the query logic being - * invoked. - * - * An example may be a composite query that call a local and a remote query logic. Perhaps we can already tell that the user will not be able to get any - * additional authorities from the remote system and hence the remote call will not be required. 
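[Editor's note — the javadoc above documents the class this PR removes. A minimal usage sketch of the pattern it implemented, reconstructed from this javadoc and the accessors below; the delegate, factory, and predicate shown here are hypothetical stand-ins:

    ConditionalRemoteUserOperations userOps = new ConditionalRemoteUserOperations();
    userOps.setDelegate(remoteUserOperations);          // the remote UserOperations to call conditionally
    userOps.setResponseObjectFactory(responseObjectFactory);
    // The condition is a Function<DatawavePrincipal,Boolean>; invoke the remote system only
    // when it could grant this caller additional authorizations (hypothetical role check):
    userOps.setCondition(p -> p.getPrimaryUser().getRoles().contains("REMOTE_AUTHS"));
    // listEffectiveAuthorizations(...) then delegates when the condition returns true; otherwise
    // it returns an empty AuthorizationsListBase built from the ResponseObjectFactory.
]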
- */ -public class ConditionalRemoteUserOperations implements UserOperations { - private static final Logger log = LoggerFactory.getLogger(ConditionalRemoteUserOperations.class); - - private UserOperations delegate; - private Function condition; - private ResponseObjectFactory responseObjectFactory; - - private static final GenericResponse EMPTY_RESPONSE = new GenericResponse<>(); - - public boolean isFiltered(DatawavePrincipal principal) { - if (!condition.apply(principal)) { - if (log.isDebugEnabled()) { - log.debug("Filter " + condition + " blocking " + principal.getName() + " from " + delegate + " user operations"); - } - return true; - } else { - if (log.isDebugEnabled()) { - log.debug("Passing through filter " + condition + " for " + principal.getName() + " for " + delegate + " user operations"); - } - return false; - } - } - - @Override - public AuthorizationsListBase listEffectiveAuthorizations(Object callerObject) throws AuthorizationException { - assert (delegate != null); - assert (condition != null); - assert (responseObjectFactory != null); - - final DatawavePrincipal principal = getDatawavePrincipal(callerObject); - - if (!isFiltered(principal)) { - return delegate.listEffectiveAuthorizations(callerObject); - } else { - AuthorizationsListBase response = responseObjectFactory.getAuthorizationsList(); - response.setUserAuths(principal.getUserDN().subjectDN(), principal.getUserDN().issuerDN(), Collections.EMPTY_LIST); - return response; - } - } - - @Override - public GenericResponse flushCachedCredentials(Object callerObject) throws AuthorizationException { - assert (delegate != null); - assert (condition != null); - assert (responseObjectFactory != null); - - final DatawavePrincipal principal = getDatawavePrincipal(callerObject); - - if (!isFiltered(principal)) { - return delegate.flushCachedCredentials(callerObject); - } else { - return EMPTY_RESPONSE; - } - } - - private DatawavePrincipal getDatawavePrincipal(Object callerObject) { - if (callerObject instanceof DatawavePrincipal) { - return (DatawavePrincipal) callerObject; - } - throw new RuntimeException("Cannot handle a " + callerObject.getClass() + ". 
Only DatawavePrincipal is accepted"); - } - - public UserOperations getDelegate() { - return delegate; - } - - public void setDelegate(UserOperations delegate) { - this.delegate = delegate; - } - - public Function<DatawavePrincipal,Boolean> getCondition() { - return condition; - } - - public void setCondition(Function<DatawavePrincipal,Boolean> condition) { - this.condition = condition; - } - - public ResponseObjectFactory getResponseObjectFactory() { - return responseObjectFactory; - } - - public void setResponseObjectFactory(ResponseObjectFactory responseObjectFactory) { - this.responseObjectFactory = responseObjectFactory; - } -} diff --git a/web-services/security/src/main/java/datawave/security/authorization/remote/RemoteDatawaveUserService.java b/web-services/security/src/main/java/datawave/security/authorization/remote/RemoteDatawaveUserService.java index e3d80cf748d..e390d202095 100644 --- a/web-services/security/src/main/java/datawave/security/authorization/remote/RemoteDatawaveUserService.java +++ b/web-services/security/src/main/java/datawave/security/authorization/remote/RemoteDatawaveUserService.java @@ -1,12 +1,12 @@ package datawave.security.authorization.remote; import java.io.IOException; -import java.net.URISyntaxException; +import java.net.ConnectException; +import java.net.UnknownHostException; +import java.util.Arrays; import java.util.Base64; import java.util.Collection; import java.util.List; -import java.util.function.Consumer; -import java.util.function.Supplier; import java.util.stream.Collectors; import javax.annotation.PostConstruct; @@ -14,12 +14,11 @@ import javax.enterprise.inject.Alternative; import javax.inject.Inject; import javax.interceptor.Interceptor; +import javax.net.ssl.SSLException; import org.apache.deltaspike.core.api.config.ConfigProperty; import org.apache.deltaspike.core.api.exclude.Exclude; import org.apache.http.HttpHeaders; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.utils.URIBuilder; import org.apache.http.entity.ContentType; import org.apache.http.util.EntityUtils; @@ -102,6 +101,16 @@ public class RemoteDatawaveUserService extends RemoteHttpService implements Cach @Metric(name = "dw.remoteDatawaveUserService.failures", absolute = true) private Counter failureCounter; + @Override + protected List<Class<? extends Exception>> getUnavailableRetryClasses() { + return Arrays.asList(ConnectException.class, UnknownHostException.class); + } + + @Override + protected List<Class<? extends Exception>> getNonRetriableClasses() { + return Arrays.asList(SSLException.class); + } + @Override @Timed(name = "dw.remoteDatawaveUserService.lookup", absolute = true) public Collection<DatawaveUser> lookup(Collection<SubjectIssuerDNPair> dns) throws AuthorizationException { diff --git a/web-services/security/src/main/java/datawave/security/authorization/remote/RemoteUserOperationsImpl.java b/web-services/security/src/main/java/datawave/security/authorization/remote/RemoteUserOperationsImpl.java index 4459b92d553..faea5264f26 100644 --- a/web-services/security/src/main/java/datawave/security/authorization/remote/RemoteUserOperationsImpl.java +++ b/web-services/security/src/main/java/datawave/security/authorization/remote/RemoteUserOperationsImpl.java @@ -14,6 +14,7 @@ import datawave.security.auth.DatawaveAuthenticationMechanism; import datawave.security.authorization.AuthorizationException; import datawave.security.authorization.DatawavePrincipal; +import datawave.security.authorization.ProxiedUserDetails; import datawave.security.authorization.UserOperations; import datawave.user.AuthorizationsListBase; import datawave.webservice.common.remote.RemoteHttpService; @@
-30,7 +31,7 @@ public class RemoteUserOperationsImpl extends RemoteHttpService implements UserO private static final String FLUSH_CREDS = "flushCachedCredentials"; - private static final String INCLUDE_REMOTE_SERVICES = "includeRemoteServices"; + public static final String INCLUDE_REMOTE_SERVICES = "includeRemoteServices"; private ObjectReader genericResponseReader; @@ -50,15 +51,15 @@ public void init() { } @Override - @Cacheable(value = "getRemoteUser", key = "{#principal}", cacheManager = "remoteOperationsCacheManager") - public DatawavePrincipal getRemoteUser(DatawavePrincipal principal) throws AuthorizationException { - log.info("Cache fault: Retrieving user for " + principal.getPrimaryUser().getDn()); - return UserOperations.super.getRemoteUser(principal); + @Cacheable(value = "getRemoteUser", key = "{#currentUser}", cacheManager = "remoteOperationsCacheManager") + public ProxiedUserDetails getRemoteUser(ProxiedUserDetails currentUser) throws AuthorizationException { + log.info("Cache fault: Retrieving user for " + currentUser.getPrimaryUser().getDn()); + return UserOperations.super.getRemoteUser(currentUser); } @Override @Cacheable(value = "listEffectiveAuthorizations", key = "{#callerObject}", cacheManager = "remoteOperationsCacheManager") - public AuthorizationsListBase listEffectiveAuthorizations(Object callerObject) throws AuthorizationException { + public AuthorizationsListBase listEffectiveAuthorizations(ProxiedUserDetails callerObject) throws AuthorizationException { init(); final DatawavePrincipal principal = getDatawavePrincipal(callerObject); log.info("Cache fault: Retrieving effective auths for " + principal.getPrimaryUser().getDn()); @@ -76,7 +77,7 @@ public AuthorizationsListBase listEffectiveAuthorizations(Object callerObject) t } @Override - public GenericResponse flushCachedCredentials(Object callerObject) throws AuthorizationException { + public GenericResponse flushCachedCredentials(ProxiedUserDetails callerObject) throws AuthorizationException { init(); final DatawavePrincipal principal = getDatawavePrincipal(callerObject); final String suffix = FLUSH_CREDS; diff --git a/web-services/security/src/main/java/datawave/security/authorization/simple/DatabaseUserService.java b/web-services/security/src/main/java/datawave/security/authorization/simple/DatabaseUserService.java index 26fe5cf6807..94de654a437 100644 --- a/web-services/security/src/main/java/datawave/security/authorization/simple/DatabaseUserService.java +++ b/web-services/security/src/main/java/datawave/security/authorization/simple/DatabaseUserService.java @@ -34,7 +34,7 @@ * contain two tables: users (name is customizable by setting the dw.databaseUsersService.usersTableName property) and roleToAuthMapping (name is customizable * by setting the dw.databaseUsersService.mappingTableName property). The expected structure of the users table is: *
    *     [javadoc table markup stripped: caption "User data table"; header row: Column Name | Column Type]
@@ -70,7 +70,7 @@ * The roleToAuthMapping table contains the mappings of roles seen in the roles column of the users table into Accumulo auths that appear in the auths column of * the users table. The expected structure of this table is: *
    *     [javadoc table markup stripped: caption "Role to Auth Mapping table"; header row: Column Name | Column Type]
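
For concreteness, a hedged sketch of the minimal schema those two javadoc tables imply. Only the columns actually referenced by the queries in this file are grounded in the patch (subjectDN/issuerDN for users, role/auth for roleToAuthMapping); the SQL types and sizes are illustrative assumptions:

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

import javax.sql.DataSource;

public class DatabaseUserServiceSchemaSketch {
    // Create minimal versions of the two tables DatabaseUserService reads.
    // Additional user columns described by the javadoc above are omitted.
    public static void createTables(DataSource ds) throws SQLException {
        try (Connection c = ds.getConnection(); Statement s = c.createStatement()) {
            s.execute("CREATE TABLE users (subjectDN VARCHAR(256), issuerDN VARCHAR(256))");
            s.execute("CREATE TABLE roleToAuthMapping (role VARCHAR(64), auth VARCHAR(64))");
        }
    }
}
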
@@ -120,7 +120,7 @@ public DatabaseUserService(@ConfigProperty(name = "dw.databaseUsersService.users public void setup() { try (Connection c = ds.getConnection(); Statement s = c.createStatement(); - ResultSet rs = s.executeQuery("SELECT role, auth FROM " + mappingTableName)) { + ResultSet rs = s.executeQuery(String.format("SELECT role, auth FROM %s", mappingTableName))) { while (rs.next()) { roleToAuthorizationMap.put(rs.getString("role"), rs.getString("auth")); } @@ -133,7 +133,7 @@ public void setup() { @Override public Collection<DatawaveUser> lookup(Collection<SubjectIssuerDNPair> dns) throws AuthorizationException { try (Connection c = ds.getConnection(); - PreparedStatement ps = c.prepareStatement("SELECT * from " + usersTableName + " where subjectDN = ? and issuerDN = ?")) { + PreparedStatement ps = c.prepareStatement(String.format("SELECT * from %s where subjectDN = ? and issuerDN = ?", usersTableName))) { ArrayList<DatawaveUser> users = new ArrayList<>(); for (SubjectIssuerDNPair dn : dns) { users.add(lookup(ps, dn)); diff --git a/web-services/security/src/main/java/datawave/security/authorization/test/TestDatawaveUserService.java b/web-services/security/src/main/java/datawave/security/authorization/test/TestDatawaveUserService.java index 4b5ba7d542d..ae8d4ecbcdc 100644 --- a/web-services/security/src/main/java/datawave/security/authorization/test/TestDatawaveUserService.java +++ b/web-services/security/src/main/java/datawave/security/authorization/test/TestDatawaveUserService.java @@ -32,13 +32,13 @@ import datawave.configuration.RefreshableScope; import datawave.configuration.spring.SpringBean; +import datawave.core.common.connection.AccumuloConnectionFactory; import datawave.security.authorization.AuthorizationException; import datawave.security.authorization.CachedDatawaveUserService; import datawave.security.authorization.DatawaveUser; import datawave.security.authorization.DatawaveUserInfo; import datawave.security.authorization.DatawaveUserService; import datawave.security.authorization.SubjectIssuerDNPair; -import datawave.webservice.common.connection.AccumuloConnectionFactory; import datawave.webservice.util.NotEqualPropertyExpressionInterpreter; /** @@ -206,7 +206,7 @@ protected void readTestUsers() { protected List<String> readAccumuloAuthorizations() { try { - AccumuloClient client = accumuloConnectionFactory.getClient(null, AccumuloConnectionFactory.Priority.ADMIN, new HashMap<>()); + AccumuloClient client = accumuloConnectionFactory.getClient(null, null, AccumuloConnectionFactory.Priority.ADMIN, new HashMap<>()); Authorizations auths = client.securityOperations().getUserAuthorizations(client.whoami()); return Arrays.asList(auths.toString().split("\\s*,\\s*")); } catch (Exception e) { diff --git a/web-services/security/src/main/java/datawave/security/cache/CredentialsCacheBean.java b/web-services/security/src/main/java/datawave/security/cache/CredentialsCacheBean.java index 2d86e03e6d5..45b1fb1e725 100644 --- a/web-services/security/src/main/java/datawave/security/cache/CredentialsCacheBean.java +++ b/web-services/security/src/main/java/datawave/security/cache/CredentialsCacheBean.java @@ -39,13 +39,13 @@ import datawave.configuration.ConfigurationEvent; import datawave.configuration.DatawaveEmbeddedProjectStageHolder; import datawave.configuration.RefreshLifecycle; +import datawave.core.common.connection.AccumuloConnectionFactory; import datawave.security.DnList; import datawave.security.authorization.CachedDatawaveUserService; import datawave.security.authorization.DatawavePrincipal; import
datawave.security.authorization.DatawaveUser; import datawave.security.authorization.DatawaveUserInfo; import datawave.security.system.AuthorizationCache; -import datawave.webservice.common.connection.AccumuloConnectionFactory; import datawave.webservice.common.exception.DatawaveWebApplicationException; import datawave.webservice.query.exception.QueryException; import datawave.webservice.result.GenericResponse; @@ -311,7 +311,7 @@ public GenericResponse reloadAccumuloAuthorizations() { private void retrieveAccumuloAuthorizations() throws Exception { Map trackingMap = accumuloConnectionFactory.getTrackingMap(Thread.currentThread().getStackTrace()); - AccumuloClient c = accumuloConnectionFactory.getClient(AccumuloConnectionFactory.Priority.ADMIN, trackingMap); + AccumuloClient c = accumuloConnectionFactory.getClient(null, null, AccumuloConnectionFactory.Priority.ADMIN, trackingMap); try { Authorizations auths = c.securityOperations().getUserAuthorizations(c.whoami()); HashSet authSet = new HashSet<>(); diff --git a/web-services/security/src/main/java/datawave/security/login/DatawavePrincipalLoginModule.java b/web-services/security/src/main/java/datawave/security/login/DatawavePrincipalLoginModule.java index 31af6683950..bf74bc6effc 100644 --- a/web-services/security/src/main/java/datawave/security/login/DatawavePrincipalLoginModule.java +++ b/web-services/security/src/main/java/datawave/security/login/DatawavePrincipalLoginModule.java @@ -47,6 +47,7 @@ import datawave.configuration.DatawaveEmbeddedProjectStageHolder; import datawave.configuration.spring.BeanProvider; import datawave.security.auth.DatawaveCredential; +import datawave.security.authorization.AuthorizationException; import datawave.security.authorization.DatawavePrincipal; import datawave.security.authorization.DatawaveUser; import datawave.security.authorization.DatawaveUserService; @@ -111,8 +112,9 @@ public void initialize(Subject subject, CallbackHandler callbackHandler, Map flushCachedCredentials(@DefaultValue("true") @QueryParam("includeRemoteServices") boolean includeRemoteServices) { - return flushCachedCredentials(context.getCallerPrincipal(), includeRemoteServices); + return flushCachedCredentials((ProxiedUserDetails) context.getCallerPrincipal(), includeRemoteServices); } @Override - public GenericResponse flushCachedCredentials(Object callerPrincipal) { + public GenericResponse flushCachedCredentials(ProxiedUserDetails callerPrincipal) { return flushCachedCredentials(callerPrincipal, true); } - private GenericResponse flushCachedCredentials(Object callerPrincipal, boolean includeRemoteServices) { + private GenericResponse flushCachedCredentials(ProxiedUserDetails callerPrincipal, boolean includeRemoteServices) { GenericResponse response = new GenericResponse<>(); log.info("Flushing credentials for " + callerPrincipal + " from the cache."); diff --git a/web-services/security/src/test/java/datawave/security/authorization/remote/ConditionalRemoteUserOperationsTest.java b/web-services/security/src/test/java/datawave/security/authorization/remote/ConditionalRemoteUserOperationsTest.java index 7a3c3af3b8b..bac8351be2e 100644 --- a/web-services/security/src/test/java/datawave/security/authorization/remote/ConditionalRemoteUserOperationsTest.java +++ b/web-services/security/src/test/java/datawave/security/authorization/remote/ConditionalRemoteUserOperationsTest.java @@ -3,15 +3,19 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.function.Supplier; import org.junit.Test; import 
org.wildfly.common.Assert; import com.google.common.collect.HashMultimap; +import datawave.microservice.query.Query; import datawave.security.authorization.AuthorizationException; +import datawave.security.authorization.ConditionalRemoteUserOperations; import datawave.security.authorization.DatawavePrincipal; import datawave.security.authorization.DatawaveUser; +import datawave.security.authorization.ProxiedUserDetails; import datawave.security.authorization.SubjectIssuerDNPair; import datawave.security.authorization.UserOperations; import datawave.user.AuthorizationsListBase; @@ -20,7 +24,6 @@ import datawave.webservice.dictionary.data.DescriptionBase; import datawave.webservice.dictionary.data.FieldsBase; import datawave.webservice.metadata.MetadataFieldBase; -import datawave.webservice.query.Query; import datawave.webservice.query.cachedresults.CacheableQueryRow; import datawave.webservice.query.result.EdgeQueryResponseBase; import datawave.webservice.query.result.edge.EdgeBase; @@ -41,13 +44,13 @@ private static class MockRemoteUserOperations implements UserOperations { boolean invoked = false; @Override - public AuthorizationsListBase listEffectiveAuthorizations(Object callerObject) throws AuthorizationException { + public AuthorizationsListBase listEffectiveAuthorizations(ProxiedUserDetails callerObject) throws AuthorizationException { invoked = true; return new DefaultAuthorizationsList(); } @Override - public GenericResponse flushCachedCredentials(Object callerObject) { + public GenericResponse flushCachedCredentials(ProxiedUserDetails callerObject) { invoked = true; return new GenericResponse<>(); } @@ -58,7 +61,7 @@ public void testConditional() throws AuthorizationException { MockRemoteUserOperations testOperations = new MockRemoteUserOperations(); ConditionalRemoteUserOperations testObj = new ConditionalRemoteUserOperations(); testObj.setDelegate(testOperations); - testObj.setResponseObjectFactory(new MockResponseObjectFactory()); + testObj.setAuthorizationsListBaseSupplier(() -> new MockResponseObjectFactory().getAuthorizationsList()); testObj.setCondition(a -> a.getProxiedUsers().size() == 1); List users = new ArrayList<>(); diff --git a/web-services/security/src/test/java/datawave/security/authorization/remote/RemoteUserOperationsImplHttpTest.java b/web-services/security/src/test/java/datawave/security/authorization/remote/RemoteUserOperationsImplHttpTest.java index b9f51f47596..26650903d4f 100644 --- a/web-services/security/src/test/java/datawave/security/authorization/remote/RemoteUserOperationsImplHttpTest.java +++ b/web-services/security/src/test/java/datawave/security/authorization/remote/RemoteUserOperationsImplHttpTest.java @@ -1,6 +1,7 @@ package datawave.security.authorization.remote; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; import java.io.IOException; import java.math.BigInteger; @@ -9,38 +10,63 @@ import java.nio.charset.Charset; import java.security.KeyPair; import java.security.KeyPairGenerator; +import java.security.NoSuchAlgorithmException; import java.security.PrivateKey; import java.security.SecureRandom; +import java.security.cert.CertificateException; import java.security.cert.X509Certificate; import java.time.ZonedDateTime; +import java.util.ArrayList; import java.util.Arrays; -import java.util.HashMap; +import java.util.Collections; +import java.util.List; +import javax.enterprise.concurrent.ManagedExecutorService; import javax.security.auth.x500.X500Principal; import javax.ws.rs.core.MediaType; 
+import org.apache.accumulo.core.security.Authorizations; import org.apache.commons.io.IOUtils; +import org.jboss.security.JSSESecurityDomain; import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.cache.Cache; +import org.springframework.cache.CacheManager; +import org.springframework.cache.annotation.EnableCaching; +import org.springframework.cache.concurrent.ConcurrentMapCache; +import org.springframework.cache.support.SimpleCacheManager; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringRunner; import org.wildfly.security.x500.cert.X509CertificateBuilder; import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.Sets; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; +import datawave.microservice.query.Query; import datawave.security.authorization.DatawavePrincipal; +import datawave.security.authorization.DatawaveUser; +import datawave.security.authorization.ProxiedUserDetails; +import datawave.security.authorization.SubjectIssuerDNPair; +import datawave.security.authorization.UserOperations; import datawave.security.util.DnUtils; import datawave.user.AuthorizationsListBase; import datawave.user.DefaultAuthorizationsList; import datawave.webservice.common.json.DefaultMapperDecorator; +import datawave.webservice.common.json.ObjectMapperDecorator; import datawave.webservice.common.remote.TestJSSESecurityDomain; import datawave.webservice.dictionary.data.DataDictionaryBase; import datawave.webservice.dictionary.data.DescriptionBase; import datawave.webservice.dictionary.data.FieldsBase; import datawave.webservice.metadata.MetadataFieldBase; -import datawave.webservice.query.Query; import datawave.webservice.query.cachedresults.CacheableQueryRow; import datawave.webservice.query.result.EdgeQueryResponseBase; import datawave.webservice.query.result.edge.EdgeBase; @@ -54,46 +80,105 @@ import datawave.webservice.result.FacetQueryResponseBase; import datawave.webservice.result.GenericResponse; +@RunWith(SpringRunner.class) +@ContextConfiguration public class RemoteUserOperationsImplHttpTest { - private static final int keysize = 2048; + @EnableCaching + @Configuration + static class Config { + + @Bean + public CacheManager remoteOperationsCacheManager() { + SimpleCacheManager cacheManager = new SimpleCacheManager(); + List caches = new ArrayList(); + caches.add(new ConcurrentMapCache("listEffectiveAuthorizations")); + caches.add(new ConcurrentMapCache("getRemoteUser")); + cacheManager.setCaches(caches); + return cacheManager; + } + + @Bean + public ObjectMapperDecorator objectMapperDecorator() { + return new DefaultMapperDecorator(); + } - private static final String commonName = "cn=www.test.us"; - private static final String alias = "tomcat"; - private static final char[] keyPass = "changeit".toCharArray(); + @Bean + public ManagedExecutorService executorService() { + return Mockito.mock(ManagedExecutorService.class); + } + + @Bean + public JSSESecurityDomain jsseSecurityDomain() throws CertificateException, NoSuchAlgorithmException { + String alias = "tomcat"; + char[] keyPass = "changeit".toCharArray(); + int keysize = 2048; + String 
commonName = "cn=www.test.us"; + + KeyPairGenerator generater = KeyPairGenerator.getInstance("RSA"); + generater.initialize(keysize); + KeyPair keypair = generater.generateKeyPair(); + PrivateKey privKey = keypair.getPrivate(); + final X509Certificate[] chain = new X509Certificate[1]; + X500Principal x500Principal = new X500Principal(commonName); + final ZonedDateTime start = ZonedDateTime.now().minusWeeks(1); + final ZonedDateTime until = start.plusYears(1); + X509CertificateBuilder builder = new X509CertificateBuilder().setIssuerDn(x500Principal).setSerialNumber(new BigInteger(10, new SecureRandom())) + .setNotValidBefore(start).setNotValidAfter(until).setSubjectDn(x500Principal).setPublicKey(keypair.getPublic()) + .setSigningKey(keypair.getPrivate()).setSignatureAlgorithmName("SHA256withRSA"); + chain[0] = builder.build(); + + return new TestJSSESecurityDomain(alias, privKey, keyPass, chain); + } + + @Bean + public HttpServer server() throws IOException { + HttpServer server = HttpServer.create(new InetSocketAddress(PORT), 0); + server.setExecutor(null); + server.start(); + return server; + } + + @Bean + public RemoteUserOperationsImpl remote(HttpServer server) { + // create a remote event query logic that has our own server behind it + RemoteUserOperationsImpl remote = new RemoteUserOperationsImpl(); + remote.setQueryServiceURI("/Security/User/"); + remote.setQueryServiceScheme("http"); + remote.setQueryServiceHost("localhost"); + remote.setQueryServicePort(server.getAddress().getPort()); + remote.setResponseObjectFactory(new MockResponseObjectFactory()); + return remote; + } + } - private X500Principal x500Principal; + private static final SubjectIssuerDNPair userDN = SubjectIssuerDNPair.of("userDn", "issuerDn"); + private static final SubjectIssuerDNPair otherUserDN = SubjectIssuerDNPair.of("otherUserDn", "issuerDn"); + private static Authorizations auths = new Authorizations("auth1", "auth2"); private static final int PORT = 0; + private final DatawaveUser user = new DatawaveUser(userDN, DatawaveUser.UserType.USER, Sets.newHashSet(auths.toString().split(",")), null, null, -1L); + private final DatawavePrincipal principal = new DatawavePrincipal((Collections.singleton(user))); + + private final DatawaveUser otherUser = new DatawaveUser(otherUserDN, DatawaveUser.UserType.USER, Sets.newHashSet(auths.toString().split(",")), null, null, + -1L); + private final DatawavePrincipal otherPrincipal = new DatawavePrincipal((Collections.singleton(otherUser))); + + @Autowired private HttpServer server; - private RemoteUserOperationsImpl remote; + @Autowired + private UserOperations remote; + + private DefaultAuthorizationsList listEffectiveAuthResponse; @Before public void setup() throws Exception { final ObjectMapper objectMapper = new DefaultMapperDecorator().decorate(new ObjectMapper()); System.setProperty(DnUtils.SUBJECT_DN_PATTERN_PROPERTY, ".*ou=server.*"); - KeyPairGenerator generater = KeyPairGenerator.getInstance("RSA"); - generater.initialize(keysize); - KeyPair keypair = generater.generateKeyPair(); - PrivateKey privKey = keypair.getPrivate(); - final X509Certificate[] chain = new X509Certificate[1]; - x500Principal = new X500Principal(commonName); - final ZonedDateTime start = ZonedDateTime.now().minusWeeks(1); - final ZonedDateTime until = start.plusYears(1); - X509CertificateBuilder builder = new X509CertificateBuilder().setIssuerDn(x500Principal).setSerialNumber(new BigInteger(10, new SecureRandom())) - 
.setNotValidBefore(start).setNotValidAfter(until).setSubjectDn(x500Principal).setPublicKey(keypair.getPublic()) - .setSigningKey(keypair.getPrivate()).setSignatureAlgorithmName("SHA256withRSA"); - chain[0] = builder.build(); - - server = HttpServer.create(new InetSocketAddress(PORT), 0); - server.setExecutor(null); - server.start(); - - DefaultAuthorizationsList listEffectiveAuthResponse = new DefaultAuthorizationsList(); - listEffectiveAuthResponse.setUserAuths("testuserDn", "testissuerDn", Arrays.asList("auth1", "auth2")); - listEffectiveAuthResponse.setAuthMapping(new HashMap<>()); + + setListEffectiveAuthResponse(userDN, auths); HttpHandler listEffectiveAuthorizationsHandler = new HttpHandler() { @Override @@ -122,17 +207,6 @@ public void handle(HttpExchange exchange) throws IOException { server.createContext("/Security/User/listEffectiveAuthorizations", listEffectiveAuthorizationsHandler); server.createContext("/Security/User/flushCachedCredentials", flushHandler); - - // create a remote event query logic that has our own server behind it - remote = new RemoteUserOperationsImpl(); - remote.setQueryServiceURI("/Security/User/"); - remote.setQueryServiceScheme("http"); - remote.setQueryServiceHost("localhost"); - remote.setQueryServicePort(server.getAddress().getPort()); - remote.setExecutorService(null); - remote.setObjectMapperDecorator(new DefaultMapperDecorator()); - remote.setResponseObjectFactory(new MockResponseObjectFactory()); - remote.setJsseSecurityDomain(new TestJSSESecurityDomain(alias, privKey, keyPass, chain)); } @After @@ -142,15 +216,33 @@ public void after() { } } + private void setListEffectiveAuthResponse(SubjectIssuerDNPair userDN, Authorizations auths) { + listEffectiveAuthResponse = new DefaultAuthorizationsList(); + listEffectiveAuthResponse.setUserAuths(userDN.subjectDN(), userDN.issuerDN(), Arrays.asList(auths.toString().split(","))); + listEffectiveAuthResponse.addAuths(userDN.subjectDN(), userDN.issuerDN(), Arrays.asList(auths.toString().split(","))); + } + @Test public void testRemoteUserOperations() throws Exception { - DatawavePrincipal principal = new DatawavePrincipal(commonName); - AuthorizationsListBase auths = remote.listEffectiveAuthorizations(principal); - assertEquals(2, auths.getAllAuths().size()); + AuthorizationsListBase returnedAuths = remote.listEffectiveAuthorizations(principal); + assertEquals(2, returnedAuths.getAllAuths().size()); GenericResponse flush = remote.flushCachedCredentials(principal); assertEquals("test flush result", flush.getResult()); + + ProxiedUserDetails returnedUser = remote.getRemoteUser(principal); + + // ensure that we get the cached user details + ProxiedUserDetails dupeReturnedUser = remote.getRemoteUser(principal); + assertEquals(returnedUser, dupeReturnedUser); + + // setup the list effective auth response for the other user + setListEffectiveAuthResponse(otherUserDN, auths); + + // ensure that we get the other user details, not the cached user details + ProxiedUserDetails newReturnedUser = remote.getRemoteUser(otherPrincipal); + assertNotEquals(returnedUser, newReturnedUser); } public static class MockResponseObjectFactory extends ResponseObjectFactory { diff --git a/web-services/security/src/test/java/datawave/security/authorization/test/TestDatawaveUserServiceTest.java b/web-services/security/src/test/java/datawave/security/authorization/test/TestDatawaveUserServiceTest.java index 519625aeffe..5155dc2d9b7 100644 --- 
a/web-services/security/src/test/java/datawave/security/authorization/test/TestDatawaveUserServiceTest.java +++ b/web-services/security/src/test/java/datawave/security/authorization/test/TestDatawaveUserServiceTest.java @@ -6,6 +6,7 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -28,6 +29,8 @@ import datawave.accumulo.inmemory.InMemoryAccumuloClient; import datawave.accumulo.inmemory.InMemoryInstance; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.common.result.ConnectionPool; import datawave.security.authorization.AuthorizationException; import datawave.security.authorization.CachedDatawaveUserService; import datawave.security.authorization.DatawaveUser; @@ -35,7 +38,6 @@ import datawave.security.authorization.DatawaveUserInfo; import datawave.security.authorization.DatawaveUserService; import datawave.security.authorization.SubjectIssuerDNPair; -import datawave.webservice.common.connection.AccumuloConnectionFactory; @RunWith(Enclosed.class) public class TestDatawaveUserServiceTest { @@ -234,35 +236,51 @@ private static class MockAccumuloConnectionFactory implements AccumuloConnection public MockAccumuloConnectionFactory() { try { - inMemoryInstance.getConnector("root", "").securityOperations().changeUserAuthorizations("root", new Authorizations("PUB", "PVT")); + new InMemoryAccumuloClient("root", inMemoryInstance).securityOperations().changeUserAuthorizations("root", new Authorizations("PUB", "PVT")); } catch (AccumuloException | AccumuloSecurityException e) { throw new RuntimeException(e); } } @Override - public String getConnectionUserName(String poolName) { - return "test"; + public AccumuloClient getClient(String userDN, Collection proxiedDNs, Priority priority, Map trackingMap) throws Exception { + return new InMemoryAccumuloClient("root", inMemoryInstance); } @Override - public AccumuloClient getClient(Priority priority, Map trackingMap) throws Exception { + public AccumuloClient getClient(String userDN, Collection proxiedDNs, String poolName, Priority priority, Map trackingMap) + throws Exception { return new InMemoryAccumuloClient("root", inMemoryInstance); } @Override - public AccumuloClient getClient(String poolName, Priority priority, Map trackingMap) throws Exception { - return new InMemoryAccumuloClient("root", inMemoryInstance); + public void returnClient(AccumuloClient client) { + } @Override - public void returnClient(AccumuloClient client) { + public String report() { + return null; + } + + @Override + public List getConnectionPools() { + return null; + } + @Override + public int getConnectionUsagePercent() { + return 0; } @Override public Map getTrackingMap(StackTraceElement[] stackTrace) { return new HashMap<>(); } + + @Override + public void close() throws Exception { + + } } } diff --git a/web-services/security/src/test/java/datawave/security/cache/CredentialsCacheBeanTest.java b/web-services/security/src/test/java/datawave/security/cache/CredentialsCacheBeanTest.java index b1fd6699a53..3a3d7c6c7c6 100644 --- a/web-services/security/src/test/java/datawave/security/cache/CredentialsCacheBeanTest.java +++ b/web-services/security/src/test/java/datawave/security/cache/CredentialsCacheBeanTest.java @@ -7,8 +7,10 @@ import java.security.Principal; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashSet; +import java.util.List; 
import java.util.Map; import java.util.Set; @@ -33,13 +35,14 @@ import com.google.common.collect.Lists; import datawave.configuration.spring.BeanProvider; +import datawave.core.common.connection.AccumuloConnectionFactory; +import datawave.core.common.result.ConnectionPool; import datawave.security.DnList; import datawave.security.authorization.DatawavePrincipal; import datawave.security.authorization.DatawaveUser; import datawave.security.authorization.DatawaveUser.UserType; import datawave.security.authorization.SubjectIssuerDNPair; import datawave.security.system.AuthorizationCache; -import datawave.webservice.common.connection.AccumuloConnectionFactory; @RunWith(Arquillian.class) public class CredentialsCacheBeanTest { @@ -170,28 +173,43 @@ public Set getCachedKeys() { private static class MockAccumuloConnectionFactory implements AccumuloConnectionFactory { @Override - public String getConnectionUserName(String poolName) { + public AccumuloClient getClient(String userDN, Collection proxiedDNs, Priority priority, Map trackingMap) { return null; } @Override - public AccumuloClient getClient(Priority priority, Map trackingMap) { + public AccumuloClient getClient(String userDN, Collection proxiedDNs, String poolName, Priority priority, Map trackingMap) { return null; } @Override - public AccumuloClient getClient(String poolName, Priority priority, Map trackingMap) { + public void returnClient(AccumuloClient client) { + + } + + @Override + public String report() { return null; } @Override - public void returnClient(AccumuloClient client) { + public List getConnectionPools() { + return null; + } + @Override + public int getConnectionUsagePercent() { + return 0; } @Override public Map getTrackingMap(StackTraceElement[] stackTrace) { return null; } + + @Override + public void close() throws Exception { + + } } } diff --git a/web-services/security/src/test/java/datawave/security/login/DatawavePrincipalLoginModuleTest.java b/web-services/security/src/test/java/datawave/security/login/DatawavePrincipalLoginModuleTest.java index ea22722492a..72077588de8 100644 --- a/web-services/security/src/test/java/datawave/security/login/DatawavePrincipalLoginModuleTest.java +++ b/web-services/security/src/test/java/datawave/security/login/DatawavePrincipalLoginModuleTest.java @@ -9,6 +9,7 @@ import static org.junit.Assert.fail; import java.net.Socket; +import java.net.UnknownHostException; import java.security.KeyStore; import java.security.KeyStoreException; import java.security.NoSuchAlgorithmException; @@ -31,6 +32,7 @@ import javax.security.auth.login.AccountLockedException; import javax.security.auth.login.CredentialException; import javax.security.auth.login.FailedLoginException; +import javax.security.auth.login.LoginException; import org.easymock.EasyMockRunner; import org.easymock.EasyMockSupport; @@ -271,7 +273,7 @@ public void testGetRoleSetsLeavesRequiredRoles() throws Exception { verifyAll(); } - @Test(expected = CredentialException.class) + @Test(expected = FailedLoginException.class) public void testProxiedEntitiesLoginNoRole() throws Exception { // Call Chain is U -> S1 -> S2. S2 will have no role. This test case tests // the case of no role for the terminal service. This should fail with @@ -305,7 +307,7 @@ public void testProxiedEntitiesLoginNoRole() throws Exception { verifyAll(); } - @Test(expected = CredentialException.class) + @Test(expected = FailedLoginException.class) public void testDirectRolesFailServer() throws Exception { /** * Chain is User -> S1 -> S2. S2 is terminal server. 
Verified that s2 does not have the appropriate authorized role for terminal server (directRole). @@ -540,6 +542,36 @@ public void testDisallowlistedProxiedUser() throws Exception { verifyAll(); } + @Test + public void testAuthorizationExceptionOnLookup() throws Exception { + // Ensure that an AuthorizationException from the DatawaveUserService results + // in a LoginException being thrown from DatawavePrincipalLoginModule.login() + String issuerDN = DnUtils.normalizeDN(testServerCert.getIssuerDN().getName()); + String otherServerDN = DnUtils.normalizeDN("CN=otherServer.example.com, OU=iamnotaperson, OU=acme"); + String proxiedSubjects = "<" + userDN.subjectDN() + "><" + otherServerDN + ">"; + String proxiedIssuers = "<" + userDN.issuerDN() + "><" + issuerDN + ">"; + DatawaveCredential datawaveCredential = new DatawaveCredential(testServerCert, proxiedSubjects, proxiedIssuers); + callbackHandler.name = datawaveCredential.getUserName(); + callbackHandler.credential = datawaveCredential; + + expect(securityDomain.getKeyStore()).andReturn(serverKeystore); + expect(securityDomain.getTrustStore()).andReturn(truststore); + expect(datawaveUserService.lookup(datawaveCredential.getEntities())).andThrow(new AuthorizationException()); + + replayAll(); + + try { + datawaveLoginModule.login(); + fail("Login should not have succeeded"); + } catch (Exception e) { + // this type of check is used because there are many subclasses of LoginException + // Using a @Test(expected = LoginException.class) would succeed if any of these were caught + assertTrue(e.getClass().equals(LoginException.class)); + } + + verifyAll(); + } + @Test public void testProxiedEntitiesLogin() throws Exception { // Proxied entities has the original user DN, plus it came through a server and @@ -612,7 +644,7 @@ public void testJWTLogin() throws Exception { verifyAll(); } - @Test(expected = FailedLoginException.class) + @Test(expected = CredentialException.class) public void testInvalidLoginCertIssuerDenied() throws Exception { MockDatawaveCertVerifier.issuerSupported = false; DatawaveCredential datawaveCredential = new DatawaveCredential(testUserCert, null, null); @@ -631,7 +663,7 @@ public void testInvalidLoginCertIssuerDenied() throws Exception { } } - @Test(expected = FailedLoginException.class) + @Test(expected = CredentialException.class) public void testInvalidLoginCertVerificationFailed() throws Exception { MockDatawaveCertVerifier.verify = false; DatawaveCredential datawaveCredential = new DatawaveCredential(testUserCert, null, null); @@ -650,7 +682,7 @@ public void testInvalidLoginCertVerificationFailed() throws Exception { } } - @Test(expected = FailedLoginException.class) + @Test public void testInvalidLoginAuthorizationLookupFailed() throws Exception { DatawaveCredential datawaveCredential = new DatawaveCredential(testUserCert, null, null); callbackHandler.name = datawaveCredential.getUserName(); @@ -664,6 +696,11 @@ public void testInvalidLoginAuthorizationLookupFailed() throws Exception { try { datawaveLoginModule.login(); + fail("Login should not have succeeded"); + } catch (LoginException e) { + // this type of check is used because there are many subclasses of LoginException + // Using a @Test(expected = LoginException.class) would succeed if any of these were caught + assertTrue(e.getClass().equals(LoginException.class)); } finally { verifyAll(); } diff --git a/web-services/web-root/pom.xml b/web-services/web-root/pom.xml index 9f3780d56c3..4b7d9001810 100644 --- a/web-services/web-root/pom.xml +++
b/web-services/web-root/pom.xml @@ -4,7 +4,7 @@ gov.nsa.datawave.webservices datawave-ws-parent - 6.5.0-SNAPSHOT + 7.13.0-SNAPSHOT datawave-ws-web-root war
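
One pattern worth noting from the login-module test changes above: JUnit's @Test(expected = ...) passes when any subclass of the expected exception is thrown, which is why the new tests catch the exception and compare classes exactly. A self-contained restatement of that idiom (hypothetical test, not from the patch):

import static org.junit.Assert.assertEquals;

import javax.security.auth.login.FailedLoginException;
import javax.security.auth.login.LoginException;

import org.junit.Test;

public class ExactExceptionSketch {
    @Test
    public void exactClassCheck() {
        try {
            throw new FailedLoginException("simulated login failure");
        } catch (LoginException e) {
            // @Test(expected = LoginException.class) would also pass here even
            // though the thrown type is the subclass FailedLoginException;
            // comparing classes directly pins down the exact type.
            assertEquals(FailedLoginException.class, e.getClass());
        }
    }
}
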
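
Finally, the call-site pattern for the relocated AccumuloConnectionFactory, as it appears at several places in this patch: the two new leading arguments to getClient() (user DN and proxied DNs) may be passed as null for system-level ADMIN connections. A hedged sketch mirroring CredentialsCacheBean.retrieveAccumuloAuthorizations() above; returning the client in a finally block is an assumption about safe usage rather than something this patch shows:

import java.util.Map;

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.security.Authorizations;

import datawave.core.common.connection.AccumuloConnectionFactory;

public class ConnectionFactorySketch {
    public static Authorizations fetchAuths(AccumuloConnectionFactory factory) throws Exception {
        // The tracking map ties the checked-out connection to this call site for diagnostics.
        Map<String,String> trackingMap = factory.getTrackingMap(Thread.currentThread().getStackTrace());
        AccumuloClient client = factory.getClient(null, null, AccumuloConnectionFactory.Priority.ADMIN, trackingMap);
        try {
            return client.securityOperations().getUserAuthorizations(client.whoami());
        } finally {
            factory.returnClient(client);
        }
    }
}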