Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Replace exclusionary words whitelist and blacklist in the places that won't impact backwards compatibility #2178

Merged
merged 5 commits into from
Mar 5, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion TESTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -245,7 +245,7 @@ The YAML REST tests support all the options provided by the randomized runner, p

- `tests.rest.suite`: comma separated paths of the test suites to be run (by default loaded from /rest-api-spec/test). It is possible to run only a subset of the tests providing a sub-folder or even a single yaml file (the default /rest-api-spec/test prefix is optional when files are loaded from classpath) e.g. `-Dtests.rest.suite=index,get,create/10_with_id`

- `tests.rest.blacklist`: comma separated globs that identify tests that are blacklisted and need to be skipped e.g. `-Dtests.rest.blacklist=index/**/Index document,get/10_basic/**`
- `tests.rest.blacklist`: comma separated globs that identify tests that are denylisted and need to be skipped e.g. `-Dtests.rest.blacklist=index/**/Index document,get/10_basic/**`

Java REST tests can be run with the "javaRestTest" task.

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ public interface NodeSelector {
* iterate the nodes as many times as they need.
* <p>
* This may be called twice per request: first for "living" nodes that
* have not been blacklisted by previous errors. If the selector removes
* have not been denylisted by previous errors. If the selector removes
* all nodes from the list or if there aren't any living nodes then the
* {@link RestClient} will call this method with a list of "dead" nodes.
* <p>
Expand Down
30 changes: 15 additions & 15 deletions client/rest/src/main/java/org/opensearch/client/RestClient.java
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,7 @@ public class RestClient implements Closeable {
final List<Header> defaultHeaders;
private final String pathPrefix;
private final AtomicInteger lastNodeIndex = new AtomicInteger(0);
private final ConcurrentMap<HttpHost, DeadHostState> blacklist = new ConcurrentHashMap<>();
private final ConcurrentMap<HttpHost, DeadHostState> denylist = new ConcurrentHashMap<>();
private final FailureListener failureListener;
private final NodeSelector nodeSelector;
private volatile NodeTuple<List<Node>> nodeTuple;
Expand Down Expand Up @@ -246,7 +246,7 @@ public synchronized void setNodes(Collection<Node> nodes) {
authCache.put(node.getHost(), new BasicScheme());
}
this.nodeTuple = new NodeTuple<>(Collections.unmodifiableList(new ArrayList<>(nodesByHost.values())), authCache);
this.blacklist.clear();
this.denylist.clear();
}

/**
Expand Down Expand Up @@ -448,7 +448,7 @@ public void cancelled() {
*/
private NodeTuple<Iterator<Node>> nextNodes() throws IOException {
NodeTuple<List<Node>> nodeTuple = this.nodeTuple;
Iterable<Node> hosts = selectNodes(nodeTuple, blacklist, lastNodeIndex, nodeSelector);
Iterable<Node> hosts = selectNodes(nodeTuple, denylist, lastNodeIndex, nodeSelector);
return new NodeTuple<>(hosts.iterator(), nodeTuple.authCache);
}

Expand All @@ -458,17 +458,17 @@ private NodeTuple<Iterator<Node>> nextNodes() throws IOException {
*/
static Iterable<Node> selectNodes(
NodeTuple<List<Node>> nodeTuple,
Map<HttpHost, DeadHostState> blacklist,
Map<HttpHost, DeadHostState> denylist,
AtomicInteger lastNodeIndex,
NodeSelector nodeSelector
) throws IOException {
/*
* Sort the nodes into living and dead lists.
*/
List<Node> livingNodes = new ArrayList<>(Math.max(0, nodeTuple.nodes.size() - blacklist.size()));
List<DeadNode> deadNodes = new ArrayList<>(blacklist.size());
List<Node> livingNodes = new ArrayList<>(Math.max(0, nodeTuple.nodes.size() - denylist.size()));
List<DeadNode> deadNodes = new ArrayList<>(denylist.size());
for (Node node : nodeTuple.nodes) {
DeadHostState deadness = blacklist.get(node.getHost());
DeadHostState deadness = denylist.get(node.getHost());
if (deadness == null || deadness.shallBeRetried()) {
livingNodes.add(node);
} else {
Expand Down Expand Up @@ -526,9 +526,9 @@ static Iterable<Node> selectNodes(
* Receives as an argument the host that was used for the successful request.
*/
private void onResponse(Node node) {
DeadHostState removedHost = this.blacklist.remove(node.getHost());
DeadHostState removedHost = this.denylist.remove(node.getHost());
if (logger.isDebugEnabled() && removedHost != null) {
logger.debug("removed [" + node + "] from blacklist");
logger.debug("removed [" + node + "] from denylist");
}
}

Expand All @@ -538,19 +538,19 @@ private void onResponse(Node node) {
*/
private void onFailure(Node node) {
while (true) {
DeadHostState previousDeadHostState = blacklist.putIfAbsent(
DeadHostState previousDeadHostState = denylist.putIfAbsent(
node.getHost(),
new DeadHostState(DeadHostState.DEFAULT_TIME_SUPPLIER)
);
if (previousDeadHostState == null) {
if (logger.isDebugEnabled()) {
logger.debug("added [" + node + "] to blacklist");
logger.debug("added [" + node + "] to denylist");
}
break;
}
if (blacklist.replace(node.getHost(), previousDeadHostState, new DeadHostState(previousDeadHostState))) {
if (denylist.replace(node.getHost(), previousDeadHostState, new DeadHostState(previousDeadHostState))) {
if (logger.isDebugEnabled()) {
logger.debug("updated [" + node + "] already in blacklist");
logger.debug("updated [" + node + "] already in denylist");
}
break;
}
Expand Down Expand Up @@ -718,8 +718,8 @@ static class NodeTuple<T> {
}

/**
* Contains a reference to a blacklisted node and the time until it is
* revived. We use this so we can do a single pass over the blacklist.
* Contains a reference to a denylisted node and the time until it is
* revived. We use this so we can do a single pass over the denylist.
*/
private static class DeadNode implements Comparable<DeadNode> {
final Node node;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@
import static org.junit.Assert.fail;

/**
* Tests for {@link RestClient} behaviour against multiple hosts: fail-over, blacklisting etc.
* Tests for {@link RestClient} behaviour against multiple hosts: fail-over, denylisting etc.
* Relies on a mock http client to intercept requests and return desired responses based on request path.
*/
public class RestClientMultipleHostsTests extends RestClientTestCase {
Expand Down Expand Up @@ -154,7 +154,7 @@ public void testRoundRobinRetryErrors() throws Exception {
fail("request should have failed");
} catch (ResponseException e) {
Set<HttpHost> hostsSet = hostsSet();
// first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each
// first request causes all the hosts to be denylisted, the returned exception holds one suppressed exception each
failureListener.assertCalled(nodes);
do {
Response response = e.getResponse();
Expand All @@ -175,7 +175,7 @@ public void testRoundRobinRetryErrors() throws Exception {
assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size());
} catch (IOException e) {
Set<HttpHost> hostsSet = hostsSet();
// first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each
// first request causes all the hosts to be denylisted, the returned exception holds one suppressed exception each
failureListener.assertCalled(nodes);
do {
HttpHost httpHost = HttpHost.create(e.getMessage());
Expand Down Expand Up @@ -211,13 +211,13 @@ public void testRoundRobinRetryErrors() throws Exception {
"host [" + response.getHost() + "] not found, most likely used multiple times",
hostsSet.remove(response.getHost())
);
// after the first request, all hosts are blacklisted, a single one gets resurrected each time
// after the first request, all hosts are denylisted, a single one gets resurrected each time
failureListener.assertCalled(response.getHost());
assertEquals(0, e.getSuppressed().length);
} catch (IOException e) {
HttpHost httpHost = HttpHost.create(e.getMessage());
assertTrue("host [" + httpHost + "] not found, most likely used multiple times", hostsSet.remove(httpHost));
// after the first request, all hosts are blacklisted, a single one gets resurrected each time
// after the first request, all hosts are denylisted, a single one gets resurrected each time
failureListener.assertCalled(httpHost);
assertEquals(0, e.getSuppressed().length);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -260,11 +260,11 @@ public String toString() {

NodeTuple<List<Node>> nodeTuple = new NodeTuple<>(Arrays.asList(n1, n2, n3), null);

Map<HttpHost, DeadHostState> emptyBlacklist = Collections.emptyMap();
Map<HttpHost, DeadHostState> emptyDenylist = Collections.emptyMap();

// Normal cases where the node selector doesn't reject all living nodes
assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, emptyBlacklist, NodeSelector.ANY);
assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, emptyBlacklist, not1);
assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, emptyDenylist, NodeSelector.ANY);
assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, emptyDenylist, not1);

/*
* Try a NodeSelector that excludes all nodes. This should
Expand All @@ -274,83 +274,83 @@ public String toString() {
String message = "NodeSelector [NONE] rejected all nodes, living ["
+ "[host=http://1, version=1], [host=http://2, version=2], "
+ "[host=http://3, version=3]] and dead []";
assertEquals(message, assertSelectAllRejected(nodeTuple, emptyBlacklist, noNodes));
assertEquals(message, assertSelectAllRejected(nodeTuple, emptyDenylist, noNodes));
}

// Mark all the nodes dead for a few test cases
{
final AtomicLong time = new AtomicLong(0L);
Supplier<Long> timeSupplier = time::get;
Map<HttpHost, DeadHostState> blacklist = new HashMap<>();
blacklist.put(n1.getHost(), new DeadHostState(timeSupplier));
blacklist.put(n2.getHost(), new DeadHostState(new DeadHostState(timeSupplier)));
blacklist.put(n3.getHost(), new DeadHostState(new DeadHostState(new DeadHostState(timeSupplier))));
Map<HttpHost, DeadHostState> denylist = new HashMap<>();
denylist.put(n1.getHost(), new DeadHostState(timeSupplier));
denylist.put(n2.getHost(), new DeadHostState(new DeadHostState(timeSupplier)));
denylist.put(n3.getHost(), new DeadHostState(new DeadHostState(new DeadHostState(timeSupplier))));

/*
* case when fewer nodeTuple than blacklist, won't result in any IllegalCapacityException
* case when the nodeTuple contains fewer nodes than the denylist; this won't result in any IllegalCapacityException
*/
{
NodeTuple<List<Node>> fewerNodeTuple = new NodeTuple<>(Arrays.asList(n1, n2), null);
assertSelectLivingHosts(Arrays.asList(n1), fewerNodeTuple, blacklist, NodeSelector.ANY);
assertSelectLivingHosts(Arrays.asList(n2), fewerNodeTuple, blacklist, not1);
assertSelectLivingHosts(Arrays.asList(n1), fewerNodeTuple, denylist, NodeSelector.ANY);
assertSelectLivingHosts(Arrays.asList(n2), fewerNodeTuple, denylist, not1);
}

/*
* selectHosts will revive a single host regardless of
* blacklist time. It'll revive the node that is closest
* denylist time. It'll revive the node that is closest
* to being revived that the NodeSelector is ok with.
*/
assertEquals(singletonList(n1), RestClient.selectNodes(nodeTuple, blacklist, new AtomicInteger(), NodeSelector.ANY));
assertEquals(singletonList(n2), RestClient.selectNodes(nodeTuple, blacklist, new AtomicInteger(), not1));
assertEquals(singletonList(n1), RestClient.selectNodes(nodeTuple, denylist, new AtomicInteger(), NodeSelector.ANY));
assertEquals(singletonList(n2), RestClient.selectNodes(nodeTuple, denylist, new AtomicInteger(), not1));

/*
* Try a NodeSelector that excludes all nodes. This should
* return a failure, but a different failure than when the
* blacklist is empty so that the caller knows that all of
* their nodes are blacklisted AND blocked.
* denylist is empty so that the caller knows that all of
* their nodes are denylisted AND blocked.
*/
String message = "NodeSelector [NONE] rejected all nodes, living [] and dead ["
+ "[host=http://1, version=1], [host=http://2, version=2], "
+ "[host=http://3, version=3]]";
assertEquals(message, assertSelectAllRejected(nodeTuple, blacklist, noNodes));
assertEquals(message, assertSelectAllRejected(nodeTuple, denylist, noNodes));

/*
* Now lets wind the clock forward, past the timeout for one of
* the dead nodes. We should return it.
*/
time.set(new DeadHostState(timeSupplier).getDeadUntilNanos());
assertSelectLivingHosts(Arrays.asList(n1), nodeTuple, blacklist, NodeSelector.ANY);
assertSelectLivingHosts(Arrays.asList(n1), nodeTuple, denylist, NodeSelector.ANY);

/*
* But if the NodeSelector rejects that node then we'll pick the
* first on that the NodeSelector doesn't reject.
*/
assertSelectLivingHosts(Arrays.asList(n2), nodeTuple, blacklist, not1);
assertSelectLivingHosts(Arrays.asList(n2), nodeTuple, denylist, not1);

/*
* If we wind the clock way into the future, past any of the
* blacklist timeouts then we function as though the nodes aren't
* in the blacklist at all.
* denylist timeouts then we function as though the nodes aren't
* in the denylist at all.
*/
time.addAndGet(DeadHostState.MAX_CONNECTION_TIMEOUT_NANOS);
assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, blacklist, NodeSelector.ANY);
assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, blacklist, not1);
assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, denylist, NodeSelector.ANY);
assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, denylist, not1);
}
}

private void assertSelectLivingHosts(
List<Node> expectedNodes,
NodeTuple<List<Node>> nodeTuple,
Map<HttpHost, DeadHostState> blacklist,
Map<HttpHost, DeadHostState> denylist,
NodeSelector nodeSelector
) throws IOException {
int iterations = 1000;
AtomicInteger lastNodeIndex = new AtomicInteger(0);
assertEquals(expectedNodes, RestClient.selectNodes(nodeTuple, blacklist, lastNodeIndex, nodeSelector));
assertEquals(expectedNodes, RestClient.selectNodes(nodeTuple, denylist, lastNodeIndex, nodeSelector));
// Calling it again rotates the set of results
for (int i = 1; i < iterations; i++) {
Collections.rotate(expectedNodes, 1);
assertEquals("iteration " + i, expectedNodes, RestClient.selectNodes(nodeTuple, blacklist, lastNodeIndex, nodeSelector));
assertEquals("iteration " + i, expectedNodes, RestClient.selectNodes(nodeTuple, denylist, lastNodeIndex, nodeSelector));
}
}

Expand All @@ -360,11 +360,11 @@ private void assertSelectLivingHosts(
*/
private static String assertSelectAllRejected(
NodeTuple<List<Node>> nodeTuple,
Map<HttpHost, DeadHostState> blacklist,
Map<HttpHost, DeadHostState> denylist,
NodeSelector nodeSelector
) {
try {
RestClient.selectNodes(nodeTuple, blacklist, new AtomicInteger(0), nodeSelector);
RestClient.selectNodes(nodeTuple, denylist, new AtomicInteger(0), nodeSelector);
throw new AssertionError("expected selectHosts to fail");
} catch (IOException e) {
return e.getMessage();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@
* a thread must have {@code modifyThread} to even terminate its own pool, leaving
* system threads unprotected.
* </ul>
* This class throws exception on {@code exitVM} calls, and provides a whitelist where calls
* This class throws exception on {@code exitVM} calls, and provides an allowlist where calls
* from exit are allowed.
* <p>
* Additionally it enforces threadgroup security with the following rules:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,13 +43,13 @@

public class AnalysisPainlessExtension implements PainlessExtension {

private static final Whitelist WHITELIST = WhitelistLoader.loadFromResourceFiles(
private static final Whitelist ALLOWLIST = WhitelistLoader.loadFromResourceFiles(
AnalysisPainlessExtension.class,
"painless_whitelist.txt"
);

@Override
public Map<ScriptContext<?>, List<Whitelist>> getContextWhitelists() {
return Collections.singletonMap(AnalysisPredicateScript.CONTEXT, Collections.singletonList(WHITELIST));
return Collections.singletonMap(AnalysisPredicateScript.CONTEXT, Collections.singletonList(ALLOWLIST));
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -44,13 +44,13 @@

public class ProcessorsWhitelistExtension implements PainlessExtension {

private static final Whitelist WHITELIST = WhitelistLoader.loadFromResourceFiles(
private static final Whitelist ALLOWLIST = WhitelistLoader.loadFromResourceFiles(
ProcessorsWhitelistExtension.class,
"processors_whitelist.txt"
);

@Override
public Map<ScriptContext<?>, List<Whitelist>> getContextWhitelists() {
return Collections.singletonMap(IngestScript.CONTEXT, Collections.singletonList(WHITELIST));
return Collections.singletonMap(IngestScript.CONTEXT, Collections.singletonList(ALLOWLIST));
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
# under the License.
#

# This file contains a whitelist of static processor methods that can be accessed from painless
# This file contains an allowlist of static processor methods that can be accessed from painless

class org.opensearch.ingest.common.Processors {
long bytes(String)
Expand Down
Loading