update libs (#354)
* update libs

* update libs from VM charm

* update test_mongos to test the new way of removing users

* update lib

* update src

* wait for mongos charm to remove the user

* update tests
MiaAltieri authored Oct 21, 2024
1 parent 4c3c9e7 commit e8b6f2d
Showing 4 changed files with 118 additions and 12 deletions.
69 changes: 63 additions & 6 deletions lib/charms/mongodb/v0/config_server_interface.py
@@ -14,8 +15,8 @@
DatabaseRequestedEvent,
DatabaseRequires,
)
from charms.mongodb.v0.mongo import MongoConnection
from charms.mongodb.v1.mongos import MongosConnection
from ops.charm import CharmBase, EventBase, RelationBrokenEvent, RelationChangedEvent
from ops.charm import (
CharmBase,
EventBase,
RelationBrokenEvent,
RelationChangedEvent,
RelationCreatedEvent,
)
from ops.framework import Object
from ops.model import (
ActiveStatus,
@@ -24,6 +31,7 @@
StatusBase,
WaitingStatus,
)
from pymongo.errors import PyMongoError

from config import Config

@@ -43,16 +51,20 @@

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 13
LIBPATCH = 14


class ClusterProvider(Object):
"""Manage relations between the config server and mongos router on the config-server side."""

def __init__(
self, charm: CharmBase, relation_name: str = Config.Relations.CLUSTER_RELATIONS_NAME
self,
charm: CharmBase,
relation_name: str = Config.Relations.CLUSTER_RELATIONS_NAME,
substrate: str = Config.Substrate.VM,
) -> None:
"""Constructor for ShardingProvider object."""
self.substrate = substrate
self.relation_name = relation_name
self.charm = charm
self.database_provides = DatabaseProvides(self.charm, relation_name=self.relation_name)
@@ -179,7 +191,9 @@ def _on_relation_broken(self, event) -> None:
logger.info("Skipping relation broken event, broken event due to scale down")
return

self.charm.client_relations.oversee_users(departed_relation_id, event)
# mongos-k8s router is in charge of removing its own users.
if self.substrate == Config.Substrate.VM:
self.charm.client_relations.oversee_users(departed_relation_id, event)

def update_config_server_db(self, event):
"""Provides related mongos applications with new config server db."""
@@ -244,7 +258,7 @@ def __init__(
super().__init__(charm, self.relation_name)
self.framework.observe(
charm.on[self.relation_name].relation_created,
self.database_requires._on_relation_created_event,
self._on_relation_created_handler,
)

self.framework.observe(
@@ -261,6 +275,11 @@ def __init__(
charm.on[self.relation_name].relation_broken, self._on_relation_broken
)

def _on_relation_created_handler(self, event: RelationCreatedEvent) -> None:
logger.info("Integrating to config-server")
self.charm.status.set_and_share_status(WaitingStatus("Connecting to config-server"))
self.database_requires._on_relation_created_event(event)

def _on_database_created(self, event) -> None:
if self.charm.upgrade_in_progress:
logger.warning(
@@ -303,6 +322,8 @@ def _on_relation_changed(self, event) -> None:

# avoid restarting mongos when possible
if not updated_keyfile and not updated_config and self.is_mongos_running():
# mongos-k8s router must update its users on start
self._update_k8s_users(event)
return

# mongos is not available until it is using new secrets
@@ -321,6 +342,20 @@ def _on_relation_changed(self, event) -> None:
if self.charm.unit.is_leader():
self.charm.mongos_initialised = True

# mongos-k8s router must update its users on start
self._update_k8s_users(event)

def _update_k8s_users(self, event) -> None:
if self.substrate != Config.Substrate.K8S:
return

# K8s can handle its 1:Many users after being initialized
try:
self.charm.client_relations.oversee_users(None, None)
except PyMongoError:
event.defer()
logger.debug("failed to add users on mongos-k8s router, will defer and try again.")

def _on_relation_broken(self, event: RelationBrokenEvent) -> None:
# Only relation_departed events can check if scaling down
if not self.charm.has_departed_run(event.relation.id):
@@ -334,6 +369,13 @@ def _on_relation_broken(self, event: RelationBrokenEvent) -> None:
logger.info("Skipping relation broken event, broken event due to scale down")
return

try:
self.handle_mongos_k8s_users_removal()
except PyMongoError:
logger.debug("Trouble removing router users, will defer and try again")
event.defer()
return

self.charm.stop_mongos_service()
logger.info("Stopped mongos daemon")

@@ -348,9 +390,24 @@ def _on_relation_broken(self, event: RelationBrokenEvent) -> None:
if self.substrate == Config.Substrate.VM:
self.charm.remove_connection_info()
else:
self.db_initialised = False
self.charm.db_initialised = False

# BEGIN: helper functions
def handle_mongos_k8s_users_removal(self) -> None:
"""Handles the removal of all client mongos-k8s users and the mongos-k8s admin user.
Raises:
PyMongoError
"""
if not self.charm.unit.is_leader() or self.substrate != Config.Substrate.K8S:
return

self.charm.client_relations.remove_all_relational_users()

# now that the client mongos users have been removed, we can remove our own user
with MongoConnection(self.charm.mongo_config) as mongo:
mongo.drop_user(self.charm.mongo_config.username)

def pass_hook_checks(self, event):
"""Runs the pre-hooks checks for ClusterRequirer, returns True if all pass."""
if self.is_mongos_tls_missing():
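Both new code paths in this file (`_update_k8s_users` and the user-removal step in `_on_relation_broken`) follow the same convention: a transient `PyMongoError` defers the event so Juju re-delivers it on a later dispatch. A minimal, self-contained sketch of that convention — the `FakeEvent` stand-in is illustrative, not part of ops:

from pymongo.errors import PyMongoError

class FakeEvent:
    """Illustrative stand-in for an ops event; real handlers call EventBase.defer()."""

    def __init__(self) -> None:
        self.deferred = False

    def defer(self) -> None:
        self.deferred = True

def update_users(event: FakeEvent, oversee_users) -> None:
    # Mirrors the handlers above: on a transient mongos outage, defer and
    # let Juju re-emit the event instead of failing the hook.
    try:
        oversee_users()
    except PyMongoError:
        event.defer()

def flaky_oversee() -> None:
    raise PyMongoError("mongos not reachable yet")

event = FakeEvent()
update_users(event, flaky_oversee)
print(event.deferred)  # True -> the hook will run again later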
37 changes: 37 additions & 0 deletions lib/charms/mongodb/v1/mongodb_provider.py
@@ -227,6 +227,15 @@ def remove_users(
):
continue

# for mongos-k8s router users, we let the router remove its own users
if (
self.charm.is_role(Config.Role.CONFIG_SERVER)
and self.substrate == Config.Substrate.K8S
):
logger.info("K8s routers will remove themselves.")
self._remove_from_relational_users_to_manage(username)
continue

mongo.drop_user(username)
self._remove_from_relational_users_to_manage(username)

@@ -514,6 +523,34 @@ def _add_to_relational_users_to_manage(self, user_to_add: str) -> None:
current_users.add(user_to_add)
self._update_relational_users_to_manage(current_users)

def remove_all_relational_users(self):
"""Removes all users from DB.
Raises: PyMongoError.
"""
with MongoConnection(self.charm.mongo_config) as mongo:
database_users = mongo.get_users()

users_being_managed = database_users.intersection(self._get_relational_users_to_manage())
self.remove_users(users_being_managed, expected_current_users=set())

# now we must remove all of their connection info
for relation in self._get_relations():
fields = self.database_provides.fetch_my_relation_data([relation.id])[relation.id]
self.database_provides.delete_relation_data(relation.id, fields=list(fields))

# unfortunately the above doesn't remove the secrets, so we must manually
# remove the secret before clearing the databag
for unit in relation.units:
secret_id = json.loads(relation.data[unit]["data"])["secret-user"]
# secret id is the same on all units for `secret-user`
break

user_secrets = self.charm.model.get_secret(id=secret_id)
user_secrets.remove_all_revisions()
user_secrets.get_content(refresh=True)
relation.data[self.charm.app].clear()

@staticmethod
def _get_database_from_relation(relation: Relation) -> Optional[str]:
"""Return database name from relation."""
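The secret-removal dance in `remove_all_relational_users` exists because `delete_relation_data` clears databag fields but does not remove the Juju secret holding the user's credentials. A hedged sketch of that manual cleanup, assuming the standard data_interfaces layout in which each unit databag stores a JSON blob under "data" whose "secret-user" key holds the secret URI (the helper name is illustrative, and it only runs inside a charm event handler):

import json

def remove_user_secret(charm, relation) -> None:
    # The secret-user URI is identical on every unit, so the first one is enough.
    secret_id = None
    for unit in relation.units:
        secret_id = json.loads(relation.data[unit]["data"])["secret-user"]
        break
    if secret_id is None:
        return

    secret = charm.model.get_secret(id=secret_id)
    secret.remove_all_revisions()
    # Refresh so this process also drops its locally cached revision.
    secret.get_content(refresh=True)

    # Finally, clear the application databag for the relation.
    relation.data[charm.app].clear()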
2 changes: 1 addition & 1 deletion src/charm.py
@@ -153,7 +153,7 @@ def __init__(self, *args):

self.shard = ConfigServerRequirer(self)
self.config_server = ShardingProvider(self)
self.cluster = ClusterProvider(self)
self.cluster = ClusterProvider(self, substrate=Config.SUBSTRATE)

self.upgrade = MongoDBUpgrade(self)

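`Config.SUBSTRATE` and `Config.Substrate` are referenced throughout this commit but never shown; a plausible shape, inferred from usage here (the real definitions live in src/config.py and may differ):

class Config:
    # Assumed: the K8s flavour of the charm sets this to "k8s" and the VM
    # flavour to "vm"; ClusterProvider/ClusterRequirer compare against
    # Config.Substrate.VM and Config.Substrate.K8S above.
    SUBSTRATE = "k8s"

    class Substrate:
        VM = "vm"
        K8S = "k8s"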
22 changes: 17 additions & 5 deletions tests/integration/sharding_tests/test_mongos.py
@@ -4,6 +4,7 @@


import pytest
import tenacity
from pymongo.errors import OperationFailure
from pytest_operator.plugin import OpsTest

@@ -17,7 +18,9 @@
SHARD_REL_NAME = "sharding"
CLUSTER_REL_NAME = "cluster"
CONFIG_SERVER_REL_NAME = "config-server"
NUMBER_OF_MONGOS_USERS_WHEN_NO_ROUTERS = 3  # operator-user, backup-user, and monitor-user
TIMEOUT = 10 * 60
TWO_MINUTE_TIMEOUT = 2 * 60


@pytest.mark.group(1)
@@ -125,7 +128,11 @@ async def test_connect_to_cluster_creates_user(ops_test: OpsTest) -> None:
@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_disconnect_from_cluster_removes_user(ops_test: OpsTest) -> None:
"""Verifies that when the cluster is formed a the user is removed."""
"""Verifies that when the cluster is formed the client users are removed.
Since mongos-k8s router supports multiple users, we expect that the removal of this
integration will result in the removal of the mongos-k8s admin users and all of its clients.
"""
# generate URI for new mongos user
(username, password) = await get_related_username_password(
ops_test, app_name=MONGOS_APP_NAME, relation_name=CLUSTER_REL_NAME
@@ -135,7 +142,6 @@ async def test_disconnect_from_cluster_removes_user(ops_test: OpsTest) -> None:
mongos_client = await get_direct_mongo_client(
ops_test, app_name=CONFIG_SERVER_APP_NAME, mongos=True
)
num_users = count_users(mongos_client)
await ops_test.model.applications[MONGOS_APP_NAME].remove_relation(
f"{MONGOS_APP_NAME}:cluster",
f"{CONFIG_SERVER_APP_NAME}:cluster",
@@ -148,9 +154,15 @@
)
num_users_after_removal = count_users(mongos_client)

assert (
num_users - 1 == num_users_after_removal
), "Cluster did not remove user after integration removal."
for attempt in tenacity.Retrying(
    reraise=True,
    stop=tenacity.stop_after_delay(TWO_MINUTE_TIMEOUT),
    wait=tenacity.wait_fixed(10),
):
    with attempt:
        # re-count inside the retry loop so each attempt sees fresh data
        num_users_after_removal = count_users(mongos_client)
        assert (
            NUMBER_OF_MONGOS_USERS_WHEN_NO_ROUTERS == num_users_after_removal
        ), "Cluster did not remove users after integration removal."

mongos_user_client = await get_direct_mongo_client(
ops_test,
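The tenacity loop polls the assertion until it passes or the two-minute deadline expires; `reraise=True` surfaces the final `AssertionError` instead of tenacity's own `RetryError`. A small, self-contained illustration of the same pattern:

import tenacity

attempts = {"count": 0}

def condition_is_met() -> bool:
    attempts["count"] += 1
    return attempts["count"] >= 3  # succeeds on the third poll

for attempt in tenacity.Retrying(
    reraise=True,                        # raise the last AssertionError, not RetryError
    stop=tenacity.stop_after_delay(30),  # give up after 30 seconds
    wait=tenacity.wait_fixed(1),         # poll once per second
):
    with attempt:
        assert condition_is_met(), "condition never became true"

print(f"passed after {attempts['count']} attempts")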
