Skip to content

Commit

Permalink
Address some of review from Brian
Browse files Browse the repository at this point in the history
  • Loading branch information
anodos325 committed Jun 27, 2024
1 parent 3945ee6 commit 916292e
Show file tree
Hide file tree
Showing 8 changed files with 63 additions and 23 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
SHELL=/bin/sh
PATH=/etc:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin

30 3 * * * root midclt call dscache.refresh > /dev/null 2>&1
30 3 * * * root midclt call directoryservices.cache.refresh > /dev/null 2>&1
45 3 * * * root midclt call config.backup >/dev/null 2>&1

45 3 * * * root midclt call pool.scrub.run ${boot_pool} ${system_advanced['boot_scrub']} > /dev/null 2>&1
Expand Down
57 changes: 42 additions & 15 deletions src/middlewared/middlewared/plugins/directoryservices_/cache.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,14 @@ class Config:
Str('idtype', enum=['USER', 'GROUP'], required=True),
Dict('cache_entry', additional_attrs=True),
)
def insert(self, idtype, entry):
def _insert(self, idtype, entry):
"""
Internal method to insert an entry into the cache. The only consumers of this
method should be within this plugin.
Raises:
RuntimeError (tdb library error / corruption)
"""
match (id_type := IDType[idtype]):
case IDType.GROUP:
insert_cache_entry(id_type, entry['gid'], entry['name'], entry)
Expand All @@ -43,20 +50,32 @@ def insert(self, idtype, entry):
),
Dict(
'options',
Bool('synthesize', default=False),
Bool('smb', default=False)
)
)
def retrieve(self, data, options):
def _retrieve(self, data, options):
"""
Internal method to retrieve an entry from cache. If the entry does not exist then
a lookup via NSS will be attempted and if successful a cache entry will be generated.
The only consumers of this method should be within this plugin. Either `who` or `id` should be specified.
Returns:
user.query entry (successful user lookup)
group.query entry (successful group lookup)
None (lookup failure)
Raises:
RuntimeError (tdb library error)
CallError (Idmap lookup failure -- unexpected)
"""
try:
entry = retrieve_cache_entry(IDType[data['idtype']], data.get('who'), data.get('id'))
except MatchNotFound:
entry = None

if not entry and options['synthesize']:
if not entry:
"""
if cache lacks entry, create one from passwd / grp info,
insert into cache and return synthesized value.
If cache lacks entry, create one from passwd / grp info, insert into cache
user.get_user_obj and group.get_group_obj will raise KeyError if NSS lookup fails.
"""
try:
Expand Down Expand Up @@ -96,15 +115,13 @@ def retrieve(self, data, options):
if entry is None:
return None

self.insert(data['idtype'], entry)
self._insert(data['idtype'], entry)
entry['nt_name'] = entry[name_key]
except KeyError:
entry = None

elif not entry:
raise KeyError(data.get('who') or data.get('id'))

if entry and not options['smb']:
# caller has not requested SMB information and so we should strip it
entry['sid'] = None
entry['nt_name'] = None

Expand All @@ -121,6 +138,11 @@ def retrieve(self, data, options):
def query(self, id_type, filters, options):
"""
Query User / Group cache with `query-filters` and `query-options`.
NOTE: only consumers for this endpoint should be user.query and group.query.
query-options (apart from determining whether to include "SMB" information)
are not evaluated here because user.query and group.query applies pagination
on full results.
"""
ds = self.middleware.call_sync('directoryservices.status')
if ds['type'] is None:
Expand All @@ -138,14 +160,15 @@ def query(self, id_type, filters, options):
# generate cache entry based on its results. This allows slowly building
# a cache when user / group enumeration is disabled.
key = 'who' if is_name_check else 'id'
entry = self.retrieve({
entry = self._retrieve({
'idtype': id_type,
key: filters[0][2],
}, {'synthesize': True, 'smb': get_smb})
}, {'smb': get_smb})

return [entry] if entry else []

entries = query_cache_entries(IDType[id_type], filters, options)
# options must be omitted to defer pagination logic to caller
entries = query_cache_entries(IDType[id_type], filters, {})
if not get_smb:
for entry in entries:
entry['sid'] = None
Expand Down Expand Up @@ -177,8 +200,12 @@ def idmap_online_check_wait(self, job):
@job(lock="directoryservices_cache_fill", lock_queue_size=1)
def refresh(self, job):
"""
This is called from a cronjob every 24 hours and when a user clicks on the
UI button to 'rebuild directory service cache'.
Rebuild the directory services cache. This is performed in the following
situations:
1. User starts a directory service
2. User triggers manually through API or webui
3. Once every 24 hours via cronjob
"""

ds = self.middleware.call_sync('directoryservices.status')
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ class Secrets(enum.Enum):
DOMAIN_GUID = 'SECRETS/DOMGUID'
SERVER_GUID = 'SECRETS/GUID'
LDAP_BIND_PW = 'SECRETS/LDAP_BIND_PW'
LDAP_IDMAP_SECRET = 'SECRETS/GENERIC/IDMAP_LDAP'
LOCAL_SCHANNEL_KEY = 'SECRETS/LOCAL_SCHANNEL_KEY'
AUTH_USER = 'SECRETS/AUTH_USER'
AUTH_DOMAIN = 'SECRETS/AUTH_DOMAIN'
Expand Down Expand Up @@ -97,15 +98,15 @@ def set_ldap_idmap_secret(self, domain, user_dn, secret):
This method is used by idmap plugin to write the password.
"""
store_secrets_entry(
f'SECRETS/GENERIC/IDMAP_LDAP_{domain.upper()}/{user_dn}',
f'{Secrets.LDAP_IDMAP_SECRET.value}_{domain.upper()}/{user_dn}',
b64encode(secret.encode() + b'\x00')
)

def get_ldap_idmap_secret(self, domain, user_dn):
"""
Retrieve idmap secret for the specified domain and user dn.
"""
return fetch_secrets_entry(f'SECRETS/GENERIC/IDMAP_LDAP_{domain.upper()}/{user_dn}')
return fetch_secrets_entry(f'{Secrets.LDAP_IDMAP_SECRET.value}_{domain.upper()}/{user_dn}')

def get_machine_secret(self, domain):
return fetch_secrets_entry(f'{Secrets.MACHINE_PASSWORD.value}/{domain.upper()}')
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -265,7 +265,7 @@ def fill_cache(
}

if user_count % LOG_CACHE_ENTRY_INTERVAL == 0:
job.set_progress(50, f'{user_data.pw_name}: adding user to cache')
job.set_progress(50, f'{user_data.pw_name}: adding user to cache. User count: {user_count}')

# Store forward and reverse entries
_tdb_add_entry(self.users_handle, user_data.pw_uid, user_data.pw_name, entry)
Expand Down Expand Up @@ -298,7 +298,7 @@ def fill_cache(
}

if group_count % LOG_CACHE_ENTRY_INTERVAL == 0:
job.set_progress(80, f'{group_data.gr_name}: adding group to cache')
job.set_progress(80, f'{group_data.gr_name}: adding group to cache. Group count: {group_count}')

_tdb_add_entry(self.groups_handle, group_data.gr_gid, group_data.gr_name, entry)
group_count += 1
Expand Down
9 changes: 8 additions & 1 deletion src/middlewared/middlewared/plugins/idmap_/gencache.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,12 +40,19 @@ def remove_gencache_entry(key: str) -> None:


def wipe_gencache_entries() -> None:
    """ wrapper around tdb_wipe_all for file """
    # Open the gencache TDB and clear it wholesale; the context manager
    # guarantees the handle is closed even if clear() raises.
    with get_tdb_handle(GENCACHE_FILE, GENCACHE_TDB_OPTIONS) as tdb:
        return tdb.clear()


def flush_gencache_entries() -> None:
""" delete all keys in gencache """
"""
delete all keys in gencache
This matches behavior of "net cache flush" which iterates and
deletes entries. If we fail due to corrupt TDB file then it will
be wiped.
"""
with get_tdb_handle(GENCACHE_FILE, GENCACHE_TDB_OPTIONS) as hdl:
for entry in hdl.entries():
hdl.delete(entry['key'])
Expand Down
Original file line number Diff line number Diff line change
@@ -1,8 +1,12 @@
import enum
import wbclient

# Base datastore `id` value for entries that are returned as user.query and
# group.query results. We add the posix uid / gid to this number to ensure
# that it is unique at a given point in time.
BASE_SYNTHETIC_DATASTORE_ID = 100000000
TRUENAS_IDMAP_MAX = 2147000000

TRUENAS_IDMAP_MAX = 2147000000 # Maximum ID that we allow winbind / sssd to provide
TRUENAS_IDMAP_DEFAULT_LOW = 90000001
SID_LOCAL_USER_PREFIX = "S-1-22-1-"
SID_LOCAL_GROUP_PREFIX = "S-1-22-2-"
Expand Down
2 changes: 1 addition & 1 deletion src/middlewared/middlewared/plugins/smb_/sharesec.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ def fetch_share_acl(share_name: str) -> str:


def store_share_acl(share_name: str, val: str) -> None:
""" write base64-encoded NT ACL for SMB share to server running condfiguraiton """
""" write base64-encoded NT ACL for SMB share to server running configuration """
with get_tdb_handle(LOCAL_SHARE_INFO_FILE, SHARE_INFO_TDB_OPTIONS) as hdl:
return hdl.store(f'SECDESC/{share_name.lower()}', val)

Expand Down
1 change: 1 addition & 0 deletions src/middlewared/middlewared/pytest/unit/utils/test_tdb.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,7 @@ def batched_tdb_ops(hdl: TDBHandle, datatype: TDBDataType):
]
out = hdl.batch_op(batched_ops)
assert out['test_key1'] == data1
assert len(list(hdl.entries())) == 1

hdl.clear()
assert len(list(hdl.entries())) == 0
Expand Down

0 comments on commit 916292e

Please sign in to comment.