cephadm: Various properties like 'last_refresh' do not contain timezone
Fixes: https://tracker.ceph.com/issues/48068

Signed-off-by: Volker Theile <[email protected]>
votdev committed Jan 5, 2021
1 parent 02178b0 commit 3fe7152
Showing 15 changed files with 201 additions and 81 deletions.
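The change replaces ad-hoc datetime.datetime.utcnow() calls and local date-format constants with shared helpers from ceph.utils (datetime_now, datetime_to_str, str_to_datetime). The helper bodies are not shown in this diff; the following is a minimal sketch of what they plausibly look like, assuming timezone-aware UTC datetimes serialized with a trailing 'Z':

import datetime

DATEFMT = '%Y-%m-%dT%H:%M:%S.%fZ'  # the 'Z'-suffixed format this commit standardizes on


def datetime_now() -> datetime.datetime:
    # Timezone-aware "now" in UTC, replacing the naive datetime.utcnow().
    return datetime.datetime.now(tz=datetime.timezone.utc)


def datetime_to_str(dt: datetime.datetime) -> str:
    # Serialize for JSON/key-value storage; the trailing 'Z' marks UTC explicitly.
    return dt.strftime(DATEFMT)


def str_to_datetime(s: str) -> datetime.datetime:
    # Parse the stored string back into an aware UTC datetime.
    return datetime.datetime.strptime(s, DATEFMT).replace(tzinfo=datetime.timezone.utc)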
9 changes: 4 additions & 5 deletions src/cephadm/cephadm
@@ -48,7 +48,6 @@ import os
import platform
import pwd
import random
import re
import select
import shutil
import socket
@@ -101,7 +100,7 @@ if sys.version_info > (3, 0):
container_path = ''
cached_stdin = None

DATEFMT = '%Y-%m-%dT%H:%M:%S.%f'
DATEFMT = '%Y-%m-%dT%H:%M:%S.%fZ'

# Log and console output config
logging_config = {
@@ -1177,7 +1176,7 @@ def get_file_timestamp(fn):
return datetime.datetime.fromtimestamp(
mt, tz=datetime.timezone.utc
).strftime(DATEFMT)
except Exception as e:
except Exception:
return None


@@ -1199,11 +1198,11 @@ def try_convert_datetime(s):
p = re.compile(r'(\.[\d]{6})[\d]*')
s = p.sub(r'\1', s)

# replace trailling Z with -0000, since (on python 3.6.8) it won't parse
# replace trailing Z with -0000, since (on python 3.6.8) it won't parse
if s and s[-1] == 'Z':
s = s[:-1] + '-0000'

# cut off the redundnat 'CST' part that strptime can't parse, if
# cut off the redundant 'CST' part that strptime can't parse, if
# present.
v = s.split(' ')
s = ' '.join(v[0:3])
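For context, the hunk above sits inside cephadm's try_convert_datetime(), which normalizes container-engine timestamps before handing them to strptime. Below is a standalone sketch of that normalization, reusing the regex and 'Z'-to-offset substitution visible in the diff; the function name is illustrative, and the 'CST'-stripping step also shown above is omitted:

import datetime
import re


def normalize_timestamp(s: str) -> datetime.datetime:
    # strptime's %f accepts at most six fractional digits, so truncate the rest.
    s = re.compile(r'(\.[\d]{6})[\d]*').sub(r'\1', s)
    # Python 3.6's %z cannot parse a bare trailing 'Z'; spell the offset out.
    if s and s[-1] == 'Z':
        s = s[:-1] + '-0000'
    return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f%z')


# e.g. normalize_timestamp('2021-01-05T10:00:00.123456789Z')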
24 changes: 12 additions & 12 deletions src/pybind/mgr/cephadm/inventory.py
@@ -7,7 +7,7 @@
import orchestrator
from ceph.deployment import inventory
from ceph.deployment.service_spec import ServiceSpec
from cephadm.utils import str_to_datetime, datetime_to_str
from ceph.utils import str_to_datetime, datetime_to_str, datetime_now
from orchestrator import OrchestratorError, HostSpec, OrchestratorEvent

if TYPE_CHECKING:
@@ -136,7 +136,7 @@ def save(self, spec):
self.spec_preview[spec.service_name()] = spec
return None
self.specs[spec.service_name()] = spec
self.spec_created[spec.service_name()] = datetime.datetime.utcnow()
self.spec_created[spec.service_name()] = datetime_now()
self.mgr.set_store(
SPEC_STORE_PREFIX + spec.service_name(),
json.dumps({
@@ -278,7 +278,7 @@ def load(self):
def update_host_daemons(self, host, dm):
# type: (str, Dict[str, orchestrator.DaemonDescription]) -> None
self.daemons[host] = dm
self.last_daemon_update[host] = datetime.datetime.utcnow()
self.last_daemon_update[host] = datetime_now()

def update_host_facts(self, host, facts):
# type: (str, Dict[str, Dict[str, Any]]) -> None
@@ -289,7 +289,7 @@ def update_host_devices_networks(self, host, dls, nets):
# type: (str, List[inventory.Device], Dict[str,List[str]]) -> None
self.devices[host] = dls
self.networks[host] = nets
self.last_device_update[host] = datetime.datetime.utcnow()
self.last_device_update[host] = datetime_now()

def update_daemon_config_deps(self, host: str, name: str, deps: List[str], stamp: datetime.datetime) -> None:
self.daemon_config_deps[host][name] = {
@@ -299,7 +299,7 @@ def update_daemon_config_deps(self, host: str, name: str, deps: List[str], stamp

def update_last_host_check(self, host):
# type: (str) -> None
self.last_host_check[host] = datetime.datetime.utcnow()
self.last_host_check[host] = datetime_now()

def prime_empty_host(self, host):
# type: (str) -> None
@@ -472,7 +472,7 @@ def host_needs_daemon_refresh(self, host):
if host in self.daemon_refresh_queue:
self.daemon_refresh_queue.remove(host)
return True
cutoff = datetime.datetime.utcnow() - datetime.timedelta(
cutoff = datetime_now() - datetime.timedelta(
seconds=self.mgr.daemon_cache_timeout)
if host not in self.last_daemon_update or self.last_daemon_update[host] < cutoff:
return True
@@ -507,7 +507,7 @@ def host_needs_device_refresh(self, host):
if host in self.device_refresh_queue:
self.device_refresh_queue.remove(host)
return True
cutoff = datetime.datetime.utcnow() - datetime.timedelta(
cutoff = datetime_now() - datetime.timedelta(
seconds=self.mgr.device_cache_timeout)
if host not in self.last_device_update or self.last_device_update[host] < cutoff:
return True
@@ -526,7 +526,7 @@ def host_needs_osdspec_preview_refresh(self, host: str) -> bool:

def host_needs_check(self, host):
# type: (str) -> bool
cutoff = datetime.datetime.utcnow() - datetime.timedelta(
cutoff = datetime_now() - datetime.timedelta(
seconds=self.mgr.host_check_interval)
return host not in self.last_host_check or self.last_host_check[host] < cutoff

@@ -551,7 +551,7 @@ def host_needs_new_etc_ceph_ceph_conf(self, host: str) -> bool:
def update_last_etc_ceph_ceph_conf(self, host: str) -> None:
if not self.mgr.last_monmap:
return
self.last_etc_ceph_ceph_conf[host] = datetime.datetime.utcnow()
self.last_etc_ceph_ceph_conf[host] = datetime_now()

def host_needs_registry_login(self, host: str) -> bool:
if host in self.mgr.offline_hosts:
@@ -632,22 +632,22 @@ def add(self, event: OrchestratorEvent) -> None:
self.events[event.kind_subject()] = self.events[event.kind_subject()][-5:]

def for_service(self, spec: ServiceSpec, level: str, message: str) -> None:
e = OrchestratorEvent(datetime.datetime.utcnow(), 'service',
e = OrchestratorEvent(datetime_now(), 'service',
spec.service_name(), level, message)
self.add(e)

def from_orch_error(self, e: OrchestratorError) -> None:
if e.event_subject is not None:
self.add(OrchestratorEvent(
datetime.datetime.utcnow(),
datetime_now(),
e.event_subject[0],
e.event_subject[1],
"ERROR",
str(e)
))

def for_daemon(self, daemon_name: str, level: str, message: str) -> None:
e = OrchestratorEvent(datetime.datetime.utcnow(), 'daemon', daemon_name, level, message)
e = OrchestratorEvent(datetime_now(), 'daemon', daemon_name, level, message)
self.add(e)

def for_daemon_from_exception(self, daemon_name: str, e: Exception) -> None:
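The host_needs_* checks above all share one cutoff pattern: a host is due for refresh when it has never been updated or its last update predates "now minus the cache timeout". A condensed, self-contained sketch of that pattern follows (names simplified; the real code keeps per-host dicts on HostCache and uses ceph.utils.datetime_now):

import datetime
from typing import Dict


def needs_refresh(last_update: Dict[str, datetime.datetime],
                  host: str,
                  timeout_seconds: int) -> bool:
    # Stale if the host was never refreshed, or its last refresh is older
    # than the configured cache timeout. Both sides are aware UTC datetimes,
    # so the comparison no longer mixes naive and aware values.
    cutoff = datetime.datetime.now(tz=datetime.timezone.utc) \
        - datetime.timedelta(seconds=timeout_seconds)
    return host not in last_update or last_update[host] < cutoff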
17 changes: 7 additions & 10 deletions src/pybind/mgr/cephadm/module.py
@@ -26,6 +26,7 @@
from ceph.deployment.service_spec import \
NFSServiceSpec, ServiceSpec, PlacementSpec, assert_valid_host, \
CustomContainerSpec, HostPlacementSpec
from ceph.utils import str_to_datetime, datetime_to_str, datetime_now
from cephadm.serve import CephadmServe
from cephadm.services.cephadmservice import CephadmDaemonSpec

@@ -52,8 +53,7 @@
from .inventory import Inventory, SpecStore, HostCache, EventStore
from .upgrade import CEPH_UPGRADE_ORDER, CephadmUpgrade
from .template import TemplateMgr
from .utils import forall_hosts, CephadmNoImage, cephadmNoImage, \
str_to_datetime, datetime_to_str
from .utils import forall_hosts, CephadmNoImage, cephadmNoImage

try:
import remoto
@@ -89,8 +89,6 @@ def remoto_has_connection(self: Any) -> bool:
ConnectTimeout=30
"""

CEPH_DATEFMT = '%Y-%m-%dT%H:%M:%S.%fZ'

CEPH_TYPES = set(CEPH_UPGRADE_ORDER)


@@ -498,9 +496,8 @@ def notify(self, notify_type: str, notify_id: Optional[str]) -> None:
if notify_type == "mon_map":
# get monmap mtime so we can refresh configs when mons change
monmap = self.get('mon_map')
self.last_monmap = datetime.datetime.strptime(
monmap['modified'], CEPH_DATEFMT)
if self.last_monmap and self.last_monmap > datetime.datetime.utcnow():
self.last_monmap = str_to_datetime(monmap['modified'])
if self.last_monmap and self.last_monmap > datetime_now():
self.last_monmap = None # just in case clocks are skewed
if getattr(self, 'manage_etc_ceph_ceph_conf', False):
# getattr, due to notify() being called before config_notify()
@@ -939,7 +936,7 @@ def _set_extra_ceph_conf(self, inbuf: Optional[str] = None) -> HandleCommandResu

self.set_store("extra_ceph_conf", json.dumps({
'conf': inbuf,
'last_modified': datetime_to_str(datetime.datetime.utcnow())
'last_modified': datetime_to_str(datetime_now())
}))
self.log.info('Set extra_ceph_conf')
self._kick_serve_loop()
@@ -1840,7 +1837,7 @@ def _create_daemon(self,
).service_id(), overwrite=True):

image = ''
start_time = datetime.datetime.utcnow()
start_time = datetime_now()
ports: List[int] = daemon_spec.ports if daemon_spec.ports else []

if daemon_spec.daemon_type == 'container':
@@ -2381,7 +2378,7 @@ def remove_osds(self, osd_ids: List[str],
force=force,
hostname=daemon.hostname,
fullname=daemon.name(),
process_started_at=datetime.datetime.utcnow(),
process_started_at=datetime_now(),
remove_util=self.rm_util))
except NotFoundError:
return f"Unable to find OSDs: {osd_ids}"
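The notify() hunk above now parses the monmap's 'modified' stamp with the shared helper and discards it if it appears to be in the future. A hedged standalone equivalent, with DATEFMT assumed to match the 'Z'-suffixed format used elsewhere in this commit:

import datetime
from typing import Optional

DATEFMT = '%Y-%m-%dT%H:%M:%S.%fZ'


def parse_monmap_modified(modified: str) -> Optional[datetime.datetime]:
    # Parse the monmap 'modified' field into an aware UTC timestamp.
    stamp = datetime.datetime.strptime(modified, DATEFMT).replace(
        tzinfo=datetime.timezone.utc)
    # Discard stamps from the future, in case clocks are skewed.
    if stamp > datetime.datetime.now(tz=datetime.timezone.utc):
        return None
    return stamp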
5 changes: 3 additions & 2 deletions src/pybind/mgr/cephadm/serve.py
@@ -12,11 +12,12 @@
from ceph.deployment import inventory
from ceph.deployment.drive_group import DriveGroupSpec
from ceph.deployment.service_spec import ServiceSpec, HostPlacementSpec, RGWSpec
from ceph.utils import str_to_datetime, datetime_now

import orchestrator
from cephadm.schedule import HostAssignment
from cephadm.upgrade import CEPH_UPGRADE_ORDER
from cephadm.utils import forall_hosts, cephadmNoImage, str_to_datetime, is_repo_digest
from cephadm.utils import forall_hosts, cephadmNoImage, is_repo_digest
from orchestrator import OrchestratorError

if TYPE_CHECKING:
@@ -224,7 +225,7 @@ def _refresh_host_daemons(self, host: str) -> Optional[str]:
if '.' not in d['name']:
continue
sd = orchestrator.DaemonDescription()
sd.last_refresh = datetime.datetime.utcnow()
sd.last_refresh = datetime_now()
for k in ['created', 'started', 'last_configured', 'last_deployed']:
v = d.get(k, None)
if v:
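In _refresh_host_daemons() above, the refresh time is stamped with datetime_now() and the timestamps cephadm reports for each daemon are parsed back into aware datetimes. A rough dict-based stand-in for that step (the real code fills an orchestrator.DaemonDescription):

import datetime
from typing import Any, Dict

DATEFMT = '%Y-%m-%dT%H:%M:%S.%fZ'


def build_daemon_record(d: Dict[str, Any]) -> Dict[str, Any]:
    # Stamp the refresh time with an aware UTC "now" ...
    record: Dict[str, Any] = {
        'last_refresh': datetime.datetime.now(tz=datetime.timezone.utc),
    }
    # ... and keep the timestamps reported by cephadm timezone-aware as well.
    for k in ['created', 'started', 'last_configured', 'last_deployed']:
        v = d.get(k)
        if v:
            record[k] = datetime.datetime.strptime(v, DATEFMT).replace(
                tzinfo=datetime.timezone.utc)
    return record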
3 changes: 2 additions & 1 deletion src/pybind/mgr/cephadm/services/osd.py
@@ -6,10 +6,11 @@
from ceph.deployment.drive_group import DriveGroupSpec
from ceph.deployment.drive_selection import DriveSelection
from ceph.deployment.inventory import Device
from ceph.utils import datetime_to_str, str_to_datetime

from datetime import datetime
import orchestrator
from cephadm.utils import forall_hosts, datetime_to_str, str_to_datetime
from cephadm.utils import forall_hosts
from orchestrator import OrchestratorError
from mgr_module import MonCommandFailed

5 changes: 2 additions & 3 deletions src/pybind/mgr/cephadm/tests/fixtures.py
@@ -1,10 +1,9 @@
import datetime
import time
import fnmatch
from contextlib import contextmanager

from ceph.deployment.service_spec import PlacementSpec, ServiceSpec
from cephadm.module import CEPH_DATEFMT
from ceph.utils import datetime_to_str, datetime_now
from cephadm.serve import CephadmServe

try:
@@ -55,7 +54,7 @@ def with_cephadm_module(module_options=None, store=None):
store = {}
if '_ceph_get/mon_map' not in store:
m.mock_store_set('_ceph_get', 'mon_map', {
'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
'modified': datetime_to_str(datetime_now()),
'fsid': 'foobar',
})
for k, v in store.items():
14 changes: 7 additions & 7 deletions src/pybind/mgr/cephadm/tests/test_cephadm.py
@@ -1,4 +1,3 @@
import datetime
import json
from contextlib import contextmanager
from unittest.mock import ANY
@@ -20,12 +19,13 @@
NFSServiceSpec, IscsiServiceSpec, HostPlacementSpec, CustomContainerSpec
from ceph.deployment.drive_selection.selector import DriveSelection
from ceph.deployment.inventory import Devices, Device
from ceph.utils import datetime_to_str, datetime_now
from orchestrator import ServiceDescription, DaemonDescription, InventoryHost, \
HostSpec, OrchestratorError
from tests import mock
from .fixtures import cephadm_module, wait, _run_cephadm, match_glob, with_host, \
with_cephadm_module, with_service, assert_rm_service, _deploy_cephadm_binary
from cephadm.module import CephadmOrchestrator, CEPH_DATEFMT
from cephadm.module import CephadmOrchestrator

"""
TODOs:
@@ -202,7 +202,7 @@ def test_daemon_action(self, cephadm_module: CephadmOrchestrator):

# Make sure, _check_daemons does a redeploy due to monmap change:
cephadm_module._store['_ceph_get/mon_map'] = {
'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
'modified': datetime_to_str(datetime_now()),
'fsid': 'foobar',
}
cephadm_module.notify('mon_map', None)
@@ -221,7 +221,7 @@ def test_daemon_action_fail(self, cephadm_module: CephadmOrchestrator):

# Make sure, _check_daemons does a redeploy due to monmap change:
cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
'modified': datetime_to_str(datetime_now()),
'fsid': 'foobar',
})
cephadm_module.notify('mon_map', None)
@@ -301,7 +301,7 @@ def test_daemon_check_post(self, cephadm_module: CephadmOrchestrator):

# Make sure, _check_daemons does a redeploy due to monmap change:
cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
'modified': datetime_to_str(datetime_now()),
'fsid': 'foobar',
})
cephadm_module.notify('mon_map', None)
@@ -512,7 +512,7 @@ def test_remove_osds(self, cephadm_module):
force=False,
hostname='test',
fullname='osd.0',
process_started_at=datetime.datetime.utcnow(),
process_started_at=datetime_now(),
remove_util=cephadm_module.rm_util
))
cephadm_module.rm_util.process_removal_queue()
@@ -877,7 +877,7 @@ def test_etc_ceph(self, _check, _get_connection, cephadm_module):

# Make sure, _check_daemons does a redeploy due to monmap change:
cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
'modified': datetime_to_str(datetime_now()),
'fsid': 'foobar',
})
cephadm_module.notify('mon_map', mock.MagicMock())
15 changes: 7 additions & 8 deletions src/pybind/mgr/cephadm/tests/test_migration.py
@@ -1,12 +1,11 @@
import json
from datetime import datetime

import pytest

from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlacementSpec
from ceph.utils import datetime_to_str, datetime_now
from cephadm import CephadmOrchestrator
from cephadm.inventory import SPEC_STORE_PREFIX
from cephadm.utils import DATEFMT
from cephadm.tests.fixtures import _run_cephadm, cephadm_module, wait, with_host
from orchestrator import OrchestratorError
from cephadm.serve import CephadmServe
@@ -48,8 +47,8 @@ def test_migrate_scheduler(cephadm_module: CephadmOrchestrator):

# Sorry, for this hack, but I need to make sure, Migration thinks,
# we have updated all daemons already.
cephadm_module.cache.last_daemon_update['host1'] = datetime.now()
cephadm_module.cache.last_daemon_update['host2'] = datetime.now()
cephadm_module.cache.last_daemon_update['host1'] = datetime_now()
cephadm_module.cache.last_daemon_update['host2'] = datetime_now()

cephadm_module.migration_current = 0
cephadm_module.migration.migrate()
@@ -72,7 +71,7 @@ def test_migrate_service_id_mon_one(cephadm_module: CephadmOrchestrator):
'hosts': ['host1']
}
},
'created': datetime.utcnow().strftime(DATEFMT),
'created': datetime_to_str(datetime_now()),
}, sort_keys=True),
)

@@ -103,7 +102,7 @@ def test_migrate_service_id_mon_two(cephadm_module: CephadmOrchestrator):
'count': 5,
}
},
'created': datetime.utcnow().strftime(DATEFMT),
'created': datetime_to_str(datetime_now()),
}, sort_keys=True),
)
cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon.wrong', json.dumps({
@@ -114,7 +113,7 @@ def test_migrate_service_id_mon_two(cephadm_module: CephadmOrchestrator):
'hosts': ['host1']
}
},
'created': datetime.utcnow().strftime(DATEFMT),
'created': datetime_to_str(datetime_now()),
}, sort_keys=True),
)

@@ -146,7 +145,7 @@ def test_migrate_service_id_mds_one(cephadm_module: CephadmOrchestrator):
'hosts': ['host1']
}
},
'created': datetime.utcnow().strftime(DATEFMT),
'created': datetime_to_str(datetime_now()),
}, sort_keys=True),
)

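The test changes above stop hand-formatting utcnow() with CEPH_DATEFMT/DATEFMT and instead serialize through the shared helpers, so stored 'created' and 'modified' strings round-trip as aware UTC datetimes. A small usage sketch of that round trip, inlining the assumed helper behavior:

import datetime
import json

DATEFMT = '%Y-%m-%dT%H:%M:%S.%fZ'

# Store a spec with an aware 'created' stamp, the way the fixtures now do.
now = datetime.datetime.now(tz=datetime.timezone.utc)
blob = json.dumps({'created': now.strftime(DATEFMT)}, sort_keys=True)

# Reading it back yields an aware datetime, so comparisons against
# the aware "now" no longer mix naive and aware values.
created = datetime.datetime.strptime(json.loads(blob)['created'], DATEFMT) \
    .replace(tzinfo=datetime.timezone.utc)
assert created.tzinfo is not None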