resource: Move vector of groups into io_queue_topology
This is a continuation of patch scylladb#2 -- the two-step initialization
of io-groups is changed to use one container object instead of two.

Signed-off-by: Pavel Emelyanov <[email protected]>
xemul committed Jun 11, 2021
1 parent c8d3d39 commit 296545a
Showing 3 changed files with 14 additions and 13 deletions.
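
To make the shape of the change concrete, here is a minimal standalone sketch of the post-patch layout (illustrative names only, not Seastar's actual headers): io_queue_topology now owns both the shard-to-group mapping and the group entries, each pairing the shared io_group pointer with a count of attached shards, so the separate nr_groups counter and the duplicate container in device_io_topology become unnecessary.

    #include <memory>
    #include <vector>

    struct io_group_model {};   // stand-in for seastar::io_group

    // Simplified model of io_queue_topology after this patch: one container
    // object carries both the mapping and the group entries themselves.
    struct io_queue_topology_model {
        struct group {
            std::shared_ptr<io_group_model> g;
            unsigned attached = 0;              // shards that joined this group
        };

        std::vector<unsigned> shard_to_group;   // shard id -> index into groups
        std::vector<group> groups;              // replaces the bare nr_groups counter
    };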
include/seastar/core/resource.hh: 14 changes (7 additions, 7 deletions)
@@ -64,9 +64,14 @@ struct memory {
 // This will allow us to easily find who is the IO coordinator for a given
 // node without a trip to a remote CPU.
 struct io_queue_topology {
+    struct group {
+        std::shared_ptr<io_group> g;
+        unsigned attached = 0;
+    };
+
     std::vector<io_queue*> queues;
     std::vector<unsigned> shard_to_group;
-    unsigned nr_groups;
+    std::vector<group> groups;

     io_queue_topology();
     io_queue_topology(const io_queue_topology&) = delete;
@@ -85,15 +90,10 @@ struct resources {
 };

 struct device_io_topology {
-    struct group {
-        std::shared_ptr<io_group> g;
-        unsigned attached = 0;
-    };
     util::spinlock lock;
-    std::vector<group> groups;

     device_io_topology() noexcept = default;
-    device_io_topology(const io_queue_topology& iot) noexcept : groups(iot.nr_groups) {}
+    device_io_topology(const io_queue_topology& iot) noexcept {}
 };

 resources allocate(configuration c);
src/core/reactor.cc: 4 changes (2 additions, 2 deletions)
@@ -3907,9 +3907,9 @@ void smp::configure(boost::program_options::variables_map configuration, reactor

     {
         std::lock_guard _(topology.lock);
-        resource::device_io_topology::group& iog = topology.groups[group_idx];
+        auto& iog = io_info.groups[group_idx];
         if (iog.attached == 0) {
-            struct io_queue::config qcfg = disk_config.generate_config(id, topology.groups.size());
+            struct io_queue::config qcfg = disk_config.generate_config(id, io_info.groups.size());
             iog.g = std::make_shared<io_group>(std::move(qcfg));
             seastar_logger.debug("allocate {} IO group", group_idx);
         }
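
The hunk above keeps the existing two-step initialization, merely pointing it at the merged container: under the topology lock, the first shard to reach a given group index allocates the io_group, and later shards attach to the one already created. A rough standalone model of that pattern, with std::mutex standing in for Seastar's util::spinlock and all names invented for illustration:

    #include <memory>
    #include <mutex>
    #include <vector>

    struct io_group_model {};

    struct group_entry {
        std::shared_ptr<io_group_model> g;
        unsigned attached = 0;
    };

    // First shard to arrive creates the shared group; the rest attach to it.
    std::shared_ptr<io_group_model> attach_to_group(std::mutex& lock,
                                                    std::vector<group_entry>& groups,
                                                    unsigned group_idx) {
        std::lock_guard<std::mutex> guard(lock);
        auto& entry = groups[group_idx];
        if (entry.attached == 0) {
            entry.g = std::make_shared<io_group_model>();   // allocated exactly once
        }
        entry.attached++;
        return entry.g;
    }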
src/core/resource.cc: 9 changes (5 additions, 4 deletions)
@@ -252,7 +252,7 @@ io_queue_topology::~io_queue_topology() {
 io_queue_topology::io_queue_topology(io_queue_topology&& o)
     : queues(std::move(o.queues))
     , shard_to_group(std::move(o.shard_to_group))
-    , nr_groups(std::exchange(o.nr_groups, 0))
+    , groups(std::move(o.groups))
 { }

 }
@@ -414,14 +414,14 @@ allocate_io_queues(hwloc_topology_t& topology, std::vector<cpu> cpus, std::unord

     auto cpu_sets = distribute_objects(topology, num_io_groups);
     ret.queues.resize(cpus.size());
-    ret.nr_groups = 0;
+    unsigned nr_groups = 0;

     // First step: distribute the IO queues given the information returned in cpu_sets.
     // If there is one IO queue per processor, only this loop will be executed.
     std::unordered_map<unsigned, std::vector<unsigned>> node_coordinators;
     for (auto&& cs : cpu_sets()) {
         auto io_coordinator = find_shard(hwloc_bitmap_first(cs));
-        ret.shard_to_group[io_coordinator] = ret.nr_groups++;
+        ret.shard_to_group[io_coordinator] = nr_groups++;

         auto node_id = node_of_shard(io_coordinator);
         if (node_coordinators.count(node_id) == 0) {
@@ -431,6 +431,7 @@ allocate_io_queues(hwloc_topology_t& topology, std::vector<cpu> cpus, std::unord
         numa_nodes[node_id].erase(io_coordinator);
     }

+    ret.groups.resize(nr_groups);

     auto available_nodes = boost::copy_range<std::vector<unsigned>>(node_coordinators | boost::adaptors::map_keys);

@@ -642,7 +643,7 @@ allocate_io_queues(configuration c, std::vector<cpu> cpus) {
     unsigned nr_cpus = unsigned(cpus.size());
     ret.queues.resize(nr_cpus);
     ret.shard_to_group.resize(nr_cpus);
-    ret.nr_groups = 1;
+    ret.groups.resize(1);

     for (unsigned shard = 0; shard < nr_cpus; ++shard) {
         ret.shard_to_group[shard] = 0;
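Both allocate_io_queues() variants now size ret.groups directly instead of maintaining a separate nr_groups field: the hwloc path counts groups while assigning one per IO coordinator and resizes afterwards, and the fallback path above creates a single group that every shard maps to. A small sketch of the resulting invariant, as illustrative code rather than the Seastar implementation:

    #include <cassert>
    #include <vector>

    // Sketch of the single-group fallback: every shard maps to group 0 and
    // groups.size() is the only record of how many groups exist.
    struct topology_model {
        std::vector<unsigned> shard_to_group;
        std::vector<int> groups;                 // element type irrelevant here
    };

    topology_model make_single_group_topology(unsigned nr_cpus) {
        topology_model ret;
        ret.shard_to_group.resize(nr_cpus);
        ret.groups.resize(1);                    // stands in for the old "nr_groups = 1"
        for (unsigned shard = 0; shard < nr_cpus; ++shard) {
            ret.shard_to_group[shard] = 0;       // every shard joins the single group
        }
        // Invariant: every mapped index is a valid position in groups.
        for (unsigned idx : ret.shard_to_group) {
            assert(idx < ret.groups.size());
        }
        return ret;
    }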
