diff --git a/repos/base-hw/src/core/kernel/core_interface.h b/repos/base-hw/src/core/kernel/core_interface.h index 4e606ee1be7..7b22ebf1683 100644 --- a/repos/base-hw/src/core/kernel/core_interface.h +++ b/repos/base-hw/src/core/kernel/core_interface.h @@ -137,10 +137,9 @@ namespace Kernel { * \retval 0 suceeded * \retval !=0 failed */ - inline int start_thread(Thread & thread, unsigned const cpu_id, - Pd & pd, Native_utcb & utcb) + inline int start_thread(Thread & thread, Pd & pd, Native_utcb & utcb) { - return (int)call(call_id_start_thread(), (Call_arg)&thread, cpu_id, + return (int)call(call_id_start_thread(), (Call_arg)&thread, (Call_arg)&pd, (Call_arg)&utcb); } diff --git a/repos/base-hw/src/core/kernel/cpu.cc b/repos/base-hw/src/core/kernel/cpu.cc index 1644f86e346..774e6775312 100644 --- a/repos/base-hw/src/core/kernel/cpu.cc +++ b/repos/base-hw/src/core/kernel/cpu.cc @@ -27,35 +27,35 @@ using namespace Kernel; -/************* - ** Cpu_job ** - *************/ +/***************** + ** Cpu_context ** + *****************/ -void Cpu_job::_activate() { _cpu->schedule(this); } +void Cpu_context::_activate() { _cpu().schedule(*this); } -void Cpu_job::_deactivate() +void Cpu_context::_deactivate() { - assert(_cpu->id() == Cpu::executing_id()); - _cpu->scheduler().unready(*this); + assert(_cpu().id() == Cpu::executing_id()); + _cpu().scheduler().unready(*this); } -void Cpu_job::_yield() +void Cpu_context::_yield() { - assert(_cpu->id() == Cpu::executing_id()); - _cpu->scheduler().yield(); + assert(_cpu().id() == Cpu::executing_id()); + _cpu().scheduler().yield(); } -void Cpu_job::_interrupt(Irq::Pool &user_irq_pool, unsigned const /* cpu_id */) +void Cpu_context::_interrupt(Irq::Pool &user_irq_pool) { /* let the IRQ controller take a pending IRQ for handling, if any */ unsigned irq_id; - if (_cpu->pic().take_request(irq_id)) + if (_cpu().pic().take_request(irq_id)) - /* let the CPU of this job handle the IRQ if it is a CPU-local one */ - if 
(!_cpu->handle_if_cpu_local_interrupt(irq_id)) { + /* let the CPU of this context handle the IRQ if it is a CPU-local one */ + if (!_cpu().handle_if_cpu_local_interrupt(irq_id)) { /* it isn't a CPU-local IRQ, so, it must be a user IRQ */ User_irq * irq = User_irq::object(user_irq_pool, irq_id); @@ -64,38 +64,37 @@ void Cpu_job::_interrupt(Irq::Pool &user_irq_pool, unsigned const /* cpu_id */) } /* let the IRQ controller finish the currently taken IRQ */ - _cpu->pic().finish_request(); + _cpu().pic().finish_request(); } -void Cpu_job::affinity(Cpu &cpu) +void Cpu_context::affinity(Cpu &cpu) { - _cpu = &cpu; - _cpu->scheduler().insert(*this); + _cpu().scheduler().remove(*this); + _cpu_ptr = &cpu; + _cpu().scheduler().insert(*this); } -void Cpu_job::quota(unsigned const q) +void Cpu_context::quota(unsigned const q) { - if (_cpu) - _cpu->scheduler().quota(*this, q); - else - Context::quota(q); + _cpu().scheduler().quota(*this, q); } -Cpu_job::Cpu_job(Priority const p, unsigned const q) +Cpu_context::Cpu_context(Cpu &cpu, + Priority const priority, + unsigned const quota) : - Context(p, q), _cpu(0) -{ } + Context(priority, quota), _cpu_ptr(&cpu) +{ + _cpu().scheduler().insert(*this); +} -Cpu_job::~Cpu_job() +Cpu_context::~Cpu_context() { - if (!_cpu) - return; - - _cpu->scheduler().remove(*this); + _cpu().scheduler().remove(*this); } @@ -112,19 +111,17 @@ Cpu::Idle_thread::Idle_thread(Board::Address_space_id_allocator &addr_space_id_a Cpu &cpu, Pd &core_pd) : - Thread { addr_space_id_alloc, user_irq_pool, cpu_pool, core_pd, - Priority::min(), 0, "idle", Thread::IDLE } + Thread { addr_space_id_alloc, user_irq_pool, cpu_pool, cpu, + core_pd, Priority::min(), 0, "idle", Thread::IDLE } { regs->ip = (addr_t)&idle_thread_main; - - affinity(cpu); Thread::_pd = &core_pd; } -void Cpu::schedule(Job * const job) +void Cpu::schedule(Context &context) { - _scheduler.ready(*static_cast(job)); + _scheduler.ready(static_cast(context)); if (_id != executing_id() && 
_scheduler.need_to_schedule()) trigger_ip_interrupt(); } @@ -142,26 +139,26 @@ bool Cpu::handle_if_cpu_local_interrupt(unsigned const irq_id) } -Cpu_job & Cpu::schedule() +Cpu::Context & Cpu::handle_exception_and_schedule() { - /* update scheduler */ - Job & old_job = scheduled_job(); - old_job.exception(*this); + Context &context = current_context(); + context.exception(); if (_state == SUSPEND || _state == HALT) return _halt_job; + /* update schedule if necessary */ if (_scheduler.need_to_schedule()) { _timer.process_timeouts(); _scheduler.update(_timer.time()); time_t t = _scheduler.current_time_left(); _timer.set_timeout(&_timeout, t); time_t duration = _timer.schedule_timeout(); - old_job.update_execution_time(duration); + context.update_execution_time(duration); } - /* return new job */ - return scheduled_job(); + /* return current context */ + return current_context(); } diff --git a/repos/base-hw/src/core/kernel/cpu.h b/repos/base-hw/src/core/kernel/cpu.h index fb36d73afd9..e7bf8ff30c1 100644 --- a/repos/base-hw/src/core/kernel/cpu.h +++ b/repos/base-hw/src/core/kernel/cpu.h @@ -39,9 +39,11 @@ namespace Kernel { class Kernel::Cpu : public Core::Cpu, private Irq::Pool, public Genode::List::Element { - private: + public: - using Job = Cpu_job; + using Context = Cpu_context; + + private: /** * Inter-processor-interrupt object of the cpu @@ -83,13 +85,14 @@ class Kernel::Cpu : public Core::Cpu, private Irq::Pool, Pd &core_pd); }; - struct Halt_job : Job + struct Halt_job : Cpu_context { - Halt_job() : Job (0, 0) { } + Halt_job(Cpu &cpu) + : Cpu_context(cpu, 0, 0) { } - void exception(Kernel::Cpu &) override { } - void proceed(Kernel::Cpu &) override; - } _halt_job { }; + void exception() override { } + void proceed() override; + } _halt_job { *this }; enum State { RUN, HALT, SUSPEND }; @@ -140,14 +143,14 @@ class Kernel::Cpu : public Core::Cpu, private Irq::Pool, bool handle_if_cpu_local_interrupt(unsigned const irq_id); /** - * Schedule 'job' at this CPU + * 
Schedule 'context' at this CPU */ - void schedule(Job * const job); + void schedule(Context& context); /** - * Return the job that should be executed at next + * Return the context that should be executed next */ - Cpu_job& schedule(); + Context& handle_exception_and_schedule(); Board::Pic & pic() { return _pic; } Timer & timer() { return _timer; } @@ -155,10 +158,10 @@ class Kernel::Cpu : public Core::Cpu, private Irq::Pool, addr_t stack_start(); /** - * Returns the currently active job + * Returns the currently scheduled context */ - Job & scheduled_job() { - return static_cast(_scheduler.current().helping_destination()); } + Context & current_context() { + return static_cast(_scheduler.current().helping_destination()); } unsigned id() const { return _id; } Scheduler &scheduler() { return _scheduler; } diff --git a/repos/base-hw/src/core/kernel/cpu_context.h b/repos/base-hw/src/core/kernel/cpu_context.h index 94ddbe29b64..ad062bc0978 100644 --- a/repos/base-hw/src/core/kernel/cpu_context.h +++ b/repos/base-hw/src/core/kernel/cpu_context.h @@ -22,45 +22,38 @@ namespace Kernel { class Cpu; - - /** - * Context of a job (thread, VM, idle) that shall be executed by a CPU - */ - class Cpu_job; + class Cpu_context; } -class Kernel::Cpu_job : private Scheduler::Context +/** + * Context (thread, vcpu) that shall be executed by a CPU + */ +class Kernel::Cpu_context : private Scheduler::Context { private: - friend class Cpu; /* static_cast from 'Scheduler::Context' to 'Cpu_job' */ + friend class Cpu; time_t _execution_time { 0 }; + Cpu *_cpu_ptr; /* * Noncopyable */ - Cpu_job(Cpu_job const &); - Cpu_job &operator = (Cpu_job const &); + Cpu_context(Cpu_context const &); + Cpu_context &operator = (Cpu_context const &); protected: - Cpu * _cpu; + Cpu &_cpu() const { return *_cpu_ptr; } /** - * Handle interrupt exception that occured during execution on CPU 'id' + * Handle interrupt exception */ - void _interrupt(Irq::Pool &user_irq_pool, unsigned const id); + void 
_interrupt(Irq::Pool &user_irq_pool); - /** - * Activate our own CPU-share - */ void _activate(); - - /** - * Deactivate our own CPU-share - */ void _deactivate(); /** @@ -69,47 +62,34 @@ class Kernel::Cpu_job : private Scheduler::Context void _yield(); /** - * Return wether we are allowed to help job 'j' with our CPU-share + * Return possibility to help context 'j' scheduling-wise */ - bool _helping_possible(Cpu_job const &j) const { return j._cpu == _cpu; } + bool _helping_possible(Cpu_context const &j) const { + return j._cpu_ptr == _cpu_ptr; } + + void _help(Cpu_context &context) { Context::help(context); } using Context::ready; using Context::helping_finished; - void help(Cpu_job &job) { Context::help(job); } - public: using Context = Scheduler::Context; using Priority = Scheduler::Priority; - /** - * Handle exception that occured during execution on CPU 'id' - */ - virtual void exception(Cpu & cpu) = 0; - - /** - * Continue execution on CPU 'id' - */ - virtual void proceed(Cpu & cpu) = 0; - - /** - * Construct a job with scheduling priority 'p' and time quota 'q' - */ - Cpu_job(Priority const p, unsigned const q); + Cpu_context(Cpu &cpu, + Priority const priority, + unsigned const quota); - /** - * Destructor - */ - virtual ~Cpu_job(); + virtual ~Cpu_context(); /** - * Link job to CPU 'cpu' + * Link context to CPU 'cpu' */ void affinity(Cpu &cpu); /** - * Set CPU quota of the job to 'q' + * Set CPU quota of the context to 'q' */ void quota(unsigned const q); @@ -123,12 +103,15 @@ class Kernel::Cpu_job : private Scheduler::Context */ time_t execution_time() const { return _execution_time; } + /** + * Handle exception that occurred during execution of this context + */ + virtual void exception() = 0; - /*************** - ** Accessors ** - ***************/ - - void cpu(Cpu &cpu) { _cpu = &cpu; } + /** + * Continue execution of this context + */ + virtual void proceed() = 0; }; #endif /* _CORE__KERNEL__CPU_CONTEXT_H_ */ diff --git 
a/repos/base-hw/src/core/kernel/inter_processor_work.h b/repos/base-hw/src/core/kernel/inter_processor_work.h index f2791ccac7e..3a4d078a658 100644 --- a/repos/base-hw/src/core/kernel/inter_processor_work.h +++ b/repos/base-hw/src/core/kernel/inter_processor_work.h @@ -11,8 +11,8 @@ * under the terms of the GNU Affero General Public License version 3. */ -#ifndef _CORE__KERNEL__SMP_H_ -#define _CORE__KERNEL__SMP_H_ +#ifndef _CORE__KERNEL__INTER_PROCESSOR_WORK_H_ +#define _CORE__KERNEL__INTER_PROCESSOR_WORK_H_ #include @@ -32,11 +32,11 @@ class Kernel::Inter_processor_work : Genode::Interface { public: - virtual void execute(Cpu &) = 0; + virtual void execute(Cpu & cpu) = 0; protected: Genode::List_element _le { this }; }; -#endif /* _CORE__KERNEL__SMP_H_ */ +#endif /* _CORE__KERNEL__INTER_PROCESSOR_WORK_H_ */ diff --git a/repos/base-hw/src/core/kernel/main.cc b/repos/base-hw/src/core/kernel/main.cc index e14b17f1c54..a66f1405ce8 100644 --- a/repos/base-hw/src/core/kernel/main.cc +++ b/repos/base-hw/src/core/kernel/main.cc @@ -63,16 +63,16 @@ Kernel::Main *Kernel::Main::_instance; void Kernel::Main::_handle_kernel_entry() { - Cpu &cpu = _cpu_pool.cpu(Cpu::executing_id()); - Cpu_job * new_job; + Cpu::Context * context; { Lock::Guard guard(_data_lock); - new_job = &cpu.schedule(); + context = + &_cpu_pool.cpu(Cpu::executing_id()).handle_exception_and_schedule(); } - new_job->proceed(cpu); + context->proceed(); } diff --git a/repos/base-hw/src/core/kernel/thread.cc b/repos/base-hw/src/core/kernel/thread.cc index 000133b9dcb..31fa14f56d3 100644 --- a/repos/base-hw/src/core/kernel/thread.cc +++ b/repos/base-hw/src/core/kernel/thread.cc @@ -163,7 +163,7 @@ Thread::Destroy::Destroy(Thread & caller, Core::Kernel_object & to_delet : caller(caller), thread_to_destroy(to_delete) { - thread_to_destroy->_cpu->work_list().insert(&_le); + thread_to_destroy->_cpu().work_list().insert(&_le); caller._become_inactive(AWAITS_RESTART); } @@ -171,7 +171,7 @@ 
Thread::Destroy::Destroy(Thread & caller, Core::Kernel_object & to_delet void Thread::Destroy::execute(Cpu &) { - thread_to_destroy->_cpu->work_list().remove(&_le); + thread_to_destroy->_cpu().work_list().remove(&_le); thread_to_destroy.destruct(); caller._restart(); } @@ -266,14 +266,14 @@ void Thread::ipc_await_request_failed() void Thread::_become_active() { - if (_state != ACTIVE && !_paused) Cpu_job::_activate(); + if (_state != ACTIVE && !_paused) Cpu_context::_activate(); _state = ACTIVE; } void Thread::_become_inactive(State const s) { - if (_state == ACTIVE && !_paused) Cpu_job::_deactivate(); + if (_state == ACTIVE && !_paused) Cpu_context::_deactivate(); _state = s; } @@ -287,7 +287,7 @@ size_t Thread::_core_to_kernel_quota(size_t const quota) const /* we assert at timer construction that cpu_quota_us in ticks fits size_t */ size_t const ticks = (size_t) - _cpu->timer().us_to_ticks(Kernel::cpu_quota_us); + _cpu().timer().us_to_ticks(Kernel::cpu_quota_us); return Cpu_session::quota_lim_downscale(quota, ticks); } @@ -295,24 +295,20 @@ size_t Thread::_core_to_kernel_quota(size_t const quota) const void Thread::_call_thread_quota() { Thread * const thread = (Thread *)user_arg_1(); - thread->Cpu_job::quota((unsigned)(_core_to_kernel_quota(user_arg_2()))); + thread->Cpu_context::quota((unsigned)(_core_to_kernel_quota(user_arg_2()))); } void Thread::_call_start_thread() { - /* lookup CPU */ - Cpu & cpu = _cpu_pool.cpu((unsigned)user_arg_2()); user_arg_0(0); Thread &thread = *(Thread*)user_arg_1(); assert(thread._state == AWAITS_START); - thread.affinity(cpu); - /* join protection domain */ - thread._pd = (Pd *) user_arg_3(); - thread._ipc_init(*(Native_utcb *)user_arg_4(), *this); + thread._pd = (Pd *) user_arg_2(); + thread._ipc_init(*(Native_utcb *)user_arg_3(), *this); /* * Sanity check core threads! @@ -326,7 +322,8 @@ void Thread::_call_start_thread() * semantic changes, and additional core threads are started * across cpu cores. 
*/ - if (thread._pd == &_core_pd && cpu.id() != _cpu_pool.primary_cpu().id()) + if (thread._pd == &_core_pd && + thread._cpu().id() != _cpu_pool.primary_cpu().id()) Genode::raw("Error: do not start core threads" " on CPU cores different than boot cpu"); @@ -421,7 +418,7 @@ void Thread::_cancel_blocking() void Thread::_call_yield_thread() { - Cpu_job::_yield(); + Cpu_context::_yield(); } @@ -431,12 +428,11 @@ void Thread::_call_delete_thread() *(Core::Kernel_object*)user_arg_1(); /** - * Delete a thread immediately if it has no cpu assigned yet, - * or it is assigned to this cpu, or the assigned cpu did not scheduled it. + * Delete a thread immediately if it is assigned to this cpu, + * or the assigned cpu did not schedule it. */ - if (!to_delete->_cpu || - (to_delete->_cpu->id() == Cpu::executing_id() || - &to_delete->_cpu->scheduled_job() != &*to_delete)) { + if (to_delete->_cpu().id() == Cpu::executing_id() || + &to_delete->_cpu().current_context() != &*to_delete) { _call_delete(); return; } @@ -445,7 +441,7 @@ * Construct a cross-cpu work item and send an IPI */ _destroy.construct(*this, to_delete); - to_delete->_cpu->trigger_ip_interrupt(); + to_delete->_cpu().trigger_ip_interrupt(); } @@ -454,8 +450,8 @@ void Thread::_call_delete_pd() { Core::Kernel_object & pd = *(Core::Kernel_object*)user_arg_1(); - if (_cpu->active(pd->mmu_regs)) - _cpu->switch_to(_core_pd.mmu_regs); + if (_cpu().active(pd->mmu_regs)) + _cpu().switch_to(_core_pd.mmu_regs); _call_delete(); } @@ -480,7 +476,7 @@ void Thread::_call_await_request_msg() void Thread::_call_timeout() { - Timer & t = _cpu->timer(); + Timer & t = _cpu().timer(); _timeout_sigid = (Kernel::capid_t)user_arg_2(); t.set_timeout(this, t.us_to_ticks(user_arg_1())); } @@ -488,13 +484,13 @@ void Thread::_call_timeout_max_us() { - user_ret_time(_cpu->timer().timeout_max_us()); + user_ret_time(_cpu().timer().timeout_max_us()); } void Thread::_call_time() { - Timer 
& t = _cpu->timer(); + Timer & t = _cpu().timer(); user_ret_time(t.ticks_to_us(t.time())); } @@ -521,7 +517,7 @@ void Thread::_call_send_request_msg() _become_inactive(DEAD); return; } - bool const help = Cpu_job::_helping_possible(*dst); + bool const help = Cpu_context::_helping_possible(*dst); oir = oir->find(dst->pd()); if (!_ipc_node.ready_to_send()) { @@ -533,7 +529,7 @@ void Thread::_call_send_request_msg() } _state = AWAITS_IPC; - if (help) Cpu_job::help(*dst); + if (help) Cpu_context::_help(*dst); if (!help || !dst->ready()) _deactivate(); } @@ -702,7 +698,7 @@ void Thread::_call_new_irq() (Genode::Irq_session::Polarity) (user_arg_3() & 0b11); _call_new((unsigned)user_arg_2(), trigger, polarity, *c, - _cpu->pic(), _user_irq_pool); + _cpu().pic(), _user_irq_pool); } @@ -846,13 +842,15 @@ void Thread::_call() switch (call_id) { case call_id_new_thread(): _call_new(_addr_space_id_alloc, _user_irq_pool, _cpu_pool, - _core_pd, (unsigned) user_arg_2(), - (unsigned) _core_to_kernel_quota(user_arg_3()), - (char const *) user_arg_4(), USER); + _cpu_pool.cpu((unsigned)user_arg_2()), + _core_pd, (unsigned) user_arg_3(), + (unsigned) _core_to_kernel_quota(user_arg_4()), + (char const *) user_arg_5(), USER); return; case call_id_new_core_thread(): _call_new(_addr_space_id_alloc, _user_irq_pool, _cpu_pool, - _core_pd, (char const *) user_arg_2()); + _cpu_pool.cpu((unsigned)user_arg_2()), + _core_pd, (char const *) user_arg_3()); return; case call_id_thread_quota(): _call_thread_quota(); return; case call_id_delete_thread(): _call_delete_thread(); return; @@ -938,6 +936,7 @@ void Thread::_exception() Thread::Thread(Board::Address_space_id_allocator &addr_space_id_alloc, Irq::Pool &user_irq_pool, Cpu_pool &cpu_pool, + Cpu &cpu, Pd &core_pd, unsigned const priority, unsigned const quota, @@ -945,7 +944,7 @@ Thread::Thread(Board::Address_space_id_allocator &addr_space_id_alloc, Type type) : Kernel::Object { *this }, - Cpu_job { priority, quota }, + Cpu_context { cpu, 
priority, quota }, _addr_space_id_alloc { addr_space_id_alloc }, _user_irq_pool { user_irq_pool }, _cpu_pool { cpu_pool }, @@ -982,8 +981,8 @@ Core_main_thread(Board::Address_space_id_allocator &addr_space_id_alloc, Cpu_pool &cpu_pool, Pd &core_pd) : - Core_object( - core_pd, addr_space_id_alloc, user_irq_pool, cpu_pool, core_pd, "core") + Core_object(core_pd, addr_space_id_alloc, user_irq_pool, cpu_pool, + cpu_pool.primary_cpu(), core_pd, "core") { using namespace Core; @@ -999,7 +998,6 @@ Core_main_thread(Board::Address_space_id_allocator &addr_space_id_alloc, regs->sp = (addr_t)&__initial_stack_base[0] + DEFAULT_STACK_SIZE; regs->ip = (addr_t)&_core_start; - affinity(_cpu_pool.primary_cpu()); _utcb = &_utcb_instance; Thread::_pd = &core_pd; _become_active(); diff --git a/repos/base-hw/src/core/kernel/thread.h b/repos/base-hw/src/core/kernel/thread.h index 41e3854a66e..12a8e9ea1e6 100644 --- a/repos/base-hw/src/core/kernel/thread.h +++ b/repos/base-hw/src/core/kernel/thread.h @@ -53,7 +53,7 @@ struct Kernel::Thread_fault /** * Kernel back-end for userland execution-contexts */ -class Kernel::Thread : private Kernel::Object, public Cpu_job, private Timeout +class Kernel::Thread : private Kernel::Object, public Cpu_context, private Timeout { public: @@ -331,6 +331,7 @@ class Kernel::Thread : private Kernel::Object, public Cpu_job, private Timeout Thread(Board::Address_space_id_allocator &addr_space_id_alloc, Irq::Pool &user_irq_pool, Cpu_pool &cpu_pool, + Cpu &cpu, Pd &core_pd, unsigned const priority, unsigned const quota, @@ -345,11 +346,12 @@ class Kernel::Thread : private Kernel::Object, public Cpu_job, private Timeout Thread(Board::Address_space_id_allocator &addr_space_id_alloc, Irq::Pool &user_irq_pool, Cpu_pool &cpu_pool, + Cpu &cpu, Pd &core_pd, char const *const label) : - Thread(addr_space_id_alloc, user_irq_pool, cpu_pool, core_pd, - Scheduler::Priority::min(), 0, label, CORE) + Thread(addr_space_id_alloc, user_irq_pool, cpu_pool, cpu, + core_pd, 
Scheduler::Priority::min(), 0, label, CORE) { } ~Thread(); @@ -386,13 +388,14 @@ class Kernel::Thread : private Kernel::Object, public Cpu_job, private Timeout * \retval capability id of the new kernel object */ static capid_t syscall_create(Core::Kernel_object &t, + unsigned const cpu_id, unsigned const priority, size_t const quota, char const * const label) { return (capid_t)call(call_id_new_thread(), (Call_arg)&t, - (Call_arg)priority, (Call_arg)quota, - (Call_arg)label); + (Call_arg)cpu_id, (Call_arg)priority, + (Call_arg)quota, (Call_arg)label); } /** @@ -404,10 +407,11 @@ class Kernel::Thread : private Kernel::Object, public Cpu_job, private Timeout * \retval capability id of the new kernel object */ static capid_t syscall_create(Core::Kernel_object &t, + unsigned const cpu_id, char const * const label) { return (capid_t)call(call_id_new_core_thread(), (Call_arg)&t, - (Call_arg)label); + (Call_arg)cpu_id, (Call_arg)label); } /** @@ -444,12 +448,12 @@ class Kernel::Thread : private Kernel::Object, public Cpu_job, private Timeout void signal_receive_signal(void * const base, size_t const size); - /************* - ** Cpu_job ** - *************/ + /***************** + ** Cpu_context ** + *****************/ - void exception(Cpu & cpu) override; - void proceed(Cpu & cpu) override; + void exception() override; + void proceed() override; /************* diff --git a/repos/base-hw/src/core/kernel/vm.h b/repos/base-hw/src/core/kernel/vm.h index a77540a397c..75be17063de 100644 --- a/repos/base-hw/src/core/kernel/vm.h +++ b/repos/base-hw/src/core/kernel/vm.h @@ -31,7 +31,7 @@ namespace Kernel { } -class Kernel::Vm : private Kernel::Object, public Cpu_job +class Kernel::Vm : private Kernel::Object, public Cpu_context { public: @@ -66,7 +66,7 @@ class Kernel::Vm : private Kernel::Object, public Cpu_job void _pause_vcpu() { if (_scheduled != INACTIVE) - Cpu_job::_deactivate(); + Cpu_context::_deactivate(); _scheduled = INACTIVE; } @@ -135,7 +135,7 @@ class Kernel::Vm : 
private Kernel::Object, public Cpu_job void run() { _sync_from_vmm(); - if (_scheduled != ACTIVE) Cpu_job::_activate(); + if (_scheduled != ACTIVE) Cpu_context::_activate(); _scheduled = ACTIVE; } @@ -146,12 +146,12 @@ class Kernel::Vm : private Kernel::Object, public Cpu_job } - /************* - ** Cpu_job ** - *************/ + /***************** + ** Cpu_context ** + *****************/ - void exception(Cpu & cpu) override; - void proceed(Cpu & cpu) override; + void exception() override; + void proceed() override; }; #endif /* _CORE__KERNEL__VM_H_ */ diff --git a/repos/base-hw/src/core/platform_thread.cc b/repos/base-hw/src/core/platform_thread.cc index 7ae23acd041..6cf4aec8084 100644 --- a/repos/base-hw/src/core/platform_thread.cc +++ b/repos/base-hw/src/core/platform_thread.cc @@ -68,7 +68,7 @@ Platform_thread::Platform_thread(Label const &label, Native_utcb &utcb) _utcb_pd_addr(&utcb), _main_thread(false), _location(Affinity::Location()), - _kobj(_kobj.CALLED_FROM_CORE, _label.string()) + _kobj(_kobj.CALLED_FROM_CORE, _location.xpos(), _label.string()) { /* create UTCB for a core thread */ platform().ram_alloc().try_alloc(sizeof(Native_utcb)).with_result( @@ -101,7 +101,8 @@ Platform_thread::Platform_thread(Platform_pd &pd, _quota((unsigned)quota), _main_thread(!pd.has_any_thread), _location(location), - _kobj(_kobj.CALLED_FROM_CORE, _priority, _quota, _label.string()) + _kobj(_kobj.CALLED_FROM_CORE, _location.xpos(), + _priority, _quota, _label.string()) { try { _utcb = core_env().pd_session()->alloc(sizeof(Native_utcb), CACHED); @@ -164,8 +165,6 @@ void Platform_thread::start(void * const ip, void * const sp) _kobj->regs->ip = reinterpret_cast(ip); _kobj->regs->sp = reinterpret_cast(sp); - /* start executing new thread */ - unsigned const cpu = _location.xpos(); Native_utcb &utcb = *Thread::myself()->utcb(); @@ -176,7 +175,9 @@ void Platform_thread::start(void * const ip, void * const sp) utcb.cap_add(Capability_space::capid(_pd.parent())); 
utcb.cap_add(Capability_space::capid(_utcb)); } - Kernel::start_thread(*_kobj, cpu, _pd.kernel_pd(), *_utcb_core_addr); + + /* start executing new thread */ + Kernel::start_thread(*_kobj, _pd.kernel_pd(), *_utcb_core_addr); } diff --git a/repos/base-hw/src/core/spec/arm/kernel/thread.cc b/repos/base-hw/src/core/spec/arm/kernel/thread.cc index c353745e469..39b3e2030a4 100644 --- a/repos/base-hw/src/core/spec/arm/kernel/thread.cc +++ b/repos/base-hw/src/core/spec/arm/kernel/thread.cc @@ -23,32 +23,35 @@ using namespace Kernel; -extern "C" void kernel_to_user_context_switch(Cpu::Context*, Cpu::Fpu_context*); +extern "C" void kernel_to_user_context_switch(Core::Cpu::Context*, + Core::Cpu::Fpu_context*); void Thread::_call_suspend() { } -void Thread::exception(Cpu & cpu) +void Thread::exception() { + using Ctx = Core::Cpu::Context; + switch (regs->cpu_exception) { - case Cpu::Context::SUPERVISOR_CALL: + case Ctx::SUPERVISOR_CALL: _call(); return; - case Cpu::Context::PREFETCH_ABORT: - case Cpu::Context::DATA_ABORT: + case Ctx::PREFETCH_ABORT: + case Ctx::DATA_ABORT: _mmu_exception(); return; - case Cpu::Context::INTERRUPT_REQUEST: - case Cpu::Context::FAST_INTERRUPT_REQUEST: - _interrupt(_user_irq_pool, cpu.id()); + case Ctx::INTERRUPT_REQUEST: + case Ctx::FAST_INTERRUPT_REQUEST: + _interrupt(_user_irq_pool); return; - case Cpu::Context::UNDEFINED_INSTRUCTION: + case Ctx::UNDEFINED_INSTRUCTION: Genode::raw(*this, ": undefined instruction at ip=", Genode::Hex(regs->ip)); _die(); return; - case Cpu::Context::RESET: + case Ctx::RESET: return; default: Genode::raw(*this, ": triggered an unknown exception ", @@ -71,17 +74,17 @@ void Kernel::Thread::Tlb_invalidation::execute(Cpu &) { } void Thread::Flush_and_stop_cpu::execute(Cpu &) { } -void Cpu::Halt_job::proceed(Kernel::Cpu &) { } +void Cpu::Halt_job::proceed() { } -void Thread::proceed(Cpu & cpu) +void Thread::proceed() { - if (!cpu.active(pd().mmu_regs) && type() != CORE) - cpu.switch_to(pd().mmu_regs); + if 
(!_cpu().active(pd().mmu_regs) && type() != CORE) + _cpu().switch_to(pd().mmu_regs); - regs->cpu_exception = cpu.stack_start(); - kernel_to_user_context_switch((static_cast(&*regs)), - (static_cast(&*regs))); + regs->cpu_exception = _cpu().stack_start(); + kernel_to_user_context_switch((static_cast(&*regs)), + (static_cast(&*regs))); } diff --git a/repos/base-hw/src/core/spec/arm_v7/trustzone/kernel/vm.cc b/repos/base-hw/src/core/spec/arm_v7/trustzone/kernel/vm.cc index 16fddfcb6d3..54256ead92a 100644 --- a/repos/base-hw/src/core/spec/arm_v7/trustzone/kernel/vm.cc +++ b/repos/base-hw/src/core/spec/arm_v7/trustzone/kernel/vm.cc @@ -28,14 +28,13 @@ Vm::Vm(Irq::Pool & user_irq_pool, Identity & id) : Kernel::Object { *this }, - Cpu_job(Scheduler::Priority::min(), 0), + Cpu_context(cpu, Scheduler::Priority::min(), 0), _user_irq_pool(user_irq_pool), _state(data), _context(context), _id(id), _vcpu_context(cpu) { - affinity(cpu); /* once constructed, exit with a startup exception */ pause(); _state.cpu_exception = Genode::VCPU_EXCEPTION_STARTUP; @@ -46,12 +45,12 @@ Vm::Vm(Irq::Pool & user_irq_pool, Vm::~Vm() {} -void Vm::exception(Cpu & cpu) +void Vm::exception() { switch(_state.cpu_exception) { case Genode::Cpu_state::INTERRUPT_REQUEST: [[fallthrough]]; case Genode::Cpu_state::FAST_INTERRUPT_REQUEST: - _interrupt(_user_irq_pool, cpu.id()); + _interrupt(_user_irq_pool); return; case Genode::Cpu_state::DATA_ABORT: _state.dfar = Cpu::Dfar::read(); @@ -69,19 +68,19 @@ bool secure_irq(unsigned const i); extern "C" void monitor_mode_enter_normal_world(Genode::Vcpu_state&, void*); -void Vm::proceed(Cpu & cpu) +void Vm::proceed() { unsigned const irq = _state.irq_injection; if (irq) { - if (cpu.pic().secure(irq)) { + if (_cpu().pic().secure(irq)) { Genode::raw("Refuse to inject secure IRQ into VM"); } else { - cpu.pic().trigger(irq); + _cpu().pic().trigger(irq); _state.irq_injection = 0; } } - monitor_mode_enter_normal_world(_state, (void*) cpu.stack_start()); + 
monitor_mode_enter_normal_world(_state, (void*) _cpu().stack_start()); } diff --git a/repos/base-hw/src/core/spec/arm_v7/virtualization/kernel/vm.cc b/repos/base-hw/src/core/spec/arm_v7/virtualization/kernel/vm.cc index c8f5ece998f..e533e622578 100644 --- a/repos/base-hw/src/core/spec/arm_v7/virtualization/kernel/vm.cc +++ b/repos/base-hw/src/core/spec/arm_v7/virtualization/kernel/vm.cc @@ -101,7 +101,7 @@ void Board::Vcpu_context::Vm_irq::handle(Vm & vm, unsigned irq) { void Board::Vcpu_context::Vm_irq::occurred() { - Vm *vm = dynamic_cast(&_cpu.scheduled_job()); + Vm *vm = dynamic_cast(&_cpu.current_context()); if (!vm) Genode::raw("VM interrupt while VM is not runnning!"); else handle(*vm, _irq_nr); } @@ -140,14 +140,13 @@ Kernel::Vm::Vm(Irq::Pool & user_irq_pool, Identity & id) : Kernel::Object { *this }, - Cpu_job(Scheduler::Priority::min(), 0), + Cpu_context(cpu, Scheduler::Priority::min(), 0), _user_irq_pool(user_irq_pool), _state(data), _context(context), _id(id), _vcpu_context(cpu) { - affinity(cpu); /* once constructed, exit with a startup exception */ pause(); _state.cpu_exception = Genode::VCPU_EXCEPTION_STARTUP; @@ -164,29 +163,29 @@ Kernel::Vm::~Vm() } -void Kernel::Vm::exception(Cpu & cpu) +void Kernel::Vm::exception() { switch(_state.cpu_exception) { case Genode::Cpu_state::INTERRUPT_REQUEST: case Genode::Cpu_state::FAST_INTERRUPT_REQUEST: - _interrupt(_user_irq_pool, cpu.id()); + _interrupt(_user_irq_pool); break; default: pause(); _context.submit(1); } - if (cpu.pic().ack_virtual_irq(_vcpu_context.pic)) + if (_cpu().pic().ack_virtual_irq(_vcpu_context.pic)) inject_irq(Board::VT_MAINTAINANCE_IRQ); _vcpu_context.vtimer_irq.disable(); } -void Kernel::Vm::proceed(Cpu & cpu) +void Kernel::Vm::proceed() { if (_state.timer.irq) _vcpu_context.vtimer_irq.enable(); - cpu.pic().insert_virtual_irq(_vcpu_context.pic, _state.irqs.virtual_irq); + _cpu().pic().insert_virtual_irq(_vcpu_context.pic, _state.irqs.virtual_irq); /* * the following values have to be 
enforced by the hypervisor @@ -202,7 +201,7 @@ void Kernel::Vm::proceed(Cpu & cpu) _state.esr_el2 = Cpu::Hstr::init(); _state.hpfar_el2 = Cpu::Hcr::init(); - Hypervisor::switch_world(_state, host_context(cpu)); + Hypervisor::switch_world(_state, host_context(_cpu())); } diff --git a/repos/base-hw/src/core/spec/arm_v8/kernel/thread.cc b/repos/base-hw/src/core/spec/arm_v8/kernel/thread.cc index f4445005e9e..171d0ad9858 100644 --- a/repos/base-hw/src/core/spec/arm_v8/kernel/thread.cc +++ b/repos/base-hw/src/core/spec/arm_v8/kernel/thread.cc @@ -27,7 +27,7 @@ using namespace Kernel; void Thread::_call_suspend() { } -void Thread::exception(Cpu & cpu) +void Thread::exception() { switch (regs->exception_type) { case Cpu::RESET: return; @@ -35,7 +35,7 @@ void Thread::exception(Cpu & cpu) case Cpu::IRQ_LEVEL_EL1: [[fallthrough]]; case Cpu::FIQ_LEVEL_EL0: [[fallthrough]]; case Cpu::FIQ_LEVEL_EL1: - _interrupt(_user_irq_pool, cpu.id()); + _interrupt(_user_irq_pool); return; case Cpu::SYNC_LEVEL_EL0: [[fallthrough]]; case Cpu::SYNC_LEVEL_EL1: @@ -94,51 +94,51 @@ void Kernel::Thread::Tlb_invalidation::execute(Cpu &) { } void Thread::Flush_and_stop_cpu::execute(Cpu &) { } -void Cpu::Halt_job::proceed(Kernel::Cpu &) { } + void Cpu::Halt_job::proceed() { } -bool Kernel::Pd::invalidate_tlb(Cpu & cpu, addr_t addr, size_t size) -{ - using namespace Genode; + bool Kernel::Pd::invalidate_tlb(Cpu & cpu, addr_t addr, size_t size) + { + using namespace Genode; - /* only apply to the active cpu */ - if (cpu.id() != Cpu::executing_id()) - return false; + /* only apply to the active cpu */ + if (cpu.id() != Cpu::executing_id()) + return false; - /** - * The kernel part of the address space is mapped as global - * therefore we have to invalidate it differently - */ - if (addr >= Hw::Mm::supervisor_exception_vector().base) { - for (addr_t end = addr+size; addr < end; addr += get_page_size()) - asm volatile ("tlbi vaae1is, %0" :: "r" (addr >> 12)); - return false; - } + /** + * The kernel part 
of the address space is mapped as global + * therefore we have to invalidate it differently + */ + if (addr >= Hw::Mm::supervisor_exception_vector().base) { + for (addr_t end = addr+size; addr < end; addr += get_page_size()) + asm volatile ("tlbi vaae1is, %0" :: "r" (addr >> 12)); + return false; + } - /** - * Too big mappings will result in long running invalidation loops, - * just invalidate the whole tlb for the ASID then. - */ - if (size > 8 * get_page_size()) { - asm volatile ("tlbi aside1is, %0" - :: "r" ((uint64_t)mmu_regs.id() << 48)); + /** + * Too big mappings will result in long running invalidation loops, + * just invalidate the whole tlb for the ASID then. + */ + if (size > 8 * get_page_size()) { + asm volatile ("tlbi aside1is, %0" + :: "r" ((uint64_t)mmu_regs.id() << 48)); + return false; + } + + for (addr_t end = addr+size; addr < end; addr += get_page_size()) + asm volatile ("tlbi vae1is, %0" + :: "r" (addr >> 12 | (uint64_t)mmu_regs.id() << 48)); return false; } - for (addr_t end = addr+size; addr < end; addr += get_page_size()) - asm volatile ("tlbi vae1is, %0" - :: "r" (addr >> 12 | (uint64_t)mmu_regs.id() << 48)); - return false; -} - -void Thread::proceed(Cpu & cpu) -{ - if (!cpu.active(pd().mmu_regs) && type() != CORE) - cpu.switch_to(pd().mmu_regs); + void Thread::proceed() + { + if (!_cpu().active(pd().mmu_regs) && type() != CORE) + _cpu().switch_to(pd().mmu_regs); - kernel_to_user_context_switch((static_cast(&*regs)), - (void*)cpu.stack_start()); + kernel_to_user_context_switch((static_cast(&*regs)), + (void*)_cpu().stack_start()); } diff --git a/repos/base-hw/src/core/spec/arm_v8/virtualization/kernel/vm.cc b/repos/base-hw/src/core/spec/arm_v8/virtualization/kernel/vm.cc index 511a0e99a11..fe5176cffb4 100644 --- a/repos/base-hw/src/core/spec/arm_v8/virtualization/kernel/vm.cc +++ b/repos/base-hw/src/core/spec/arm_v8/virtualization/kernel/vm.cc @@ -76,7 +76,7 @@ void Board::Vcpu_context::Vm_irq::handle(Vm & vm, unsigned irq) { void 
Board::Vcpu_context::Vm_irq::occurred() { - Vm *vm = dynamic_cast(&_cpu.scheduled_job()); + Vm *vm = dynamic_cast(&_cpu.current_context()); if (!vm) Genode::raw("VM interrupt while VM is not runnning!"); else handle(*vm, _irq_nr); } @@ -115,15 +115,13 @@ Vm::Vm(Irq::Pool & user_irq_pool, Identity & id) : Kernel::Object { *this }, - Cpu_job(Scheduler::Priority::min(), 0), + Cpu_context(cpu, Scheduler::Priority::min(), 0), _user_irq_pool(user_irq_pool), _state(data), _context(context), _id(id), _vcpu_context(cpu) { - affinity(cpu); - _state.id_aa64isar0_el1 = Cpu::Id_aa64isar0_el1::read(); _state.id_aa64isar1_el1 = Cpu::Id_aa64isar1_el1::read(); _state.id_aa64mmfr0_el1 = Cpu::Id_aa64mmfr0_el1::read(); @@ -167,14 +165,14 @@ Vm::~Vm() } -void Vm::exception(Cpu & cpu) +void Vm::exception() { switch (_state.exception_type) { case Cpu::IRQ_LEVEL_EL0: [[fallthrough]]; case Cpu::IRQ_LEVEL_EL1: [[fallthrough]]; case Cpu::FIQ_LEVEL_EL0: [[fallthrough]]; case Cpu::FIQ_LEVEL_EL1: - _interrupt(_user_irq_pool, cpu.id()); + _interrupt(_user_irq_pool); break; case Cpu::SYNC_LEVEL_EL0: [[fallthrough]]; case Cpu::SYNC_LEVEL_EL1: [[fallthrough]]; @@ -188,17 +186,17 @@ void Vm::exception(Cpu & cpu) " not implemented!"); }; - if (cpu.pic().ack_virtual_irq(_vcpu_context.pic)) + if (_cpu().pic().ack_virtual_irq(_vcpu_context.pic)) inject_irq(Board::VT_MAINTAINANCE_IRQ); _vcpu_context.vtimer_irq.disable(); } -void Vm::proceed(Cpu & cpu) +void Vm::proceed() { if (_state.timer.irq) _vcpu_context.vtimer_irq.enable(); - cpu.pic().insert_virtual_irq(_vcpu_context.pic, _state.irqs.virtual_irq); + _cpu().pic().insert_virtual_irq(_vcpu_context.pic, _state.irqs.virtual_irq); /* * the following values have to be enforced by the hypervisor @@ -208,7 +206,7 @@ void Vm::proceed(Cpu & cpu) Cpu::Vttbr_el2::Asid::set(vttbr_el2, _id.id); addr_t guest = Hw::Mm::el2_addr(&_state); addr_t pic = Hw::Mm::el2_addr(&_vcpu_context.pic); - addr_t host = Hw::Mm::el2_addr(&host_context(cpu)); + addr_t host = 
Hw::Mm::el2_addr(&host_context(_cpu())); Hypervisor::switch_world(guest, host, pic, vttbr_el2); } diff --git a/repos/base-hw/src/core/spec/riscv/kernel/thread.cc b/repos/base-hw/src/core/spec/riscv/kernel/thread.cc index fbbd300cc6b..16835812627 100644 --- a/repos/base-hw/src/core/spec/riscv/kernel/thread.cc +++ b/repos/base-hw/src/core/spec/riscv/kernel/thread.cc @@ -25,21 +25,21 @@ void Thread::Tlb_invalidation::execute(Cpu &) { } void Thread::Flush_and_stop_cpu::execute(Cpu &) { } -void Cpu::Halt_job::proceed(Kernel::Cpu &) { } +void Cpu::Halt_job::proceed() { } -void Thread::exception(Cpu & cpu) +void Thread::exception() { using Context = Core::Cpu::Context; using Stval = Core::Cpu::Stval; if (regs->is_irq()) { /* cpu-local timer interrupt */ - if (regs->irq() == cpu.timer().interrupt_id()) { - cpu.handle_if_cpu_local_interrupt(cpu.timer().interrupt_id()); + if (regs->irq() == _cpu().timer().interrupt_id()) { + _cpu().handle_if_cpu_local_interrupt(_cpu().timer().interrupt_id()); } else { /* interrupt controller */ - _interrupt(_user_irq_pool, 0); + _interrupt(_user_irq_pool); } return; } @@ -113,7 +113,7 @@ void Kernel::Thread::_call_cache_line_size() } -void Kernel::Thread::proceed(Cpu & cpu) +void Kernel::Thread::proceed() { /* * The sstatus register defines to which privilege level @@ -123,8 +123,8 @@ void Kernel::Thread::proceed(Cpu & cpu) Cpu::Sstatus::Spp::set(v, (type() == USER) ? 
0 : 1); Cpu::Sstatus::write(v); - if (!cpu.active(pd().mmu_regs) && type() != CORE) - cpu.switch_to(_pd->mmu_regs); + if (!_cpu().active(pd().mmu_regs) && type() != CORE) + _cpu().switch_to(_pd->mmu_regs); asm volatile("csrw sscratch, %1 \n" "mv x31, %0 \n" diff --git a/repos/base-hw/src/core/spec/x86_64/kernel/thread.cc b/repos/base-hw/src/core/spec/x86_64/kernel/thread.cc index 89d87ad0495..057bb229e7b 100644 --- a/repos/base-hw/src/core/spec/x86_64/kernel/thread.cc +++ b/repos/base-hw/src/core/spec/x86_64/kernel/thread.cc @@ -55,9 +55,9 @@ void Kernel::Thread::Flush_and_stop_cpu::execute(Cpu &cpu) } -void Kernel::Cpu::Halt_job::Halt_job::proceed(Kernel::Cpu &cpu) +void Kernel::Cpu::Halt_job::Halt_job::proceed() { - switch (cpu.state()) { + switch (_cpu().state()) { case HALT: while (true) { asm volatile ("hlt"); } @@ -83,7 +83,7 @@ void Kernel::Cpu::Halt_job::Halt_job::proceed(Kernel::Cpu &cpu) /* adhere to ACPI specification */ asm volatile ("wbinvd" : : : "memory"); - fadt.suspend(cpu.suspend.typ_a, cpu.suspend.typ_b); + fadt.suspend(_cpu().suspend.typ_a, _cpu().suspend.typ_b); Genode::raw("kernel: unexpected resume"); }); @@ -143,7 +143,7 @@ void Kernel::Thread::_call_suspend() /* single core CPU case */ if (cpu_count == 1) { /* current CPU triggers final ACPI suspend outside kernel lock */ - _cpu->next_state_suspend(); + _cpu().next_state_suspend(); return; } @@ -176,12 +176,12 @@ void Kernel::Thread::_call_cache_line_size() } -void Kernel::Thread::proceed(Cpu & cpu) +void Kernel::Thread::proceed() { - if (!cpu.active(pd().mmu_regs) && type() != CORE) - cpu.switch_to(pd().mmu_regs); + if (!_cpu().active(pd().mmu_regs) && type() != CORE) + _cpu().switch_to(pd().mmu_regs); - cpu.switch_to(*regs); + _cpu().switch_to(*regs); asm volatile("fxrstor (%1) \n" "mov %0, %%rsp \n" diff --git a/repos/base-hw/src/core/spec/x86_64/kernel/thread_exception.cc b/repos/base-hw/src/core/spec/x86_64/kernel/thread_exception.cc index 7551a62f10f..0f52cada1f1 100644 --- 
a/repos/base-hw/src/core/spec/x86_64/kernel/thread_exception.cc +++ b/repos/base-hw/src/core/spec/x86_64/kernel/thread_exception.cc @@ -20,7 +20,7 @@ using namespace Kernel; -void Thread::exception(Cpu & cpu) +void Thread::exception() { using Genode::Cpu_state; @@ -45,7 +45,7 @@ void Thread::exception(Cpu & cpu) if (regs->trapno >= Cpu_state::INTERRUPTS_START && regs->trapno <= Cpu_state::INTERRUPTS_END) { - _interrupt(_user_irq_pool, cpu.id()); + _interrupt(_user_irq_pool); return; } diff --git a/repos/base-hw/src/core/spec/x86_64/virtualization/kernel/vm.cc b/repos/base-hw/src/core/spec/x86_64/virtualization/kernel/vm.cc index 750d8f2d5f4..2bad69b8eb3 100644 --- a/repos/base-hw/src/core/spec/x86_64/virtualization/kernel/vm.cc +++ b/repos/base-hw/src/core/spec/x86_64/virtualization/kernel/vm.cc @@ -41,15 +41,12 @@ Vm::Vm(Irq::Pool & user_irq_pool, Identity & id) : Kernel::Object { *this }, - Cpu_job(Scheduler::Priority::min(), 0), + Cpu_context(cpu, Scheduler::Priority::min(), 0), _user_irq_pool(user_irq_pool), _state(*data.vcpu_state), _context(context), _id(id), - _vcpu_context(id.id, data) -{ - affinity(cpu); -} + _vcpu_context(id.id, data) { } Vm::~Vm() @@ -57,10 +54,10 @@ Vm::~Vm() } -void Vm::proceed(Cpu & cpu) +void Vm::proceed() { using namespace Board; - cpu.switch_to(*_vcpu_context.regs); + _cpu().switch_to(*_vcpu_context.regs); if (_vcpu_context.exit_reason == EXIT_INIT) { _vcpu_context.regs->trapno = TRAP_VMSKIP; @@ -83,7 +80,7 @@ void Vm::proceed(Cpu & cpu) } -void Vm::exception(Cpu & cpu) +void Vm::exception() { using namespace Board; @@ -121,18 +118,18 @@ void Vm::exception(Cpu & cpu) * it needs to handle an exit. */ if (_vcpu_context.exit_reason == EXIT_PAUSED) - _interrupt(_user_irq_pool, cpu.id()); + _interrupt(_user_irq_pool); else pause = true; break; case Cpu_state::INTERRUPTS_START ... 
Cpu_state::INTERRUPTS_END: - _interrupt(_user_irq_pool, cpu.id()); + _interrupt(_user_irq_pool); break; case TRAP_VMSKIP: /* vCPU is running for the first time */ - _vcpu_context.initialize(cpu, + _vcpu_context.initialize(_cpu(), reinterpret_cast(_id.table)); - _vcpu_context.tsc_aux_host = cpu.id(); + _vcpu_context.tsc_aux_host = _cpu().id(); /* * We set the artificial startup exit code, stop the * vCPU thread and ask the VMM to handle it.