Merge pull request #62 from yongbok/microMIPS-r6-PRIP5
microMIPS-r6-PRIP5
yongbok committed Aug 11, 2015
2 parents 4d01ecc + ca2e6b2 commit 5a32f58
Showing 165 changed files with 2,403 additions and 1,057 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -17,6 +17,8 @@
 /trace/generated-tcg-tracers.h
 /trace/generated-ust-provider.h
 /trace/generated-ust.c
+/ui/shader/texture-blit-frag.h
+/ui/shader/texture-blit-vert.h
 /libcacard/trace/generated-tracers.c
 *-timestamp
 /*-softmmu
2 changes: 1 addition & 1 deletion Makefile
@@ -135,7 +135,7 @@ endif
 	else \
 	  mv $@.tmp $@; \
 	  cp -p $@ $@.old; \
-	fi, "  GEN   $@");
+	fi, "  GEN   $@");
 
 defconfig:
 	rm -f config-all-devices.mak $(SUBDIR_DEVICES_MAK)
2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
-2.3.91
+2.3.94
20 changes: 10 additions & 10 deletions aio-posix.c
@@ -233,26 +233,23 @@ static void add_pollfd(AioHandler *node)
 bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandler *node;
-    bool was_dispatching;
     int i, ret;
     bool progress;
     int64_t timeout;
 
     aio_context_acquire(ctx);
-    was_dispatching = ctx->dispatching;
     progress = false;
 
     /* aio_notify can avoid the expensive event_notifier_set if
      * everything (file descriptors, bottom halves, timers) will
      * be re-evaluated before the next blocking poll().  This is
      * already true when aio_poll is called with blocking == false;
-     * if blocking == true, it is only true after poll() returns.
-     *
-     * If we're in a nested event loop, ctx->dispatching might be true.
-     * In that case we can restore it just before returning, but we
-     * have to clear it now.
+     * if blocking == true, it is only true after poll() returns,
+     * so disable the optimization now.
      */
-    aio_set_dispatching(ctx, !blocking);
+    if (blocking) {
+        atomic_add(&ctx->notify_me, 2);
+    }
 
     ctx->walking_handlers++;
 
@@ -272,10 +269,15 @@ bool aio_poll(AioContext *ctx, bool blocking)
         aio_context_release(ctx);
     }
     ret = qemu_poll_ns((GPollFD *)pollfds, npfd, timeout);
+    if (blocking) {
+        atomic_sub(&ctx->notify_me, 2);
+    }
     if (timeout) {
         aio_context_acquire(ctx);
     }
 
+    aio_notify_accept(ctx);
+
     /* if we have any readable fds, dispatch event */
     if (ret > 0) {
         for (i = 0; i < npfd; i++) {
@@ -287,12 +289,10 @@ bool aio_poll(AioContext *ctx, bool blocking)
     ctx->walking_handlers--;
 
     /* Run dispatch even if there were no readable fds to run timers */
-    aio_set_dispatching(ctx, true);
     if (aio_dispatch(ctx)) {
        progress = true;
     }
 
-    aio_set_dispatching(ctx, was_dispatching);
     aio_context_release(ctx);
 
     return progress;
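The heart of this change is the notify_me handshake: aio_poll() advertises itself before blocking, and aio_notify() only pays for an event-notifier write while a poller is advertised. Below is a minimal, self-contained C11 sketch of that protocol; standard atomics and a plain flag stand in for QEMU's atomic_add/atomic_sub and event notifier, and every name ending in _sketch is illustrative rather than QEMU API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    /* Bit 0 is reserved for the glib source path (aio_ctx_prepare
     * claims it with atomic_or); aio_poll adds 2 so both can coexist. */
    atomic_int  notify_me;
    atomic_bool notified;            /* stand-in for the event notifier */
} CtxSketch;

/* Poller side: advertise interest before blocking, withdraw after. */
static void poll_once_sketch(CtxSketch *ctx, bool blocking)
{
    if (blocking) {
        atomic_fetch_add(&ctx->notify_me, 2);
    }
    /* ... a real loop would block in qemu_poll_ns() here ... */
    if (blocking) {
        atomic_fetch_sub(&ctx->notify_me, 2);
    }
    /* aio_notify_accept(): consume a pending wakeup, if any. */
    atomic_exchange(&ctx->notified, false);
}

/* Notifier side: only pay for a wakeup while a poller is advertised. */
static void notify_sketch(CtxSketch *ctx)
{
    /* Orders e.g. bh->scheduled stores before the notify_me load,
     * mirroring the smp_mb() in aio_notify(). */
    atomic_thread_fence(memory_order_seq_cst);
    if (atomic_load(&ctx->notify_me)) {
        atomic_store(&ctx->notified, true);    /* event_notifier_set() */
    }
}

int main(void)
{
    CtxSketch ctx = { 0 };

    notify_sketch(&ctx);                  /* no poller: wakeup skipped  */
    printf("skipped:  notified=%d\n", (int)atomic_load(&ctx.notified));

    atomic_fetch_add(&ctx.notify_me, 2);  /* a poller is about to block */
    notify_sketch(&ctx);                  /* so the wakeup is recorded  */
    atomic_fetch_sub(&ctx.notify_me, 2);
    printf("recorded: notified=%d\n", (int)atomic_load(&ctx.notified));

    poll_once_sketch(&ctx, false);        /* non-blocking poll accepts it */
    printf("accepted: notified=%d\n", (int)atomic_load(&ctx.notified));
    return 0;
}

The increments of 2 keep bit 0 free: as the async.c hunks further down show, aio_ctx_prepare() claims that bit with atomic_or when the context is polled through its GSource.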
48 changes: 26 additions & 22 deletions aio-win32.c
@@ -279,30 +279,25 @@ bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandler *node;
     HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
-    bool was_dispatching, progress, have_select_revents, first;
+    bool progress, have_select_revents, first;
     int count;
     int timeout;
 
     aio_context_acquire(ctx);
-    have_select_revents = aio_prepare(ctx);
-    if (have_select_revents) {
-        blocking = false;
-    }
-
-    was_dispatching = ctx->dispatching;
     progress = false;
 
     /* aio_notify can avoid the expensive event_notifier_set if
      * everything (file descriptors, bottom halves, timers) will
      * be re-evaluated before the next blocking poll().  This is
      * already true when aio_poll is called with blocking == false;
-     * if blocking == true, it is only true after poll() returns.
-     *
-     * If we're in a nested event loop, ctx->dispatching might be true.
-     * In that case we can restore it just before returning, but we
-     * have to clear it now.
+     * if blocking == true, it is only true after poll() returns,
+     * so disable the optimization now.
      */
-    aio_set_dispatching(ctx, !blocking);
+    if (blocking) {
+        atomic_add(&ctx->notify_me, 2);
+    }
+
+    have_select_revents = aio_prepare(ctx);
 
     ctx->walking_handlers++;
 
@@ -317,26 +312,36 @@ bool aio_poll(AioContext *ctx, bool blocking)
     ctx->walking_handlers--;
     first = true;
 
-    /* wait until next event */
-    while (count > 0) {
+    /* ctx->notifier is always registered.  */
+    assert(count > 0);
+
+    /* Multiple iterations, all of them non-blocking except the first,
+     * may be necessary to process all pending events.  After the first
+     * WaitForMultipleObjects call ctx->notify_me will be decremented.
+     */
+    do {
         HANDLE event;
         int ret;
 
-        timeout = blocking
+        timeout = blocking && !have_select_revents
             ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
         if (timeout) {
            aio_context_release(ctx);
        }
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
+        if (blocking) {
+            assert(first);
+            atomic_sub(&ctx->notify_me, 2);
+        }
        if (timeout) {
            aio_context_acquire(ctx);
        }
-        aio_set_dispatching(ctx, true);
 
-        if (first && aio_bh_poll(ctx)) {
-            progress = true;
+        if (first) {
+            aio_notify_accept(ctx);
+            progress |= aio_bh_poll(ctx);
+            first = false;
        }
-        first = false;
 
        /* if we have any signaled events, dispatch event */
        event = NULL;
@@ -351,11 +356,10 @@ bool aio_poll(AioContext *ctx, bool blocking)
         blocking = false;
 
         progress |= aio_dispatch_handlers(ctx, event);
-    }
+    } while (count > 0);
 
     progress |= timerlistgroup_run_timers(&ctx->tlg);
 
-    aio_set_dispatching(ctx, was_dispatching);
     aio_context_release(ctx);
     return progress;
 }
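The Windows variant keeps the same handshake but reshapes the loop: the while becomes a do/while in which only the first WaitForMultipleObjects call may block, and notify_me is dropped immediately after that first wait. A compilable sketch of just that control flow; wait_sketch() and the pending counter are toy stand-ins, not Win32 or QEMU API.

#include <stdbool.h>
#include <stdio.h>

static int pending = 2;              /* pretend two handles are signaled */

/* Toy stand-in for WaitForMultipleObjects: a handle index, or -1. */
static int wait_sketch(int timeout_ms)
{
    (void)timeout_ms;
    return --pending >= 0 ? pending : -1;
}

static bool drain_events_sketch(int count, bool blocking)
{
    bool progress = false;
    bool first = true;

    do {
        /* Only the first iteration may block; later ones just drain. */
        int timeout = (blocking && first) ? 1000 : 0;
        int ret = wait_sketch(timeout);

        if (first) {
            /* here the real code drops notify_me, accepts the pending
             * notification and runs bottom halves once */
            first = false;
        }
        if (ret < 0) {
            break;                   /* nothing signaled: all drained */
        }
        count--;                     /* stop re-polling that handle */
        blocking = false;
        progress = true;
    } while (count > 0);

    return progress;
}

int main(void)
{
    printf("progress=%d\n", (int)drain_events_sketch(2, true));
    return 0;
}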
64 changes: 48 additions & 16 deletions async.c
@@ -79,8 +79,10 @@ int aio_bh_poll(AioContext *ctx)
          * aio_notify again if necessary.
          */
         if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
-            if (!bh->idle)
+            /* Idle BHs and the notify BH don't count as progress */
+            if (!bh->idle && bh != ctx->notify_dummy_bh) {
                 ret = 1;
+            }
             bh->idle = 0;
             bh->cb(bh->opaque);
         }
@@ -184,6 +186,8 @@ aio_ctx_prepare(GSource *source, gint *timeout)
 {
     AioContext *ctx = (AioContext *) source;
 
+    atomic_or(&ctx->notify_me, 1);
+
     /* We assume there is no timeout already supplied */
     *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
 
@@ -200,6 +204,9 @@ aio_ctx_check(GSource *source)
     AioContext *ctx = (AioContext *) source;
     QEMUBH *bh;
 
+    atomic_and(&ctx->notify_me, ~1);
+    aio_notify_accept(ctx);
+
     for (bh = ctx->first_bh; bh; bh = bh->next) {
         if (!bh->deleted && bh->scheduled) {
             return true;
@@ -225,7 +232,21 @@ aio_ctx_finalize(GSource *source)
 {
     AioContext *ctx = (AioContext *) source;
 
+    qemu_bh_delete(ctx->notify_dummy_bh);
     thread_pool_free(ctx->thread_pool);
+
+    qemu_mutex_lock(&ctx->bh_lock);
+    while (ctx->first_bh) {
+        QEMUBH *next = ctx->first_bh->next;
+
+        /* qemu_bh_delete() must have been called on BHs in this AioContext */
+        assert(ctx->first_bh->deleted);
+
+        g_free(ctx->first_bh);
+        ctx->first_bh = next;
+    }
+    qemu_mutex_unlock(&ctx->bh_lock);
+
     aio_set_event_notifier(ctx, &ctx->notifier, NULL);
     event_notifier_cleanup(&ctx->notifier);
     rfifolock_destroy(&ctx->lock);
@@ -254,24 +275,22 @@ ThreadPool *aio_get_thread_pool(AioContext *ctx)
     return ctx->thread_pool;
 }
 
-void aio_set_dispatching(AioContext *ctx, bool dispatching)
+void aio_notify(AioContext *ctx)
 {
-    ctx->dispatching = dispatching;
-    if (!dispatching) {
-        /* Write ctx->dispatching before reading e.g. bh->scheduled.
-         * Optimization: this is only needed when we're entering the "unsafe"
-         * phase where other threads must call event_notifier_set.
-         */
-        smp_mb();
+    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
+     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
+     */
+    smp_mb();
+    if (ctx->notify_me) {
+        event_notifier_set(&ctx->notifier);
+        atomic_mb_set(&ctx->notified, true);
     }
 }
 
-void aio_notify(AioContext *ctx)
+void aio_notify_accept(AioContext *ctx)
 {
-    /* Write e.g. bh->scheduled before reading ctx->dispatching. */
-    smp_mb();
-    if (!ctx->dispatching) {
-        event_notifier_set(&ctx->notifier);
+    if (atomic_xchg(&ctx->notified, false)) {
+        event_notifier_test_and_clear(&ctx->notifier);
     }
 }
 
@@ -282,8 +301,19 @@ static void aio_timerlist_notify(void *opaque)
 
 static void aio_rfifolock_cb(void *opaque)
 {
+    AioContext *ctx = opaque;
+
     /* Kick owner thread in case they are blocked in aio_poll() */
-    aio_notify(opaque);
+    qemu_bh_schedule(ctx->notify_dummy_bh);
 }
 
+static void notify_dummy_bh(void *opaque)
+{
+    /* Do nothing, we were invoked just to force the event loop to iterate */
+}
+
+static void event_notifier_dummy_cb(EventNotifier *e)
+{
+}
+
 AioContext *aio_context_new(Error **errp)
@@ -300,12 +330,14 @@ AioContext *aio_context_new(Error **errp)
     g_source_set_can_recurse(&ctx->source, true);
     aio_set_event_notifier(ctx, &ctx->notifier,
                            (EventNotifierHandler *)
-                           event_notifier_test_and_clear);
+                           event_notifier_dummy_cb);
     ctx->thread_pool = NULL;
     qemu_mutex_init(&ctx->bh_lock);
     rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
     timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
 
+    ctx->notify_dummy_bh = aio_bh_new(ctx, notify_dummy_bh, NULL);
+
     return ctx;
 }
 
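On the async.c side, notification is now split into a producer half (aio_notify() records a wakeup in ctx->notified) and a consumer half (aio_notify_accept() clears it). The atomic_xchg is what makes acceptance one-shot. A minimal C11 sketch of that property, with a plain atomic flag standing in for the event notifier:

#include <stdatomic.h>
#include <stdbool.h>
#include <assert.h>

static atomic_bool notified;          /* stand-in for ctx->notified */

/* aio_notify_accept() sketch: returns true for exactly one caller per
 * recorded notification, so the costlier event-notifier clear runs
 * once rather than once per nested poll. */
static bool accept_sketch(void)
{
    return atomic_exchange(&notified, false);
}

int main(void)
{
    atomic_store(&notified, true);    /* aio_notify() ran */
    assert(accept_sketch());          /* first acceptor consumes it */
    assert(!accept_sketch());         /* second sees nothing to do */
    return 0;
}

The new notify_dummy_bh/aio_rfifolock_cb pairing works toward the same goal from the other side: kicking the lock owner now schedules a no-op bottom half, which forces exactly one event-loop iteration instead of writing the notifier directly.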
8 changes: 4 additions & 4 deletions backends/hostmem.c
@@ -10,6 +10,7 @@
  * See the COPYING file in the top-level directory.
  */
 #include "sysemu/hostmem.h"
+#include "hw/boards.h"
 #include "qapi/visitor.h"
 #include "qapi-types.h"
 #include "qapi-visit.h"
@@ -222,11 +223,10 @@ static void host_memory_backend_set_prealloc(Object *obj, bool value,
 static void host_memory_backend_init(Object *obj)
 {
     HostMemoryBackend *backend = MEMORY_BACKEND(obj);
+    MachineState *machine = MACHINE(qdev_get_machine());
 
-    backend->merge = qemu_opt_get_bool(qemu_get_machine_opts(),
-                                       "mem-merge", true);
-    backend->dump = qemu_opt_get_bool(qemu_get_machine_opts(),
-                                      "dump-guest-core", true);
+    backend->merge = machine_mem_merge(machine);
+    backend->dump = machine_dump_guest_core(machine);
     backend->prealloc = mem_prealloc;
 
     object_property_add_bool(obj, "merge",
14 changes: 6 additions & 8 deletions block/mirror.c
@@ -388,7 +388,7 @@ static void coroutine_fn mirror_run(void *opaque)
     MirrorBlockJob *s = opaque;
     MirrorExitData *data;
     BlockDriverState *bs = s->common.bs;
-    int64_t sector_num, end, sectors_per_chunk, length;
+    int64_t sector_num, end, length;
     uint64_t last_pause_ns;
     BlockDriverInfo bdi;
     char backing_filename[2]; /* we only need 2 characters because we are only
@@ -442,15 +442,16 @@ static void coroutine_fn mirror_run(void *opaque)
         goto immediate_exit;
     }
 
-    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
     mirror_free_init(s);
 
     last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     if (!s->is_none_mode) {
         /* First part, loop on the sectors and initialize the dirty bitmap.  */
         BlockDriverState *base = s->base;
         for (sector_num = 0; sector_num < end; ) {
-            int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
+            /* Just to make sure we are not exceeding int limit. */
+            int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
+                                 end - sector_num);
             int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
 
             if (now - last_pause_ns > SLICE_TIME) {
@@ -462,8 +463,7 @@ static void coroutine_fn mirror_run(void *opaque)
                 goto immediate_exit;
             }
 
-            ret = bdrv_is_allocated_above(bs, base,
-                                          sector_num, next - sector_num, &n);
+            ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);
 
             if (ret < 0) {
                 goto immediate_exit;
@@ -472,10 +472,8 @@ static void coroutine_fn mirror_run(void *opaque)
             assert(n > 0);
             if (ret == 1) {
                 bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
-                sector_num = next;
-            } else {
-                sector_num += n;
             }
+            sector_num += n;
         }
     }
 
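The mirror_run() change stops stepping the dirty-bitmap priming loop in granularity-sized chunks and instead queries the largest range whose sector count still fits bdrv_is_allocated_above()'s int parameter. A small sketch of that clamp; the constants mirror QEMU's values, but next_query_sectors() is illustrative only:

#include <stdint.h>
#include <stdio.h>
#include <limits.h>

#define BDRV_SECTOR_BITS 9                   /* 512-byte sectors */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Just to make sure we are not exceeding int limit. */
static int next_query_sectors(int64_t sector_num, int64_t end)
{
    return MIN(INT_MAX >> BDRV_SECTOR_BITS, end - sector_num);
}

int main(void)
{
    /* a huge disk still yields int-sized queries (4194303 sectors) */
    printf("%d\n", next_query_sectors(0, INT64_C(1) << 40));
    /* near the end of the device, only the remainder is requested */
    printf("%d\n", next_query_sectors((INT64_C(1) << 40) - 100,
                                      INT64_C(1) << 40));
    return 0;
}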