Commit

• Merge upstream PR ish-app#2263 (fix tiocgpgrp for pty master)
• General code cleanup
Mike Miller committed Nov 10, 2023
1 parent 01e99b1 commit fb9c042
Showing 2 changed files with 28 additions and 46 deletions.
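
Note: TIOCGPGRP is the ioctl that reports a terminal's foreground process group, and the fs/tty.c change below lets that query succeed on a pty master by consulting the slave side. A minimal userspace sketch of the call being fixed, using standard POSIX pty functions — this program is illustrative only and is not part of the commit:

#define _XOPEN_SOURCE 600
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>

int main(void) {
    // Open a pseudo-terminal master; the slave would normally be handed
    // to a child session leader (e.g. a shell).
    int master = posix_openpt(O_RDWR | O_NOCTTY);
    if (master < 0 || grantpt(master) < 0 || unlockpt(master) < 0) {
        perror("pty setup");
        return 1;
    }

    // With this commit, querying the foreground process group through the
    // master should report the slave side's group instead of failing.
    pid_t pgrp;
    if (ioctl(master, TIOCGPGRP, &pgrp) < 0)
        perror("TIOCGPGRP on master");
    else
        printf("foreground pgrp of slave side: %d\n", (int)pgrp);

    close(master);
    return 0;
}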
8 changes: 4 additions & 4 deletions fs/tty.c
@@ -161,7 +161,7 @@ int tty_open(struct tty *tty, struct fd *fd) {
     return 0;
 }
 
-static int tty_device_open(int major, int minor, struct fd *fd) {
+static intptr_t tty_device_open(int major, int minor, struct fd *fd) {
     struct tty *tty;
     if (major == TTY_ALTERNATE_MAJOR) {
         if (minor == DEV_TTY_MINOR) {
@@ -297,7 +297,7 @@ ssize_t tty_input(struct tty *tty, const char *input, size_t size, bool blocking
             // FIXME ECHOE and ECHOK are supposed to enable these
             // ECHOKE enables erasing the line instead of echoing the kill char and outputting a newline
             echo = lflags & ECHOK_;
-            int count = tty->bufsize;
+            ssize_t count = tty->bufsize;
             if (ch == cc[VERASE_] && tty->bufsize > 0) {
                 echo = lflags & ECHOE_;
                 count = 1;
@@ -669,10 +669,10 @@ static int tiocgpgrp(struct tty *tty, pid_t_ *fg_group) {
     int err = 0;
     struct tty *slave = get_slave_side_tty(tty);
     if (slave != tty) {
-        lock(&slave->lock);
+        lock(&slave->lock, 0);
     }
 
-    if (tty == slave && !tty_is_current(slave) || slave->fg_group == 0) {
+    if (tty == slave && (!tty_is_current(slave) || slave->fg_group == 0)) {
         err = _ENOTTY;
         goto error_no_ctrl_tty;
     }
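
The added parentheses matter because && binds tighter than ||: the old condition read (tty == slave && !tty_is_current(slave)) || slave->fg_group == 0, so an ioctl issued on the pty master was rejected with ENOTTY whenever the slave had no foreground group, while the new condition only takes the error path for the slave itself. A small illustration with hypothetical truth values (not code from the repository):

#include <stdbool.h>
#include <stdio.h>

int main(void) {
    // Scenario: ioctl issued on the pty master, slave has no foreground group yet.
    bool is_slave = false;      // tty == slave
    bool not_current = true;    // !tty_is_current(slave)
    bool no_fg_group = true;    // slave->fg_group == 0

    // Old check: && binds tighter, so the || term alone triggers the error path.
    bool old_check = is_slave && not_current || no_fg_group;      // true  -> ENOTTY
    // New check: the error path is gated on the slave case.
    bool new_check = is_slave && (not_current || no_fg_group);    // false -> proceed

    printf("old=%d new=%d\n", old_check, new_check);
    return 0;
}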
66 changes: 24 additions & 42 deletions kernel/task.c
@@ -158,60 +158,42 @@ struct task *task_create_(struct task *parent) {
     return task;
 }
 
+// We consolidate the check for whether the task is in a critical section,
+// holds locks, or has pending signals into a single function.
+bool should_wait(struct task *t) {
+    return critical_region_count(t) > 1 || locks_held_count(t) || !!(t->pending & ~t->blocked);
+}
+
 void task_destroy(struct task *task) {
-    //if(!pthread_mutex_trylock(&task->death_lock))
-    //    return; // Task is already in the process of being deleted, most likely by do_exit(). -mke
-
     task->exiting = true;
 
-    bool signal_pending = !!(current->pending & ~current->blocked);
-    int count = -4000; // Maybe this is more efficient? -mke
-    while(((critical_region_count(task) > 1) || (locks_held_count(task)) || (signal_pending)) && (count < 1)) { // Wait for now, task is in one or more critical sections, and/or has locks
-        nanosleep(&lock_pause, NULL);
-        signal_pending = !!(current->blocked);
+    // We use a single loop to wait for the task to be ready to destroy.
+    // This loop replaces all the similar while-loops in the original code.
+    int count = -4000; // Counter to limit the number of times we check.
+    while (should_wait(task) && count < 0) {
+        nanosleep(&lock_pause, NULL); // Sleep for a defined amount of time.
         count++;
     }
 
-    bool Ishould = false;
-    if(!trylock(&pids_lock)) { // Just in case, be sure pids_lock is set. -mke
-        // Multiple threads in the same process tend to cause deadlocks when locking pids_lock. So we skip the second attempt to lock pids_lock by the same pid. Which
-        // sometimes causes pids_lock not to be set. We lock it here, and then unlock below. -mke
-        //printk("WARNING: pids_lock was not set (Me: %d:%s) (Current: %d:%s) (Last: %d:%s)\n", task->pid, task->comm, current->pid, current->comm, pids_lock.pid, pids_lock.comm);
-        Ishould = true;
-    }
-
-    signal_pending = !!(current->pending & ~current->blocked);
-    count = -4000;
-    while(((critical_region_count(task) > 1) || (locks_held_count(task)) || (signal_pending)) && (count < 0)) { // Wait for now, task is in one or more critical sections, and/or has locks
-        nanosleep(&lock_pause, NULL);
-        signal_pending = !!(current->blocked);
-        count++;
+    // Now we lock the pids_lock if it's not already locked by this task.
+    // The trylock prevents deadlocks by avoiding locking if this thread already has the lock.
+    bool locked_pids_lock = false;
+    if (!trylock(&pids_lock)) {
+        locked_pids_lock = true;
     }
 
+    // Remove the task from the sibling and alive lists.
     list_remove(&task->siblings);
     struct pid *pid = pid_get(task->pid);
     pid->task = NULL;
-
-    signal_pending = !!(current->pending & ~current->blocked);
-    count = -4000;
-    while(((critical_region_count(task) > 1) || (locks_held_count(task)) || (signal_pending)) && (count < 0)) { // Wait for now, task is in one or more critical sections, and/or has locks
-        nanosleep(&lock_pause, NULL);
-        signal_pending = !!(current->blocked);
-        count++;
-    }
     list_remove(&pid->alive);
 
-    signal_pending = !!(current->pending & ~current->blocked);
-    count = -4000;
-    while(((critical_region_count(task) > 1) || (locks_held_count(task)) || (signal_pending)) && (count < 0)) { // Wait for now, task is in one or more critical sections, and/or has locks
-        nanosleep(&lock_pause, NULL);
-        signal_pending = !!(current->blocked); // Be less stringent -mke
-        count++;
-    }
-
-    if(Ishould)
+    // Unlock pids_lock if we were the one who locked it.
+    if (locked_pids_lock) {
         unlock(&pids_lock);
+    }
 
+    // Free the task's resources.
     free(task);
 }
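
The rewritten task_destroy combines two small patterns: a bounded wait on should_wait (give up after roughly 4000 polls instead of blocking forever) and a trylock whose result records whether this call took pids_lock and therefore must release it. A self-contained sketch of the same idea using plain pthreads — the names and the use of pthread_mutex_t are illustrative, not iSH's lock_t/trylock API:

#define _POSIX_C_SOURCE 200809L
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t pids_mutex = PTHREAD_MUTEX_INITIALIZER;
static const struct timespec pause_ts = { 0, 100000 }; // 0.1 ms between polls

// Stand-in for should_wait(): report whether cleanup still has to be deferred.
static bool still_busy(void) {
    return false;
}

static void destroy_example(void) {
    // Bounded wait: poll the condition, but give up after ~4000 iterations
    // rather than spinning forever.
    int count = -4000;
    while (still_busy() && count < 0) {
        nanosleep(&pause_ts, NULL);
        count++;
    }

    // Take the lock only if it is currently free, and remember whether this
    // call took it, so the unlock below pairs with exactly that acquisition.
    bool locked_here = (pthread_mutex_trylock(&pids_mutex) == 0);

    /* ... remove the task from its lists and free it here ... */

    if (locked_here)
        pthread_mutex_unlock(&pids_mutex);
}

int main(void) {
    destroy_example();
    return 0;
}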

