Merge branch 'rework/write_atomic-unsafe' into for-linus
commit 5cae92e622
@@ -186,6 +186,8 @@ static inline void con_debug_leave(void) { }
  *			printing callbacks must not be called.
  * @CON_NBCON:		Console can operate outside of the legacy style console_lock
  *			constraints.
+ * @CON_NBCON_ATOMIC_UNSAFE: The write_atomic() callback is not safe and is
+ *			therefore only used by nbcon_atomic_flush_unsafe().
  */
 enum cons_flags {
         CON_PRINTBUFFER         = BIT(0),
@@ -197,6 +199,7 @@ enum cons_flags {
         CON_EXTENDED            = BIT(6),
         CON_SUSPENDED           = BIT(7),
         CON_NBCON               = BIT(8),
+        CON_NBCON_ATOMIC_UNSAFE = BIT(9),
 };

 /**
@@ -608,6 +611,7 @@ extern void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
 extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
 extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
 extern void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt);
+extern bool nbcon_allow_unsafe_takeover(void);
 extern bool nbcon_kdb_try_acquire(struct console *con,
                                   struct nbcon_write_context *wctxt);
 extern void nbcon_kdb_release(struct nbcon_write_context *wctxt);
@@ -627,10 +631,19 @@ static inline bool console_is_usable(struct console *con, short flags, bool use_atomic)
                 return false;

         if (flags & CON_NBCON) {
-                /* The write_atomic() callback is optional. */
-                if (use_atomic && !con->write_atomic)
-                        return false;
+                if (use_atomic) {
+                        /* The write_atomic() callback is optional. */
+                        if (!con->write_atomic)
+                                return false;
+
+                        /*
+                         * An unsafe write_atomic() callback is only usable
+                         * when unsafe takeovers are allowed.
+                         */
+                        if ((flags & CON_NBCON_ATOMIC_UNSAFE) && !nbcon_allow_unsafe_takeover())
+                                return false;
+                }

                 /*
                  * For the !use_atomic case, @printk_kthreads_running is not
                  * checked because the write_thread() callback is also used
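The console.h hunks above describe the intended driver-side usage: an nbcon console whose write_atomic() cannot always run safely sets CON_NBCON_ATOMIC_UNSAFE next to CON_NBCON, and that callback is then reached only via nbcon_atomic_flush_unsafe(). The following is a minimal sketch under the assumption that the current struct console and struct nbcon_write_context layouts apply; the foo_* names are hypothetical and the other nbcon callbacks (write_thread(), device_lock()/device_unlock()) are omitted:

#include <linux/console.h>

/* Hypothetical driver; this only sketches how the new flag is declared. */
static void foo_write_atomic(struct console *con,
                             struct nbcon_write_context *wctxt)
{
        if (!nbcon_enter_unsafe(wctxt))
                return;
        /* Push wctxt->outbuf (wctxt->len bytes) to the hardware here. */
        nbcon_exit_unsafe(wctxt);
}

static struct console foo_console = {
        .name           = "foo",
        /*
         * write_atomic() is not safe in all contexts, so with the new flag
         * it is only invoked via nbcon_atomic_flush_unsafe().
         */
        .flags          = CON_NBCON | CON_NBCON_ATOMIC_UNSAFE,
        .write_atomic   = foo_write_atomic,
};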
@@ -1437,6 +1437,26 @@ enum nbcon_prio nbcon_get_default_prio(void)
         return NBCON_PRIO_NORMAL;
 }

+/*
+ * Track if it is allowed to perform unsafe hostile takeovers of console
+ * ownership. When true, console drivers might perform unsafe actions while
+ * printing. It is externally available via nbcon_allow_unsafe_takeover().
+ */
+static bool panic_nbcon_allow_unsafe_takeover;
+
+/**
+ * nbcon_allow_unsafe_takeover - Check if unsafe console takeovers are allowed
+ *
+ * Return: True, when it is permitted to perform unsafe console printing
+ *
+ * This is also used by console_is_usable() to determine if it is allowed to
+ * call write_atomic() callbacks flagged as unsafe (CON_NBCON_ATOMIC_UNSAFE).
+ */
+bool nbcon_allow_unsafe_takeover(void)
+{
+        return panic_on_this_cpu() && panic_nbcon_allow_unsafe_takeover;
+}
+
 /**
  * nbcon_legacy_emit_next_record - Print one record for an nbcon console
  *                                 in legacy contexts
@@ -1507,7 +1527,6 @@ bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
  *                                 write_atomic() callback
  * @con:                   The nbcon console to flush
  * @stop_seq:              Flush up until this record
- * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
  *
  * Return: 0 if @con was flushed up to @stop_seq Otherwise, error code on
  * failure.
@@ -1526,8 +1545,7 @@ bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
  * returned, it cannot be expected that the unfinalized record will become
  * available.
  */
-static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
-                                            bool allow_unsafe_takeover)
+static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
 {
         struct nbcon_write_context wctxt = { };
         struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
@@ -1536,7 +1554,7 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
         ctxt->console                   = con;
         ctxt->spinwait_max_us           = 2000;
         ctxt->prio                      = nbcon_get_default_prio();
-        ctxt->allow_unsafe_takeover     = allow_unsafe_takeover;
+        ctxt->allow_unsafe_takeover     = nbcon_allow_unsafe_takeover();

         while (nbcon_seq_read(con) < stop_seq) {
                 if (!nbcon_context_try_acquire(ctxt, false))
@@ -1568,15 +1586,13 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
  *                                 write_atomic() callback
  * @con:                   The nbcon console to flush
  * @stop_seq:              Flush up until this record
- * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
  *
  * This will stop flushing before @stop_seq if another context has ownership.
  * That context is then responsible for the flushing. Likewise, if new records
  * are added while this context was flushing and there is no other context
  * to handle the printing, this context must also flush those records.
  */
-static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
-                                           bool allow_unsafe_takeover)
+static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
 {
         struct console_flush_type ft;
         unsigned long flags;
@@ -1591,7 +1607,7 @@ again:
          */
         local_irq_save(flags);

-        err = __nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
+        err = __nbcon_atomic_flush_pending_con(con, stop_seq);

         local_irq_restore(flags);

@@ -1623,9 +1639,8 @@ again:
  * __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
  *                                 write_atomic() callback
  * @stop_seq:              Flush up until this record
- * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
  */
-static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeover)
+static void __nbcon_atomic_flush_pending(u64 stop_seq)
 {
         struct console *con;
         int cookie;
@@ -1643,7 +1658,7 @@ static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeover)
                 if (nbcon_seq_read(con) >= stop_seq)
                         continue;

-                nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
+                nbcon_atomic_flush_pending_con(con, stop_seq);
         }
         console_srcu_read_unlock(cookie);
 }
@@ -1659,7 +1674,7 @@ static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeover)
  */
 void nbcon_atomic_flush_pending(void)
 {
-        __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), false);
+        __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
 }

 /**
@@ -1671,7 +1686,9 @@ void nbcon_atomic_flush_pending(void)
  */
 void nbcon_atomic_flush_unsafe(void)
 {
-        __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), true);
+        panic_nbcon_allow_unsafe_takeover = true;
+        __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
+        panic_nbcon_allow_unsafe_takeover = false;
 }

 /**
@@ -1894,7 +1911,7 @@ void nbcon_device_release(struct console *con)
          * using the legacy loop.
          */
         if (ft.nbcon_atomic) {
-                __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false);
+                __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb));
         } else if (ft.legacy_direct) {
                 if (console_trylock())
                         console_unlock();
@@ -1964,5 +1981,5 @@ void nbcon_kdb_release(struct nbcon_write_context *wctxt)
          * The console was locked only when the write_atomic() callback
          * was usable.
          */
-        __nbcon_atomic_flush_pending_con(ctxt->console, prb_next_reserve_seq(prb), false);
+        __nbcon_atomic_flush_pending_con(ctxt->console, prb_next_reserve_seq(prb));
 }
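The nbcon.c side of the rework can be read as replacing a parameter with panic-scoped state: rather than threading allow_unsafe_takeover through every flush helper, nbcon_atomic_flush_unsafe() sets a static flag around the flush and nbcon_allow_unsafe_takeover() reports it only on the panicking CPU. A stripped-down, user-space model of that pattern follows; the names and the flush loop are stand-ins, not the kernel implementation:

/* Simplified model of the panic-scoped permission pattern above. */
#include <stdbool.h>
#include <stdio.h>

static bool panic_allow_unsafe;        /* was: a bool parameter on every helper */

static bool panic_on_this_cpu(void)
{
        return true;        /* assume we are the panicking CPU in this model */
}

static bool allow_unsafe_takeover(void)
{
        return panic_on_this_cpu() && panic_allow_unsafe;
}

static void flush_pending(void)
{
        /*
         * Helpers no longer take an allow_unsafe argument; they query the
         * gate themselves, as the diff does via
         * ctxt->allow_unsafe_takeover = nbcon_allow_unsafe_takeover().
         */
        printf("flushing, unsafe takeover %s\n",
               allow_unsafe_takeover() ? "allowed" : "not allowed");
}

static void flush_unsafe(void)
{
        panic_allow_unsafe = true;        /* scope the permission to this call */
        flush_pending();
        panic_allow_unsafe = false;
}

int main(void)
{
        flush_pending();        /* prints: not allowed */
        flush_unsafe();         /* prints: allowed */
        return 0;
}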