Revert "drm: Fix missing newline in DRM_WARN."
Accidentally included more than I intended here.
parent 826f18790e
commit 1d2fb50490
sched_entity.c

@@ -1,4 +1,4 @@
-/* $NetBSD: sched_entity.c,v 1.5 2021/12/24 15:22:20 riastradh Exp $ */
+/* $NetBSD: sched_entity.c,v 1.6 2021/12/24 15:25:03 riastradh Exp $ */
 
 /*
  * Copyright 2015 Advanced Micro Devices, Inc.
@@ -24,7 +24,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sched_entity.c,v 1.5 2021/12/24 15:22:20 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sched_entity.c,v 1.6 2021/12/24 15:25:03 riastradh Exp $");
 
 #include <linux/kthread.h>
 #include <linux/slab.h>
@@ -142,7 +142,7 @@ drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
 		struct drm_gpu_scheduler *sched = entity->sched_list[i];
 
 		if (!entity->sched_list[i]->ready) {
-			DRM_WARN("sched%s is not ready, skipping\n", sched->name);
+			DRM_WARN("sched%s is not ready, skipping", sched->name);
 			continue;
 		}
 
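Background, not part of the patch: DRM_WARN is a printk-style macro, so the format string is emitted as given and no newline is appended; that is why the commit being reverted had added the trailing "\n". A minimal userland sketch of the effect, with SKETCH_WARN as an invented stand-in for the real macro:

/*
 * Sketch only: SKETCH_WARN is an invented stand-in for a printk-style
 * macro such as DRM_WARN, which prints the format string as given and
 * adds no newline of its own.  Without a trailing "\n" the next
 * message is appended to the same output line.
 */
#include <stdio.h>

#define SKETCH_WARN(fmt, ...) fprintf(stderr, "[drm] " fmt, ##__VA_ARGS__)

int
main(void)
{
	SKETCH_WARN("sched%s is not ready, skipping", "0");
	SKETCH_WARN("next warning\n");
	/* Output: "[drm] sched0 is not ready, skipping[drm] next warning" */
	return 0;
}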
linux_ww_mutex.c

@@ -1,4 +1,4 @@
-/* $NetBSD: linux_ww_mutex.c,v 1.11 2021/12/24 15:22:20 riastradh Exp $ */
+/* $NetBSD: linux_ww_mutex.c,v 1.12 2021/12/24 15:25:03 riastradh Exp $ */
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.11 2021/12/24 15:22:20 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.12 2021/12/24 15:25:03 riastradh Exp $");
 
 #include <sys/types.h>
 #include <sys/atomic.h>
@@ -108,21 +108,6 @@ ww_acquire_done(struct ww_acquire_ctx *ctx)
 	ctx->wwx_acquire_done = true;
 }
 
-static void
-ww_acquire_done_check(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
-{
-
-	/*
-	 * If caller has invoked ww_acquire_done, we must already hold
-	 * this mutex.
-	 */
-	KASSERT(mutex_owned(&mutex->wwm_lock));
-	KASSERTMSG((!ctx->wwx_acquire_done ||
-		(mutex->wwm_state == WW_CTX && mutex->wwm_u.ctx == ctx)),
-	    "ctx %p done acquiring locks, refusing to acquire %p",
-	    ctx, mutex);
-}
-
 void
 ww_acquire_fini(struct ww_acquire_ctx *ctx)
 {
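For orientation, the helper removed above had centralized a check on the acquire protocol this file implements. The sketch below shows the usual wound/wait acquisition sequence and where ww_acquire_done() fits; the ww_* names are the Linux API provided here, while lock_pair() and its parameters are invented for illustration:

/*
 * Sketch, not from the patch: typical two-lock wound/wait acquisition.
 * After ww_acquire_done() the context may not take any further
 * ww_mutexes; that is the invariant the assertions restored below
 * enforce.
 */
#include <linux/errno.h>
#include <linux/ww_mutex.h>

static int
lock_pair(struct ww_mutex *a, struct ww_mutex *b, struct ww_class *class)
{
	struct ww_acquire_ctx ctx;
	struct ww_mutex *held = a, *wanted = b, *tmp;
	int ret;

	ww_acquire_init(&ctx, class);

	ret = ww_mutex_lock(held, &ctx);
	if (ret)	/* can't be -EDEADLK for a fresh ctx, but stay tidy */
		goto out;

	while ((ret = ww_mutex_lock(wanted, &ctx)) == -EDEADLK) {
		/*
		 * Lost to an older context: drop what we hold, sleep on
		 * the contended lock, then go back for the other one.
		 */
		ww_mutex_unlock(held);
		ww_mutex_lock_slow(wanted, &ctx);
		tmp = held; held = wanted; wanted = tmp;	/* swap roles */
	}

	/* Acquire phase is over: no more ww_mutex_lock with this ctx. */
	ww_acquire_done(&ctx);

	/* ... critical section covering both locks ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
out:
	ww_acquire_fini(&ctx);
	return ret;
}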
@@ -567,6 +552,8 @@ ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 
 	KASSERTMSG((ctx->wwx_owner == curlwp),
 	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
+	KASSERTMSG(!ctx->wwx_acquire_done,
+	    "ctx %p done acquiring locks, can't acquire more", ctx);
 	KASSERTMSG((ctx->wwx_acquired != ~0U),
 	    "ctx %p finished, can't be used any more", ctx);
 	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
@@ -574,7 +561,6 @@ ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);
 
 	mutex_enter(&mutex->wwm_lock);
-	ww_acquire_done_check(mutex, ctx);
 retry:	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
 		WW_WANTLOCK(mutex);
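The two hunks above put the "done acquiring locks" assertion back inline in ww_mutex_lock(); the later hunks do the same for the other entry points. A sketch of the misuse it catches on a DIAGNOSTIC kernel (misuse_example() and its arguments are invented):

/* Sketch of the misuse the restored KASSERTMSG guards against. */
static void
misuse_example(struct ww_mutex *a, struct ww_mutex *b, struct ww_class *class)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, class);
	(void)ww_mutex_lock(a, &ctx);
	ww_acquire_done(&ctx);		/* promise: no more locks with ctx */

	/* Bug: panics with "ctx %p done acquiring locks, can't acquire more". */
	(void)ww_mutex_lock(b, &ctx);
}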
@@ -687,6 +673,8 @@ ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 
 	KASSERTMSG((ctx->wwx_owner == curlwp),
 	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
+	KASSERTMSG(!ctx->wwx_acquire_done,
+	    "ctx %p done acquiring locks, can't acquire more", ctx);
 	KASSERTMSG((ctx->wwx_acquired != ~0U),
 	    "ctx %p finished, can't be used any more", ctx);
 	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
@@ -694,7 +682,6 @@ ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);
 
 	mutex_enter(&mutex->wwm_lock);
-	ww_acquire_done_check(mutex, ctx);
 retry:	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
 		WW_WANTLOCK(mutex);
@@ -808,6 +795,8 @@ ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 
 	KASSERTMSG((ctx->wwx_owner == curlwp),
 	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
+	KASSERTMSG(!ctx->wwx_acquire_done,
+	    "ctx %p done acquiring locks, can't acquire more", ctx);
 	KASSERTMSG((ctx->wwx_acquired != ~0U),
 	    "ctx %p finished, can't be used any more", ctx);
 	KASSERTMSG((ctx->wwx_acquired == 0),
@@ -818,7 +807,6 @@ ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);
 
 	mutex_enter(&mutex->wwm_lock);
-	ww_acquire_done_check(mutex, ctx);
 retry:	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
 		mutex->wwm_state = WW_CTX;
@@ -886,6 +874,8 @@ ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
 
 	KASSERTMSG((ctx->wwx_owner == curlwp),
 	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
+	KASSERTMSG(!ctx->wwx_acquire_done,
+	    "ctx %p done acquiring locks, can't acquire more", ctx);
 	KASSERTMSG((ctx->wwx_acquired != ~0U),
 	    "ctx %p finished, can't be used any more", ctx);
 	KASSERTMSG((ctx->wwx_acquired == 0),
@@ -896,7 +886,6 @@ ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
 	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);
 
 	mutex_enter(&mutex->wwm_lock);
-	ww_acquire_done_check(mutex, ctx);
 retry:	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
 		mutex->wwm_state = WW_CTX;