i915-4.5.7

git-svn-id: svn://kolibrios.org@6937 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2017-07-28 20:01:47 +00:00
parent f75e5bc283
commit d2905e7c3f
105 changed files with 8574 additions and 6822 deletions

drm_atomic.c

@@ -65,8 +65,6 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
 	 */
 	state->allow_modeset = true;

-	state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
-
 	state->crtcs = kcalloc(dev->mode_config.num_crtc,
 			       sizeof(*state->crtcs), GFP_KERNEL);
 	if (!state->crtcs)
@@ -83,16 +81,6 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
 			       sizeof(*state->plane_states), GFP_KERNEL);
 	if (!state->plane_states)
 		goto fail;
-	state->connectors = kcalloc(state->num_connector,
-				    sizeof(*state->connectors),
-				    GFP_KERNEL);
-	if (!state->connectors)
-		goto fail;
-	state->connector_states = kcalloc(state->num_connector,
-					  sizeof(*state->connector_states),
-					  GFP_KERNEL);
-	if (!state->connector_states)
-		goto fail;

 	state->dev = dev;
@@ -288,8 +276,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
 	state->crtcs[index] = crtc;
 	crtc_state->state = state;

-	DRM_DEBUG_ATOMIC("Added [CRTC:%d] %p state to %p\n",
-			 crtc->base.id, crtc_state, state);
+	DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
+			 crtc->base.id, crtc->name, crtc_state, state);

 	return crtc_state;
 }
@@ -316,7 +304,6 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
 	if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
 		return 0;

-	if (state->mode_blob)
-		drm_property_unreference_blob(state->mode_blob);
+	drm_property_unreference_blob(state->mode_blob);
 	state->mode_blob = NULL;
@@ -363,12 +350,9 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
 	if (blob == state->mode_blob)
 		return 0;

-	if (state->mode_blob)
-		drm_property_unreference_blob(state->mode_blob);
+	drm_property_unreference_blob(state->mode_blob);
 	state->mode_blob = NULL;

-	memset(&state->mode, 0, sizeof(state->mode));
-
 	if (blob) {
 		if (blob->length != sizeof(struct drm_mode_modeinfo) ||
 		    drm_mode_convert_umode(&state->mode,
@@ -381,6 +365,7 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
 		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
 				 state->mode.name, state);
 	} else {
+		memset(&state->mode, 0, sizeof(state->mode));
 		state->enable = false;
 		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
 				 state);
@@ -420,7 +405,6 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 		struct drm_property_blob *mode =
 			drm_property_lookup_blob(dev, val);
 		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
-		if (mode)
-			drm_property_unreference_blob(mode);
+		drm_property_unreference_blob(mode);
 		return ret;
 	}
@@ -433,11 +417,20 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 }
 EXPORT_SYMBOL(drm_atomic_crtc_set_property);

-/*
+/**
+ * drm_atomic_crtc_get_property - get property value from CRTC state
+ * @crtc: the drm CRTC to set a property on
+ * @state: the state object to get the property value from
+ * @property: the property to set
+ * @val: return location for the property value
+ *
  * This function handles generic/core properties and calls out to
  * driver's ->atomic_get_property() for driver properties. To ensure
  * consistent behavior you must call this function rather than the
  * driver hook directly.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
  */
 static int
 drm_atomic_crtc_get_property(struct drm_crtc *crtc,
@@ -481,8 +474,8 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
 	 */
 	if (state->active && !state->enable) {
-		DRM_DEBUG_ATOMIC("[CRTC:%d] active without enabled\n",
-				 crtc->base.id);
+		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
+				 crtc->base.id, crtc->name);
 		return -EINVAL;
 	}
@@ -491,14 +484,30 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
 	 * be able to trigger. */
 	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
 	    WARN_ON(state->enable && !state->mode_blob)) {
-		DRM_DEBUG_ATOMIC("[CRTC:%d] enabled without mode blob\n",
-				 crtc->base.id);
+		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
+				 crtc->base.id, crtc->name);
 		return -EINVAL;
 	}

 	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
 	    WARN_ON(!state->enable && state->mode_blob)) {
-		DRM_DEBUG_ATOMIC("[CRTC:%d] disabled with mode blob\n",
-				 crtc->base.id);
+		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
+				 crtc->base.id, crtc->name);
+		return -EINVAL;
+	}
+
+	/*
+	 * Reject event generation for when a CRTC is off and stays off.
+	 * It wouldn't be hard to implement this, but userspace has a track
+	 * record of happily burning through 100% cpu (or worse, crash) when the
+	 * display pipe is suspended. To avoid all that fun just reject updates
+	 * that ask for events since likely that indicates a bug in the
+	 * compositor's drawing loop. This is consistent with the vblank IOCTL
+	 * and legacy page_flip IOCTL which also reject service on a disabled
+	 * pipe.
+	 */
+	if (state->event && !state->active && !crtc->state->active) {
+		DRM_DEBUG_ATOMIC("[CRTC:%d] requesting event but off\n",
+				 crtc->base.id);
 		return -EINVAL;
 	}
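The event check added above is userspace-visible: an atomic commit that asks for a completion event while the CRTC is off (and stays off) now fails with -EINVAL instead of queueing an event that would never fire. A minimal libdrm sketch of the rejected case (not part of this commit; plane_id, fb_prop_id and fb_id are placeholders a real client would look up via drmModeGetResources()/drmModeObjectGetProperties()):

#include <errno.h>
#include <stdio.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Sketch: request a page-flip event on a CRTC that is off; with the
 * check above the kernel rejects the whole commit with -EINVAL. */
int try_flip_on_disabled_crtc(int fd, uint32_t plane_id,
			      uint32_t fb_prop_id, uint32_t fb_id)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	/* stage a plane update; the CRTC itself stays disabled */
	drmModeAtomicAddProperty(req, plane_id, fb_prop_id, fb_id);

	/* DRM_MODE_PAGE_FLIP_EVENT asks for a completion event */
	ret = drmModeAtomicCommit(fd, req, DRM_MODE_PAGE_FLIP_EVENT, NULL);
	if (ret < 0)
		fprintf(stderr, "atomic commit rejected: %d\n", ret);

	drmModeAtomicFree(req);
	return ret;
}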
@@ -544,8 +553,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
 	state->planes[index] = plane;
 	plane_state->state = state;

-	DRM_DEBUG_ATOMIC("Added [PLANE:%d] %p state to %p\n",
-			 plane->base.id, plane_state, state);
+	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
+			 plane->base.id, plane->name, plane_state, state);

 	if (plane_state->crtc) {
 		struct drm_crtc_state *crtc_state;
@@ -620,11 +629,20 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
 }
 EXPORT_SYMBOL(drm_atomic_plane_set_property);

-/*
+/**
+ * drm_atomic_plane_get_property - get property value from plane state
+ * @plane: the drm plane to set a property on
+ * @state: the state object to get the property value from
+ * @property: the property to set
+ * @val: return location for the property value
+ *
  * This function handles generic/core properties and calls out to
  * driver's ->atomic_get_property() for driver properties. To ensure
  * consistent behavior you must call this function rather than the
  * driver hook directly.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
  */
 static int
 drm_atomic_plane_get_property(struct drm_plane *plane,
@@ -756,8 +774,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
 	}

 	if (plane_switching_crtc(state->state, plane, state)) {
-		DRM_DEBUG_ATOMIC("[PLANE:%d] switching CRTC directly\n",
-				 plane->base.id);
+		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
+				 plane->base.id, plane->name);
 		return -EINVAL;
 	}
@@ -793,19 +811,27 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
 	index = drm_connector_index(connector);

-	/*
-	 * Construction of atomic state updates can race with a connector
-	 * hot-add which might overflow. In this case flip the table and just
-	 * restart the entire ioctl - no one is fast enough to livelock a cpu
-	 * with physical hotplug events anyway.
-	 *
-	 * Note that we only grab the indexes once we have the right lock to
-	 * prevent hotplug/unplugging of connectors. So removal is no problem,
-	 * at most the array is a bit too large.
-	 */
 	if (index >= state->num_connector) {
-		DRM_DEBUG_ATOMIC("Hot-added connector would overflow state array, restarting\n");
-		return ERR_PTR(-EAGAIN);
+		struct drm_connector **c;
+		struct drm_connector_state **cs;
+		int alloc = max(index + 1, config->num_connector);
+
+		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
+		if (!c)
+			return ERR_PTR(-ENOMEM);
+
+		state->connectors = c;
+		memset(&state->connectors[state->num_connector], 0,
+		       sizeof(*state->connectors) * (alloc - state->num_connector));
+
+		cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL);
+		if (!cs)
+			return ERR_PTR(-ENOMEM);
+
+		state->connector_states = cs;
+		memset(&state->connector_states[state->num_connector], 0,
+		       sizeof(*state->connector_states) * (alloc - state->num_connector));
+
+		state->num_connector = alloc;
 	}

 	if (state->connector_states[index])
@@ -876,11 +902,20 @@ int drm_atomic_connector_set_property(struct drm_connector *connector,
 }
 EXPORT_SYMBOL(drm_atomic_connector_set_property);

-/*
+/**
+ * drm_atomic_connector_get_property - get property value from connector state
+ * @connector: the drm connector to set a property on
+ * @state: the state object to get the property value from
+ * @property: the property to set
+ * @val: return location for the property value
+ *
  * This function handles generic/core properties and calls out to
  * driver's ->atomic_get_property() for driver properties. To ensure
  * consistent behavior you must call this function rather than the
  * driver hook directly.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
  */
 static int
 drm_atomic_connector_get_property(struct drm_connector *connector,
@@ -981,8 +1016,8 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
 	}

 	if (crtc)
-		DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d]\n",
-				 plane_state, crtc->base.id);
+		DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
+				 plane_state, crtc->base.id, crtc->name);
 	else
 		DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
 				 plane_state);
@@ -1040,17 +1075,28 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
 {
 	struct drm_crtc_state *crtc_state;

+	if (conn_state->crtc && conn_state->crtc != crtc) {
+		crtc_state = drm_atomic_get_existing_crtc_state(conn_state->state,
+								conn_state->crtc);
+
+		crtc_state->connector_mask &=
+			~(1 << drm_connector_index(conn_state->connector));
+	}
+
 	if (crtc) {
 		crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
 		if (IS_ERR(crtc_state))
 			return PTR_ERR(crtc_state);
+
+		crtc_state->connector_mask |=
+			1 << drm_connector_index(conn_state->connector);
 	}

 	conn_state->crtc = crtc;

 	if (crtc)
-		DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d]\n",
-				 conn_state, crtc->base.id);
+		DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
+				 conn_state, crtc->base.id, crtc->name);
 	else
 		DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
 				 conn_state);
@@ -1089,8 +1135,8 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
 	if (ret)
 		return ret;

-	DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d] to %p\n",
-			 crtc->base.id, state);
+	DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
+			 crtc->base.id, crtc->name, state);

 	/*
 	 * Changed connectors are already in @state, so only need to look at the
@@ -1148,35 +1194,6 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
 }
 EXPORT_SYMBOL(drm_atomic_add_affected_planes);

-/**
- * drm_atomic_connectors_for_crtc - count number of connected outputs
- * @state: atomic state
- * @crtc: DRM crtc
- *
- * This function counts all connectors which will be connected to @crtc
- * according to @state. Useful to recompute the enable state for @crtc.
- */
-int
-drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
-			       struct drm_crtc *crtc)
-{
-	struct drm_connector *connector;
-	struct drm_connector_state *conn_state;
-	int i, num_connected_connectors = 0;
-
-	for_each_connector_in_state(state, connector, conn_state, i) {
-		if (conn_state->crtc == crtc)
-			num_connected_connectors++;
-	}
-
-	DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d]\n",
-			 state, num_connected_connectors, crtc->base.id);
-
-	return num_connected_connectors;
-}
-EXPORT_SYMBOL(drm_atomic_connectors_for_crtc);
-
 /**
  * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
  * @state: atomic state
@@ -1192,12 +1209,7 @@ void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
 retry:
 	drm_modeset_backoff(state->acquire_ctx);

-	ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
-			       state->acquire_ctx);
-	if (ret)
-		goto retry;
-	ret = drm_modeset_lock_all_crtcs(state->dev,
-					 state->acquire_ctx);
+	ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx);
 	if (ret)
 		goto retry;
 }
@@ -1229,8 +1241,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
 	for_each_plane_in_state(state, plane, plane_state, i) {
 		ret = drm_atomic_plane_check(plane, plane_state);
 		if (ret) {
-			DRM_DEBUG_ATOMIC("[PLANE:%d] atomic core check failed\n",
-					 plane->base.id);
+			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
+					 plane->base.id, plane->name);
 			return ret;
 		}
 	}
@@ -1238,8 +1250,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
 		ret = drm_atomic_crtc_check(crtc, crtc_state);
 		if (ret) {
-			DRM_DEBUG_ATOMIC("[CRTC:%d] atomic core check failed\n",
-					 crtc->base.id);
+			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
+					 crtc->base.id, crtc->name);
 			return ret;
 		}
 	}
@@ -1250,8 +1262,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
 	if (!state->allow_modeset) {
 		for_each_crtc_in_state(state, crtc, crtc_state, i) {
 			if (drm_atomic_crtc_needs_modeset(crtc_state)) {
-				DRM_DEBUG_ATOMIC("[CRTC:%d] requires full modeset\n",
-						 crtc->base.id);
+				DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
+						 crtc->base.id, crtc->name);
 				return -EINVAL;
 			}
 		}
@@ -1434,7 +1446,7 @@ static int atomic_set_prop(struct drm_atomic_state *state,
 }

 /**
- * drm_atomic_update_old_fb -- Unset old_fb pointers and set plane->fb pointers.
+ * drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers.
  *
  * @dev: drm device to check.
  * @plane_mask: plane mask for planes that were updated.
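With drm_atomic_connectors_for_crtc() removed, the number of connectors driven by a CRTC is now tracked in crtc_state->connector_mask, which drm_atomic_set_crtc_for_connector() keeps up to date above. A hedged sketch of recomputing the old count from the mask (the helper name is made up for illustration):

#include <linux/bitops.h>
#include <drm/drm_crtc.h>

/* Hypothetical driver-side replacement for the removed helper: count the
 * connectors that @crtc_state says will be connected, by weighing the
 * set bits of connector_mask. */
static int example_connectors_for_crtc(const struct drm_crtc_state *crtc_state)
{
	return hweight32(crtc_state->connector_mask);
}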

drm_atomic_helper.c

@@ -52,6 +52,12 @@
 * drm_atomic_helper_disable_plane(), drm_atomic_helper_disable_plane() and the
 * various functions to implement set_property callbacks. New drivers must not
 * implement these functions themselves but must use the provided helpers.
+ *
+ * The atomic helper uses the same function table structures as all other
+ * modesetting helpers. See the documentation for struct &drm_crtc_helper_funcs,
+ * struct &drm_encoder_helper_funcs and struct &drm_connector_helper_funcs. It
+ * also shares the struct &drm_plane_helper_funcs function table with the plane
+ * helpers.
 */
 static void
 drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
@@ -80,6 +86,26 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
 	}
 }

+static bool
+check_pending_encoder_assignment(struct drm_atomic_state *state,
+				 struct drm_encoder *new_encoder)
+{
+	struct drm_connector *connector;
+	struct drm_connector_state *conn_state;
+	int i;
+
+	for_each_connector_in_state(state, connector, conn_state, i) {
+		if (conn_state->best_encoder != new_encoder)
+			continue;
+
+		/* encoder already assigned and we're trying to re-steal it! */
+		if (connector->state->best_encoder != conn_state->best_encoder)
+			return false;
+	}
+
+	return true;
+}
+
 static struct drm_crtc *
 get_current_crtc_for_encoder(struct drm_device *dev,
 			     struct drm_encoder *encoder)
@@ -108,6 +134,7 @@ steal_encoder(struct drm_atomic_state *state,
 	struct drm_crtc_state *crtc_state;
 	struct drm_connector *connector;
 	struct drm_connector_state *connector_state;
+	int ret;

 	/*
 	 * We can only steal an encoder coming from a connector, which means we
@@ -115,9 +142,9 @@ steal_encoder(struct drm_atomic_state *state,
 	 */
 	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

-	DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n",
+	DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
 			 encoder->base.id, encoder->name,
-			 encoder_crtc->base.id);
+			 encoder_crtc->base.id, encoder_crtc->name);

 	crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
 	if (IS_ERR(crtc_state))
@@ -138,6 +165,9 @@ steal_encoder(struct drm_atomic_state *state,
 		if (IS_ERR(connector_state))
 			return PTR_ERR(connector_state);

+		ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
+		if (ret)
+			return ret;
 		connector_state->best_encoder = NULL;
 	}
@@ -215,16 +245,24 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
 	}

 	if (new_encoder == connector_state->best_encoder) {
-		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
+		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
 				 connector->base.id,
 				 connector->name,
 				 new_encoder->base.id,
 				 new_encoder->name,
-				 connector_state->crtc->base.id);
+				 connector_state->crtc->base.id,
+				 connector_state->crtc->name);

 		return 0;
 	}

+	if (!check_pending_encoder_assignment(state, new_encoder)) {
+		DRM_DEBUG_ATOMIC("Encoder for [CONNECTOR:%d:%s] already assigned\n",
+				 connector->base.id,
+				 connector->name);
+		return -EINVAL;
+	}
+
 	encoder_crtc = get_current_crtc_for_encoder(state->dev,
 						    new_encoder);
@@ -247,12 +285,13 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
 	crtc_state = state->crtc_states[idx];
 	crtc_state->connectors_changed = true;

-	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
+	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
 			 connector->base.id,
 			 connector->name,
 			 new_encoder->base.id,
 			 new_encoder->name,
-			 connector_state->crtc->base.id);
+			 connector_state->crtc->base.id,
+			 connector_state->crtc->name);

 	return 0;
 }
@@ -265,7 +304,7 @@ mode_fixup(struct drm_atomic_state *state)
 	struct drm_connector *connector;
 	struct drm_connector_state *conn_state;
 	int i;
-	int ret;
+	bool ret;

 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
 		if (!crtc_state->mode_changed &&
@@ -336,8 +375,8 @@ mode_fixup(struct drm_atomic_state *state)
 		ret = funcs->mode_fixup(crtc, &crtc_state->mode,
 					&crtc_state->adjusted_mode);
 		if (!ret) {
-			DRM_DEBUG_ATOMIC("[CRTC:%d] fixup failed\n",
-					 crtc->base.id);
+			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n",
+					 crtc->base.id, crtc->name);
 			return -EINVAL;
 		}
 	}
@@ -384,14 +423,14 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
 		if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) {
-			DRM_DEBUG_ATOMIC("[CRTC:%d] mode changed\n",
-					 crtc->base.id);
+			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
+					 crtc->base.id, crtc->name);
 			crtc_state->mode_changed = true;
 		}

 		if (crtc->state->enable != crtc_state->enable) {
-			DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n",
-					 crtc->base.id);
+			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n",
+					 crtc->base.id, crtc->name);

 			/*
 			 * For clarity this assignment is done here, but
@@ -424,7 +463,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 	 * crtc only changed its mode but has the same set of connectors.
 	 */
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		int num_connectors;
+		bool has_connectors =
+			!!crtc_state->connector_mask;

 		/*
 		 * We must set ->active_changed after walking connectors for
@@ -432,16 +472,16 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 		 * a full modeset because update_connector_routing force that.
 		 */
 		if (crtc->state->active != crtc_state->active) {
-			DRM_DEBUG_ATOMIC("[CRTC:%d] active changed\n",
-					 crtc->base.id);
+			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
+					 crtc->base.id, crtc->name);
 			crtc_state->active_changed = true;
 		}

 		if (!drm_atomic_crtc_needs_modeset(crtc_state))
 			continue;

-		DRM_DEBUG_ATOMIC("[CRTC:%d] needs all connectors, enable: %c, active: %c\n",
-				 crtc->base.id,
+		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
+				 crtc->base.id, crtc->name,
 				 crtc_state->enable ? 'y' : 'n',
 				 crtc_state->active ? 'y' : 'n');
@@ -453,12 +493,9 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 		if (ret != 0)
 			return ret;

-		num_connectors = drm_atomic_connectors_for_crtc(state,
-								crtc);
-
-		if (crtc_state->enable != !!num_connectors) {
-			DRM_DEBUG_ATOMIC("[CRTC:%d] enabled/connectors mismatch\n",
-					 crtc->base.id);
+		if (crtc_state->enable != has_connectors) {
+			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
+					 crtc->base.id, crtc->name);
 			return -EINVAL;
 		}
@@ -505,8 +542,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
 		ret = funcs->atomic_check(plane, plane_state);
 		if (ret) {
-			DRM_DEBUG_ATOMIC("[PLANE:%d] atomic driver check failed\n",
-					 plane->base.id);
+			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
+					 plane->base.id, plane->name);
 			return ret;
 		}
 	}
@@ -521,8 +558,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
 		ret = funcs->atomic_check(crtc, state->crtc_states[i]);
 		if (ret) {
-			DRM_DEBUG_ATOMIC("[CRTC:%d] atomic driver check failed\n",
-					 crtc->base.id);
+			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
+					 crtc->base.id, crtc->name);
 			return ret;
 		}
 	}
@@ -635,8 +672,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 		funcs = crtc->helper_private;

-		DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
-				 crtc->base.id);
+		DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n",
+				 crtc->base.id, crtc->name);

 		/* Right function depends upon target state. */
@@ -747,8 +784,8 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
 		funcs = crtc->helper_private;

 		if (crtc->state->enable && funcs->mode_set_nofb) {
-			DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n",
-					 crtc->base.id);
+			DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n",
+					 crtc->base.id, crtc->name);

 			funcs->mode_set_nofb(crtc);
 		}
@@ -847,8 +884,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 		funcs = crtc->helper_private;

 		if (crtc->state->enable) {
-			DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n",
-					 crtc->base.id);
+			DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
+					 crtc->base.id, crtc->name);

 			if (funcs->enable)
 				funcs->enable(crtc);
@@ -909,7 +946,21 @@ static void wait_for_fences(struct drm_device *dev,
 	}
 }

-static bool framebuffer_changed(struct drm_device *dev,
+/**
+ * drm_atomic_helper_framebuffer_changed - check if framebuffer has changed
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ * @crtc: DRM crtc
+ *
+ * Checks whether the framebuffer used for this CRTC changes as a result of
+ * the atomic update. This is useful for drivers which cannot use
+ * drm_atomic_helper_wait_for_vblanks() and need to reimplement its
+ * functionality.
+ *
+ * Returns:
+ * true if the framebuffer changed.
+ */
+bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
 				struct drm_atomic_state *old_state,
 				struct drm_crtc *crtc)
 {
@@ -928,6 +979,7 @@ static bool framebuffer_changed(struct drm_device *dev,
 	return false;
 }
+EXPORT_SYMBOL(drm_atomic_helper_framebuffer_changed);

 /**
  * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
@@ -962,7 +1014,8 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
 		if (old_state->legacy_cursor_update)
 			continue;

-		if (!framebuffer_changed(dev, old_state, crtc))
+		if (!drm_atomic_helper_framebuffer_changed(dev,
+							   old_state, crtc))
 			continue;

 		ret = drm_crtc_vblank_get(crtc);
@@ -1337,6 +1390,49 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);

+/**
+ * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
+ * @crtc: CRTC
+ * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
+ *
+ * Disables all planes associated with the given CRTC. This can be
+ * used for instance in the CRTC helper disable callback to disable
+ * all planes before shutting down the display pipeline.
+ *
+ * If the atomic-parameter is set the function calls the CRTC's
+ * atomic_begin hook before and atomic_flush hook after disabling the
+ * planes.
+ *
+ * It is a bug to call this function without having implemented the
+ * ->atomic_disable() plane hook.
+ */
+void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
+					      bool atomic)
+{
+	const struct drm_crtc_helper_funcs *crtc_funcs =
+		crtc->helper_private;
+	struct drm_plane *plane;
+
+	if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
+		crtc_funcs->atomic_begin(crtc, NULL);
+
+	drm_for_each_plane(plane, crtc->dev) {
+		const struct drm_plane_helper_funcs *plane_funcs =
+			plane->helper_private;
+
+		if (plane->state->crtc != crtc || !plane_funcs)
+			continue;
+
+		WARN_ON(!plane_funcs->atomic_disable);
+		if (plane_funcs->atomic_disable)
+			plane_funcs->atomic_disable(plane, NULL);
+	}
+
+	if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
+		crtc_funcs->atomic_flush(crtc, NULL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
+
 /**
  * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
  * @dev: DRM device
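The new drm_atomic_helper_disable_planes_on_crtc() is intended for a driver's CRTC disable path. A hedged sketch of a hypothetical ->disable hook using it (foo_crtc_disable and foo_power_off_pipe are made-up names):

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

static void foo_power_off_pipe(struct drm_crtc *crtc);	/* driver-specific, made up */

/* Shut off all planes first (atomic=true brackets the plane updates with
 * the CRTC's atomic_begin/atomic_flush hooks), then power down the pipe.
 * Assumes the driver implements the plane ->atomic_disable hook, as the
 * helper requires. */
static void foo_crtc_disable(struct drm_crtc *crtc)
{
	drm_atomic_helper_disable_planes_on_crtc(crtc, true);
	foo_power_off_pipe(crtc);
}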
@@ -1397,7 +1493,7 @@ void drm_atomic_helper_swap_state(struct drm_device *dev,
 {
 	int i;

-	for (i = 0; i < dev->mode_config.num_connector; i++) {
+	for (i = 0; i < state->num_connector; i++) {
 		struct drm_connector *connector = state->connectors[i];

 		if (!connector)
@@ -1481,12 +1577,12 @@ retry:
 	drm_atomic_set_fb_for_plane(plane_state, fb);
 	plane_state->crtc_x = crtc_x;
 	plane_state->crtc_y = crtc_y;
-	plane_state->crtc_h = crtc_h;
 	plane_state->crtc_w = crtc_w;
+	plane_state->crtc_h = crtc_h;
 	plane_state->src_x = src_x;
 	plane_state->src_y = src_y;
-	plane_state->src_h = src_h;
 	plane_state->src_w = src_w;
+	plane_state->src_h = src_h;

 	if (plane == crtc->cursor)
 		state->legacy_cursor_update = true;
@@ -1605,12 +1701,12 @@ int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
 	drm_atomic_set_fb_for_plane(plane_state, NULL);
 	plane_state->crtc_x = 0;
 	plane_state->crtc_y = 0;
-	plane_state->crtc_h = 0;
 	plane_state->crtc_w = 0;
+	plane_state->crtc_h = 0;
 	plane_state->src_x = 0;
 	plane_state->src_y = 0;
-	plane_state->src_h = 0;
 	plane_state->src_w = 0;
+	plane_state->src_h = 0;

 	return 0;
 }
@@ -1672,7 +1768,7 @@ static int update_output_state(struct drm_atomic_state *state,
 		if (crtc == set->crtc)
 			continue;

-		if (!drm_atomic_connectors_for_crtc(state, crtc)) {
+		if (!crtc_state->connector_mask) {
 			ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
 								NULL);
 			if (ret < 0)
@@ -1793,16 +1889,16 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
 	drm_atomic_set_fb_for_plane(primary_state, set->fb);
 	primary_state->crtc_x = 0;
 	primary_state->crtc_y = 0;
-	primary_state->crtc_h = vdisplay;
 	primary_state->crtc_w = hdisplay;
+	primary_state->crtc_h = vdisplay;
 	primary_state->src_x = set->x << 16;
 	primary_state->src_y = set->y << 16;
 	if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
-		primary_state->src_h = hdisplay << 16;
 		primary_state->src_w = vdisplay << 16;
+		primary_state->src_h = hdisplay << 16;
 	} else {
-		primary_state->src_h = vdisplay << 16;
 		primary_state->src_w = hdisplay << 16;
+		primary_state->src_h = vdisplay << 16;
 	}

 commit:
@@ -1813,6 +1909,161 @@ commit:
 	return 0;
 }

+/**
+ * drm_atomic_helper_disable_all - disable all currently active outputs
+ * @dev: DRM device
+ * @ctx: lock acquisition context
+ *
+ * Loops through all connectors, finding those that aren't turned off and then
+ * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
+ * that they are connected to.
+ *
+ * This is used for example in suspend/resume to disable all currently active
+ * functions when suspending.
+ *
+ * Note that if callers haven't already acquired all modeset locks this might
+ * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
+ */
+int drm_atomic_helper_disable_all(struct drm_device *dev,
+				  struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_atomic_state *state;
+	struct drm_connector *conn;
+	int err;
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = ctx;
+
+	drm_for_each_connector(conn, dev) {
+		struct drm_crtc *crtc = conn->state->crtc;
+		struct drm_crtc_state *crtc_state;
+
+		if (!crtc || conn->dpms != DRM_MODE_DPMS_ON)
+			continue;
+
+		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+		if (IS_ERR(crtc_state)) {
+			err = PTR_ERR(crtc_state);
+			goto free;
+		}
+
+		crtc_state->active = false;
+	}
+
+	err = drm_atomic_commit(state);
+
+free:
+	if (err < 0)
+		drm_atomic_state_free(state);
+
+	return err;
+}
+EXPORT_SYMBOL(drm_atomic_helper_disable_all);
+
+/**
+ * drm_atomic_helper_suspend - subsystem-level suspend helper
+ * @dev: DRM device
+ *
+ * Duplicates the current atomic state, disables all active outputs and then
+ * returns a pointer to the original atomic state to the caller. Drivers can
+ * pass this pointer to the drm_atomic_helper_resume() helper upon resume to
+ * restore the output configuration that was active at the time the system
+ * entered suspend.
+ *
+ * Note that it is potentially unsafe to use this. The atomic state object
+ * returned by this function is assumed to be persistent. Drivers must ensure
+ * that this holds true. Before calling this function, drivers must make sure
+ * to suspend fbdev emulation so that nothing can be using the device.
+ *
+ * Returns:
+ * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
+ * encoded error code on failure. Drivers should store the returned atomic
+ * state object and pass it to the drm_atomic_helper_resume() helper upon
+ * resume.
+ *
+ * See also:
+ * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
+ * drm_atomic_helper_resume()
+ */
+struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
+{
+	struct drm_modeset_acquire_ctx ctx;
+	struct drm_atomic_state *state;
+	int err;
+
+	drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+	err = drm_modeset_lock_all_ctx(dev, &ctx);
+	if (err < 0) {
+		state = ERR_PTR(err);
+		goto unlock;
+	}
+
+	state = drm_atomic_helper_duplicate_state(dev, &ctx);
+	if (IS_ERR(state))
+		goto unlock;
+
+	err = drm_atomic_helper_disable_all(dev, &ctx);
+	if (err < 0) {
+		drm_atomic_state_free(state);
+		state = ERR_PTR(err);
+		goto unlock;
+	}
+
+unlock:
+	if (PTR_ERR(state) == -EDEADLK) {
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	}
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+	return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_suspend);
+
+/**
+ * drm_atomic_helper_resume - subsystem-level resume helper
+ * @dev: DRM device
+ * @state: atomic state to resume to
+ *
+ * Calls drm_mode_config_reset() to synchronize hardware and software states,
+ * grabs all modeset locks and commits the atomic state object. This can be
+ * used in conjunction with the drm_atomic_helper_suspend() helper to
+ * implement suspend/resume for drivers that support atomic mode-setting.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend()
+ */
+int drm_atomic_helper_resume(struct drm_device *dev,
+			     struct drm_atomic_state *state)
+{
+	struct drm_mode_config *config = &dev->mode_config;
+	int err;
+
+	drm_mode_config_reset(dev);
+	drm_modeset_lock_all(dev);
+	state->acquire_ctx = config->acquire_ctx;
+	err = drm_atomic_commit(state);
+	drm_modeset_unlock_all(dev);
+
+	return err;
+}
+EXPORT_SYMBOL(drm_atomic_helper_resume);
+
 /**
  * drm_atomic_helper_crtc_set_property - helper for crtc properties
  * @crtc: DRM crtc
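The kerneldoc above spells out the intended pairing of the two new helpers. A hedged sketch of a driver's PM path wiring them up (struct foo_drm_device, foo_pm_suspend/foo_pm_resume and the suspend_state field are made-up names; as the documentation warns, a real driver must also suspend fbdev emulation before saving state):

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>

/* Hypothetical driver-private wrapper holding the saved state. */
struct foo_drm_device {
	struct drm_device *drm;
	struct drm_atomic_state *suspend_state;
};

static int foo_pm_suspend(struct foo_drm_device *foo)
{
	struct drm_atomic_state *state;

	state = drm_atomic_helper_suspend(foo->drm);
	if (IS_ERR(state))
		return PTR_ERR(state);

	foo->suspend_state = state;	/* handed back to resume below */
	return 0;
}

static int foo_pm_resume(struct foo_drm_device *foo)
{
	return drm_atomic_helper_resume(foo->drm, foo->suspend_state);
}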
@@ -2047,6 +2298,15 @@ retry:
 		goto fail;
 	drm_atomic_set_fb_for_plane(plane_state, fb);

+	/* Make sure we don't accidentally do a full modeset. */
+	state->allow_modeset = false;
+	if (!crtc_state->active) {
+		DRM_DEBUG_ATOMIC("[CRTC:%d] disabled, rejecting legacy flip\n",
+				 crtc->base.id);
+		ret = -EINVAL;
+		goto fail;
+	}
+
 	ret = drm_atomic_async_commit(state);
 	if (ret != 0)
 		goto fail;
@@ -2169,6 +2429,12 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
 * The simpler solution is to just reset the software state to everything off,
 * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
 * the atomic helpers provide default reset implementations for all hooks.
+ *
+ * On the upside the precise state tracking of atomic simplifies system suspend
+ * and resume a lot. For drivers using drm_mode_config_reset() a complete recipe
+ * is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume().
+ * For other drivers the building blocks are split out, see the documentation
+ * for these functions.
 */

 /**
@@ -2180,7 +2446,7 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
 */
 void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
 {
-	if (crtc->state && crtc->state->mode_blob)
+	if (crtc->state)
 		drm_property_unreference_blob(crtc->state->mode_blob);
 	kfree(crtc->state);
 	crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
@@ -2248,7 +2514,6 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
 void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
 					    struct drm_crtc_state *state)
 {
-	if (state->mode_blob)
-		drm_property_unreference_blob(state->mode_blob);
+	drm_property_unreference_blob(state->mode_blob);
 }
 EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
@@ -2363,6 +2628,28 @@ void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
 }
 EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);

+/**
+ * __drm_atomic_helper_connector_reset - reset state on connector
+ * @connector: drm connector
+ * @conn_state: connector state to assign
+ *
+ * Initializes the newly allocated @conn_state and assigns it to
+ * #connector ->state, usually required when initializing the drivers
+ * or when called from the ->reset hook.
+ *
+ * This is useful for drivers that subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_reset(struct drm_connector *connector,
+				    struct drm_connector_state *conn_state)
+{
+	if (conn_state)
+		conn_state->connector = connector;
+
+	connector->state = conn_state;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_reset);
+
 /**
  * drm_atomic_helper_connector_reset - default ->reset hook for connectors
  * @connector: drm connector
@@ -2373,11 +2660,11 @@ EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
 */
 void drm_atomic_helper_connector_reset(struct drm_connector *connector)
 {
-	kfree(connector->state);
-	connector->state = kzalloc(sizeof(*connector->state), GFP_KERNEL);
-
-	if (connector->state)
-		connector->state->connector = connector;
+	struct drm_connector_state *conn_state =
+		kzalloc(sizeof(*conn_state), GFP_KERNEL);
+
+	kfree(connector->state);
+	__drm_atomic_helper_connector_reset(connector, conn_state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
@@ -2426,7 +2713,9 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
 * @ctx: lock acquisition context
 *
 * Makes a copy of the current atomic state by looping over all objects and
- * duplicating their respective states.
+ * duplicating their respective states. This is used for example by suspend/
+ * resume support code to save the state prior to suspend such that it can
+ * be restored upon resume.
 *
 * Note that this treats atomic state as persistent between save and restore.
 * Drivers must make sure that this is possible and won't result in confusion
@@ -2438,6 +2727,9 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
 * Returns:
 * A pointer to the copy of the atomic state object on success or an
 * ERR_PTR()-encoded error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
 */
 struct drm_atomic_state *
 drm_atomic_helper_duplicate_state(struct drm_device *dev,

drm_bridge.c

@@ -31,11 +31,11 @@
 /**
  * DOC: overview
  *
- * drm_bridge represents a device that hangs on to an encoder. These are handy
- * when a regular drm_encoder entity isn't enough to represent the entire
+ * struct &drm_bridge represents a device that hangs on to an encoder. These are
+ * handy when a regular &drm_encoder entity isn't enough to represent the entire
  * encoder chain.
  *
- * A bridge is always associated to a single drm_encoder at a time, but can be
+ * A bridge is always attached to a single &drm_encoder at a time, but can be
  * either connected to it directly, or through an intermediate bridge:
  *
  *	encoder ---> bridge B ---> bridge A
@@ -46,11 +46,16 @@
  * The driver using the bridge is responsible to make the associations between
  * the encoder and bridges. Once these links are made, the bridges will
  * participate along with encoder functions to perform mode_set/enable/disable
- * through the ops provided in drm_bridge_funcs.
+ * through the ops provided in &drm_bridge_funcs.
  *
- * drm_bridge, like drm_panel, aren't drm_mode_object entities like planes,
- * crtcs, encoders or connectors. They just provide additional hooks to get the
- * desired output at the end of the encoder chain.
+ * drm_bridge, like drm_panel, aren't drm_mode_object entities like planes,
+ * CRTCs, encoders or connectors and hence are not visible to userspace. They
+ * just provide additional hooks to get the desired output at the end of the
+ * encoder chain.
+ *
+ * Bridges can also be chained up using the next pointer in struct &drm_bridge.
+ *
+ * Both legacy CRTC helpers and the new atomic modeset helpers support bridges.
  */

 static DEFINE_MUTEX(bridge_lock);
@@ -122,34 +127,12 @@ EXPORT_SYMBOL(drm_bridge_attach);
 /**
  * DOC: bridge callbacks
  *
- * The drm_bridge_funcs ops are populated by the bridge driver. The drm
- * internals(atomic and crtc helpers) use the helpers defined in drm_bridge.c
- * These helpers call a specific drm_bridge_funcs op for all the bridges
+ * The &drm_bridge_funcs ops are populated by the bridge driver. The DRM
+ * internals (atomic and CRTC helpers) use the helpers defined in drm_bridge.c
+ * These helpers call a specific &drm_bridge_funcs op for all the bridges
  * during encoder configuration.
  *
- * When creating a bridge driver, one can implement drm_bridge_funcs op with
- * the help of these rough rules:
- *
- * pre_enable: this contains things needed to be done for the bridge before
- * its clock and timings are enabled by its source. For a bridge, its source
- * is generally the encoder or bridge just before it in the encoder chain.
- *
- * enable: this contains things needed to be done for the bridge once its
- * source is enabled. In other words, enable is called once the source is
- * ready with clock and timing needed by the bridge.
- *
- * disable: this contains things needed to be done for the bridge assuming
- * that its source is still enabled, i.e. clock and timings are still on.
- *
- * post_disable: this contains things needed to be done for the bridge once
- * its source is disabled, i.e. once clocks and timings are off.
- *
- * mode_fixup: this should fixup the given mode for the bridge. It is called
- * after the encoder's mode fixup. mode_fixup can also reject a mode completely
- * if it's unsuitable for the hardware.
- *
- * mode_set: this sets up the mode for the bridge. It assumes that its source
- * (an encoder or a bridge) has set the mode too.
+ * For detailed specification of the bridge callbacks see &drm_bridge_funcs.
  */

 /**
@@ -159,7 +142,7 @@ EXPORT_SYMBOL(drm_bridge_attach);
  * @mode: desired mode to be set for the bridge
  * @adjusted_mode: updated mode that works for this bridge
  *
- * Calls 'mode_fixup' drm_bridge_funcs op for all the bridges in the
+ * Calls ->mode_fixup() &drm_bridge_funcs op for all the bridges in the
  * encoder chain, starting from the first bridge to the last.
  *
  * Note: the bridge passed should be the one closest to the encoder
@@ -186,11 +169,11 @@ bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
 EXPORT_SYMBOL(drm_bridge_mode_fixup);

 /**
- * drm_bridge_disable - calls 'disable' drm_bridge_funcs op for all
+ * drm_bridge_disable - calls ->disable() &drm_bridge_funcs op for all
  * bridges in the encoder chain.
  * @bridge: bridge control structure
  *
- * Calls 'disable' drm_bridge_funcs op for all the bridges in the encoder
+ * Calls ->disable() &drm_bridge_funcs op for all the bridges in the encoder
  * chain, starting from the last bridge to the first. These are called before
  * calling the encoder's prepare op.
  *
@@ -208,11 +191,11 @@ void drm_bridge_disable(struct drm_bridge *bridge)
 EXPORT_SYMBOL(drm_bridge_disable);

 /**
- * drm_bridge_post_disable - calls 'post_disable' drm_bridge_funcs op for
+ * drm_bridge_post_disable - calls ->post_disable() &drm_bridge_funcs op for
  * all bridges in the encoder chain.
  * @bridge: bridge control structure
  *
- * Calls 'post_disable' drm_bridge_funcs op for all the bridges in the
+ * Calls ->post_disable() &drm_bridge_funcs op for all the bridges in the
  * encoder chain, starting from the first bridge to the last. These are called
  * after completing the encoder's prepare op.
  *
@@ -236,7 +219,7 @@ EXPORT_SYMBOL(drm_bridge_post_disable);
  * @mode: desired mode to be set for the bridge
  * @adjusted_mode: updated mode that works for this bridge
  *
- * Calls 'mode_set' drm_bridge_funcs op for all the bridges in the
+ * Calls ->mode_set() &drm_bridge_funcs op for all the bridges in the
  * encoder chain, starting from the first bridge to the last.
  *
  * Note: the bridge passed should be the one closest to the encoder
@@ -256,11 +239,11 @@ void drm_bridge_mode_set(struct drm_bridge *bridge,
 EXPORT_SYMBOL(drm_bridge_mode_set);

 /**
- * drm_bridge_pre_enable - calls 'pre_enable' drm_bridge_funcs op for all
+ * drm_bridge_pre_enable - calls ->pre_enable() &drm_bridge_funcs op for all
  * bridges in the encoder chain.
  * @bridge: bridge control structure
  *
- * Calls 'pre_enable' drm_bridge_funcs op for all the bridges in the encoder
+ * Calls ->pre_enable() &drm_bridge_funcs op for all the bridges in the encoder
  * chain, starting from the last bridge to the first. These are called
  * before calling the encoder's commit op.
  *
@@ -278,11 +261,11 @@ void drm_bridge_pre_enable(struct drm_bridge *bridge)
 EXPORT_SYMBOL(drm_bridge_pre_enable);

 /**
- * drm_bridge_enable - calls 'enable' drm_bridge_funcs op for all bridges
+ * drm_bridge_enable - calls ->enable() &drm_bridge_funcs op for all bridges
  * in the encoder chain.
  * @bridge: bridge control structure
  *
- * Calls 'enable' drm_bridge_funcs op for all the bridges in the encoder
+ * Calls ->enable() &drm_bridge_funcs op for all the bridges in the encoder
  * chain, starting from the first bridge to the last. These are called
  * after completing the encoder's commit op.
  *

drm_crtc.c

@ -45,7 +45,7 @@
static struct drm_framebuffer * static struct drm_framebuffer *
internal_framebuffer_create(struct drm_device *dev, internal_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *r, const struct drm_mode_fb_cmd2 *r,
struct drm_file *file_priv); struct drm_file *file_priv);
/* Avoid boilerplate. I'm tired of typing. */ /* Avoid boilerplate. I'm tired of typing. */
@ -649,6 +649,18 @@ EXPORT_SYMBOL(drm_framebuffer_remove);
DEFINE_WW_CLASS(crtc_ww_class); DEFINE_WW_CLASS(crtc_ww_class);
static unsigned int drm_num_crtcs(struct drm_device *dev)
{
unsigned int num = 0;
struct drm_crtc *tmp;
drm_for_each_crtc(tmp, dev) {
num++;
}
return num;
}
/** /**
* drm_crtc_init_with_planes - Initialise a new CRTC object with * drm_crtc_init_with_planes - Initialise a new CRTC object with
* specified primary and cursor planes. * specified primary and cursor planes.
@ -657,6 +669,7 @@ DEFINE_WW_CLASS(crtc_ww_class);
* @primary: Primary plane for CRTC * @primary: Primary plane for CRTC
* @cursor: Cursor plane for CRTC * @cursor: Cursor plane for CRTC
* @funcs: callbacks for the new CRTC * @funcs: callbacks for the new CRTC
* @name: printf style format string for the CRTC name, or NULL for default name
* *
* Inits a new object created as base part of a driver crtc object. * Inits a new object created as base part of a driver crtc object.
* *
@ -666,7 +679,8 @@ DEFINE_WW_CLASS(crtc_ww_class);
int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary, struct drm_plane *primary,
struct drm_plane *cursor, struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs) const struct drm_crtc_funcs *funcs,
const char *name, ...)
{ {
struct drm_mode_config *config = &dev->mode_config; struct drm_mode_config *config = &dev->mode_config;
int ret; int ret;
@ -682,6 +696,21 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
if (ret) if (ret)
return ret; return ret;
if (name) {
va_list ap;
va_start(ap, name);
crtc->name = kvasprintf(GFP_KERNEL, name, ap);
va_end(ap);
} else {
crtc->name = kasprintf(GFP_KERNEL, "crtc-%d",
drm_num_crtcs(dev));
}
if (!crtc->name) {
drm_mode_object_put(dev, &crtc->base);
return -ENOMEM;
}
crtc->base.properties = &crtc->properties; crtc->base.properties = &crtc->properties;
list_add_tail(&crtc->head, &config->crtc_list); list_add_tail(&crtc->head, &config->crtc_list);
@ -728,6 +757,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
if (crtc->state && crtc->funcs->atomic_destroy_state) if (crtc->state && crtc->funcs->atomic_destroy_state)
crtc->funcs->atomic_destroy_state(crtc, crtc->state); crtc->funcs->atomic_destroy_state(crtc, crtc->state);
kfree(crtc->name);
memset(crtc, 0, sizeof(*crtc)); memset(crtc, 0, sizeof(*crtc));
} }
EXPORT_SYMBOL(drm_crtc_cleanup); EXPORT_SYMBOL(drm_crtc_cleanup);
@ -887,12 +918,19 @@ int drm_connector_init(struct drm_device *dev,
connector->base.properties = &connector->properties; connector->base.properties = &connector->properties;
connector->dev = dev; connector->dev = dev;
connector->funcs = funcs; connector->funcs = funcs;
connector->connector_id = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
if (connector->connector_id < 0) {
ret = connector->connector_id;
goto out_put;
}
connector->connector_type = connector_type; connector->connector_type = connector_type;
connector->connector_type_id = connector->connector_type_id =
ida_simple_get(connector_ida, 1, 0, GFP_KERNEL); ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
if (connector->connector_type_id < 0) { if (connector->connector_type_id < 0) {
ret = connector->connector_type_id; ret = connector->connector_type_id;
goto out_put; goto out_put_id;
} }
connector->name = connector->name =
kasprintf(GFP_KERNEL, "%s-%d", kasprintf(GFP_KERNEL, "%s-%d",
@ -900,7 +938,7 @@ int drm_connector_init(struct drm_device *dev,
connector->connector_type_id); connector->connector_type_id);
if (!connector->name) { if (!connector->name) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_put; goto out_put_type_id;
} }
INIT_LIST_HEAD(&connector->probed_modes); INIT_LIST_HEAD(&connector->probed_modes);
@ -928,7 +966,12 @@ int drm_connector_init(struct drm_device *dev,
} }
connector->debugfs_entry = NULL; connector->debugfs_entry = NULL;
out_put_type_id:
if (ret)
ida_remove(connector_ida, connector->connector_type_id);
out_put_id:
if (ret)
ida_remove(&config->connector_ida, connector->connector_id);
out_put: out_put:
if (ret) if (ret)
drm_mode_object_put(dev, &connector->base); drm_mode_object_put(dev, &connector->base);
@ -965,6 +1008,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
ida_remove(&drm_connector_enum_list[connector->connector_type].ida, ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
connector->connector_type_id); connector->connector_type_id);
ida_remove(&dev->mode_config.connector_ida,
connector->connector_id);
kfree(connector->display_info.bus_formats); kfree(connector->display_info.bus_formats);
drm_mode_object_put(dev, &connector->base); drm_mode_object_put(dev, &connector->base);
kfree(connector->name); kfree(connector->name);
@ -981,32 +1027,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
} }
EXPORT_SYMBOL(drm_connector_cleanup); EXPORT_SYMBOL(drm_connector_cleanup);
/**
* drm_connector_index - find the index of a registered connector
* @connector: connector to find index for
*
* Given a registered connector, return the index of that connector within a DRM
* device's list of connectors.
*/
unsigned int drm_connector_index(struct drm_connector *connector)
{
unsigned int index = 0;
struct drm_connector *tmp;
struct drm_mode_config *config = &connector->dev->mode_config;
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
drm_for_each_connector(tmp, connector->dev) {
if (tmp == connector)
return index;
index++;
}
BUG();
}
EXPORT_SYMBOL(drm_connector_index);
/** /**
* drm_connector_register - register a connector * drm_connector_register - register a connector
* @connector: the connector to register * @connector: the connector to register
@ -1075,6 +1095,7 @@ EXPORT_SYMBOL(drm_connector_unplug_all);
* @encoder: the encoder to init * @encoder: the encoder to init
* @funcs: callbacks for this encoder * @funcs: callbacks for this encoder
* @encoder_type: user visible type of the encoder * @encoder_type: user visible type of the encoder
* @name: printf style format string for the encoder name, or NULL for default name
* *
* Initialises a preallocated encoder. Encoder should be * Initialises a preallocated encoder. Encoder should be
* subclassed as part of driver encoder objects. * subclassed as part of driver encoder objects.
@ -1085,7 +1106,7 @@ EXPORT_SYMBOL(drm_connector_unplug_all);
int drm_encoder_init(struct drm_device *dev, int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder, struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs, const struct drm_encoder_funcs *funcs,
int encoder_type) int encoder_type, const char *name, ...)
{ {
int ret; int ret;
@ -1098,9 +1119,17 @@ int drm_encoder_init(struct drm_device *dev,
encoder->dev = dev; encoder->dev = dev;
encoder->encoder_type = encoder_type; encoder->encoder_type = encoder_type;
encoder->funcs = funcs; encoder->funcs = funcs;
if (name) {
va_list ap;
va_start(ap, name);
encoder->name = kvasprintf(GFP_KERNEL, name, ap);
va_end(ap);
} else {
encoder->name = kasprintf(GFP_KERNEL, "%s-%d", encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
drm_encoder_enum_list[encoder_type].name, drm_encoder_enum_list[encoder_type].name,
encoder->base.id); encoder->base.id);
}
if (!encoder->name) { if (!encoder->name) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_put; goto out_put;
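Callers that don't care keep passing NULL and get the old "<type>-<id>" default; a sketch (funcs table and encoder type are illustrative):

	/* Hypothetical: default encoder name, e.g. "TMDS-42". */
	ret = drm_encoder_init(dev, &my_encoder->base, &my_encoder_funcs,
			       DRM_MODE_ENCODER_TMDS, NULL);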
@ -1141,6 +1170,18 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
} }
EXPORT_SYMBOL(drm_encoder_cleanup); EXPORT_SYMBOL(drm_encoder_cleanup);
static unsigned int drm_num_planes(struct drm_device *dev)
{
unsigned int num = 0;
struct drm_plane *tmp;
drm_for_each_plane(tmp, dev) {
num++;
}
return num;
}
/** /**
* drm_universal_plane_init - Initialize a new universal plane object * drm_universal_plane_init - Initialize a new universal plane object
* @dev: DRM device * @dev: DRM device
@ -1150,6 +1191,7 @@ EXPORT_SYMBOL(drm_encoder_cleanup);
* @formats: array of supported formats (%DRM_FORMAT_*) * @formats: array of supported formats (%DRM_FORMAT_*)
* @format_count: number of elements in @formats * @format_count: number of elements in @formats
* @type: type of plane (overlay, primary, cursor) * @type: type of plane (overlay, primary, cursor)
* @name: printf style format string for the plane name, or NULL for default name
* *
* Initializes a plane object of type @type. * Initializes a plane object of type @type.
* *
@ -1160,7 +1202,8 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs, const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count, const uint32_t *formats, unsigned int format_count,
enum drm_plane_type type) enum drm_plane_type type,
const char *name, ...)
{ {
struct drm_mode_config *config = &dev->mode_config; struct drm_mode_config *config = &dev->mode_config;
int ret; int ret;
@ -1182,6 +1225,22 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
return -ENOMEM; return -ENOMEM;
} }
if (name) {
va_list ap;
va_start(ap, name);
plane->name = kvasprintf(GFP_KERNEL, name, ap);
va_end(ap);
} else {
plane->name = kasprintf(GFP_KERNEL, "plane-%d",
drm_num_planes(dev));
}
if (!plane->name) {
kfree(plane->format_types);
drm_mode_object_put(dev, &plane->base);
return -ENOMEM;
}
memcpy(plane->format_types, formats, format_count * sizeof(uint32_t)); memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
plane->format_count = format_count; plane->format_count = format_count;
plane->possible_crtcs = possible_crtcs; plane->possible_crtcs = possible_crtcs;
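Same pattern for planes; a hypothetical caller naming planes after their pipe (the format array, index and pipe_name() are placeholders):

	ret = drm_universal_plane_init(dev, &my_plane->base, possible_crtcs,
				       &my_plane_funcs,
				       my_formats, ARRAY_SIZE(my_formats),
				       DRM_PLANE_TYPE_OVERLAY,
				       "plane %d%c", idx, pipe_name(pipe));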
@ -1240,7 +1299,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
return drm_universal_plane_init(dev, plane, possible_crtcs, funcs, return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
formats, format_count, type); formats, format_count, type, NULL);
} }
EXPORT_SYMBOL(drm_plane_init); EXPORT_SYMBOL(drm_plane_init);
@ -1272,6 +1331,8 @@ void drm_plane_cleanup(struct drm_plane *plane)
if (plane->state && plane->funcs->atomic_destroy_state) if (plane->state && plane->funcs->atomic_destroy_state)
plane->funcs->atomic_destroy_state(plane, plane->state); plane->funcs->atomic_destroy_state(plane, plane->state);
kfree(plane->name);
memset(plane, 0, sizeof(*plane)); memset(plane, 0, sizeof(*plane));
} }
EXPORT_SYMBOL(drm_plane_cleanup); EXPORT_SYMBOL(drm_plane_cleanup);
@ -1802,7 +1863,8 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
copied = 0; copied = 0;
crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr; crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
drm_for_each_crtc(crtc, dev) { drm_for_each_crtc(crtc, dev) {
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); DRM_DEBUG_KMS("[CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
if (put_user(crtc->base.id, crtc_id + copied)) { if (put_user(crtc->base.id, crtc_id + copied)) {
ret = -EFAULT; ret = -EFAULT;
goto out; goto out;
@ -2649,7 +2711,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
ret = -ENOENT; ret = -ENOENT;
goto out; goto out;
} }
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
if (crtc_req->mode_valid) { if (crtc_req->mode_valid) {
/* If we have a mode we need a framebuffer. */ /* If we have a mode we need a framebuffer. */
@ -2685,6 +2747,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
goto out; goto out;
} }
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
/* /*
* Check whether the primary plane supports the fb pixel format. * Check whether the primary plane supports the fb pixel format.
* Drivers not implementing the universal planes API use a * Drivers not implementing the universal planes API use a
@ -3144,7 +3208,7 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
static struct drm_framebuffer * static struct drm_framebuffer *
internal_framebuffer_create(struct drm_device *dev, internal_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *r, const struct drm_mode_fb_cmd2 *r,
struct drm_file *file_priv) struct drm_file *file_priv)
{ {
struct drm_mode_config *config = &dev->mode_config; struct drm_mode_config *config = &dev->mode_config;
@ -3418,6 +3482,7 @@ out_err1:
return ret; return ret;
} }
/** /**
* drm_fb_release - remove and free the FBs on this file * drm_fb_release - remove and free the FBs on this file
* @priv: drm file for the ioctl * @priv: drm file for the ioctl
@ -4565,8 +4630,6 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
/* Do DPMS ourselves */ /* Do DPMS ourselves */
if (property == connector->dev->mode_config.dpms_property) { if (property == connector->dev->mode_config.dpms_property) {
ret = 0;
if (connector->funcs->dpms)
ret = (*connector->funcs->dpms)(connector, (int)value); ret = (*connector->funcs->dpms)(connector, (int)value);
} else if (connector->funcs->set_property) } else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, value); ret = connector->funcs->set_property(connector, property, value);
@ -4765,6 +4828,20 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector,
{ {
int i; int i;
/*
* In the past, drivers have attempted to model the static association
* of connector to encoder in simple connector/encoder devices using a
* direct assignment of connector->encoder = encoder. This connection
* is a logical one and the responsibility of the core, so drivers are
* expected not to mess with this.
*
	 * Note that the error return should've been enough here, but a large
	 * majority of drivers ignore the return value, so add a big WARN
* to get people's attention.
*/
if (WARN_ON(connector->encoder))
return -EINVAL;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0) { if (connector->encoder_ids[i] == 0) {
connector->encoder_ids[i] = encoder->base.id; connector->encoder_ids[i] = encoder->base.id;
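In other words, the only supported way to wire the link is through the core; right vs. wrong under the new WARN, as a sketch:

	/* Correct: let the core manage the connector/encoder association. */
	ret = drm_mode_connector_attach_encoder(connector, encoder);

	/* Wrong, now caught by the WARN_ON above (illustrative): */
	/* connector->encoder = encoder; */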
@ -5245,6 +5322,7 @@ void drm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.plane_list); INIT_LIST_HEAD(&dev->mode_config.plane_list);
idr_init(&dev->mode_config.crtc_idr); idr_init(&dev->mode_config.crtc_idr);
idr_init(&dev->mode_config.tile_idr); idr_init(&dev->mode_config.tile_idr);
ida_init(&dev->mode_config.connector_ida);
drm_modeset_lock_all(dev); drm_modeset_lock_all(dev);
drm_mode_create_standard_properties(dev); drm_mode_create_standard_properties(dev);
@ -5325,6 +5403,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
crtc->funcs->destroy(crtc); crtc->funcs->destroy(crtc);
} }
ida_destroy(&dev->mode_config.connector_ida);
idr_destroy(&dev->mode_config.tile_idr); idr_destroy(&dev->mode_config.tile_idr);
idr_destroy(&dev->mode_config.crtc_idr); idr_destroy(&dev->mode_config.crtc_idr);
drm_modeset_lock_fini(&dev->mode_config.connection_mutex); drm_modeset_lock_fini(&dev->mode_config.connection_mutex);


@ -51,6 +51,11 @@
* the same callbacks which drivers can use to e.g. restore the modeset * the same callbacks which drivers can use to e.g. restore the modeset
* configuration on resume with drm_helper_resume_force_mode(). * configuration on resume with drm_helper_resume_force_mode().
* *
* Note that this helper library doesn't track the current power state of CRTCs
* and encoders. It can call callbacks like ->dpms() even though the hardware is
* already in the desired state. This deficiency has been fixed in the atomic
* helpers.
*
* The driver callbacks are mostly compatible with the atomic modeset helpers, * The driver callbacks are mostly compatible with the atomic modeset helpers,
* except for the handling of the primary plane: Atomic helpers require that the * except for the handling of the primary plane: Atomic helpers require that the
* primary plane is implemented as a real standalone plane and not directly tied * primary plane is implemented as a real standalone plane and not directly tied
@ -62,6 +67,11 @@
* converting to the plane helpers). New drivers must not use these functions * converting to the plane helpers). New drivers must not use these functions
* but need to implement the atomic interface instead, potentially using the * but need to implement the atomic interface instead, potentially using the
* atomic helpers for that. * atomic helpers for that.
*
* These legacy modeset helpers use the same function table structures as
* all other modesetting helpers. See the documentation for struct
* &drm_crtc_helper_funcs, struct &drm_encoder_helper_funcs and struct
* &drm_connector_helper_funcs.
*/ */
MODULE_AUTHOR("David Airlie, Jesse Barnes"); MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper"); MODULE_DESCRIPTION("DRM KMS helper");
@ -206,8 +216,8 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
* @dev: DRM device * @dev: DRM device
* *
* This function walks through the entire mode setting configuration of @dev. It * This function walks through the entire mode setting configuration of @dev. It
* will remove any crtc links of unused encoders and encoder links of * will remove any CRTC links of unused encoders and encoder links of
* disconnected connectors. Then it will disable all unused encoders and crtcs * disconnected connectors. Then it will disable all unused encoders and CRTCs
* either by calling their disable callback if available or by calling their * either by calling their disable callback if available or by calling their
* dpms callback with DRM_MODE_DPMS_OFF. * dpms callback with DRM_MODE_DPMS_OFF.
*/ */
@ -329,7 +339,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
DRM_DEBUG_KMS("CRTC fixup failed\n"); DRM_DEBUG_KMS("CRTC fixup failed\n");
goto done; goto done;
} }
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
crtc->hwmode = *adjusted_mode; crtc->hwmode = *adjusted_mode;
@ -445,11 +455,36 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
* drm_crtc_helper_set_config - set a new config from userspace * drm_crtc_helper_set_config - set a new config from userspace
* @set: mode set configuration * @set: mode set configuration
* *
* Setup a new configuration, provided by the upper layers (either an ioctl call * The drm_crtc_helper_set_config() helper function implements the set_config
* from userspace or internally e.g. from the fbdev support code) in @set, and * callback of struct &drm_crtc_funcs for drivers using the legacy CRTC helpers.
* enable it. This is the main helper functions for drivers that implement *
* kernel mode setting with the crtc helper functions and the assorted * It first tries to locate the best encoder for each connector by calling the
* ->prepare(), ->modeset() and ->commit() helper callbacks. * connector ->best_encoder() (struct &drm_connector_helper_funcs) helper
* operation.
*
* After locating the appropriate encoders, the helper function will call the
* mode_fixup encoder and CRTC helper operations to adjust the requested mode,
* or reject it completely in which case an error will be returned to the
* application. If the new configuration after mode adjustment is identical to
* the current configuration the helper function will return without performing
* any other operation.
*
* If the adjusted mode is identical to the current mode but changes to the
* frame buffer need to be applied, the drm_crtc_helper_set_config() function
* will call the CRTC ->mode_set_base() (struct &drm_crtc_helper_funcs) helper
* operation.
*
* If the adjusted mode differs from the current mode, or if the
* ->mode_set_base() helper operation is not provided, the helper function
* performs a full mode set sequence by calling the ->prepare(), ->mode_set()
* and ->commit() CRTC and encoder helper operations, in that order.
* Alternatively it can also use the dpms and disable helper operations. For
* details see struct &drm_crtc_helper_funcs and struct
* &drm_encoder_helper_funcs.
*
* This function is deprecated. New drivers must implement atomic modeset
* support, for which this function is unsuitable. Instead drivers should use
* drm_atomic_helper_set_config().
* *
* Returns: * Returns:
* Returns 0 on success, negative errno numbers on failure. * Returns 0 on success, negative errno numbers on failure.
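For legacy-helper drivers this function is normally just plugged into the CRTC funcs table; a minimal sketch:

	/* Hypothetical legacy-helper wiring. */
	static const struct drm_crtc_funcs my_crtc_funcs = {
		.set_config = drm_crtc_helper_set_config,
		.destroy    = drm_crtc_cleanup,
	};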
@ -484,11 +519,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
set->fb = NULL; set->fb = NULL;
if (set->fb) { if (set->fb) {
DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", DRM_DEBUG_KMS("[CRTC:%d:%s] [FB:%d] #connectors=%d (x y) (%i %i)\n",
set->crtc->base.id, set->fb->base.id, set->crtc->base.id, set->crtc->name,
set->fb->base.id,
(int)set->num_connectors, set->x, set->y); (int)set->num_connectors, set->x, set->y);
} else { } else {
DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); DRM_DEBUG_KMS("[CRTC:%d:%s] [NOFB]\n",
set->crtc->base.id, set->crtc->name);
drm_crtc_helper_disable(set->crtc); drm_crtc_helper_disable(set->crtc);
return 0; return 0;
} }
@ -628,9 +665,9 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
connector->encoder->crtc = new_crtc; connector->encoder->crtc = new_crtc;
} }
if (new_crtc) { if (new_crtc) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d:%s]\n",
connector->base.id, connector->name, connector->base.id, connector->name,
new_crtc->base.id); new_crtc->base.id, new_crtc->name);
} else { } else {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
connector->base.id, connector->name); connector->base.id, connector->name);
@ -650,8 +687,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
if (!drm_crtc_helper_set_mode(set->crtc, set->mode, if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y, set->x, set->y,
save_set.fb)) { save_set.fb)) {
DRM_ERROR("failed to set mode on [CRTC:%d]\n", DRM_ERROR("failed to set mode on [CRTC:%d:%s]\n",
set->crtc->base.id); set->crtc->base.id, set->crtc->name);
set->crtc->primary->fb = save_set.fb; set->crtc->primary->fb = save_set.fb;
ret = -EINVAL; ret = -EINVAL;
goto fail; goto fail;
@ -758,10 +795,18 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
* @connector: affected connector * @connector: affected connector
* @mode: DPMS mode * @mode: DPMS mode
* *
* This is the main helper function provided by the crtc helper framework for * The drm_helper_connector_dpms() helper function implements the ->dpms()
* callback of struct &drm_connector_funcs for drivers using the legacy CRTC helpers.
*
* This is the main helper function provided by the CRTC helper framework for
* implementing the DPMS connector attribute. It computes the new desired DPMS * implementing the DPMS connector attribute. It computes the new desired DPMS
* state for all encoders and crtcs in the output mesh and calls the ->dpms() * state for all encoders and CRTCs in the output mesh and calls the ->dpms()
* callback provided by the driver appropriately. * callbacks provided by the driver in struct &drm_crtc_helper_funcs and struct
* &drm_encoder_helper_funcs appropriately.
*
* This function is deprecated. New drivers must implement atomic modeset
* support, for which this function is unsuitable. Instead drivers should use
* drm_atomic_helper_connector_dpms().
* *
* Returns: * Returns:
* Always returns 0. * Always returns 0.
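And the matching connector-side wiring, also a sketch:

	static const struct drm_connector_funcs my_connector_funcs = {
		.dpms    = drm_helper_connector_dpms,
		.destroy = drm_connector_cleanup,
		/* .detect, .fill_modes, ... as usual */
	};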
@ -818,7 +863,7 @@ EXPORT_SYMBOL(drm_helper_connector_dpms);
* metadata fields. * metadata fields.
*/ */
void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd2 *mode_cmd) const struct drm_mode_fb_cmd2 *mode_cmd)
{ {
int i; int i;
@ -855,6 +900,12 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
* due to slight differences in allocating shared resources when the * due to slight differences in allocating shared resources when the
* configuration is restored in a different order than when userspace set it up) * configuration is restored in a different order than when userspace set it up)
* need to use their own restore logic. * need to use their own restore logic.
*
* This function is deprecated. New drivers should implement atomic mode-
* setting and use the atomic suspend/resume helpers.
*
* See also:
* drm_atomic_helper_suspend(), drm_atomic_helper_resume()
*/ */
void drm_helper_resume_force_mode(struct drm_device *dev) void drm_helper_resume_force_mode(struct drm_device *dev)
{ {
@ -913,9 +964,9 @@ EXPORT_SYMBOL(drm_helper_resume_force_mode);
* @old_fb: previous framebuffer * @old_fb: previous framebuffer
* *
 * This function implements a callback usable as the ->mode_set callback	 * This function implements a callback usable as the ->mode_set callback
* required by the crtc helpers. Besides the atomic plane helper functions for * required by the CRTC helpers. Besides the atomic plane helper functions for
* the primary plane the driver must also provide the ->mode_set_nofb callback * the primary plane the driver must also provide the ->mode_set_nofb callback
* to set up the crtc. * to set up the CRTC.
* *
* This is a transitional helper useful for converting drivers to the atomic * This is a transitional helper useful for converting drivers to the atomic
* interfaces. * interfaces.
@ -979,7 +1030,7 @@ EXPORT_SYMBOL(drm_helper_crtc_mode_set);
* @old_fb: previous framebuffer * @old_fb: previous framebuffer
* *
 * This function implements a callback usable as the ->mode_set_base callback	 * This function implements a callback usable as the ->mode_set_base callback
 * required by the crtc helpers. The driver must provide the atomic plane helper	 * required by the CRTC helpers. The driver must provide the atomic plane helper
* functions for the primary plane. * functions for the primary plane.
* *
* This is a transitional helper useful for converting drivers to the atomic * This is a transitional helper useful for converting drivers to the atomic


@ -21,10 +21,6 @@
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/errno.h> #include <linux/errno.h>
@ -674,7 +670,9 @@ static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int por
} }
static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num, static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
u8 vcpi, uint16_t pbn) u8 vcpi, uint16_t pbn,
u8 number_sdp_streams,
u8 *sdp_stream_sink)
{ {
struct drm_dp_sideband_msg_req_body req; struct drm_dp_sideband_msg_req_body req;
memset(&req, 0, sizeof(req)); memset(&req, 0, sizeof(req));
@ -682,6 +680,9 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n
req.u.allocate_payload.port_number = port_num; req.u.allocate_payload.port_number = port_num;
req.u.allocate_payload.vcpi = vcpi; req.u.allocate_payload.vcpi = vcpi;
req.u.allocate_payload.pbn = pbn; req.u.allocate_payload.pbn = pbn;
req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
number_sdp_streams);
drm_dp_encode_sideband_req(&req, msg); drm_dp_encode_sideband_req(&req, msg);
msg->path_msg = true; msg->path_msg = true;
return 0; return 0;
@ -916,7 +917,6 @@ static void drm_dp_destroy_port(struct kref *kref)
		/* no need to clean up vcpi	 		/* no need to clean up vcpi
		 * as if we have no connector we never set up a vcpi */	 		 * as if we have no connector we never set up a vcpi */
drm_dp_port_teardown_pdt(port, port->pdt); drm_dp_port_teardown_pdt(port, port->pdt);
port->pdt = DP_PEER_DEVICE_NONE;
} }
kfree(port); kfree(port);
} }
@ -1162,9 +1162,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
drm_dp_put_port(port); drm_dp_put_port(port);
goto out; goto out;
} }
if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->pdt == DP_PEER_DEVICE_SST_SINK) &&
port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
drm_mode_connector_set_tile_property(port->connector); drm_mode_connector_set_tile_property(port->connector);
} }
@ -1674,6 +1672,8 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg; struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb; struct drm_dp_mst_branch *mstb;
int len, ret, port_num; int len, ret, port_num;
u8 sinks[DRM_DP_MAX_SDP_STREAMS];
int i;
port = drm_dp_get_validated_port_ref(mgr, port); port = drm_dp_get_validated_port_ref(mgr, port);
if (!port) if (!port)
@ -1696,10 +1696,13 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
goto fail_put; goto fail_put;
} }
for (i = 0; i < port->num_sdp_streams; i++)
sinks[i] = i;
txmsg->dst = mstb; txmsg->dst = mstb;
len = build_allocate_payload(txmsg, port_num, len = build_allocate_payload(txmsg, port_num,
id, id,
pbn); pbn, port->num_sdp_streams, sinks);
drm_dp_queue_down_tx(mgr, txmsg); drm_dp_queue_down_tx(mgr, txmsg);
@ -1802,6 +1805,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
return -EINVAL; return -EINVAL;
} }
req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots; req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
} else { } else {
port = NULL; port = NULL;
req_payload.num_slots = 0; req_payload.num_slots = 0;
@ -1817,9 +1821,10 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
if (req_payload.num_slots) { if (req_payload.num_slots) {
drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload); drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
mgr->payloads[i].num_slots = req_payload.num_slots; mgr->payloads[i].num_slots = req_payload.num_slots;
mgr->payloads[i].vcpi = req_payload.vcpi;
} else if (mgr->payloads[i].num_slots) { } else if (mgr->payloads[i].num_slots) {
mgr->payloads[i].num_slots = 0; mgr->payloads[i].num_slots = 0;
drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]); drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
req_payload.payload_state = mgr->payloads[i].payload_state; req_payload.payload_state = mgr->payloads[i].payload_state;
mgr->payloads[i].start_slot = 0; mgr->payloads[i].start_slot = 0;
} }
@ -1955,7 +1960,7 @@ static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req
{ {
struct drm_dp_sideband_msg_reply_body reply; struct drm_dp_sideband_msg_reply_body reply;
reply.reply_type = 1; reply.reply_type = 0;
reply.req_type = req_type; reply.req_type = req_type;
drm_dp_encode_sideband_reply(&reply, msg); drm_dp_encode_sideband_reply(&reply, msg);
return 0; return 0;
@ -2410,6 +2415,27 @@ out:
} }
EXPORT_SYMBOL(drm_dp_mst_detect_port); EXPORT_SYMBOL(drm_dp_mst_detect_port);
/**
* drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
* @mgr: manager for this port
* @port: unverified pointer to a port.
*
* This returns whether the port supports audio or not.
*/
bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port)
{
bool ret = false;
port = drm_dp_get_validated_port_ref(mgr, port);
if (!port)
return ret;
ret = port->has_audio;
drm_dp_put_port(port);
return ret;
}
EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
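Typical use is in a connector detect/probe path, caching the result; a sketch (mgr and port come from the driver's MST state, names illustrative):

	/* Hypothetical: remember audio capability at detect time. */
	bool has_audio = drm_dp_mst_port_has_audio(mgr, port);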
/** /**
* drm_dp_mst_get_edid() - get EDID for an MST port * drm_dp_mst_get_edid() - get EDID for an MST port
* @connector: toplevel connector to get EDID for * @connector: toplevel connector to get EDID for
@ -2435,6 +2461,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
edid = drm_get_edid(connector, &port->aux.ddc); edid = drm_get_edid(connector, &port->aux.ddc);
drm_mode_connector_set_tile_property(connector); drm_mode_connector_set_tile_property(connector);
} }
port->has_audio = drm_detect_monitor_audio(edid);
drm_dp_put_port(port); drm_dp_put_port(port);
return edid; return edid;
} }
@ -2821,6 +2848,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
mgr->max_payloads = max_payloads; mgr->max_payloads = max_payloads;
mgr->conn_base_id = conn_base_id; mgr->conn_base_id = conn_base_id;
if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
return -EINVAL;
mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL); mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
if (!mgr->payloads) if (!mgr->payloads)
return -ENOMEM; return -ENOMEM;
@ -2828,7 +2858,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
if (!mgr->proposed_vcpis) if (!mgr->proposed_vcpis)
return -ENOMEM; return -ENOMEM;
set_bit(0, &mgr->payload_mask); set_bit(0, &mgr->payload_mask);
test_calc_pbn_mode(); if (test_calc_pbn_mode() < 0)
DRM_ERROR("MST PBN self-test failed\n");
return 0; return 0;
} }
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);


@ -73,10 +73,6 @@
#define EDID_QUIRK_FORCE_8BPC (1 << 8) #define EDID_QUIRK_FORCE_8BPC (1 << 8)
/* Force 12bpc */ /* Force 12bpc */
#define EDID_QUIRK_FORCE_12BPC (1 << 9) #define EDID_QUIRK_FORCE_12BPC (1 << 9)
/* Force 6bpc */
#define EDID_QUIRK_FORCE_6BPC (1 << 10)
/* Force 10bpc */
#define EDID_QUIRK_FORCE_10BPC (1 << 11)
struct detailed_mode_closure { struct detailed_mode_closure {
struct drm_connector *connector; struct drm_connector *connector;
@ -103,9 +99,6 @@ static struct edid_quirk {
/* Unknown Acer */ /* Unknown Acer */
{ "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
{ "AEO", 0, EDID_QUIRK_FORCE_6BPC },
/* Belinea 10 15 55 */ /* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@ -119,9 +112,6 @@ static struct edid_quirk {
{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
EDID_QUIRK_DETAILED_IN_CM }, EDID_QUIRK_DETAILED_IN_CM },
/* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
{ "LGD", 764, EDID_QUIRK_FORCE_10BPC },
/* LG Philips LCD LP154W01-A5 */ /* LG Philips LCD LP154W01-A5 */
{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
@ -149,9 +139,6 @@ static struct edid_quirk {
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */ /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC }, { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
}; };
/* /*
@ -650,8 +637,12 @@ static const struct minimode extra_modes[] = {
/* /*
* Probably taken from CEA-861 spec. * Probably taken from CEA-861 spec.
* This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c. * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
*
* Index using the VIC.
*/ */
static const struct drm_display_mode edid_cea_modes[] = { static const struct drm_display_mode edid_cea_modes[] = {
/* 0 - dummy, VICs start at 1 */
{ },
/* 1 - 640x480@60Hz */ /* 1 - 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0, 752, 800, 0, 480, 490, 492, 525, 0,
@ -1000,9 +991,11 @@ static const struct drm_display_mode edid_cea_modes[] = {
}; };
/* /*
* HDMI 1.4 4k modes. * HDMI 1.4 4k modes. Index using the VIC.
*/ */
static const struct drm_display_mode edid_4k_modes[] = { static const struct drm_display_mode edid_4k_modes[] = {
/* 0 - dummy, VICs start at 1 */
{ },
/* 1 - 3840x2160@30Hz */ /* 1 - 3840x2160@30Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
3840, 4016, 4104, 4400, 0, 3840, 4016, 4104, 4400, 0,
@ -2558,6 +2551,33 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
return clock; return clock;
} }
static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match,
unsigned int clock_tolerance)
{
u8 vic;
if (!to_match->clock)
return 0;
for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
const struct drm_display_mode *cea_mode = &edid_cea_modes[vic];
unsigned int clock1, clock2;
/* Check both 60Hz and 59.94Hz */
clock1 = cea_mode->clock;
clock2 = cea_mode_alternate_clock(cea_mode);
if (abs(to_match->clock - clock1) > clock_tolerance &&
abs(to_match->clock - clock2) > clock_tolerance)
continue;
if (drm_mode_equal_no_clocks(to_match, cea_mode))
return vic;
}
return 0;
}
/** /**
* drm_match_cea_mode - look for a CEA mode matching given mode * drm_match_cea_mode - look for a CEA mode matching given mode
* @to_match: display mode * @to_match: display mode
@ -2567,13 +2587,13 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
*/ */
u8 drm_match_cea_mode(const struct drm_display_mode *to_match) u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
{ {
u8 mode; u8 vic;
if (!to_match->clock) if (!to_match->clock)
return 0; return 0;
for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) { for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
const struct drm_display_mode *cea_mode = &edid_cea_modes[mode]; const struct drm_display_mode *cea_mode = &edid_cea_modes[vic];
unsigned int clock1, clock2; unsigned int clock1, clock2;
/* Check both 60Hz and 59.94Hz */ /* Check both 60Hz and 59.94Hz */
@ -2583,12 +2603,17 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) || if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) && KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode)) drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
return mode + 1; return vic;
} }
return 0; return 0;
} }
EXPORT_SYMBOL(drm_match_cea_mode); EXPORT_SYMBOL(drm_match_cea_mode);
static bool drm_valid_cea_vic(u8 vic)
{
return vic > 0 && vic < ARRAY_SIZE(edid_cea_modes);
}
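With the dummy entry at index 0, the tables are now indexed directly by VIC and the old off-by-one dance disappears; an in-file lookup sketch:

	u8 vic = drm_match_cea_mode(mode);
	if (drm_valid_cea_vic(vic)) {
		const struct drm_display_mode *cea = &edid_cea_modes[vic];
		/* cea points at the matched CEA mode, no "vic - 1" needed */
	}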
/** /**
* drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to * drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to
* the input VIC from the CEA mode list * the input VIC from the CEA mode list
@ -2598,10 +2623,7 @@ EXPORT_SYMBOL(drm_match_cea_mode);
*/ */
enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code) enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
{ {
/* return picture aspect ratio for video_code - 1 to access the return edid_cea_modes[video_code].picture_aspect_ratio;
* right array element
*/
return edid_cea_modes[video_code-1].picture_aspect_ratio;
} }
EXPORT_SYMBOL(drm_get_cea_aspect_ratio); EXPORT_SYMBOL(drm_get_cea_aspect_ratio);
@ -2622,6 +2644,33 @@ hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
return cea_mode_alternate_clock(hdmi_mode); return cea_mode_alternate_clock(hdmi_mode);
} }
static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_match,
unsigned int clock_tolerance)
{
u8 vic;
if (!to_match->clock)
return 0;
for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
unsigned int clock1, clock2;
/* Make sure to also match alternate clocks */
clock1 = hdmi_mode->clock;
clock2 = hdmi_mode_alternate_clock(hdmi_mode);
if (abs(to_match->clock - clock1) > clock_tolerance &&
abs(to_match->clock - clock2) > clock_tolerance)
continue;
if (drm_mode_equal_no_clocks(to_match, hdmi_mode))
return vic;
}
return 0;
}
/* /*
* drm_match_hdmi_mode - look for a HDMI mode matching given mode * drm_match_hdmi_mode - look for a HDMI mode matching given mode
* @to_match: display mode * @to_match: display mode
@ -2632,13 +2681,13 @@ hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
*/ */
static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match) static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
{ {
u8 mode; u8 vic;
if (!to_match->clock) if (!to_match->clock)
return 0; return 0;
for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) { for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode]; const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
unsigned int clock1, clock2; unsigned int clock1, clock2;
/* Make sure to also match alternate clocks */ /* Make sure to also match alternate clocks */
@ -2648,11 +2697,16 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) || if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) && KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode)) drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
return mode + 1; return vic;
} }
return 0; return 0;
} }
static bool drm_valid_hdmi_vic(u8 vic)
{
return vic > 0 && vic < ARRAY_SIZE(edid_4k_modes);
}
static int static int
add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid) add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
{ {
@ -2672,16 +2726,16 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
list_for_each_entry(mode, &connector->probed_modes, head) { list_for_each_entry(mode, &connector->probed_modes, head) {
const struct drm_display_mode *cea_mode = NULL; const struct drm_display_mode *cea_mode = NULL;
struct drm_display_mode *newmode; struct drm_display_mode *newmode;
u8 mode_idx = drm_match_cea_mode(mode) - 1; u8 vic = drm_match_cea_mode(mode);
unsigned int clock1, clock2; unsigned int clock1, clock2;
if (mode_idx < ARRAY_SIZE(edid_cea_modes)) { if (drm_valid_cea_vic(vic)) {
cea_mode = &edid_cea_modes[mode_idx]; cea_mode = &edid_cea_modes[vic];
clock2 = cea_mode_alternate_clock(cea_mode); clock2 = cea_mode_alternate_clock(cea_mode);
} else { } else {
mode_idx = drm_match_hdmi_mode(mode) - 1; vic = drm_match_hdmi_mode(mode);
if (mode_idx < ARRAY_SIZE(edid_4k_modes)) { if (drm_valid_hdmi_vic(vic)) {
cea_mode = &edid_4k_modes[mode_idx]; cea_mode = &edid_4k_modes[vic];
clock2 = hdmi_mode_alternate_clock(cea_mode); clock2 = hdmi_mode_alternate_clock(cea_mode);
} }
} }
@ -2732,17 +2786,17 @@ drm_display_mode_from_vic_index(struct drm_connector *connector,
{ {
struct drm_device *dev = connector->dev; struct drm_device *dev = connector->dev;
struct drm_display_mode *newmode; struct drm_display_mode *newmode;
u8 cea_mode; u8 vic;
if (video_db == NULL || video_index >= video_len) if (video_db == NULL || video_index >= video_len)
return NULL; return NULL;
/* CEA modes are numbered 1..127 */ /* CEA modes are numbered 1..127 */
cea_mode = (video_db[video_index] & 127) - 1; vic = (video_db[video_index] & 127);
if (cea_mode >= ARRAY_SIZE(edid_cea_modes)) if (!drm_valid_cea_vic(vic))
return NULL; return NULL;
newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); newmode = drm_mode_duplicate(dev, &edid_cea_modes[vic]);
if (!newmode) if (!newmode)
return NULL; return NULL;
@ -2837,8 +2891,7 @@ static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
struct drm_device *dev = connector->dev; struct drm_device *dev = connector->dev;
struct drm_display_mode *newmode; struct drm_display_mode *newmode;
vic--; /* VICs start at 1 */ if (!drm_valid_hdmi_vic(vic)) {
if (vic >= ARRAY_SIZE(edid_4k_modes)) {
DRM_ERROR("Unknown HDMI VIC: %d\n", vic); DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
return 0; return 0;
} }
@ -3129,20 +3182,24 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
{ {
const struct drm_display_mode *cea_mode; const struct drm_display_mode *cea_mode;
int clock1, clock2, clock; int clock1, clock2, clock;
u8 mode_idx; u8 vic;
const char *type; const char *type;
mode_idx = drm_match_cea_mode(mode) - 1; /*
if (mode_idx < ARRAY_SIZE(edid_cea_modes)) { * allow 5kHz clock difference either way to account for
* the 10kHz clock resolution limit of detailed timings.
*/
vic = drm_match_cea_mode_clock_tolerance(mode, 5);
if (drm_valid_cea_vic(vic)) {
type = "CEA"; type = "CEA";
cea_mode = &edid_cea_modes[mode_idx]; cea_mode = &edid_cea_modes[vic];
clock1 = cea_mode->clock; clock1 = cea_mode->clock;
clock2 = cea_mode_alternate_clock(cea_mode); clock2 = cea_mode_alternate_clock(cea_mode);
} else { } else {
mode_idx = drm_match_hdmi_mode(mode) - 1; vic = drm_match_hdmi_mode_clock_tolerance(mode, 5);
if (mode_idx < ARRAY_SIZE(edid_4k_modes)) { if (drm_valid_hdmi_vic(vic)) {
type = "HDMI"; type = "HDMI";
cea_mode = &edid_4k_modes[mode_idx]; cea_mode = &edid_4k_modes[vic];
clock1 = cea_mode->clock; clock1 = cea_mode->clock;
clock2 = hdmi_mode_alternate_clock(cea_mode); clock2 = hdmi_mode_alternate_clock(cea_mode);
} else { } else {
@ -3160,7 +3217,7 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
return; return;
DRM_DEBUG("detailed mode matches %s VIC %d, adjusting clock %d -> %d\n", DRM_DEBUG("detailed mode matches %s VIC %d, adjusting clock %d -> %d\n",
type, mode_idx + 1, mode->clock, clock); type, vic, mode->clock, clock);
mode->clock = clock; mode->clock = clock;
} }
@ -3833,15 +3890,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
drm_add_display_info(edid, &connector->display_info, connector); drm_add_display_info(edid, &connector->display_info, connector);
if (quirks & EDID_QUIRK_FORCE_6BPC)
connector->display_info.bpc = 6;
if (quirks & EDID_QUIRK_FORCE_8BPC) if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8; connector->display_info.bpc = 8;
if (quirks & EDID_QUIRK_FORCE_10BPC)
connector->display_info.bpc = 10;
if (quirks & EDID_QUIRK_FORCE_12BPC) if (quirks & EDID_QUIRK_FORCE_12BPC)
connector->display_info.bpc = 12; connector->display_info.bpc = 12;


@ -944,7 +944,7 @@ retry:
goto fail; goto fail;
plane = mode_set->crtc->primary; plane = mode_set->crtc->primary;
plane_mask |= drm_plane_index(plane); plane_mask |= (1 << drm_plane_index(plane));
plane->old_fb = plane->fb; plane->old_fb = plane->fb;
} }
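The old code OR'd the raw plane index into the mask (so index 0 contributed nothing and different indices could alias); shifting turns it into a proper bitmask:

	/* index 0,1,2 -> bits 0x1, 0x2, 0x4 */
	plane_mask |= 1 << drm_plane_index(plane);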


@ -170,6 +170,15 @@ void drm_gem_private_object_init(struct drm_device *dev,
} }
EXPORT_SYMBOL(drm_gem_private_object_init); EXPORT_SYMBOL(drm_gem_private_object_init);
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
/*
* Note: obj->dma_buf can't disappear as long as we still hold a
* handle reference in obj->handle_count.
*/
}
/** /**
* drm_gem_object_handle_free - release resources bound to userspace handles * drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up. * @obj: GEM object to clean up.
@ -195,6 +204,9 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj)
static void static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{ {
struct drm_device *dev = obj->dev;
bool final = false;
if (WARN_ON(obj->handle_count == 0)) if (WARN_ON(obj->handle_count == 0))
return; return;
@ -204,22 +216,48 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
* checked for a name * checked for a name
*/ */
mutex_lock(&obj->dev->object_name_lock); mutex_lock(&dev->object_name_lock);
if (--obj->handle_count == 0) { if (--obj->handle_count == 0) {
drm_gem_object_handle_free(obj); drm_gem_object_handle_free(obj);
final = true;
} }
mutex_unlock(&obj->dev->object_name_lock); mutex_unlock(&dev->object_name_lock);
if (final)
drm_gem_object_unreference_unlocked(obj); drm_gem_object_unreference_unlocked(obj);
} }
/*
* Called at device or object close to release the file's
* handle references on objects.
*/
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
struct drm_file *file_priv = data;
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_gem_remove_prime_handles(obj, file_priv);
// drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
drm_gem_object_handle_unreference_unlocked(obj);
return 0;
}
/** /**
* drm_gem_handle_delete - deletes the given file-private handle * drm_gem_handle_delete - deletes the given file-private handle
* @filp: drm file-private structure to use for the handle look up * @filp: drm file-private structure to use for the handle look up
* @handle: userspace handle to delete * @handle: userspace handle to delete
* *
* Removes the GEM handle from the @filp lookup table and if this is the last * Removes the GEM handle from the @filp lookup table which has been added with
* handle also cleans up linked resources like GEM names. * drm_gem_handle_create(). If this is the last handle also cleans up linked
* resources like GEM names.
*/ */
int int
drm_gem_handle_delete(struct drm_file *filp, u32 handle) drm_gem_handle_delete(struct drm_file *filp, u32 handle)
@ -250,12 +288,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
idr_remove(&filp->object_idr, handle); idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock); spin_unlock(&filp->table_lock);
// drm_vma_node_revoke(&obj->vma_node, filp->filp); drm_gem_object_release_handle(handle, obj, filp);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, filp);
drm_gem_object_handle_unreference_unlocked(obj);
return 0; return 0;
} }
EXPORT_SYMBOL(drm_gem_handle_delete); EXPORT_SYMBOL(drm_gem_handle_delete);
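For reference, the userspace-visible lifecycle the new kerneldoc below describes, as a libdrm-style sketch (standard uapi ioctls; error handling omitted):

	struct drm_gem_open open_args = { .name = flink_name };
	ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_args);	/* handle created */

	struct drm_gem_close close_args = { .handle = open_args.handle };
	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_args);	/* handle released */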
@ -286,6 +319,10 @@ EXPORT_SYMBOL(drm_gem_dumb_destroy);
* This expects the dev->object_name_lock to be held already and will drop it * This expects the dev->object_name_lock to be held already and will drop it
* before returning. Used to avoid races in establishing new handles when * before returning. Used to avoid races in establishing new handles when
* importing an object from either an flink name or a dma-buf. * importing an object from either an flink name or a dma-buf.
*
 * Handles must be released again through drm_gem_handle_delete(). This is done
* when userspace closes @file_priv for all attached handles, or through the
* GEM_CLOSE ioctl for individual handles.
*/ */
int int
drm_gem_handle_create_tail(struct drm_file *file_priv, drm_gem_handle_create_tail(struct drm_file *file_priv,
@ -293,9 +330,12 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
u32 *handlep) u32 *handlep)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->dev;
u32 handle;
int ret; int ret;
WARN_ON(!mutex_is_locked(&dev->object_name_lock)); WARN_ON(!mutex_is_locked(&dev->object_name_lock));
if (obj->handle_count++ == 0)
drm_gem_object_reference(obj);
/* /*
* Get the user-visible handle using idr. Preload and perform * Get the user-visible handle using idr. Preload and perform
@ -305,15 +345,15 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
spin_lock(&file_priv->table_lock); spin_lock(&file_priv->table_lock);
ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
drm_gem_object_reference(obj);
obj->handle_count++;
spin_unlock(&file_priv->table_lock); spin_unlock(&file_priv->table_lock);
idr_preload_end(); idr_preload_end();
mutex_unlock(&dev->object_name_lock); mutex_unlock(&dev->object_name_lock);
if (ret < 0) if (ret < 0)
goto err_unref; goto err_unref;
*handlep = ret; handle = ret;
// ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp); // ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
// if (ret) { // if (ret) {
@ -327,13 +367,14 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
goto err_revoke; goto err_revoke;
} }
*handlep = handle;
return 0; return 0;
err_revoke: err_revoke:
// drm_vma_node_revoke(&obj->vma_node, file_priv->filp); // drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
err_remove: err_remove:
spin_lock(&file_priv->table_lock); spin_lock(&file_priv->table_lock);
idr_remove(&file_priv->object_idr, *handlep); idr_remove(&file_priv->object_idr, handle);
spin_unlock(&file_priv->table_lock); spin_unlock(&file_priv->table_lock);
err_unref: err_unref:
drm_gem_object_handle_unreference_unlocked(obj); drm_gem_object_handle_unreference_unlocked(obj);
@ -521,7 +562,17 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
EXPORT_SYMBOL(drm_gem_put_pages); EXPORT_SYMBOL(drm_gem_put_pages);
#endif #endif
/** Returns a reference to the object named by the handle. */ /**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @dev: DRM device
 * @filp: DRM file private data
* @handle: userspace handle
*
* Returns:
*
* A reference to the object named by the handle if such exists on @filp, NULL
* otherwise.
*/
struct drm_gem_object * struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
u32 handle) u32 handle)
@ -595,7 +646,6 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
return -ENOENT; return -ENOENT;
mutex_lock(&dev->object_name_lock); mutex_lock(&dev->object_name_lock);
idr_preload(GFP_KERNEL);
/* prevent races with concurrent gem_close. */ /* prevent races with concurrent gem_close. */
if (obj->handle_count == 0) { if (obj->handle_count == 0) {
ret = -ENOENT; ret = -ENOENT;
@ -603,7 +653,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
} }
if (!obj->name) { if (!obj->name) {
ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT); ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
if (ret < 0) if (ret < 0)
goto err; goto err;
@ -614,9 +664,12 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
ret = 0; ret = 0;
err: err:
idr_preload_end();
mutex_unlock(&dev->object_name_lock); mutex_unlock(&dev->object_name_lock);
drm_gem_object_unreference_unlocked(obj); drm_gem_object_unreference_unlocked(obj);
// printf("%s object %p name %d refcount %d\n",
// __FUNCTION__, obj, obj->name, obj->refcount.refcount);
return ret; return ret;
} }
@ -661,6 +714,9 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
args->handle = handle; args->handle = handle;
args->size = obj->size; args->size = obj->size;
// printf("%s object %p handle %d refcount %d\n",
// __FUNCTION__, obj, handle, obj->refcount.refcount);
return 0; return 0;
} }
@ -680,27 +736,6 @@ drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
spin_lock_init(&file_private->table_lock); spin_lock_init(&file_private->table_lock);
} }
/*
* Called at device close to release the file's
* handle references on objects.
*/
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
struct drm_file *file_priv = data;
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
drm_gem_object_handle_unreference_unlocked(obj);
return 0;
}
/** /**
* drm_gem_release - release file-private GEM resources * drm_gem_release - release file-private GEM resources
* @dev: drm_device which is being closed by userspace * @dev: drm_device which is being closed by userspace


@ -25,7 +25,6 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE. * USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ */
#include <drm/drmP.h>
#include <drm/drm_mipi_dsi.h> #include <drm/drm_mipi_dsi.h>
#include <linux/device.h> #include <linux/device.h>


@ -708,7 +708,8 @@ void drm_mode_set_name(struct drm_display_mode *mode)
} }
EXPORT_SYMBOL(drm_mode_set_name); EXPORT_SYMBOL(drm_mode_set_name);
/** drm_mode_hsync - get the hsync of a mode /**
* drm_mode_hsync - get the hsync of a mode
* @mode: mode * @mode: mode
* *
* Returns: * Returns:
@ -917,13 +918,30 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
} else if (mode1->clock != mode2->clock) } else if (mode1->clock != mode2->clock)
return false; return false;
return drm_mode_equal_no_clocks(mode1, mode2);
}
EXPORT_SYMBOL(drm_mode_equal);
/**
* drm_mode_equal_no_clocks - test modes for equality
* @mode1: first mode
* @mode2: second mode
*
* Check to see if @mode1 and @mode2 are equivalent, but
* don't check the pixel clocks.
*
* Returns:
* True if the modes are equal, false otherwise.
*/
bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
{
if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) != if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
(mode2->flags & DRM_MODE_FLAG_3D_MASK)) (mode2->flags & DRM_MODE_FLAG_3D_MASK))
return false; return false;
return drm_mode_equal_no_clocks_no_stereo(mode1, mode2); return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
} }
EXPORT_SYMBOL(drm_mode_equal); EXPORT_SYMBOL(drm_mode_equal_no_clocks);
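The newly exported middle tier completes the equality family: full check, no-clocks, and no-clocks-no-stereo. Usage sketch:

	/* Same timings and flags, pixel clock ignored. */
	if (drm_mode_equal_no_clocks(mode1, mode2))
		/* e.g. a candidate for the clock-tolerance matching above */;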
/** /**
* drm_mode_equal_no_clocks_no_stereo - test modes for equality * drm_mode_equal_no_clocks_no_stereo - test modes for equality
@ -1056,7 +1074,7 @@ static const char * const drm_mode_status_names[] = {
MODE_STATUS(ONE_SIZE), MODE_STATUS(ONE_SIZE),
MODE_STATUS(NO_REDUCED), MODE_STATUS(NO_REDUCED),
MODE_STATUS(NO_STEREO), MODE_STATUS(NO_STEREO),
MODE_STATUS(UNVERIFIED), MODE_STATUS(STALE),
MODE_STATUS(BAD), MODE_STATUS(BAD),
MODE_STATUS(ERROR), MODE_STATUS(ERROR),
}; };
@ -1154,7 +1172,6 @@ EXPORT_SYMBOL(drm_mode_sort);
/** /**
* drm_mode_connector_list_update - update the mode list for the connector * drm_mode_connector_list_update - update the mode list for the connector
* @connector: the connector to update * @connector: the connector to update
* @merge_type_bits: whether to merge or overwrite type bits
* *
* This moves the modes from the @connector probed_modes list * This moves the modes from the @connector probed_modes list
* to the actual mode list. It compares the probed mode against the current * to the actual mode list. It compares the probed mode against the current
@ -1163,34 +1180,49 @@ EXPORT_SYMBOL(drm_mode_sort);
 * This is just a helper function; it doesn't validate any modes itself and also	 * This is just a helper function; it doesn't validate any modes itself and also
* doesn't prune any invalid modes. Callers need to do that themselves. * doesn't prune any invalid modes. Callers need to do that themselves.
*/ */
void drm_mode_connector_list_update(struct drm_connector *connector, void drm_mode_connector_list_update(struct drm_connector *connector)
bool merge_type_bits)
{ {
struct drm_display_mode *mode;
struct drm_display_mode *pmode, *pt; struct drm_display_mode *pmode, *pt;
int found_it;
WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex)); WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
list_for_each_entry_safe(pmode, pt, &connector->probed_modes, list_for_each_entry_safe(pmode, pt, &connector->probed_modes, head) {
head) { struct drm_display_mode *mode;
found_it = 0; bool found_it = false;
/* go through current modes checking for the new probed mode */ /* go through current modes checking for the new probed mode */
list_for_each_entry(mode, &connector->modes, head) { list_for_each_entry(mode, &connector->modes, head) {
if (drm_mode_equal(pmode, mode)) { if (!drm_mode_equal(pmode, mode))
found_it = 1; continue;
/* if equal delete the probed mode */
mode->status = pmode->status; found_it = true;
/* Merge type bits together */
if (merge_type_bits) /*
* If the old matching mode is stale (ie. left over
* from a previous probe) just replace it outright.
* Otherwise just merge the type bits between all
* equal probed modes.
*
* If two probed modes are considered equal, pick the
* actual timings from the one that's marked as
* preferred (in case the match isn't 100%). If
* multiple or zero preferred modes are present, favor
* the mode added to the probed_modes list first.
*/
if (mode->status == MODE_STALE) {
drm_mode_copy(mode, pmode);
} else if ((mode->type & DRM_MODE_TYPE_PREFERRED) == 0 &&
(pmode->type & DRM_MODE_TYPE_PREFERRED) != 0) {
pmode->type |= mode->type;
drm_mode_copy(mode, pmode);
} else {
mode->type |= pmode->type; mode->type |= pmode->type;
else }
mode->type = pmode->type;
list_del(&pmode->head); list_del(&pmode->head);
drm_mode_destroy(connector->dev, pmode); drm_mode_destroy(connector->dev, pmode);
break; break;
} }
}
if (!found_it) { if (!found_it) {
list_move_tail(&pmode->head, &connector->modes); list_move_tail(&pmode->head, &connector->modes);
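
The replacement policy introduced above fits in a few lines; the helper below is an illustrative restatement (merge_probed_mode() is a hypothetical name, not a function added by this commit):

static void merge_probed_mode(struct drm_display_mode *mode,
			      struct drm_display_mode *pmode)
{
	if (mode->status == MODE_STALE) {
		/* leftover from a previous probe: replace it outright */
		drm_mode_copy(mode, pmode);
	} else if (!(mode->type & DRM_MODE_TYPE_PREFERRED) &&
		   (pmode->type & DRM_MODE_TYPE_PREFERRED)) {
		/* only the probed mode is preferred: its timings win */
		pmode->type |= mode->type;
		drm_mode_copy(mode, pmode);
	} else {
		/* otherwise keep the old timings and merge the type bits */
		mode->type |= pmode->type;
	}
}
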
@ -1401,13 +1433,6 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
return NULL; return NULL;
mode->type |= DRM_MODE_TYPE_USERDEF; mode->type |= DRM_MODE_TYPE_USERDEF;
/* fix up 1368x768: GFT/CVT can't express 1366 width due to alignment */
if (cmd->xres == 1366 && mode->hdisplay == 1368) {
mode->hdisplay = 1366;
mode->hsync_start--;
mode->hsync_end--;
drm_mode_set_name(mode);
}
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
return mode; return mode;
} }
@ -1494,10 +1519,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
if (out->status != MODE_OK) if (out->status != MODE_OK)
goto out; goto out;
drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);
ret = 0; ret = 0;
out: out:
return ret; return ret;
} }

View File

@ -48,20 +48,25 @@
* goto retry; * goto retry;
* } * }
* } * }
*
* ... do stuff ... * ... do stuff ...
*
* drm_modeset_drop_locks(&ctx); * drm_modeset_drop_locks(&ctx);
* drm_modeset_acquire_fini(&ctx); * drm_modeset_acquire_fini(&ctx);
*/ */
/** /**
* drm_modeset_lock_all - take all modeset locks * drm_modeset_lock_all - take all modeset locks
* @dev: drm device * @dev: DRM device
* *
* This function takes all modeset locks, suitable where a more fine-grained * This function takes all modeset locks, suitable where a more fine-grained
* scheme isn't (yet) implemented. Locks must be dropped with * scheme isn't (yet) implemented. Locks must be dropped by calling the
* drm_modeset_unlock_all. * drm_modeset_unlock_all() function.
*
* This function is deprecated. It allocates a lock acquisition context and
* stores it in the DRM device's ->mode_config. This facilitates conversion of
* existing code because it removes the need to manually deal with the
* acquisition context, but it is also brittle because the context is global
* and care must be taken not to nest calls. New code should use the
* drm_modeset_lock_all_ctx() function and pass in the context explicitly.
*/ */
void drm_modeset_lock_all(struct drm_device *dev) void drm_modeset_lock_all(struct drm_device *dev)
{ {
@ -78,39 +83,43 @@ void drm_modeset_lock_all(struct drm_device *dev)
drm_modeset_acquire_init(ctx, 0); drm_modeset_acquire_init(ctx, 0);
retry: retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx); ret = drm_modeset_lock_all_ctx(dev, ctx);
if (ret) if (ret < 0) {
goto fail; if (ret == -EDEADLK) {
ret = drm_modeset_lock_all_crtcs(dev, ctx); drm_modeset_backoff(ctx);
if (ret) goto retry;
goto fail; }
drm_modeset_acquire_fini(ctx);
kfree(ctx);
return;
}
WARN_ON(config->acquire_ctx); WARN_ON(config->acquire_ctx);
/* now we hold the locks, so now that it is safe, stash the /*
* ctx for drm_modeset_unlock_all(): * We hold the locks now, so it is safe to stash the acquisition
* context for drm_modeset_unlock_all().
*/ */
config->acquire_ctx = ctx; config->acquire_ctx = ctx;
drm_warn_on_modeset_not_all_locked(dev); drm_warn_on_modeset_not_all_locked(dev);
return;
fail:
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
}
kfree(ctx);
} }
EXPORT_SYMBOL(drm_modeset_lock_all); EXPORT_SYMBOL(drm_modeset_lock_all);
/** /**
* drm_modeset_unlock_all - drop all modeset locks * drm_modeset_unlock_all - drop all modeset locks
* @dev: device * @dev: DRM device
* *
* This function drop all modeset locks taken by drm_modeset_lock_all. * This function drops all modeset locks taken by a previous call to the
* drm_modeset_lock_all() function.
*
* This function is deprecated. It uses the lock acquisition context stored
* in the DRM device's ->mode_config. This facilitates conversion of existing
* code because it removes the need to manually deal with the acquisition
* context, but it is also brittle because the context is global and care must
* be taken not to nest calls. New code should pass the acquisition context
* directly to the drm_modeset_drop_locks() function.
*/ */
void drm_modeset_unlock_all(struct drm_device *dev) void drm_modeset_unlock_all(struct drm_device *dev)
{ {
@ -431,14 +440,34 @@ void drm_modeset_unlock(struct drm_modeset_lock *lock)
} }
EXPORT_SYMBOL(drm_modeset_unlock); EXPORT_SYMBOL(drm_modeset_unlock);
/* In some legacy codepaths it's convenient to just grab all the crtc and plane /**
* related locks. */ * drm_modeset_lock_all_ctx - take all modeset locks
int drm_modeset_lock_all_crtcs(struct drm_device *dev, * @dev: DRM device
* @ctx: lock acquisition context
*
* This function takes all modeset locks, suitable where a more fine-grained
* scheme isn't (yet) implemented.
*
* Unlike drm_modeset_lock_all(), it doesn't take the dev->mode_config.mutex
* since that lock isn't required for modeset state changes. Callers which
* need to grab that lock too need to do so outside of the acquire context
* @ctx.
*
* Locks acquired with this function should be released by calling the
* drm_modeset_drop_locks() function on @ctx.
*
* Returns: 0 on success or a negative error-code on failure.
*/
int drm_modeset_lock_all_ctx(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx) struct drm_modeset_acquire_ctx *ctx)
{ {
struct drm_crtc *crtc; struct drm_crtc *crtc;
struct drm_plane *plane; struct drm_plane *plane;
int ret = 0; int ret;
ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
if (ret)
return ret;
drm_for_each_crtc(crtc, dev) { drm_for_each_crtc(crtc, dev) {
ret = drm_modeset_lock(&crtc->mutex, ctx); ret = drm_modeset_lock(&crtc->mutex, ctx);
@ -454,4 +483,4 @@ int drm_modeset_lock_all_crtcs(struct drm_device *dev,
return 0; return 0;
} }
EXPORT_SYMBOL(drm_modeset_lock_all_crtcs); EXPORT_SYMBOL(drm_modeset_lock_all_ctx);
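
Taken together with the retry idiom from the header comment, a caller of the newly exported function looks roughly like this (sketch; dev is the caller's struct drm_device):

struct drm_modeset_acquire_ctx ctx;
int ret;

drm_modeset_acquire_init(&ctx, 0);
retry:
ret = drm_modeset_lock_all_ctx(dev, &ctx);
if (ret == -EDEADLK) {
	drm_modeset_backoff(&ctx);	/* drop all locks, then retry */
	goto retry;
}

/* ... modify modeset state ... */

drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
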

View File

@ -57,6 +57,10 @@
* by the atomic helpers. * by the atomic helpers.
* *
* Again drivers are strongly urged to switch to the new interfaces. * Again drivers are strongly urged to switch to the new interfaces.
*
* The plane helpers share the function table structures with other helpers,
* specifically also the atomic helpers. See struct &drm_plane_helper_funcs for
* the details.
*/ */
/* /*
@ -164,6 +168,8 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale); vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale);
if (hscale < 0 || vscale < 0) { if (hscale < 0 || vscale < 0) {
DRM_DEBUG_KMS("Invalid scaling of plane\n"); DRM_DEBUG_KMS("Invalid scaling of plane\n");
drm_rect_debug_print("src: ", src, true);
drm_rect_debug_print("dst: ", dest, false);
return -ERANGE; return -ERANGE;
} }
@ -180,6 +186,8 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
if (!can_position && !drm_rect_equals(dest, clip)) { if (!can_position && !drm_rect_equals(dest, clip)) {
DRM_DEBUG_KMS("Plane must cover entire CRTC\n"); DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
drm_rect_debug_print("dst: ", dest, false);
drm_rect_debug_print("clip: ", clip, false);
return -EINVAL; return -EINVAL;
} }
@ -367,7 +375,7 @@ static struct drm_plane *create_primary_plane(struct drm_device *dev)
&drm_primary_helper_funcs, &drm_primary_helper_funcs,
safe_modeset_formats, safe_modeset_formats,
ARRAY_SIZE(safe_modeset_formats), ARRAY_SIZE(safe_modeset_formats),
DRM_PLANE_TYPE_PRIMARY); DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) { if (ret) {
kfree(primary); kfree(primary);
primary = NULL; primary = NULL;
@ -394,7 +402,8 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary; struct drm_plane *primary;
primary = create_primary_plane(dev); primary = create_primary_plane(dev);
return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs); return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs,
NULL);
} }
EXPORT_SYMBOL(drm_crtc_init); EXPORT_SYMBOL(drm_crtc_init);

View File

@ -53,6 +53,9 @@
* This helper library can be used independently of the modeset helper library. * This helper library can be used independently of the modeset helper library.
* Drivers can also overwrite different parts e.g. use their own hotplug * Drivers can also overwrite different parts e.g. use their own hotplug
* handling code to avoid probing unrelated outputs. * handling code to avoid probing unrelated outputs.
*
* The probe helpers share the function table structures with other display
* helper libraries. See struct &drm_connector_helper_funcs for the details.
*/ */
static bool drm_kms_helper_poll = true; static bool drm_kms_helper_poll = true;
@ -126,9 +129,64 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
} }
EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked); EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
/**
static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, * drm_helper_probe_single_connector_modes - get complete set of display modes
uint32_t maxX, uint32_t maxY, bool merge_type_bits) * @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* Based on the helper callbacks implemented by @connector in struct
* &drm_connector_helper_funcs try to detect all valid modes. Modes will first
* be added to the connector's probed_modes list, then culled (based on validity
* and the @maxX, @maxY parameters) and put into the normal modes list.
*
* Intended to be used as a generic implementation of the ->fill_modes()
* @connector vfunc for drivers that use the CRTC helpers for output mode
* filtering and detection.
*
* The basic procedure is as follows
*
* 1. All modes currently on the connector's modes list are marked as stale
*
* 2. New modes are added to the connector's probed_modes list with
* drm_mode_probed_add(). New modes start their life with status as OK.
* Modes are added from a single source using the following priority order.
*
* - debugfs 'override_edid' (used for testing only)
* - firmware EDID (drm_load_edid_firmware())
* - connector helper ->get_modes() vfunc
* - if the connector status is connector_status_connected, standard
* VESA DMT modes up to 1024x768 are automatically added
* (drm_add_modes_noedid())
*
* Finally modes specified via the kernel command line (video=...) are
* added in addition to what the earlier probes produced
* (drm_helper_probe_add_cmdline_mode()). These modes are generated
* using the VESA GTF/CVT formulas.
*
* 3. Modes are moved from the probed_modes list to the modes list. Potential
* duplicates are merged together (see drm_mode_connector_list_update()).
* After this step the probed_modes list will be empty again.
*
* 4. Any non-stale mode on the modes list then undergoes validation
*
* - drm_mode_validate_basic() performs basic sanity checks
* - drm_mode_validate_size() filters out modes larger than @maxX and @maxY
* (if specified)
* - drm_mode_validate_flag() checks the modes against basic connector
* capabilities (interlace_allowed, doublescan_allowed, stereo_allowed)
* - the optional connector ->mode_valid() helper can perform driver and/or
* hardware specific checks
*
* 5. Any mode whose status is not OK is pruned from the connector's modes list,
* accompanied by a debug message indicating the reason for the mode's
* rejection (see drm_mode_prune_invalid()).
*
* Returns:
* The number of modes found on @connector.
*/
int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{ {
struct drm_device *dev = connector->dev; struct drm_device *dev = connector->dev;
struct drm_display_mode *mode; struct drm_display_mode *mode;
@ -143,9 +201,11 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
connector->name); connector->name);
/* set all modes to the unverified state */ /* set all old modes to the stale state */
list_for_each_entry(mode, &connector->modes, head) list_for_each_entry(mode, &connector->modes, head)
mode->status = MODE_UNVERIFIED; mode->status = MODE_STALE;
old_status = connector->status;
if (connector->force) { if (connector->force) {
if (connector->force == DRM_FORCE_ON || if (connector->force == DRM_FORCE_ON ||
@ -156,9 +216,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
if (connector->funcs->force) if (connector->funcs->force)
connector->funcs->force(connector); connector->funcs->force(connector);
} else { } else {
old_status = connector->status;
connector->status = connector->funcs->detect(connector, true); connector->status = connector->funcs->detect(connector, true);
}
/* /*
* Normally either the driver's hpd code or the poll loop should * Normally either the driver's hpd code or the poll loop should
@ -167,10 +226,11 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
* check here, and if anything changed start the hotplug code. * check here, and if anything changed start the hotplug code.
*/ */
if (old_status != connector->status) { if (old_status != connector->status) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id, connector->base.id,
connector->name, connector->name,
old_status, connector->status); drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->status));
/* /*
* The hotplug event code might call into the fb * The hotplug event code might call into the fb
@ -183,7 +243,6 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
schedule_delayed_work(&dev->mode_config.output_poll_work, schedule_delayed_work(&dev->mode_config.output_poll_work,
0); 0);
} }
}
/* Re-enable polling in case the global poll config changed. */ /* Re-enable polling in case the global poll config changed. */
if (drm_kms_helper_poll != dev->mode_config.poll_running) if (drm_kms_helper_poll != dev->mode_config.poll_running)
@ -199,17 +258,16 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
goto prune; goto prune;
} }
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
count = drm_load_edid_firmware(connector);
if (count == 0)
#endif
{
if (connector->override_edid) { if (connector->override_edid) {
struct edid *edid = (struct edid *) connector->edid_blob_ptr->data; struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
count = drm_add_edid_modes(connector, edid); count = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid); drm_edid_to_eld(connector, edid);
} else } else {
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
count = drm_load_edid_firmware(connector);
if (count == 0)
#endif
count = (*connector_funcs->get_modes)(connector); count = (*connector_funcs->get_modes)(connector);
} }
@ -219,7 +277,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
if (count == 0) if (count == 0)
goto prune; goto prune;
drm_mode_connector_list_update(connector, merge_type_bits); drm_mode_connector_list_update(connector);
if (connector->interlace_allowed) if (connector->interlace_allowed)
mode_flags |= DRM_MODE_FLAG_INTERLACE; mode_flags |= DRM_MODE_FLAG_INTERLACE;
@ -263,48 +321,8 @@ prune:
return count; return count;
} }
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
* @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* Based on the helper callbacks implemented by @connector try to detect all
* valid modes. Modes will first be added to the connector's probed_modes list,
* then culled (based on validity and the @maxX, @maxY parameters) and put into
* the normal modes list.
*
* Intended to be used as a generic implementation of the ->fill_modes()
* @connector vfunc for drivers that use the crtc helpers for output mode
* filtering and detection.
*
* Returns:
* The number of modes found on @connector.
*/
int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{
return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, true);
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
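
As the relocated kerneldoc says, this helper is meant to back a connector's ->fill_modes() hook; a minimal wiring sketch (my_connector_detect is a hypothetical driver callback):

static const struct drm_connector_funcs my_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = my_connector_detect,		/* hypothetical */
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
};
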
/**
* drm_helper_probe_single_connector_modes_nomerge - get complete set of display modes
* @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* This operates like drm_helper_probe_single_connector_modes except it
* replaces the mode bits instead of merging them for preferred modes.
*/
int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{
return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, false);
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes_nomerge);
/** /**
* drm_kms_helper_hotplug_event - fire off KMS hotplug events * drm_kms_helper_hotplug_event - fire off KMS hotplug events
* @dev: drm_device whose connector state changed * @dev: drm_device whose connector state changed

View File

@ -275,22 +275,23 @@ EXPORT_SYMBOL(drm_rect_calc_vscale_relaxed);
/** /**
* drm_rect_debug_print - print the rectangle information * drm_rect_debug_print - print the rectangle information
* @prefix: prefix string
* @r: rectangle to print * @r: rectangle to print
* @fixed_point: rectangle is in 16.16 fixed point format * @fixed_point: rectangle is in 16.16 fixed point format
*/ */
void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point) void drm_rect_debug_print(const char *prefix, const struct drm_rect *r, bool fixed_point)
{ {
int w = drm_rect_width(r); int w = drm_rect_width(r);
int h = drm_rect_height(r); int h = drm_rect_height(r);
if (fixed_point) if (fixed_point)
DRM_DEBUG_KMS("%d.%06ux%d.%06u%+d.%06u%+d.%06u\n", DRM_DEBUG_KMS("%s%d.%06ux%d.%06u%+d.%06u%+d.%06u\n", prefix,
w >> 16, ((w & 0xffff) * 15625) >> 10, w >> 16, ((w & 0xffff) * 15625) >> 10,
h >> 16, ((h & 0xffff) * 15625) >> 10, h >> 16, ((h & 0xffff) * 15625) >> 10,
r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10, r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10,
r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10); r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10);
else else
DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1); DRM_DEBUG_KMS("%s%dx%d%+d%+d\n", prefix, w, h, r->x1, r->y1);
} }
EXPORT_SYMBOL(drm_rect_debug_print); EXPORT_SYMBOL(drm_rect_debug_print);
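
For reference, the two branches now produce output like the following (derived from the format strings above, assuming a full-HD rectangle at the origin):

/*
 *   fixed_point:  "src: 1920.000000x1080.000000+0.000000+0.000000"
 *   integer:      "dst: 1920x1080+0+0"
 */
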

View File

@ -80,9 +80,9 @@ NAME_SRC= main.c \
intel_ddi.c \ intel_ddi.c \
intel_display.c \ intel_display.c \
intel_dp.c \ intel_dp.c \
intel_dp_link_training.c \
intel_dp_mst.c \ intel_dp_mst.c \
intel_dsi.c \ intel_dsi.c \
intel_dsi_cmd.c \
intel_dsi_panel_vbt.c \ intel_dsi_panel_vbt.c \
intel_dsi_pll.c \ intel_dsi_pll.c \
intel_dvo.c \ intel_dvo.c \

View File

@ -80,9 +80,9 @@ NAME_SRC= main.c \
intel_ddi.c \ intel_ddi.c \
intel_display.c \ intel_display.c \
intel_dp.c \ intel_dp.c \
intel_dp_link_training.c \
intel_dp_mst.c \ intel_dp_mst.c \
intel_dsi.c \ intel_dsi.c \
intel_dsi_cmd.c \
intel_dsi_panel_vbt.c \ intel_dsi_panel_vbt.c \
intel_dsi_pll.c \ intel_dsi_pll.c \
intel_dvo.c \ intel_dvo.c \

View File

@ -32,7 +32,8 @@ struct intel_dvo_device {
const char *name; const char *name;
int type; int type;
/* DVOA/B/C output register */ /* DVOA/B/C output register */
u32 dvo_reg; i915_reg_t dvo_reg;
i915_reg_t dvo_srcdim_reg;
/* GPIO register used for i2c bus to control this device */ /* GPIO register used for i2c bus to control this device */
u32 gpio; u32 gpio;
int slave_addr; int slave_addr;
@ -128,11 +129,11 @@ struct intel_dvo_dev_ops {
void (*dump_regs)(struct intel_dvo_device *dvo); void (*dump_regs)(struct intel_dvo_device *dvo);
}; };
extern struct intel_dvo_dev_ops sil164_ops; extern const struct intel_dvo_dev_ops sil164_ops;
extern struct intel_dvo_dev_ops ch7xxx_ops; extern const struct intel_dvo_dev_ops ch7xxx_ops;
extern struct intel_dvo_dev_ops ivch_ops; extern const struct intel_dvo_dev_ops ivch_ops;
extern struct intel_dvo_dev_ops tfp410_ops; extern const struct intel_dvo_dev_ops tfp410_ops;
extern struct intel_dvo_dev_ops ch7017_ops; extern const struct intel_dvo_dev_ops ch7017_ops;
extern struct intel_dvo_dev_ops ns2501_ops; extern const struct intel_dvo_dev_ops ns2501_ops;
#endif /* _INTEL_DVO_H */ #endif /* _INTEL_DVO_H */
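
The dvo_reg/dvo_srcdim_reg type change is part of the tree-wide u32 to i915_reg_t conversion visible throughout this commit. In essence the handle is a single-member struct, so a raw offset can no longer be passed where a register is expected (a sketch of the idea; the real definition lives in i915_reg.h):

typedef struct {
	u32 reg;
} i915_reg_t;

#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

static inline u32 i915_mmio_reg_offset(i915_reg_t reg)
{
	return reg.reg;	/* unwrap only where raw offsets are required */
}
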

View File

@ -402,7 +402,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
} }
} }
struct intel_dvo_dev_ops ch7017_ops = { const struct intel_dvo_dev_ops ch7017_ops = {
.init = ch7017_init, .init = ch7017_init,
.detect = ch7017_detect, .detect = ch7017_detect,
.mode_valid = ch7017_mode_valid, .mode_valid = ch7017_mode_valid,

View File

@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
} }
} }
struct intel_dvo_dev_ops ch7xxx_ops = { const struct intel_dvo_dev_ops ch7xxx_ops = {
.init = ch7xxx_init, .init = ch7xxx_init,
.detect = ch7xxx_detect, .detect = ch7xxx_detect,
.mode_valid = ch7xxx_mode_valid, .mode_valid = ch7xxx_mode_valid,

View File

@ -490,7 +490,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
} }
} }
struct intel_dvo_dev_ops ivch_ops = { const struct intel_dvo_dev_ops ivch_ops = {
.init = ivch_init, .init = ivch_init,
.dpms = ivch_dpms, .dpms = ivch_dpms,
.get_hw_state = ivch_get_hw_state, .get_hw_state = ivch_get_hw_state,

View File

@ -698,7 +698,7 @@ static void ns2501_destroy(struct intel_dvo_device *dvo)
} }
} }
struct intel_dvo_dev_ops ns2501_ops = { const struct intel_dvo_dev_ops ns2501_ops = {
.init = ns2501_init, .init = ns2501_init,
.detect = ns2501_detect, .detect = ns2501_detect,
.mode_valid = ns2501_mode_valid, .mode_valid = ns2501_mode_valid,

View File

@ -267,7 +267,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
} }
} }
struct intel_dvo_dev_ops sil164_ops = { const struct intel_dvo_dev_ops sil164_ops = {
.init = sil164_init, .init = sil164_init,
.detect = sil164_detect, .detect = sil164_detect,
.mode_valid = sil164_mode_valid, .mode_valid = sil164_mode_valid,

View File

@ -306,7 +306,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
} }
} }
struct intel_dvo_dev_ops tfp410_ops = { const struct intel_dvo_dev_ops tfp410_ops = {
.init = tfp410_init, .init = tfp410_init,
.detect = tfp410_detect, .detect = tfp410_detect,
.mode_valid = tfp410_mode_valid, .mode_valid = tfp410_mode_valid,

8 binary files not shown.

View File

@ -10,35 +10,58 @@ format MS COFF
public ___start_builtin_fw public ___start_builtin_fw
public ___end_builtin_fw public ___end_builtin_fw
section '.text' code readable executable align 16 section '.rdata' data readable align 16
align 16 align 16
macro CP_code [arg] macro DMC_code [arg]
{ {
dd FIRMWARE_#arg#_CP dd FIRMWARE_#arg#_DMC
dd arg#_CP_START dd arg#_DMC_START
dd (arg#_CP_END - arg#_CP_START) dd (arg#_DMC_END - arg#_DMC_START)
} }
macro CP_firmware [arg] macro DMC_firmware [arg]
{ {
forward forward
FIRMWARE_#arg#_CP db 'i915/',`arg,'.bin',0 FIRMWARE_#arg#_DMC db 'i915/',`arg,'.bin',0
forward forward
align 16 align 16
arg#_CP_START: arg#_DMC_START:
file "firmware/"#`arg#".bin" file "firmware/"#`arg#".bin"
arg#_CP_END: arg#_DMC_END:
}
macro GUC_code [arg]
{
dd FIRMWARE_#arg#_GUC
dd arg#_GUC_START
dd (arg#_GUC_END - arg#_GUC_START)
}
macro GUC_firmware [arg]
{
forward
FIRMWARE_#arg#_GUC db 'i915/',`arg,'.bin',0
forward
align 16
arg#_GUC_START:
file "firmware/"#`arg#".bin"
arg#_GUC_END:
} }
___start_builtin_fw: ___start_builtin_fw:
CP_code skl_guc_ver4 DMC_code skl_dmc_ver1
DMC_code bxt_dmc_ver1
GUC_code skl_guc_ver4
___end_builtin_fw: ___end_builtin_fw:
CP_firmware skl_guc_ver4 DMC_firmware skl_dmc_ver1
DMC_firmware bxt_dmc_ver1
GUC_firmware skl_guc_ver4
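
Each *_code entry above emits three dwords (name pointer, start address, length), which is the layout the firmware loader's builtin table walks on the C side (sketch; struct and symbol names follow the linux/firmware.h convention, with MS COFF prepending the extra leading underscore seen in the assembly):

struct builtin_fw {
	char *name;		/* e.g. "i915/skl_dmc_ver1.bin" */
	void *data;
	unsigned long size;
};

extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];

static bool find_builtin_fw(const char *name, const void **data,
			    unsigned long *size)
{
	struct builtin_fw *fw;

	for (fw = __start_builtin_fw; fw != __end_builtin_fw; fw++) {
		if (strcmp(name, fw->name) == 0) {
			*data = fw->data;
			*size = fw->size;
			return true;
		}
	}
	return false;
}
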

View File

@ -83,6 +83,9 @@
#endif #endif
#define strlen __builtin_strlen #define strlen __builtin_strlen
#define strcmp __builtin_strcmp
#define strncmp __builtin_strncmp
#define printf __builtin_printf
# define _(msgid) (msgid) # define _(msgid) (msgid)

View File

@ -407,14 +407,14 @@ static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
* LRI. * LRI.
*/ */
struct drm_i915_reg_descriptor { struct drm_i915_reg_descriptor {
u32 addr; i915_reg_t addr;
u32 mask; u32 mask;
u32 value; u32 value;
}; };
/* Convenience macro for adding 32-bit registers. */ /* Convenience macro for adding 32-bit registers. */
#define REG32(address, ...) \ #define REG32(_reg, ...) \
{ .addr = address, __VA_ARGS__ } { .addr = (_reg), __VA_ARGS__ }
/* /*
* Convenience macro for adding 64-bit registers. * Convenience macro for adding 64-bit registers.
@ -423,8 +423,13 @@ struct drm_i915_reg_descriptor {
* access commands only allow 32-bit accesses. Hence, we have to include * access commands only allow 32-bit accesses. Hence, we have to include
* entries for both halves of the 64-bit registers. * entries for both halves of the 64-bit registers.
*/ */
#define REG64(addr) \ #define REG64(_reg) \
REG32(addr), REG32(addr + sizeof(u32)) { .addr = _reg }, \
{ .addr = _reg ## _UDW }
#define REG64_IDX(_reg, idx) \
{ .addr = _reg(idx) }, \
{ .addr = _reg ## _UDW(idx) }
static const struct drm_i915_reg_descriptor gen7_render_regs[] = { static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG64(GPGPU_THREADS_DISPATCHED), REG64(GPGPU_THREADS_DISPATCHED),
@ -451,14 +456,14 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG32(GEN7_GPGPU_DISPATCHDIMX), REG32(GEN7_GPGPU_DISPATCHDIMX),
REG32(GEN7_GPGPU_DISPATCHDIMY), REG32(GEN7_GPGPU_DISPATCHDIMY),
REG32(GEN7_GPGPU_DISPATCHDIMZ), REG32(GEN7_GPGPU_DISPATCHDIMZ),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)), REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)), REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 1),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)), REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 2),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)), REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 3),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)), REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 0),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)), REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 1),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)), REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 2),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)), REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 3),
REG32(GEN7_SO_WRITE_OFFSET(0)), REG32(GEN7_SO_WRITE_OFFSET(0)),
REG32(GEN7_SO_WRITE_OFFSET(1)), REG32(GEN7_SO_WRITE_OFFSET(1)),
REG32(GEN7_SO_WRITE_OFFSET(2)), REG32(GEN7_SO_WRITE_OFFSET(2)),
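
Spelled out, the first converted entry expands as below, which is why every 64-bit counter now contributes two 32-bit descriptors (editorial expansion of the macro above):

/*
 * REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0) expands to
 *
 *	{ .addr = GEN7_SO_NUM_PRIMS_WRITTEN(0) },	low dword
 *	{ .addr = GEN7_SO_NUM_PRIMS_WRITTEN_UDW(0) },	upper dword
 *
 * matching the rule that register access commands only allow 32-bit
 * accesses.
 */
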
@ -592,7 +597,7 @@ static bool check_sorted(int ring_id,
bool ret = true; bool ret = true;
for (i = 0; i < reg_count; i++) { for (i = 0; i < reg_count; i++) {
u32 curr = reg_table[i].addr; u32 curr = i915_mmio_reg_offset(reg_table[i].addr);
if (curr < previous) { if (curr < previous) {
DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n", DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
@ -847,7 +852,7 @@ find_reg(const struct drm_i915_reg_descriptor *table,
int i; int i;
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
if (table[i].addr == addr) if (i915_mmio_reg_offset(table[i].addr) == addr)
return &table[i]; return &table[i];
} }
} }
@ -1023,7 +1028,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* to the register. Hence, limit OACONTROL writes to * to the register. Hence, limit OACONTROL writes to
* only MI_LOAD_REGISTER_IMM commands. * only MI_LOAD_REGISTER_IMM commands.
*/ */
if (reg_addr == OACONTROL) { if (reg_addr == i915_mmio_reg_offset(OACONTROL)) {
if (desc->cmd.value == MI_LOAD_REGISTER_MEM) { if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n"); DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
return false; return false;

View File

@ -28,7 +28,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/async.h>
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/drm_crtc_helper.h> #include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h> #include <drm/drm_fb_helper.h>
@ -44,6 +43,7 @@
//#include <linux/pnp.h> //#include <linux/pnp.h>
//#include <linux/vga_switcheroo.h> //#include <linux/vga_switcheroo.h>
#include <linux/slab.h> #include <linux/slab.h>
//#include <acpi/video.h>
#include <linux/pm.h> #include <linux/pm.h>
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
@ -130,7 +130,7 @@ int i915_getparam(struct drm_device *dev, void *data,
value = 1; value = 1;
break; break;
case I915_PARAM_HAS_SECURE_BATCHES: case I915_PARAM_HAS_SECURE_BATCHES:
value = 1; value = capable(CAP_SYS_ADMIN);
break; break;
case I915_PARAM_HAS_PINNED_BATCHES: case I915_PARAM_HAS_PINNED_BATCHES:
value = 1; value = 1;
@ -206,7 +206,7 @@ intel_setup_mchbar(struct drm_device *dev)
u32 temp; u32 temp;
bool enabled; bool enabled;
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
return; return;
dev_priv->mchbar_need_disable = false; dev_priv->mchbar_need_disable = false;
@ -286,7 +286,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int ret; int ret;
ret = intel_parse_bios(dev); ret = intel_bios_init(dev_priv);
if (ret) if (ret)
DRM_INFO("failed to find VBIOS tables\n"); DRM_INFO("failed to find VBIOS tables\n");
@ -305,7 +305,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret) if (ret)
goto cleanup_vga_switcheroo; goto cleanup_vga_switcheroo;
intel_power_domains_init_hw(dev_priv); intel_power_domains_init_hw(dev_priv, false);
intel_csr_ucode_init(dev_priv);
ret = intel_irq_install(dev_priv); ret = intel_irq_install(dev_priv);
if (ret) if (ret)
@ -317,6 +319,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
* working irqs for e.g. gmbus and dp aux transfers. */ * working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev); intel_modeset_init(dev);
intel_guc_ucode_init(dev);
ret = i915_gem_init(dev); ret = i915_gem_init(dev);
if (ret) if (ret)
goto cleanup_irq; goto cleanup_irq;
@ -358,6 +362,7 @@ cleanup_gem:
i915_gem_context_fini(dev); i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
cleanup_irq: cleanup_irq:
intel_guc_ucode_fini(dev);
// drm_irq_uninstall(dev); // drm_irq_uninstall(dev);
cleanup_gem_stolen: cleanup_gem_stolen:
i915_gem_cleanup_stolen(dev); i915_gem_cleanup_stolen(dev);
@ -571,7 +576,8 @@ static void gen9_sseu_info_init(struct drm_device *dev)
* supports EU power gating on devices with more than one EU * supports EU power gating on devices with more than one EU
* pair per subslice. * pair per subslice.
*/ */
info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1)); info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
(info->slice_total > 1));
info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1)); info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
info->has_eu_pg = (info->eu_per_subslice > 2); info->has_eu_pg = (info->eu_per_subslice > 2);
} }
@ -685,7 +691,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
info->num_sprites[PIPE_A] = 2; info->num_sprites[PIPE_A] = 2;
info->num_sprites[PIPE_B] = 2; info->num_sprites[PIPE_B] = 2;
info->num_sprites[PIPE_C] = 1; info->num_sprites[PIPE_C] = 1;
} else if (IS_VALLEYVIEW(dev)) } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
for_each_pipe(dev_priv, pipe) for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2; info->num_sprites[pipe] = 2;
else else
@ -697,7 +703,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
info->num_pipes = 0; info->num_pipes = 0;
} else if (info->num_pipes > 0 && } else if (info->num_pipes > 0 &&
(INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) && (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
!IS_VALLEYVIEW(dev)) { HAS_PCH_SPLIT(dev)) {
u32 fuse_strap = I915_READ(FUSE_STRAP); u32 fuse_strap = I915_READ(FUSE_STRAP);
u32 sfuse_strap = I915_READ(SFUSE_STRAP); u32 sfuse_strap = I915_READ(SFUSE_STRAP);
@ -742,9 +748,6 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
static void intel_init_dpio(struct drm_i915_private *dev_priv) static void intel_init_dpio(struct drm_i915_private *dev_priv)
{ {
if (!IS_VALLEYVIEW(dev_priv))
return;
/* /*
* IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
* CHV x1 PHY (DP/HDMI D) * CHV x1 PHY (DP/HDMI D)
@ -753,7 +756,7 @@ static void intel_init_dpio(struct drm_i915_private *dev_priv)
if (IS_CHERRYVIEW(dev_priv)) { if (IS_CHERRYVIEW(dev_priv)) {
DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
} else { } else if (IS_VALLEYVIEW(dev_priv)) {
DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
} }
} }
@ -798,11 +801,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->mmio_flip_lock); spin_lock_init(&dev_priv->mmio_flip_lock);
mutex_init(&dev_priv->sb_lock); mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->modeset_restore_lock); mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->csr_lock);
mutex_init(&dev_priv->av_mutex); mutex_init(&dev_priv->av_mutex);
intel_pm_setup(dev); intel_pm_setup(dev);
intel_runtime_pm_get(dev_priv);
intel_display_crc_init(dev); intel_display_crc_init(dev);
i915_dump_device_info(dev_priv); i915_dump_device_info(dev_priv);
@ -847,9 +851,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_uncore_init(dev); intel_uncore_init(dev);
/* Load CSR Firmware for SKL */
intel_csr_ucode_init(dev);
ret = i915_gem_gtt_init(dev); ret = i915_gem_gtt_init(dev);
if (ret) if (ret)
goto out_freecsr; goto out_freecsr;
@ -998,6 +999,8 @@ int i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int ret; int ret;
intel_fbdev_fini(dev);
i915_audio_component_cleanup(dev_priv); i915_audio_component_cleanup(dev_priv);
ret = i915_gem_suspend(dev); ret = i915_gem_suspend(dev);
@ -1020,8 +1023,6 @@ int i915_driver_unload(struct drm_device *dev)
acpi_video_unregister(); acpi_video_unregister();
intel_fbdev_fini(dev);
drm_vblank_cleanup(dev); drm_vblank_cleanup(dev);
intel_modeset_cleanup(dev); intel_modeset_cleanup(dev);
@ -1063,7 +1064,7 @@ int i915_driver_unload(struct drm_device *dev)
intel_fbc_cleanup_cfb(dev_priv); intel_fbc_cleanup_cfb(dev_priv);
i915_gem_cleanup_stolen(dev); i915_gem_cleanup_stolen(dev);
intel_csr_ucode_fini(dev); intel_csr_ucode_fini(dev_priv);
intel_teardown_mchbar(dev); intel_teardown_mchbar(dev);
@ -1132,8 +1133,6 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{ {
struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_file_private *file_priv = file->driver_priv;
if (file_priv && file_priv->bsd_ring)
file_priv->bsd_ring = NULL;
kfree(file_priv); kfree(file_priv);
} }

View File

@ -37,14 +37,14 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include <drm/i915_pciids.h>
#include <drm/drm_crtc_helper.h> #include <drm/drm_crtc_helper.h>
#include <syscall.h> #include <syscall.h>
int init_display_kms(struct drm_device *dev);
extern int intel_agp_enabled;
static struct drm_driver driver; static struct drm_driver driver;
#define GEN_DEFAULT_PIPEOFFSETS \ #define GEN_DEFAULT_PIPEOFFSETS \
@ -68,13 +68,8 @@ static struct drm_driver driver;
#define IVB_CURSOR_OFFSETS \ #define IVB_CURSOR_OFFSETS \
.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET } .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
int init_display_kms(struct drm_device *dev);
extern int intel_agp_enabled;
#define PCI_VENDOR_ID_INTEL 0x8086
static const struct intel_device_info intel_i915g_info = { static const struct intel_device_info intel_i915g_info = {
.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2, .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
@ -207,161 +202,111 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.need_gfx_hws = 1, .has_hotplug = 1, \ .need_gfx_hws = 1, .has_hotplug = 1, \
.has_fbc = 1, \ .has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.has_llc = 1 .has_llc = 1, \
GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS
static const struct intel_device_info intel_ivybridge_d_info = { static const struct intel_device_info intel_ivybridge_d_info = {
GEN7_FEATURES, GEN7_FEATURES,
.is_ivybridge = 1, .is_ivybridge = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
}; };
static const struct intel_device_info intel_ivybridge_m_info = { static const struct intel_device_info intel_ivybridge_m_info = {
GEN7_FEATURES, GEN7_FEATURES,
.is_ivybridge = 1, .is_ivybridge = 1,
.is_mobile = 1, .is_mobile = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
}; };
static const struct intel_device_info intel_ivybridge_q_info = { static const struct intel_device_info intel_ivybridge_q_info = {
GEN7_FEATURES, GEN7_FEATURES,
.is_ivybridge = 1, .is_ivybridge = 1,
.num_pipes = 0, /* legal, last one wins */ .num_pipes = 0, /* legal, last one wins */
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
}; };
#define VLV_FEATURES \
.gen = 7, .num_pipes = 2, \
.need_gfx_hws = 1, .has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.display_mmio_offset = VLV_DISPLAY_BASE, \
GEN_DEFAULT_PIPEOFFSETS, \
CURSOR_OFFSETS
static const struct intel_device_info intel_valleyview_m_info = { static const struct intel_device_info intel_valleyview_m_info = {
GEN7_FEATURES, VLV_FEATURES,
.is_mobile = 1,
.num_pipes = 2,
.is_valleyview = 1, .is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE, .is_mobile = 1,
.has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
}; };
static const struct intel_device_info intel_valleyview_d_info = { static const struct intel_device_info intel_valleyview_d_info = {
GEN7_FEATURES, VLV_FEATURES,
.num_pipes = 2,
.is_valleyview = 1, .is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
.has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
}; };
#define HSW_FEATURES \
GEN7_FEATURES, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
.has_ddi = 1, \
.has_fpga_dbg = 1
static const struct intel_device_info intel_haswell_d_info = { static const struct intel_device_info intel_haswell_d_info = {
GEN7_FEATURES, HSW_FEATURES,
.is_haswell = 1, .is_haswell = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
}; };
static const struct intel_device_info intel_haswell_m_info = { static const struct intel_device_info intel_haswell_m_info = {
GEN7_FEATURES, HSW_FEATURES,
.is_haswell = 1, .is_haswell = 1,
.is_mobile = 1, .is_mobile = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
}; };
static const struct intel_device_info intel_broadwell_d_info = { static const struct intel_device_info intel_broadwell_d_info = {
.gen = 8, .num_pipes = 3, HSW_FEATURES,
.need_gfx_hws = 1, .has_hotplug = 1, .gen = 8,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
}; };
static const struct intel_device_info intel_broadwell_m_info = { static const struct intel_device_info intel_broadwell_m_info = {
.gen = 8, .is_mobile = 1, .num_pipes = 3, HSW_FEATURES,
.need_gfx_hws = 1, .has_hotplug = 1, .gen = 8, .is_mobile = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
}; };
static const struct intel_device_info intel_broadwell_gt3d_info = { static const struct intel_device_info intel_broadwell_gt3d_info = {
.gen = 8, .num_pipes = 3, HSW_FEATURES,
.need_gfx_hws = 1, .has_hotplug = 1, .gen = 8,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
}; };
static const struct intel_device_info intel_broadwell_gt3m_info = { static const struct intel_device_info intel_broadwell_gt3m_info = {
.gen = 8, .is_mobile = 1, .num_pipes = 3, HSW_FEATURES,
.need_gfx_hws = 1, .has_hotplug = 1, .gen = 8, .is_mobile = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
}; };
static const struct intel_device_info intel_cherryview_info = { static const struct intel_device_info intel_cherryview_info = {
.gen = 8, .num_pipes = 3, .gen = 8, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1, .need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.is_valleyview = 1, .is_cherryview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE, .display_mmio_offset = VLV_DISPLAY_BASE,
GEN_CHV_PIPEOFFSETS, GEN_CHV_PIPEOFFSETS,
CURSOR_OFFSETS, CURSOR_OFFSETS,
}; };
static const struct intel_device_info intel_skylake_info = { static const struct intel_device_info intel_skylake_info = {
HSW_FEATURES,
.is_skylake = 1, .is_skylake = 1,
.gen = 9, .num_pipes = 3, .gen = 9,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
}; };
static const struct intel_device_info intel_skylake_gt3_info = { static const struct intel_device_info intel_skylake_gt3_info = {
HSW_FEATURES,
.is_skylake = 1, .is_skylake = 1,
.gen = 9, .num_pipes = 3, .gen = 9,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
}; };
static const struct intel_device_info intel_broxton_info = { static const struct intel_device_info intel_broxton_info = {
.is_preliminary = 1, .is_preliminary = 1,
.is_broxton = 1,
.gen = 9, .gen = 9,
.need_gfx_hws = 1, .has_hotplug = 1, .need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
@ -373,54 +318,68 @@ static const struct intel_device_info intel_broxton_info = {
IVB_CURSOR_OFFSETS, IVB_CURSOR_OFFSETS,
}; };
static const struct intel_device_info intel_kabylake_info = {
HSW_FEATURES,
.is_preliminary = 1,
.is_kabylake = 1,
.gen = 9,
};
static const struct intel_device_info intel_kabylake_gt3_info = {
HSW_FEATURES,
.is_preliminary = 1,
.is_kabylake = 1,
.gen = 9,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
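
The FEATURES macros work because they expand to ordinary designated initializers, and C lets a later initializer for the same member override an earlier one (the "legal, last one wins" noted in-line). A contrived sketch (example_info is not a real platform entry):

static const struct intel_device_info example_info = {
	HSW_FEATURES,		/* brings .gen = 7, .has_llc = 1, ... */
	.gen = 9,		/* later initializer overrides the macro's */
	.has_fbc = 0,		/* legal, last one wins */
};
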
/* /*
* Make sure any device matches here are from most specific to most * Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem * general. For example, since the Quanta match is based on the subsystem
* and subvendor IDs, we need it to come before the more general IVB * and subvendor IDs, we need it to come before the more general IVB
* PCI ID matches, otherwise we'll use the wrong info struct above. * PCI ID matches, otherwise we'll use the wrong info struct above.
*/ */
#define INTEL_PCI_IDS \ static const struct pci_device_id pciidlist[] = {
INTEL_I915G_IDS(&intel_i915g_info), \ INTEL_I915G_IDS(&intel_i915g_info),
INTEL_I915GM_IDS(&intel_i915gm_info), \ INTEL_I915GM_IDS(&intel_i915gm_info),
INTEL_I945G_IDS(&intel_i945g_info), \ INTEL_I945G_IDS(&intel_i945g_info),
INTEL_I945GM_IDS(&intel_i945gm_info), \ INTEL_I945GM_IDS(&intel_i945gm_info),
INTEL_I965G_IDS(&intel_i965g_info), \ INTEL_I965G_IDS(&intel_i965g_info),
INTEL_G33_IDS(&intel_g33_info), \ INTEL_G33_IDS(&intel_g33_info),
INTEL_I965GM_IDS(&intel_i965gm_info), \ INTEL_I965GM_IDS(&intel_i965gm_info),
INTEL_GM45_IDS(&intel_gm45_info), \ INTEL_GM45_IDS(&intel_gm45_info),
INTEL_G45_IDS(&intel_g45_info), \ INTEL_G45_IDS(&intel_g45_info),
INTEL_PINEVIEW_IDS(&intel_pineview_info), \ INTEL_PINEVIEW_IDS(&intel_pineview_info),
INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \ INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \ INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \ INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \ INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \ INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \ INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \ INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
INTEL_HSW_D_IDS(&intel_haswell_d_info), \ INTEL_HSW_D_IDS(&intel_haswell_d_info),
INTEL_HSW_M_IDS(&intel_haswell_m_info), \ INTEL_HSW_M_IDS(&intel_haswell_m_info),
INTEL_VLV_M_IDS(&intel_valleyview_m_info), \ INTEL_VLV_M_IDS(&intel_valleyview_m_info),
INTEL_VLV_D_IDS(&intel_valleyview_d_info), \ INTEL_VLV_D_IDS(&intel_valleyview_d_info),
INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \ INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \ INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \ INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \ INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
INTEL_CHV_IDS(&intel_cherryview_info), \ INTEL_CHV_IDS(&intel_cherryview_info),
INTEL_SKL_GT1_IDS(&intel_skylake_info), \ INTEL_SKL_GT1_IDS(&intel_skylake_info),
INTEL_SKL_GT2_IDS(&intel_skylake_info), \ INTEL_SKL_GT2_IDS(&intel_skylake_info),
INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), \ INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
INTEL_BXT_IDS(&intel_broxton_info) INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
INTEL_BXT_IDS(&intel_broxton_info),
static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_KBL_GT1_IDS(&intel_kabylake_info),
INTEL_PCI_IDS, INTEL_KBL_GT2_IDS(&intel_kabylake_info),
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
{0, 0, 0} {0, 0, 0}
}; };
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev) static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{ {
enum intel_pch ret = PCH_NOP; enum intel_pch ret = PCH_NOP;
@ -441,7 +400,7 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
ret = PCH_LPT; ret = PCH_LPT;
DRM_DEBUG_KMS("Assuming LynxPoint PCH\n"); DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
} else if (IS_SKYLAKE(dev)) { } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
ret = PCH_SPT; ret = PCH_SPT;
DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n"); DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
} }
@ -504,11 +463,13 @@ void intel_detect_pch(struct drm_device *dev)
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT; dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
WARN_ON(!IS_SKYLAKE(dev)); WARN_ON(!IS_SKYLAKE(dev) &&
!IS_KABYLAKE(dev));
} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) { } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT; dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev)); WARN_ON(!IS_SKYLAKE(dev) &&
!IS_KABYLAKE(dev));
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
pch->subsystem_vendor == 0x1af4 && pch->subsystem_vendor == 0x1af4 &&
@ -552,47 +513,32 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
} }
#if 0 #if 0
void i915_firmware_load_error_print(const char *fw_path, int err)
{
DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
/*
* If the reason is not known assume -ENOENT since that's the most
* usual failure mode.
*/
if (!err)
err = -ENOENT;
if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
return;
DRM_ERROR(
"The driver is built-in, so to load the firmware you need to\n"
"include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
"in your initrd/initramfs image.\n");
}
static void intel_suspend_encoders(struct drm_i915_private *dev_priv) static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{ {
struct drm_device *dev = dev_priv->dev; struct drm_device *dev = dev_priv->dev;
struct drm_encoder *encoder; struct intel_encoder *encoder;
drm_modeset_lock_all(dev); drm_modeset_lock_all(dev);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { for_each_intel_encoder(dev, encoder)
struct intel_encoder *intel_encoder = to_intel_encoder(encoder); if (encoder->suspend)
encoder->suspend(encoder);
if (intel_encoder->suspend)
intel_encoder->suspend(intel_encoder);
}
drm_modeset_unlock_all(dev); drm_modeset_unlock_all(dev);
} }
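
The rewritten loop leans on the driver's encoder iterator rather than open-coding the list walk; its definition is essentially (sketch, see i915_drv.h):

#define for_each_intel_encoder(dev, intel_encoder) \
	list_for_each_entry(intel_encoder, \
			    &(dev)->mode_config.encoder_list, \
			    base.head)
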
static int intel_suspend_complete(struct drm_i915_private *dev_priv); static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv, static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume); bool rpm_resume);
static int skl_resume_prepare(struct drm_i915_private *dev_priv);
static int bxt_resume_prepare(struct drm_i915_private *dev_priv); static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
if (acpi_target_system_state() < ACPI_STATE_S3)
return true;
#endif
return false;
}
static int i915_drm_suspend(struct drm_device *dev) static int i915_drm_suspend(struct drm_device *dev)
{ {
@ -605,6 +551,8 @@ static int i915_drm_suspend(struct drm_device *dev)
dev_priv->modeset_restore = MODESET_SUSPENDED; dev_priv->modeset_restore = MODESET_SUSPENDED;
mutex_unlock(&dev_priv->modeset_restore_lock); mutex_unlock(&dev_priv->modeset_restore_lock);
disable_rpm_wakeref_asserts(dev_priv);
/* We do a lot of poking in a lot of registers, make sure they work /* We do a lot of poking in a lot of registers, make sure they work
* properly. */ * properly. */
intel_display_set_init_power(dev_priv, true); intel_display_set_init_power(dev_priv, true);
@ -617,7 +565,7 @@ static int i915_drm_suspend(struct drm_device *dev)
if (error) { if (error) {
dev_err(&dev->pdev->dev, dev_err(&dev->pdev->dev,
"GEM idle failed, resume might fail\n"); "GEM idle failed, resume might fail\n");
return error; goto out;
} }
intel_guc_suspend(dev); intel_guc_suspend(dev);
@ -645,11 +593,7 @@ static int i915_drm_suspend(struct drm_device *dev)
i915_save_state(dev); i915_save_state(dev);
opregion_target_state = PCI_D3cold; opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
if (acpi_target_system_state() < ACPI_STATE_S3)
opregion_target_state = PCI_D1;
#endif
intel_opregion_notify_adapter(dev, opregion_target_state); intel_opregion_notify_adapter(dev, opregion_target_state);
intel_uncore_forcewake_reset(dev, false); intel_uncore_forcewake_reset(dev, false);
@ -661,20 +605,42 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_display_set_init_power(dev_priv, false); intel_display_set_init_power(dev_priv, false);
return 0; if (HAS_CSR(dev_priv))
flush_work(&dev_priv->csr.work);
out:
enable_rpm_wakeref_asserts(dev_priv);
return error;
} }
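
A pattern worth noting: every suspend/resume entry point in this commit gains the same wakeref-assert bracketing, so hardware access while legitimately awake without a runtime-PM reference does not trip the new assertions (editorial summary):

/*
 *	disable_rpm_wakeref_asserts(dev_priv);
 *	... register access, GEM idling, etc ...
 *	enable_rpm_wakeref_asserts(dev_priv);
 */
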
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{ {
struct drm_i915_private *dev_priv = drm_dev->dev_private; struct drm_i915_private *dev_priv = drm_dev->dev_private;
bool fw_csr;
int ret; int ret;
disable_rpm_wakeref_asserts(dev_priv);
fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
/*
* In case of firmware assisted context save/restore don't manually
* deinit the power domains. This also means the CSR/DMC firmware will
* stay active, it will power down any HW resources as required and
* also enable deeper system power states that would be blocked if the
* firmware was inactive.
*/
if (!fw_csr)
intel_power_domains_suspend(dev_priv);
ret = intel_suspend_complete(dev_priv); ret = intel_suspend_complete(dev_priv);
if (ret) { if (ret) {
DRM_ERROR("Suspend complete failed: %d\n", ret); DRM_ERROR("Suspend complete failed: %d\n", ret);
if (!fw_csr)
intel_power_domains_init_hw(dev_priv, true);
return ret; goto out;
} }
pci_disable_device(drm_dev->pdev); pci_disable_device(drm_dev->pdev);
@ -693,7 +659,12 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6)) if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
pci_set_power_state(drm_dev->pdev, PCI_D3hot); pci_set_power_state(drm_dev->pdev, PCI_D3hot);
return 0; dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
out:
enable_rpm_wakeref_asserts(dev_priv);
return ret;
} }
int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
@ -724,6 +695,8 @@ static int i915_drm_resume(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
disable_rpm_wakeref_asserts(dev_priv);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev); i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
@ -788,13 +761,15 @@ static int i915_drm_resume(struct drm_device *dev)
drm_kms_helper_poll_enable(dev); drm_kms_helper_poll_enable(dev);
enable_rpm_wakeref_asserts(dev_priv);
return 0; return 0;
} }
static int i915_drm_resume_early(struct drm_device *dev) static int i915_drm_resume_early(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0; int ret;
/* /*
* We have a resume ordering issue with the snd-hda driver also * We have a resume ordering issue with the snd-hda driver also
@ -805,12 +780,46 @@ static int i915_drm_resume_early(struct drm_device *dev)
* FIXME: This should be solved with a special hdmi sink device or * FIXME: This should be solved with a special hdmi sink device or
* similar so that power domains can be employed. * similar so that power domains can be employed.
*/ */
if (pci_enable_device(dev->pdev))
return -EIO; /*
* Note that we need to set the power state explicitly, since we
* powered off the device during freeze and the PCI core won't power
* it back up for us during thaw. Powering off the device during
* freeze is not a hard requirement though, and during the
* suspend/resume phases the PCI core makes sure we get here with the
* device powered on. So in case we change our freeze logic and keep
* the device powered we can also remove the following set power state
* call.
*/
ret = pci_set_power_state(dev->pdev, PCI_D0);
if (ret) {
DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
goto out;
}
/*
* Note that pci_enable_device() first enables any parent bridge
* device and only then sets the power state for this device. The
* bridge enabling is a nop though, since bridge devices are resumed
* first. The order of enabling power and enabling the device is
* imposed by the PCI core as described above, so here we preserve the
* same order for the freeze/thaw phases.
*
* TODO: eventually we should remove pci_disable_device() /
* pci_enable_device() from suspend/resume. Due to how they
* depend on the device enable refcount we can't anyway depend on them
* disabling/enabling the device.
*/
if (pci_enable_device(dev->pdev)) {
ret = -EIO;
goto out;
}
pci_set_master(dev->pdev); pci_set_master(dev->pdev);
if (IS_VALLEYVIEW(dev_priv)) disable_rpm_wakeref_asserts(dev_priv);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_resume_prepare(dev_priv, false); ret = vlv_resume_prepare(dev_priv, false);
if (ret) if (ret)
DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
@ -820,13 +829,18 @@ static int i915_drm_resume_early(struct drm_device *dev)
if (IS_BROXTON(dev)) if (IS_BROXTON(dev))
ret = bxt_resume_prepare(dev_priv); ret = bxt_resume_prepare(dev_priv);
else if (IS_SKYLAKE(dev_priv))
ret = skl_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv); hsw_disable_pc8(dev_priv);
intel_uncore_sanitize(dev); intel_uncore_sanitize(dev);
intel_power_domains_init_hw(dev_priv);
if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
intel_power_domains_init_hw(dev_priv, true);
out:
dev_priv->suspended_to_idle = false;
enable_rpm_wakeref_asserts(dev_priv);
return ret; return ret;
} }
@ -896,6 +910,8 @@ int i915_reset(struct drm_device *dev)
return ret; return ret;
} }
intel_overlay_reset(dev_priv);
/* Ok, now get things going again... */ /* Ok, now get things going again... */
/* /*
@ -1031,15 +1047,6 @@ static int i915_pm_resume(struct device *dev)
return i915_drm_resume(drm_dev); return i915_drm_resume(drm_dev);
} }
static int skl_suspend_complete(struct drm_i915_private *dev_priv)
{
/* Enabling DC6 is not a hard requirement to enter runtime D3 */
skl_uninit_cdclk(dev_priv);
return 0;
}
static int hsw_suspend_complete(struct drm_i915_private *dev_priv) static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{ {
hsw_enable_pc8(dev_priv); hsw_enable_pc8(dev_priv);
@ -1079,16 +1086,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
return 0; return 0;
} }
static int skl_resume_prepare(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
skl_init_cdclk(dev_priv);
intel_csr_load_program(dev);
return 0;
}
/* /*
* Save all Gunit registers that may be lost after a D3 and a subsequent * Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is * S0i[R123] transition. The list of registers needing a save/restore is
@ -1478,6 +1475,9 @@ static int intel_runtime_suspend(struct device *device)
return -EAGAIN; return -EAGAIN;
} }
disable_rpm_wakeref_asserts(dev_priv);
/* /*
* We are safe here against re-faults, since the fault handler takes * We are safe here against re-faults, since the fault handler takes
* an RPM reference. * an RPM reference.
@ -1485,6 +1485,8 @@ static int intel_runtime_suspend(struct device *device)
i915_gem_release_all_mmaps(dev_priv); i915_gem_release_all_mmaps(dev_priv);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
intel_guc_suspend(dev); intel_guc_suspend(dev);
intel_suspend_gt_powersave(dev); intel_suspend_gt_powersave(dev);
@ -1495,11 +1497,15 @@ static int intel_runtime_suspend(struct device *device)
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
intel_runtime_pm_enable_interrupts(dev_priv); intel_runtime_pm_enable_interrupts(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
return ret; return ret;
} }
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
intel_uncore_forcewake_reset(dev, false); intel_uncore_forcewake_reset(dev, false);
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
dev_priv->pm.suspended = true; dev_priv->pm.suspended = true;
/* /*
@ -1543,6 +1549,9 @@ static int intel_runtime_resume(struct device *device)
DRM_DEBUG_KMS("Resuming device\n"); DRM_DEBUG_KMS("Resuming device\n");
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
disable_rpm_wakeref_asserts(dev_priv);
intel_opregion_notify_adapter(dev, PCI_D0); intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false; dev_priv->pm.suspended = false;
@ -1553,11 +1562,9 @@ static int intel_runtime_resume(struct device *device)
if (IS_BROXTON(dev)) if (IS_BROXTON(dev))
ret = bxt_resume_prepare(dev_priv); ret = bxt_resume_prepare(dev_priv);
else if (IS_SKYLAKE(dev))
ret = skl_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv); hsw_disable_pc8(dev_priv);
else if (IS_VALLEYVIEW(dev_priv)) else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_resume_prepare(dev_priv, true); ret = vlv_resume_prepare(dev_priv, true);
/* /*
@ -1574,11 +1581,13 @@ static int intel_runtime_resume(struct device *device)
* power well, so hpd is reinitialized from there. For * power well, so hpd is reinitialized from there. For
* everyone else do it here. * everyone else do it here.
*/ */
if (!IS_VALLEYVIEW(dev_priv)) if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_init(dev_priv); intel_hpd_init(dev_priv);
intel_enable_gt_powersave(dev); intel_enable_gt_powersave(dev);
enable_rpm_wakeref_asserts(dev_priv);
if (ret) if (ret)
DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
else else
@ -1597,11 +1606,9 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv)
if (IS_BROXTON(dev_priv)) if (IS_BROXTON(dev_priv))
ret = bxt_suspend_complete(dev_priv); ret = bxt_suspend_complete(dev_priv);
else if (IS_SKYLAKE(dev_priv))
ret = skl_suspend_complete(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ret = hsw_suspend_complete(dev_priv); ret = hsw_suspend_complete(dev_priv);
else if (IS_VALLEYVIEW(dev_priv)) else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_suspend_complete(dev_priv); ret = vlv_suspend_complete(dev_priv);
else else
ret = 0; ret = 0;
@ -1729,7 +1736,7 @@ int i915_init(void)
DRM_INFO("device %x:%x\n", device.pci_dev.vendor, DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
device.pci_dev.device); device.pci_dev.device);
driver.driver_features |= DRIVER_MODESET; driver.driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
err = drm_get_pci_dev(&device.pci_dev, ent, &driver); err = drm_get_pci_dev(&device.pci_dev, ent, &driver);
View File
@ -33,6 +33,7 @@
#include <uapi/drm/i915_drm.h> #include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h> #include <uapi/drm/drm_fourcc.h>
#include <drm/drmP.h>
#include "i915_reg.h" #include "i915_reg.h"
#include "intel_bios.h" #include "intel_bios.h"
#include "intel_ringbuffer.h" #include "intel_ringbuffer.h"
@ -45,33 +46,19 @@
#include <drm/intel-gtt.h> #include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */ #include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h> #include <drm/drm_gem.h>
//#include <linux/backlight.h> #include <linux/backlight.h>
#include <linux/hashtable.h> #include <linux/hashtable.h>
#include <linux/kref.h> #include <linux/kref.h>
#include "intel_guc.h" #include "intel_guc.h"
#include <linux/spinlock.h> #include <linux/spinlock.h>
#define ioread32(addr) readl(addr)
static inline u8 inb(u16 port)
{
u8 v;
asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
return v;
}
static inline void outb(u8 v, u16 port)
{
asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
}
/* General customization: /* General customization:
*/ */
#define DRIVER_NAME "i915" #define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics" #define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20151010" #define DRIVER_DATE "20151218"
#undef WARN_ON #undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */ /* Many gcc seem to not see through this and fall over :( */
@ -194,15 +181,11 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C, POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP, POWER_DOMAIN_TRANSCODER_EDP,
POWER_DOMAIN_PORT_DDI_A_2_LANES, POWER_DOMAIN_PORT_DDI_A_LANES,
POWER_DOMAIN_PORT_DDI_A_4_LANES, POWER_DOMAIN_PORT_DDI_B_LANES,
POWER_DOMAIN_PORT_DDI_B_2_LANES, POWER_DOMAIN_PORT_DDI_C_LANES,
POWER_DOMAIN_PORT_DDI_B_4_LANES, POWER_DOMAIN_PORT_DDI_D_LANES,
POWER_DOMAIN_PORT_DDI_C_2_LANES, POWER_DOMAIN_PORT_DDI_E_LANES,
POWER_DOMAIN_PORT_DDI_C_4_LANES,
POWER_DOMAIN_PORT_DDI_D_2_LANES,
POWER_DOMAIN_PORT_DDI_D_4_LANES,
POWER_DOMAIN_PORT_DDI_E_2_LANES,
POWER_DOMAIN_PORT_DSI, POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT, POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER, POWER_DOMAIN_PORT_OTHER,
@ -214,6 +197,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_C, POWER_DOMAIN_AUX_C,
POWER_DOMAIN_AUX_D, POWER_DOMAIN_AUX_D,
POWER_DOMAIN_GMBUS, POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_INIT, POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM, POWER_DOMAIN_NUM,
@ -303,7 +287,7 @@ struct i915_hotplug {
list_for_each_entry(intel_plane, \ list_for_each_entry(intel_plane, \
&(dev)->mode_config.plane_list, \ &(dev)->mode_config.plane_list, \
base.head) \ base.head) \
if ((intel_plane)->pipe == (intel_crtc)->pipe) for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
#define for_each_intel_crtc(dev, intel_crtc) \ #define for_each_intel_crtc(dev, intel_crtc) \
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
@ -320,15 +304,15 @@ struct i915_hotplug {
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
if ((intel_encoder)->base.crtc == (__crtc)) for_each_if ((intel_encoder)->base.crtc == (__crtc))
#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \ #define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \ list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
if ((intel_connector)->base.encoder == (__encoder)) for_each_if ((intel_connector)->base.encoder == (__encoder))
#define for_each_power_domain(domain, mask) \ #define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask)) for_each_if ((1 << (domain)) & (mask))
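The for_each_if() helper these iterator macros now use is defined elsewhere in this header, outside the hunks shown. Its conventional form is sketched below; the point is that a bare trailing if inside a for-loop macro would silently capture a user's else branch.

/* Sketch of the usual definition: inverting the condition and supplying
 * an empty then-branch means an "else" written after the macro cannot
 * bind to the macro's internal if.
 */
#define for_each_if(condition) if (!(condition)) {} else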
struct drm_i915_private; struct drm_i915_private;
struct i915_mm_struct; struct i915_mm_struct;
@ -474,7 +458,9 @@ struct intel_opregion {
u32 swsci_gbda_sub_functions; u32 swsci_gbda_sub_functions;
u32 swsci_sbcb_sub_functions; u32 swsci_sbcb_sub_functions;
struct opregion_asle *asle; struct opregion_asle *asle;
void *vbt; void *rvda;
const void *vbt;
u32 vbt_size;
u32 *lid_state; u32 *lid_state;
struct work_struct asle_work; struct work_struct asle_work;
}; };
@ -645,11 +631,9 @@ struct drm_i915_display_funcs {
int target, int refclk, int target, int refclk,
struct dpll *match_clock, struct dpll *match_clock,
struct dpll *best_clock); struct dpll *best_clock);
int (*compute_pipe_wm)(struct intel_crtc *crtc,
struct drm_atomic_state *state);
void (*update_wm)(struct drm_crtc *crtc); void (*update_wm)(struct drm_crtc *crtc);
void (*update_sprite_wm)(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enable, bool scaled);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state); int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state); void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active, /* Returns the active state of the crtc, and if the crtc is active,
@ -707,18 +691,18 @@ struct intel_uncore_funcs {
void (*force_wake_put)(struct drm_i915_private *dev_priv, void (*force_wake_put)(struct drm_i915_private *dev_priv,
enum forcewake_domains domains); enum forcewake_domains domains);
uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace); uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace); uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace); uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace); uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset, void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r,
uint8_t val, bool trace); uint8_t val, bool trace);
void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset, void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r,
uint16_t val, bool trace); uint16_t val, bool trace);
void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset, void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
uint32_t val, bool trace); uint32_t val, bool trace);
void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset, void (*mmio_writeq)(struct drm_i915_private *dev_priv, i915_reg_t r,
uint64_t val, bool trace); uint64_t val, bool trace);
}; };
@ -735,11 +719,11 @@ struct intel_uncore {
enum forcewake_domain_id id; enum forcewake_domain_id id;
unsigned wake_count; unsigned wake_count;
struct timer_list timer; struct timer_list timer;
u32 reg_set; i915_reg_t reg_set;
u32 val_set; u32 val_set;
u32 val_clear; u32 val_clear;
u32 reg_ack; i915_reg_t reg_ack;
u32 reg_post; i915_reg_t reg_post;
u32 val_reset; u32 val_reset;
} fw_domain[FW_DOMAIN_ID_COUNT]; } fw_domain[FW_DOMAIN_ID_COUNT];
}; };
@ -749,25 +733,25 @@ struct intel_uncore {
for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \ for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
(i__) < FW_DOMAIN_ID_COUNT; \ (i__) < FW_DOMAIN_ID_COUNT; \
(i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \ (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__))) for_each_if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))
#define for_each_fw_domain(domain__, dev_priv__, i__) \ #define for_each_fw_domain(domain__, dev_priv__, i__) \
for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__) for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
enum csr_state { #define CSR_VERSION(major, minor) ((major) << 16 | (minor))
FW_UNINITIALIZED = 0, #define CSR_VERSION_MAJOR(version) ((version) >> 16)
FW_LOADED, #define CSR_VERSION_MINOR(version) ((version) & 0xffff)
FW_FAILED
};
struct intel_csr { struct intel_csr {
struct work_struct work;
const char *fw_path; const char *fw_path;
uint32_t *dmc_payload; uint32_t *dmc_payload;
uint32_t dmc_fw_size; uint32_t dmc_fw_size;
uint32_t version;
uint32_t mmio_count; uint32_t mmio_count;
uint32_t mmioaddr[8]; i915_reg_t mmioaddr[8];
uint32_t mmiodata[8]; uint32_t mmiodata[8];
enum csr_state state; uint32_t dc_state;
}; };
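A quick worked example of the CSR_VERSION packing above (values illustrative):

	uint32_t ver = CSR_VERSION(1, 23);	/* (1 << 16) | 23 == 0x00010017 */

	DRM_DEBUG_KMS("DMC firmware v%u.%u\n",
		      CSR_VERSION_MAJOR(ver),	/* 1 */
		      CSR_VERSION_MINOR(ver));	/* 23 */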
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \ #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@ -783,8 +767,11 @@ struct intel_csr {
func(is_crestline) sep \ func(is_crestline) sep \
func(is_ivybridge) sep \ func(is_ivybridge) sep \
func(is_valleyview) sep \ func(is_valleyview) sep \
func(is_cherryview) sep \
func(is_haswell) sep \ func(is_haswell) sep \
func(is_skylake) sep \ func(is_skylake) sep \
func(is_broxton) sep \
func(is_kabylake) sep \
func(is_preliminary) sep \ func(is_preliminary) sep \
func(has_fbc) sep \ func(has_fbc) sep \
func(has_pipe_cxsr) sep \ func(has_pipe_cxsr) sep \
@ -920,7 +907,6 @@ struct i915_fbc {
/* This is always the inner lock when overlapping with struct_mutex and /* This is always the inner lock when overlapping with struct_mutex and
* it's the outer lock when overlapping with stolen_lock. */ * it's the outer lock when overlapping with stolen_lock. */
struct mutex lock; struct mutex lock;
unsigned long uncompressed_size;
unsigned threshold; unsigned threshold;
unsigned int fb_id; unsigned int fb_id;
unsigned int possible_framebuffer_bits; unsigned int possible_framebuffer_bits;
@ -933,38 +919,21 @@ struct i915_fbc {
bool false_color; bool false_color;
/* Tracks whether the HW is actually enabled, not whether the feature is
* possible. */
bool enabled; bool enabled;
bool active;
struct intel_fbc_work { struct intel_fbc_work {
struct delayed_work work; bool scheduled;
struct intel_crtc *crtc; struct work_struct work;
struct drm_framebuffer *fb; struct drm_framebuffer *fb;
} *fbc_work; unsigned long enable_jiffies;
} work;
enum no_fbc_reason { const char *no_fbc_reason;
FBC_OK, /* FBC is enabled */
FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
FBC_NO_OUTPUT, /* no outputs enabled to compress */
FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
FBC_MODE_TOO_LARGE, /* mode too large for compression */
FBC_BAD_PLANE, /* fbc not supported on plane */
FBC_NOT_TILED, /* buffer not tiled */
FBC_MULTIPLE_PIPES, /* more than one pipe active */
FBC_MODULE_PARAM,
FBC_CHIP_DEFAULT, /* disabled by default on this chip */
FBC_ROTATION, /* rotation is not supported */
FBC_IN_DBG_MASTER, /* kernel debugger is active */
FBC_BAD_STRIDE, /* stride is not supported */
FBC_PIXEL_RATE, /* pixel rate is too big */
FBC_PIXEL_FORMAT /* pixel format is invalid */
} no_fbc_reason;
bool (*fbc_enabled)(struct drm_i915_private *dev_priv); bool (*is_active)(struct drm_i915_private *dev_priv);
void (*enable_fbc)(struct intel_crtc *crtc); void (*activate)(struct intel_crtc *crtc);
void (*disable_fbc)(struct drm_i915_private *dev_priv); void (*deactivate)(struct drm_i915_private *dev_priv);
}; };
/** /**
@ -1034,7 +1003,7 @@ struct intel_gmbus {
struct i2c_adapter adapter; struct i2c_adapter adapter;
u32 force_bit; u32 force_bit;
u32 reg0; u32 reg0;
u32 gpio_reg; i915_reg_t gpio_reg;
struct i2c_algo_bit_data bit_algo; struct i2c_algo_bit_data bit_algo;
struct drm_i915_private *dev_priv; struct drm_i915_private *dev_priv;
}; };
@ -1173,7 +1142,7 @@ struct intel_gen6_power_mgmt {
struct intel_rps_client semaphores, mmioflips; struct intel_rps_client semaphores, mmioflips;
/* manual wa residency calculations */ /* manual wa residency calculations */
struct intel_rps_ei ei; struct intel_rps_ei up_ei, down_ei;
/* /*
* Protects RPS/RC6 register access and PCU communication. * Protects RPS/RC6 register access and PCU communication.
@ -1293,6 +1262,7 @@ struct i915_gem_mm {
/** PPGTT used for aliasing the PPGTT with the GTT */ /** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt; struct i915_hw_ppgtt *aliasing_ppgtt;
struct notifier_block oom_notifier;
/** LRU list of objects with fence regs on them. */ /** LRU list of objects with fence regs on them. */
struct list_head fence_list; struct list_head fence_list;
@ -1633,6 +1603,8 @@ struct skl_wm_level {
* For more, read the Documentation/power/runtime_pm.txt. * For more, read the Documentation/power/runtime_pm.txt.
*/ */
struct i915_runtime_pm { struct i915_runtime_pm {
atomic_t wakeref_count;
atomic_t atomic_seq;
bool suspended; bool suspended;
bool irqs_enabled; bool irqs_enabled;
}; };
@ -1679,7 +1651,7 @@ struct i915_frontbuffer_tracking {
}; };
struct i915_wa_reg { struct i915_wa_reg {
u32 addr; i915_reg_t addr;
u32 value; u32 value;
/* bitmask representing WA bits */ /* bitmask representing WA bits */
u32 mask; u32 mask;
@ -1708,6 +1680,13 @@ struct i915_execbuffer_params {
struct drm_i915_gem_request *request; struct drm_i915_gem_request *request;
}; };
/* used in computing the new watermarks state */
struct intel_wm_config {
unsigned int num_pipes_active;
bool sprites_enabled;
bool sprites_scaled;
};
struct drm_i915_private { struct drm_i915_private {
struct drm_device *dev; struct drm_device *dev;
struct kmem_cache *objects; struct kmem_cache *objects;
@ -1728,9 +1707,6 @@ struct drm_i915_private {
struct intel_csr csr; struct intel_csr csr;
/* Display CSR-related protection */
struct mutex csr_lock;
struct intel_gmbus gmbus[GMBUS_NUM_PINS]; struct intel_gmbus gmbus[GMBUS_NUM_PINS];
/** gmbus_mutex protects against concurrent usage of the single hw gmbus /** gmbus_mutex protects against concurrent usage of the single hw gmbus
@ -1745,6 +1721,8 @@ struct drm_i915_private {
/* MMIO base address for MIPI regs */ /* MMIO base address for MIPI regs */
uint32_t mipi_mmio_base; uint32_t mipi_mmio_base;
uint32_t psr_mmio_base;
wait_queue_head_t gmbus_wait_queue; wait_queue_head_t gmbus_wait_queue;
struct pci_dev *bridge_dev; struct pci_dev *bridge_dev;
@ -1908,6 +1886,7 @@ struct drm_i915_private {
u32 chv_phy_control; u32 chv_phy_control;
u32 suspend_count; u32 suspend_count;
bool suspended_to_idle;
struct i915_suspend_saved_registers regfile; struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state vlv_s0ix_state; struct vlv_s0ix_state vlv_s0ix_state;
@ -1930,6 +1909,9 @@ struct drm_i915_private {
*/ */
uint16_t skl_latency[8]; uint16_t skl_latency[8];
/* Committed wm config */
struct intel_wm_config config;
/* /*
* The skl_wm_values structure is a bit too big for stack * The skl_wm_values structure is a bit too big for stack
* allocation, so we keep the staging struct where we store * allocation, so we keep the staging struct where we store
@ -1964,6 +1946,8 @@ struct drm_i915_private {
/* perform PHY state sanity checks? */ /* perform PHY state sanity checks? */
bool chv_phy_assert[2]; bool chv_phy_assert[2];
struct intel_encoder *dig_port_map[I915_MAX_PORTS];
/* /*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place. * will be rejected. Instead look for a better place.
@ -1988,7 +1972,7 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
/* Iterate over initialised rings */ /* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \ #define for_each_ring(ring__, dev_priv__, i__) \
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \ for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))) for_each_if ((((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))))
enum hdmi_force_audio { enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
@ -2000,6 +1984,9 @@ enum hdmi_force_audio {
#define I915_GTT_OFFSET_NONE ((u32)-1) #define I915_GTT_OFFSET_NONE ((u32)-1)
struct drm_i915_gem_object_ops { struct drm_i915_gem_object_ops {
unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
/* Interface between the GEM object and its backing storage. /* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set * get_pages() is called once prior to the use of the associated set
* of pages before to binding them into the GTT, and put_pages() is * of pages before to binding them into the GTT, and put_pages() is
@ -2015,6 +2002,7 @@ struct drm_i915_gem_object_ops {
*/ */
int (*get_pages)(struct drm_i915_gem_object *); int (*get_pages)(struct drm_i915_gem_object *);
void (*put_pages)(struct drm_i915_gem_object *); void (*put_pages)(struct drm_i915_gem_object *);
int (*dmabuf_export)(struct drm_i915_gem_object *); int (*dmabuf_export)(struct drm_i915_gem_object *);
void (*release)(struct drm_i915_gem_object *); void (*release)(struct drm_i915_gem_object *);
}; };
@ -2158,6 +2146,10 @@ struct drm_i915_gem_object {
/** Record of address bit 17 of each page at last unbind. */ /** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17; unsigned long *bit_17;
union {
/** for phy allocated objects */
struct drm_dma_handle *phys_handle;
struct i915_gem_userptr { struct i915_gem_userptr {
uintptr_t ptr; uintptr_t ptr;
unsigned read_only :1; unsigned read_only :1;
@ -2168,9 +2160,7 @@ struct drm_i915_gem_object {
struct i915_mmu_object *mmu_object; struct i915_mmu_object *mmu_object;
struct work_struct *work; struct work_struct *work;
} userptr; } userptr;
};
/** for phys allocated objects */
struct drm_dma_handle *phys_handle;
}; };
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@ -2451,6 +2441,15 @@ struct drm_i915_cmd_table {
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) #define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
#define REVID_FOREVER 0xff
/*
* Return true if revision is in range [since,until] inclusive.
*
* Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
*/
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577) #define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562) #define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
@ -2473,11 +2472,12 @@ struct drm_i915_cmd_table {
INTEL_DEVID(dev) == 0x0152 || \ INTEL_DEVID(dev) == 0x0152 || \
INTEL_DEVID(dev) == 0x015a) INTEL_DEVID(dev) == 0x015a)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) #define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev))
#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
#define IS_BROXTON(dev) (!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev)) #define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00) (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
@ -2505,6 +2505,14 @@ struct drm_i915_cmd_table {
#define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \ #define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \
INTEL_DEVID(dev) == 0x1915 || \ INTEL_DEVID(dev) == 0x1915 || \
INTEL_DEVID(dev) == 0x191E) INTEL_DEVID(dev) == 0x191E)
#define IS_KBL_ULT(dev) (INTEL_DEVID(dev) == 0x5906 || \
INTEL_DEVID(dev) == 0x5913 || \
INTEL_DEVID(dev) == 0x5916 || \
INTEL_DEVID(dev) == 0x5921 || \
INTEL_DEVID(dev) == 0x5926)
#define IS_KBL_ULX(dev) (INTEL_DEVID(dev) == 0x590E || \
INTEL_DEVID(dev) == 0x5915 || \
INTEL_DEVID(dev) == 0x591E)
#define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \ #define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020) (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \ #define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \
@ -2512,16 +2520,21 @@ struct drm_i915_cmd_table {
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
#define SKL_REVID_A0 (0x0) #define SKL_REVID_A0 0x0
#define SKL_REVID_B0 (0x1) #define SKL_REVID_B0 0x1
#define SKL_REVID_C0 (0x2) #define SKL_REVID_C0 0x2
#define SKL_REVID_D0 (0x3) #define SKL_REVID_D0 0x3
#define SKL_REVID_E0 (0x4) #define SKL_REVID_E0 0x4
#define SKL_REVID_F0 (0x5) #define SKL_REVID_F0 0x5
#define BXT_REVID_A0 (0x0) #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
#define BXT_REVID_B0 (0x3)
#define BXT_REVID_C0 (0x9) #define BXT_REVID_A0 0x0
#define BXT_REVID_A1 0x1
#define BXT_REVID_B0 0x3
#define BXT_REVID_C0 0x9
#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
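Together with IS_REVID() above, these give self-documenting stepping checks. The first test below is the exact pattern used later in i915_gem.c; the second shows the open-ended form with REVID_FOREVER.

	/* Workaround limited to Broxton steppings A0..A1 (inclusive): */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		return -ENODEV;

	/* Open-ended: Skylake F0 and every later stepping. */
	if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER))
		; /* apply W/A */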
/* /*
* The genX designation typically refers to the render engine, so render * The genX designation typically refers to the render engine, so render
@ -2593,23 +2606,25 @@ struct drm_i915_cmd_table {
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \ IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
IS_SKYLAKE(dev)) IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \ IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
IS_SKYLAKE(dev)) IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
IS_KABYLAKE(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) #define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
#define HAS_CSR(dev) (IS_GEN9(dev)) #define HAS_CSR(dev) (IS_GEN9(dev))
#define HAS_GUC_UCODE(dev) (IS_GEN9(dev)) #define HAS_GUC_UCODE(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev))
#define HAS_GUC_SCHED(dev) (IS_GEN9(dev)) #define HAS_GUC_SCHED(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev))
#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \ #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
INTEL_INFO(dev)->gen >= 8) INTEL_INFO(dev)->gen >= 8)
#define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \ #define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \
!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \
!IS_BROXTON(dev))
#define INTEL_PCH_DEVICE_ID_MASK 0xff00 #define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@ -2626,12 +2641,14 @@ struct drm_i915_cmd_table {
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) #define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
#define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)) #define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || \
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
/* DPF == dynamic parity feature */ /* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
@ -2657,6 +2674,7 @@ struct i915_params {
int panel_use_ssc; int panel_use_ssc;
int vbt_sdvo_panel_type; int vbt_sdvo_panel_type;
int enable_rc6; int enable_rc6;
int enable_dc;
int enable_fbc; int enable_fbc;
int enable_ppgtt; int enable_ppgtt;
int enable_execlists; int enable_execlists;
@ -2708,7 +2726,6 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
void i915_firmware_load_error_print(const char *fw_path, int err);
/* intel_hotplug.c */ /* intel_hotplug.c */
void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
@ -2765,17 +2782,47 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t mask, uint32_t mask,
uint32_t bits); uint32_t bits);
void void ilk_update_display_irq(struct drm_i915_private *dev_priv,
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask); uint32_t interrupt_mask,
void uint32_t enabled_irq_mask);
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask); static inline void
ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
{
ilk_update_display_irq(dev_priv, bits, bits);
}
static inline void
ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
{
ilk_update_display_irq(dev_priv, bits, 0);
}
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
enum pipe pipe,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask);
static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
enum pipe pipe, uint32_t bits)
{
bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
}
static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
enum pipe pipe, uint32_t bits)
{
bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
}
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask, uint32_t interrupt_mask,
uint32_t enabled_irq_mask); uint32_t enabled_irq_mask);
#define ibx_enable_display_interrupt(dev_priv, bits) \ static inline void
ibx_display_interrupt_update((dev_priv), (bits), (bits)) ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
#define ibx_disable_display_interrupt(dev_priv, bits) \ {
ibx_display_interrupt_update((dev_priv), (bits), 0) ibx_display_interrupt_update(dev_priv, bits, bits);
}
static inline void
ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
{
ibx_display_interrupt_update(dev_priv, bits, 0);
}
/* i915_gem.c */ /* i915_gem.c */
int i915_gem_create_ioctl(struct drm_device *dev, void *data, int i915_gem_create_ioctl(struct drm_device *dev, void *data,
@ -2844,6 +2891,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
#define PIN_UPDATE (1<<5) #define PIN_UPDATE (1<<5)
#define PIN_ZONE_4G (1<<6) #define PIN_ZONE_4G (1<<6)
#define PIN_HIGH (1<<7) #define PIN_HIGH (1<<7)
#define PIN_OFFSET_FIXED (1<<8)
#define PIN_OFFSET_MASK (~4095) #define PIN_OFFSET_MASK (~4095)
int __must_check int __must_check
i915_gem_object_pin(struct drm_i915_gem_object *obj, i915_gem_object_pin(struct drm_i915_gem_object *obj,
@ -2879,6 +2927,9 @@ static inline int __sg_page_count(struct scatterlist *sg)
return sg->length >> PAGE_SHIFT; return sg->length >> PAGE_SHIFT;
} }
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
static inline struct page * static inline struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{ {
@ -3018,8 +3069,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment, u32 alignment,
struct intel_engine_cs *pipelined,
struct drm_i915_gem_request **pipelined_request,
const struct i915_ggtt_view *view); const struct i915_ggtt_view *view);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view); const struct i915_ggtt_view *view);
@ -3194,6 +3243,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
unsigned long start, unsigned long start,
unsigned long end, unsigned long end,
unsigned flags); unsigned flags);
int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
/* belongs in i915_gem_gtt.h */ /* belongs in i915_gem_gtt.h */
@ -3323,7 +3373,8 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
extern void intel_i2c_reset(struct drm_device *dev); extern void intel_i2c_reset(struct drm_device *dev);
/* intel_bios.c */ /* intel_bios.c */
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port); int intel_bios_init(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
/* intel_opregion.c */ /* intel_opregion.c */
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
@ -3377,7 +3428,6 @@ extern void intel_set_rps(struct drm_device *dev, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable); bool enable);
extern void intel_detect_pch(struct drm_device *dev); extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev); extern int intel_enable_rc6(const struct drm_device *dev);
extern bool i915_semaphore_is_enabled(struct drm_device *dev); extern bool i915_semaphore_is_enabled(struct drm_device *dev);
@ -3460,6 +3510,32 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
#define __raw_read(x, s) \
static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
i915_reg_t reg) \
{ \
return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
#define __raw_write(x, s) \
static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
i915_reg_t reg, uint##x##_t val) \
{ \
write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)
__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)
#undef __raw_read
#undef __raw_write
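For clarity, the 32-bit instantiation generated by __raw_read(32, l) above expands to:

static inline uint32_t __raw_i915_read32(struct drm_i915_private *dev_priv,
					 i915_reg_t reg)
{
	return readl(dev_priv->regs + i915_mmio_reg_offset(reg));
}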
/* These are untraced mmio-accessors that are only valid to be used inside /* These are untraced mmio-accessors that are only valid to be used inside
* critical sections inside IRQ handlers where forcewake is explicitly * critical sections inside IRQ handlers where forcewake is explicitly
* controlled. * controlled.
@ -3467,8 +3543,8 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
* Note: Should only be used between intel_uncore_forcewake_irqlock() and * Note: Should only be used between intel_uncore_forcewake_irqlock() and
* intel_uncore_forcewake_irqunlock(). * intel_uncore_forcewake_irqunlock().
*/ */
#define I915_READ_FW(reg__) readl(dev_priv->regs + (reg__)) #define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) writel(val__, dev_priv->regs + (reg__)) #define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__) #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
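A usage sketch for the _FW accessors. The lock helpers are the ones named in the comment above, but their exact signatures are assumed here, and GEN8_MASTER_IRQ is merely an illustrative register.

	u32 tmp;

	intel_uncore_forcewake_irqlock(dev_priv);	/* assumed signature */
	tmp = I915_READ_FW(GEN8_MASTER_IRQ);
	I915_WRITE_FW(GEN8_MASTER_IRQ, tmp & ~GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ_FW(GEN8_MASTER_IRQ);
	intel_uncore_forcewake_irqunlock(dev_priv);	/* assumed signature */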
/* "Broadcast RGB" property */ /* "Broadcast RGB" property */
@ -3476,9 +3552,9 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define INTEL_BROADCAST_RGB_FULL 1 #define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2 #define INTEL_BROADCAST_RGB_LIMITED 2
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
{ {
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
return VLV_VGACNTRL; return VLV_VGACNTRL;
else if (INTEL_INFO(dev)->gen >= 5) else if (INTEL_INFO(dev)->gen >= 5)
return CPU_VGACNTRL; return CPU_VGACNTRL;
@ -3543,4 +3619,6 @@ static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
i915_gem_request_assign(&ring->trace_irq_req, req); i915_gem_request_assign(&ring->trace_irq_req, req);
} }
#include "intel_drv.h"
#endif #endif
View File
@ -56,11 +56,6 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
unsigned long flag, unsigned long offset); unsigned long flag, unsigned long offset);
#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static void static void
@ -1973,6 +1968,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
sg->length += PAGE_SIZE; sg->length += PAGE_SIZE;
} }
last_pfn = page_to_pfn(page); last_pfn = page_to_pfn(page);
/* Check that the i965g/gm workaround works: pages allocated under
* __GFP_DMA32 must stay below PFN 0x00100000, i.e. below 4GiB. */
WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
} }
#ifdef CONFIG_SWIOTLB #ifdef CONFIG_SWIOTLB
if (!swiotlb_nr_tbl()) if (!swiotlb_nr_tbl())
@ -2439,6 +2437,8 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
struct intel_engine_cs *ring) struct intel_engine_cs *ring)
{ {
struct intel_ringbuffer *buffer;
while (!list_empty(&ring->active_list)) { while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
@ -2454,18 +2454,16 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
* are the ones that keep the context and ringbuffer backing objects * are the ones that keep the context and ringbuffer backing objects
* pinned in place. * pinned in place.
*/ */
while (!list_empty(&ring->execlist_queue)) {
struct drm_i915_gem_request *submit_req;
submit_req = list_first_entry(&ring->execlist_queue, if (i915.enable_execlists) {
struct drm_i915_gem_request, spin_lock_irq(&ring->execlist_lock);
execlist_link);
list_del(&submit_req->execlist_link);
if (submit_req->ctx != ring->default_context) /* list_splice_tail_init checks for empty lists */
intel_lr_context_unpin(submit_req); list_splice_tail_init(&ring->execlist_queue,
&ring->execlist_retired_req_list);
i915_gem_request_unreference(submit_req); spin_unlock_irq(&ring->execlist_lock);
intel_execlists_retire_requests(ring);
} }
/* /*
@ -2484,6 +2482,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
i915_gem_request_retire(request); i915_gem_request_retire(request);
} }
/* Having flushed all requests from all queues, we know that all
* ringbuffers must now be empty. However, since we do not reclaim
* all space when retiring the request (to prevent HEADs colliding
* with rapid ringbuffer wraparound) the amount of available space
* upon reset is less than when we start. Do one more pass over
* all the ringbuffers to reset last_retired_head.
*/
list_for_each_entry(buffer, &ring->buffers, link) {
buffer->last_retired_head = buffer->tail;
intel_ring_update_space(buffer);
}
} }
void i915_gem_reset(struct drm_device *dev) void i915_gem_reset(struct drm_device *dev)
@ -2584,10 +2594,10 @@ i915_gem_retire_requests(struct drm_device *dev)
} }
} }
if (idle) // if (idle)
mod_delayed_work(dev_priv->wq, // mod_delayed_work(dev_priv->wq,
&dev_priv->mm.idle_work, // &dev_priv->mm.idle_work,
msecs_to_jiffies(100)); // msecs_to_jiffies(100));
return idle; return idle;
} }
@ -2624,6 +2634,10 @@ i915_gem_idle_work_handler(struct work_struct *work)
if (!list_empty(&ring->request_list)) if (!list_empty(&ring->request_list))
return; return;
/* we probably should sync with hangcheck here, using cancel_work_sync.
* Also locking seems to be fubar here, ring->request_list is protected
* by dev->struct_mutex. */
intel_mark_idle(dev); intel_mark_idle(dev);
if (mutex_trylock(&dev->struct_mutex)) { if (mutex_trylock(&dev->struct_mutex)) {
@ -2748,7 +2762,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (ret == 0) if (ret == 0)
ret = __i915_wait_request(req[i], reset_counter, true, ret = __i915_wait_request(req[i], reset_counter, true,
args->timeout_ns > 0 ? &args->timeout_ns : NULL, args->timeout_ns > 0 ? &args->timeout_ns : NULL,
file->driver_priv); to_rps_client(file));
i915_gem_request_unreference__unlocked(req[i]); i915_gem_request_unreference__unlocked(req[i]);
} }
return ret; return ret;
@ -3114,7 +3128,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
if (flags & PIN_MAPPABLE) if (flags & PIN_MAPPABLE)
end = min_t(u64, end, dev_priv->gtt.mappable_end); end = min_t(u64, end, dev_priv->gtt.mappable_end);
if (flags & PIN_ZONE_4G) if (flags & PIN_ZONE_4G)
end = min_t(u64, end, (1ULL << 32)); end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
if (alignment == 0) if (alignment == 0)
alignment = flags & PIN_MAPPABLE ? fence_alignment : alignment = flags & PIN_MAPPABLE ? fence_alignment :
@ -3151,6 +3165,20 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
if (IS_ERR(vma)) if (IS_ERR(vma))
goto err_unpin; goto err_unpin;
if (flags & PIN_OFFSET_FIXED) {
uint64_t offset = flags & PIN_OFFSET_MASK;
if (offset & (alignment - 1) || offset + size > end) {
ret = -EINVAL;
goto err_free_vma;
}
vma->node.start = offset;
vma->node.size = size;
vma->node.color = obj->cache_level;
ret = drm_mm_reserve_node(&vm->mm, &vma->node);
if (ret)
goto err_free_vma;
} else {
if (flags & PIN_HIGH) { if (flags & PIN_HIGH) {
search_flag = DRM_MM_SEARCH_BELOW; search_flag = DRM_MM_SEARCH_BELOW;
alloc_flag = DRM_MM_CREATE_TOP; alloc_flag = DRM_MM_CREATE_TOP;
@ -3170,6 +3198,7 @@ search_free:
goto err_free_vma; goto err_free_vma;
} }
}
if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) { if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
ret = -EINVAL; ret = -EINVAL;
goto err_remove_node; goto err_remove_node;
@ -3522,7 +3551,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
* cacheline, whereas normally such cachelines would get * cacheline, whereas normally such cachelines would get
* invalidated. * invalidated.
*/ */
if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
return -ENODEV; return -ENODEV;
level = I915_CACHE_LLC; level = I915_CACHE_LLC;
@ -3565,17 +3594,11 @@ rpm_put:
int int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment, u32 alignment,
struct intel_engine_cs *pipelined,
struct drm_i915_gem_request **pipelined_request,
const struct i915_ggtt_view *view) const struct i915_ggtt_view *view)
{ {
u32 old_read_domains, old_write_domain; u32 old_read_domains, old_write_domain;
int ret; int ret;
ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
if (ret)
return ret;
/* Mark the pin_display early so that we account for the /* Mark the pin_display early so that we account for the
* display coherency whilst setting up the cache domains. * display coherency whilst setting up the cache domains.
*/ */
@ -3765,6 +3788,10 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
vma->node.start < (flags & PIN_OFFSET_MASK)) vma->node.start < (flags & PIN_OFFSET_MASK))
return true; return true;
if (flags & PIN_OFFSET_FIXED &&
vma->node.start != (flags & PIN_OFFSET_MASK))
return true;
return false; return false;
} }
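Because the fixed offset travels in the flags word itself (recovered via PIN_OFFSET_MASK, so it must be page-aligned), a soft-pinning caller looks roughly like the sketch below; i915_gem_obj_ggtt_pin() is assumed to be the usual GGTT wrapper taking 64-bit flags.

	int ret;
	uint64_t offset = 64ull << 20;	/* illustrative: 64MiB, page-aligned */

	ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_OFFSET_FIXED | offset);
	if (ret == -ENOSPC)
		;	/* the range is occupied and could not be evicted */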
@ -4030,6 +4057,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
} }
static const struct drm_i915_gem_object_ops i915_gem_object_ops = { static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
.get_pages = i915_gem_object_get_pages_gtt, .get_pages = i915_gem_object_get_pages_gtt,
.put_pages = i915_gem_object_put_pages_gtt, .put_pages = i915_gem_object_put_pages_gtt,
}; };
@ -4163,10 +4191,8 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
{ {
struct i915_vma *vma; struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link) { list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (i915_is_ggtt(vma->vm) && if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) vma->vm == vm)
continue;
if (vma->vm == vm)
return vma; return vma;
} }
return NULL; return NULL;
@ -4257,7 +4283,6 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
struct intel_engine_cs *ring = req->ring; struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ring->dev; struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
u32 *remap_info = dev_priv->l3_parity.remap_info[slice]; u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
int i, ret; int i, ret;
@ -4273,10 +4298,10 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
* here because no other code should access these registers other than * here because no other code should access these registers other than
* at initialization time. * at initialization time.
*/ */
for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) { for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, reg_base + i); intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
intel_ring_emit(ring, remap_info[i/4]); intel_ring_emit(ring, remap_info[i]);
} }
intel_ring_advance(ring); intel_ring_advance(ring);
@ -4444,17 +4469,8 @@ i915_gem_init_hw(struct drm_device *dev)
if (HAS_GUC_UCODE(dev)) { if (HAS_GUC_UCODE(dev)) {
ret = intel_guc_ucode_load(dev); ret = intel_guc_ucode_load(dev);
if (ret) { if (ret) {
/* DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
* If we got an error and GuC submission is enabled, map ret = -EIO;
* the error to -EIO so the GPU will be declared wedged.
* OTOH, if we didn't intend to use the GuC anyway, just
* discard the error and carry on.
*/
DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret,
i915.enable_guc_submission ? "" :
" (ignored)");
ret = i915.enable_guc_submission ? -EIO : 0;
if (ret)
goto out; goto out;
} }
} }
@ -4518,14 +4534,6 @@ int i915_gem_init(struct drm_device *dev)
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
if (IS_VALLEYVIEW(dev)) {
/* VLVA0 (potential hack), BIOS isn't actually waking us */
I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
VLV_GTLC_ALLOWWAKEACK), 10))
DRM_DEBUG_DRIVER("allow wake ack timed out\n");
}
if (!i915.enable_execlists) { if (!i915.enable_execlists) {
dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission; dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
dev_priv->gt.init_rings = i915_gem_init_rings; dev_priv->gt.init_rings = i915_gem_init_rings;
@ -4619,7 +4627,7 @@ i915_gem_load(struct drm_device *dev)
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
dev_priv->num_fence_regs = 32; dev_priv->num_fence_regs = 32;
else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dev_priv->num_fence_regs = 16; dev_priv->num_fence_regs = 16;
@ -4837,6 +4845,21 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
return false; return false;
} }
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
{
struct page *page;
/* Only default objects have per-page dirty tracking */
if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
return NULL;
page = i915_gem_object_get_page(obj, n);
set_page_dirty(page);
return page;
}
/* Allocate a new GEM object and fill it with the supplied data */ /* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object * struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev, i915_gem_object_create_from_data(struct drm_device *dev,
@ -4862,6 +4885,7 @@ i915_gem_object_create_from_data(struct drm_device *dev,
i915_gem_object_pin_pages(obj); i915_gem_object_pin_pages(obj);
sg = obj->pages; sg = obj->pages;
bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size); bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
obj->dirty = 1; /* Backing store is now out of date */
i915_gem_object_unpin_pages(obj); i915_gem_object_unpin_pages(obj);
if (WARN_ON(bytes != size)) { if (WARN_ON(bytes != size)) {
View File
@ -189,8 +189,15 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
* shouldn't touch the cache level, especially as that * shouldn't touch the cache level, especially as that
* would make the object snooped which might have a * would make the object snooped which might have a
* negative performance impact. * negative performance impact.
*
* Snooping is required on non-llc platforms in execlist
* mode, but since all GGTT accesses use PAT entry 0 we
* get snooping anyway regardless of cache_level.
*
* This is only applicable for Ivy Bridge devices since
* later platforms don't have L3 control bits in the PTE.
*/ */
if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) { if (IS_IVYBRIDGE(dev)) {
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC); ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */ /* Failure shouldn't ever happen this early */
if (WARN_ON(ret)) { if (WARN_ON(ret)) {
@ -558,7 +565,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
if (signaller == ring) if (signaller == ring)
continue; continue;
intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base)); intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
} }
} }
@ -583,7 +590,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
if (signaller == ring) if (signaller == ring)
continue; continue;
intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base)); intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
} }
} }
@ -927,6 +934,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_NO_ZEROMAP: case I915_CONTEXT_PARAM_NO_ZEROMAP:
args->value = ctx->flags & CONTEXT_NO_ZEROMAP; args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
break; break;
case I915_CONTEXT_PARAM_GTT_SIZE:
if (ctx->ppgtt)
args->value = ctx->ppgtt->base.total;
else if (to_i915(dev)->mm.aliasing_ppgtt)
args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
else
args->value = to_i915(dev)->gtt.base.total;
break;
default: default:
ret = -EINVAL; ret = -EINVAL;
break; break;
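
Userspace can read the new I915_CONTEXT_PARAM_GTT_SIZE through the context-getparam ioctl. A minimal sketch (query_gtt_size is a hypothetical helper; assumes libdrm's i915_drm.h):

#include <stdint.h>
#include <sys/ioctl.h>
#include <i915_drm.h>

static uint64_t query_gtt_size(int fd, uint32_t ctx_id)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param  = I915_CONTEXT_PARAM_GTT_SIZE,
	};

	/* on success, p.value holds the usable GTT size in bytes */
	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) ? 0 : p.value;
}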
@ -958,7 +973,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_BAN_PERIOD: case I915_CONTEXT_PARAM_BAN_PERIOD:
if (args->size) if (args->size)
ret = -EINVAL; ret = -EINVAL;
else if (args->value < ctx->hang_stats.ban_period_seconds) else if (args->value < ctx->hang_stats.ban_period_seconds &&
!capable(CAP_SYS_ADMIN))
ret = -EPERM; ret = -EPERM;
else else
ctx->hang_stats.ban_period_seconds = args->value; ctx->hang_stats.ban_period_seconds = args->value;
View File
@ -199,6 +199,45 @@ found:
return ret; return ret;
} }
int
i915_gem_evict_for_vma(struct i915_vma *target)
{
struct drm_mm_node *node, *next;
list_for_each_entry_safe(node, next,
&target->vm->mm.head_node.node_list,
node_list) {
struct i915_vma *vma;
int ret;
if (node->start + node->size <= target->node.start)
continue;
if (node->start >= target->node.start + target->node.size)
break;
vma = container_of(node, typeof(*vma), node);
if (vma->pin_count) {
if (!vma->exec_entry || (vma->pin_count > 1))
/* Object is pinned for some other use */
return -EBUSY;
/* We need to evict a buffer in the same batch */
if (vma->exec_entry->flags & EXEC_OBJECT_PINNED)
/* Overlapping fixed objects in the same batch */
return -EINVAL;
return -ENOSPC;
}
ret = i915_vma_unbind(vma);
if (ret)
return ret;
}
return 0;
}
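
The two early continue/break checks in the scan above encode the usual half-open interval test; isolated for clarity (illustrative only):

static inline bool ranges_overlap(u64 start_a, u64 len_a,
				  u64 start_b, u64 len_b)
{
	/* [a, a+len_a) and [b, b+len_b) intersect iff neither range
	 * ends before the other begins */
	return start_a < start_b + len_b && start_b < start_a + len_a;
}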
/** /**
* i915_gem_evict_vm - Evict all idle vmas from a vm * i915_gem_evict_vm - Evict all idle vmas from a vm
* @vm: Address space to cleanse * @vm: Address space to cleanse
View File
@ -249,6 +249,31 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
obj->cache_level != I915_CACHE_NONE); obj->cache_level != I915_CACHE_NONE);
} }
/* Used to convert any address to canonical form.
* Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
* MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
* addresses to be in a canonical form:
* "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
* canonical form [63:48] == [47]."
*/
#define GEN8_HIGH_ADDRESS_BIT 47
static inline uint64_t gen8_canonical_addr(uint64_t address)
{
return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}
static inline uint64_t gen8_noncanonical_addr(uint64_t address)
{
return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
}
static inline uint64_t
relocation_target(struct drm_i915_gem_relocation_entry *reloc,
uint64_t target_offset)
{
return gen8_canonical_addr((int)reloc->delta + target_offset);
}
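
To make the sign extension concrete, a standalone round trip (plain-C analogues of the two helpers above; the first body mirrors what sign_extend64() does, and the value is illustrative):

#include <assert.h>
#include <stdint.h>

#define HIGH_BIT 47	/* GEN8_HIGH_ADDRESS_BIT */

static uint64_t canonical(uint64_t a)
{
	/* replicate bit 47 into bits 63:48 */
	return (uint64_t)((int64_t)(a << (63 - HIGH_BIT)) >> (63 - HIGH_BIT));
}

static uint64_t noncanonical(uint64_t a)
{
	return a & ((1ULL << (HIGH_BIT + 1)) - 1);
}

int main(void)
{
	uint64_t offset = 0x0000900000001000ULL;	/* bit 47 set */

	assert(canonical(offset) == 0xffff900000001000ULL);
	assert(noncanonical(canonical(offset)) == offset);
	return 0;
}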
static int static int
relocate_entry_cpu(struct drm_i915_gem_object *obj, relocate_entry_cpu(struct drm_i915_gem_object *obj,
struct drm_i915_gem_relocation_entry *reloc, struct drm_i915_gem_relocation_entry *reloc,
@ -256,7 +281,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
{ {
struct drm_device *dev = obj->base.dev; struct drm_device *dev = obj->base.dev;
uint32_t page_offset = offset_in_page(reloc->offset); uint32_t page_offset = offset_in_page(reloc->offset);
uint64_t delta = reloc->delta + target_offset; uint64_t delta = relocation_target(reloc, target_offset);
char *vaddr; char *vaddr;
int ret; int ret;
@ -264,7 +289,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
if (ret) if (ret)
return ret; return ret;
vaddr = kmap_atomic(i915_gem_object_get_page(obj, vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
reloc->offset >> PAGE_SHIFT)); reloc->offset >> PAGE_SHIFT));
*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta); *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
@ -273,7 +298,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
if (page_offset == 0) { if (page_offset == 0) {
kunmap_atomic(vaddr); kunmap_atomic(vaddr);
vaddr = kmap_atomic(i915_gem_object_get_page(obj, vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
(reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
} }
@ -292,7 +317,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
{ {
struct drm_device *dev = obj->base.dev; struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
uint64_t delta = reloc->delta + target_offset; uint64_t delta = relocation_target(reloc, target_offset);
uint64_t offset; uint64_t offset;
void __iomem *reloc_page; void __iomem *reloc_page;
int ret; int ret;
@ -334,7 +359,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
{ {
struct drm_device *dev = obj->base.dev; struct drm_device *dev = obj->base.dev;
uint32_t page_offset = offset_in_page(reloc->offset); uint32_t page_offset = offset_in_page(reloc->offset);
uint64_t delta = (int)reloc->delta + target_offset; uint64_t delta = relocation_target(reloc, target_offset);
char *vaddr; char *vaddr;
int ret; int ret;
@ -342,7 +367,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
if (ret) if (ret)
return ret; return ret;
vaddr = kmap_atomic(i915_gem_object_get_page(obj, vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
reloc->offset >> PAGE_SHIFT)); reloc->offset >> PAGE_SHIFT));
clflush_write32(vaddr + page_offset, lower_32_bits(delta)); clflush_write32(vaddr + page_offset, lower_32_bits(delta));
@ -351,7 +376,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
if (page_offset == 0) { if (page_offset == 0) {
kunmap_atomic(vaddr); kunmap_atomic(vaddr);
vaddr = kmap_atomic(i915_gem_object_get_page(obj, vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
(reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
} }
@ -382,7 +407,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
target_i915_obj = target_vma->obj; target_i915_obj = target_vma->obj;
target_obj = &target_vma->obj->base; target_obj = &target_vma->obj->base;
target_offset = target_vma->node.start; target_offset = gen8_canonical_addr(target_vma->node.start);
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them * pipe_control writes because the gpu doesn't properly redirect them
@ -583,6 +608,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
flags |= PIN_GLOBAL | PIN_MAPPABLE; flags |= PIN_GLOBAL | PIN_MAPPABLE;
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
if (entry->flags & EXEC_OBJECT_PINNED)
flags |= entry->offset | PIN_OFFSET_FIXED;
if ((flags & PIN_MAPPABLE) == 0) if ((flags & PIN_MAPPABLE) == 0)
flags |= PIN_HIGH; flags |= PIN_HIGH;
} }
@ -654,6 +681,10 @@ eb_vma_misplaced(struct i915_vma *vma)
vma->node.start & (entry->alignment - 1)) vma->node.start & (entry->alignment - 1))
return true; return true;
if (entry->flags & EXEC_OBJECT_PINNED &&
vma->node.start != entry->offset)
return true;
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS && if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
vma->node.start < BATCH_OFFSET_BIAS) vma->node.start < BATCH_OFFSET_BIAS)
return true; return true;
@ -679,6 +710,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
struct i915_vma *vma; struct i915_vma *vma;
struct i915_address_space *vm; struct i915_address_space *vm;
struct list_head ordered_vmas; struct list_head ordered_vmas;
struct list_head pinned_vmas;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
int retry; int retry;
@ -687,6 +719,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm; vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
INIT_LIST_HEAD(&ordered_vmas); INIT_LIST_HEAD(&ordered_vmas);
INIT_LIST_HEAD(&pinned_vmas);
while (!list_empty(vmas)) { while (!list_empty(vmas)) {
struct drm_i915_gem_exec_object2 *entry; struct drm_i915_gem_exec_object2 *entry;
bool need_fence, need_mappable; bool need_fence, need_mappable;
@ -705,7 +738,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
obj->tiling_mode != I915_TILING_NONE; obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(vma); need_mappable = need_fence || need_reloc_mappable(vma);
if (need_mappable) { if (entry->flags & EXEC_OBJECT_PINNED)
list_move_tail(&vma->exec_list, &pinned_vmas);
else if (need_mappable) {
entry->flags |= __EXEC_OBJECT_NEEDS_MAP; entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
list_move(&vma->exec_list, &ordered_vmas); list_move(&vma->exec_list, &ordered_vmas);
} else } else
@ -715,6 +750,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
obj->base.pending_write_domain = 0; obj->base.pending_write_domain = 0;
} }
list_splice(&ordered_vmas, vmas); list_splice(&ordered_vmas, vmas);
list_splice(&pinned_vmas, vmas);
/* Attempt to pin all of the buffers into the GTT. /* Attempt to pin all of the buffers into the GTT.
* This is done in 3 phases: * This is done in 3 phases:
@ -967,6 +1003,21 @@ validate_exec_list(struct drm_device *dev,
if (exec[i].flags & invalid_flags) if (exec[i].flags & invalid_flags)
return -EINVAL; return -EINVAL;
/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
* any non-page-aligned or non-canonical addresses.
*/
if (exec[i].flags & EXEC_OBJECT_PINNED) {
if (exec[i].offset !=
gen8_canonical_addr(exec[i].offset & PAGE_MASK))
return -EINVAL;
/* From drm_mm perspective address space is continuous,
* so from this point we're always using non-canonical
* form internally.
*/
exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
}
if (exec[i].alignment && !is_power_of_2(exec[i].alignment)) if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
return -EINVAL; return -EINVAL;
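
Seen from userspace, the softpin rule enforced above means a fixed-placement buffer is described roughly like this (bo_handle and the offset are placeholders; assumes libdrm's i915_drm.h):

struct drm_i915_gem_exec_object2 obj = {
	.handle = bo_handle,
	.offset = 0x100000,		/* page aligned, canonical */
	.flags  = EXEC_OBJECT_PINNED,	/* keep it exactly there */
};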
@ -1091,7 +1142,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
for (i = 0; i < 4; i++) { for (i = 0; i < 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i)); intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
intel_ring_emit(ring, 0); intel_ring_emit(ring, 0);
} }
@ -1218,7 +1269,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, INSTPM); intel_ring_emit_reg(ring, INSTPM);
intel_ring_emit(ring, instp_mask << 16 | instp_mode); intel_ring_emit(ring, instp_mask << 16 | instp_mode);
intel_ring_advance(ring); intel_ring_advance(ring);
@ -1294,6 +1345,7 @@ eb_get_batch(struct eb_vmas *eb)
* Note that actual hangs have only been observed on gen7, but for * Note that actual hangs have only been observed on gen7, but for
* paranoia do it everywhere. * paranoia do it everywhere.
*/ */
if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS; vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
return vma->obj; return vma->obj;
@ -1654,6 +1706,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
/* Copy the new buffer offsets back to the user's exec list. */ /* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++) { for (i = 0; i < args->buffer_count; i++) {
exec2_list[i].offset =
gen8_canonical_addr(exec2_list[i].offset);
ret = __copy_to_user(&user_exec_list[i].offset, ret = __copy_to_user(&user_exec_list[i].offset,
&exec2_list[i].offset, &exec2_list[i].offset,
sizeof(user_exec_list[i].offset)); sizeof(user_exec_list[i].offset));
@ -1718,6 +1772,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
int i; int i;
for (i = 0; i < args->buffer_count; i++) { for (i = 0; i < args->buffer_count; i++) {
exec2_list[i].offset =
gen8_canonical_addr(exec2_list[i].offset);
ret = __copy_to_user(&user_exec_list[i].offset, ret = __copy_to_user(&user_exec_list[i].offset,
&exec2_list[i].offset, &exec2_list[i].offset,
sizeof(user_exec_list[i].offset)); sizeof(user_exec_list[i].offset));
View File
@ -59,7 +59,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj) struct drm_i915_gem_object *obj)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int fence_reg_lo, fence_reg_hi; i915_reg_t fence_reg_lo, fence_reg_hi;
int fence_pitch_shift; int fence_pitch_shift;
if (INTEL_INFO(dev)->gen >= 6) { if (INTEL_INFO(dev)->gen >= 6) {
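
fence_reg_lo/hi switching from int to i915_reg_t is part of the tree-wide typed-register conversion visible throughout this commit. The idea in miniature (an analogue of the real i915_reg_t/_MMIO(), not a verbatim copy):

typedef struct {
	uint32_t reg;
} i915_reg_t;

#define _MMIO(r) ((i915_reg_t){ .reg = (r) })

static inline uint32_t i915_mmio_reg_offset(i915_reg_t r)
{
	return r.reg;
}

Passing a bare integer where an i915_reg_t is expected now fails to compile, which is the point of the conversion.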
View File
@ -104,9 +104,11 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{ {
bool has_aliasing_ppgtt; bool has_aliasing_ppgtt;
bool has_full_ppgtt; bool has_full_ppgtt;
bool has_full_48bit_ppgtt;
has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
if (intel_vgpu_active(dev)) if (intel_vgpu_active(dev))
has_full_ppgtt = false; /* emulation is too hard */ has_full_ppgtt = false; /* emulation is too hard */
@ -125,6 +127,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
if (enable_ppgtt == 2 && has_full_ppgtt) if (enable_ppgtt == 2 && has_full_ppgtt)
return 2; return 2;
if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
return 3;
#ifdef CONFIG_INTEL_IOMMU #ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */ /* Disable ppgtt on SNB if VT-d is on. */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
@ -134,14 +139,13 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
#endif #endif
/* Early VLV doesn't have this */ /* Early VLV doesn't have this */
if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
dev->pdev->revision < 0xb) {
DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
return 0; return 0;
} }
if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists) if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
return 2; return has_full_48bit_ppgtt ? 3 : 2;
else else
return has_aliasing_ppgtt ? 1 : 0; return has_aliasing_ppgtt ? 1 : 0;
} }
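
The mode chosen here is visible to userspace through the existing getparam interface; a sketch (whether 3 is reported for full 48-bit mode depends on the kernel version, so treat that as an assumption):

static int query_ppgtt_mode(int fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_ALIASING_PPGTT,
		.value = &value,
	};

	/* 0: none, 1: aliasing, 2: full, 3: full 48-bit (assumed) */
	return ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) ? -1 : value;
}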
@ -654,10 +658,10 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
return ret; return ret;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry)); intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
intel_ring_emit(ring, upper_32_bits(addr)); intel_ring_emit(ring, upper_32_bits(addr));
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry)); intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
intel_ring_emit(ring, lower_32_bits(addr)); intel_ring_emit(ring, lower_32_bits(addr));
intel_ring_advance(ring); intel_ring_advance(ring);
@ -757,10 +761,10 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length, gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
scratch_pte); scratch_pte);
} else { } else {
uint64_t templ4, pml4e; uint64_t pml4e;
struct i915_page_directory_pointer *pdp; struct i915_page_directory_pointer *pdp;
gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) { gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
gen8_ppgtt_clear_pte_range(vm, pdp, start, length, gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
scratch_pte); scratch_pte);
} }
@ -826,10 +830,10 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
cache_level); cache_level);
} else { } else {
struct i915_page_directory_pointer *pdp; struct i915_page_directory_pointer *pdp;
uint64_t templ4, pml4e; uint64_t pml4e;
uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT; uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;
gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) { gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter, gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
start, cache_level); start, cache_level);
} }
@ -897,14 +901,13 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
enum vgt_g2v_type msg; enum vgt_g2v_type msg;
struct drm_device *dev = ppgtt->base.dev; struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int offset = vgtif_reg(pdp0_lo);
int i; int i;
if (USES_FULL_48BIT_PPGTT(dev)) { if (USES_FULL_48BIT_PPGTT(dev)) {
u64 daddr = px_dma(&ppgtt->pml4); u64 daddr = px_dma(&ppgtt->pml4);
I915_WRITE(offset, lower_32_bits(daddr)); I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
I915_WRITE(offset + 4, upper_32_bits(daddr)); I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY); VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
@ -912,10 +915,8 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
for (i = 0; i < GEN8_LEGACY_PDPES; i++) { for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
u64 daddr = i915_page_dir_dma_addr(ppgtt, i); u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
I915_WRITE(offset, lower_32_bits(daddr)); I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
I915_WRITE(offset + 4, upper_32_bits(daddr)); I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
offset += 8;
} }
msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
@ -1010,10 +1011,9 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
{ {
struct drm_device *dev = vm->dev; struct drm_device *dev = vm->dev;
struct i915_page_table *pt; struct i915_page_table *pt;
uint64_t temp;
uint32_t pde; uint32_t pde;
gen8_for_each_pde(pt, pd, start, length, temp, pde) { gen8_for_each_pde(pt, pd, start, length, pde) {
/* Don't reallocate page tables */ /* Don't reallocate page tables */
if (test_bit(pde, pd->used_pdes)) { if (test_bit(pde, pd->used_pdes)) {
/* Scratch is never allocated this way */ /* Scratch is never allocated this way */
@ -1072,13 +1072,12 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
{ {
struct drm_device *dev = vm->dev; struct drm_device *dev = vm->dev;
struct i915_page_directory *pd; struct i915_page_directory *pd;
uint64_t temp;
uint32_t pdpe; uint32_t pdpe;
uint32_t pdpes = I915_PDPES_PER_PDP(dev); uint32_t pdpes = I915_PDPES_PER_PDP(dev);
WARN_ON(!bitmap_empty(new_pds, pdpes)); WARN_ON(!bitmap_empty(new_pds, pdpes));
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
if (test_bit(pdpe, pdp->used_pdpes)) if (test_bit(pdpe, pdp->used_pdpes))
continue; continue;
@ -1126,12 +1125,11 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
{ {
struct drm_device *dev = vm->dev; struct drm_device *dev = vm->dev;
struct i915_page_directory_pointer *pdp; struct i915_page_directory_pointer *pdp;
uint64_t temp;
uint32_t pml4e; uint32_t pml4e;
WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4)); WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));
gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) { gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (!test_bit(pml4e, pml4->used_pml4es)) { if (!test_bit(pml4e, pml4->used_pml4es)) {
pdp = alloc_pdp(dev); pdp = alloc_pdp(dev);
if (IS_ERR(pdp)) if (IS_ERR(pdp))
@ -1215,7 +1213,6 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
struct i915_page_directory *pd; struct i915_page_directory *pd;
const uint64_t orig_start = start; const uint64_t orig_start = start;
const uint64_t orig_length = length; const uint64_t orig_length = length;
uint64_t temp;
uint32_t pdpe; uint32_t pdpe;
uint32_t pdpes = I915_PDPES_PER_PDP(dev); uint32_t pdpes = I915_PDPES_PER_PDP(dev);
int ret; int ret;
@ -1242,7 +1239,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
} }
/* For every page directory referenced, allocate page tables */ /* For every page directory referenced, allocate page tables */
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length, ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES)); new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
if (ret) if (ret)
@ -1254,7 +1251,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
/* Allocations have completed successfully, so set the bitmaps, and do /* Allocations have completed successfully, so set the bitmaps, and do
* the mappings. */ * the mappings. */
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
gen8_pde_t *const page_directory = kmap_px(pd); gen8_pde_t *const page_directory = kmap_px(pd);
struct i915_page_table *pt; struct i915_page_table *pt;
uint64_t pd_len = length; uint64_t pd_len = length;
@ -1264,7 +1261,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
/* Every pd should be allocated, we just did that above. */ /* Every pd should be allocated, we just did that above. */
WARN_ON(!pd); WARN_ON(!pd);
gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) { gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
/* Same reasoning as pd */ /* Same reasoning as pd */
WARN_ON(!pt); WARN_ON(!pt);
WARN_ON(!pd_len); WARN_ON(!pd_len);
@ -1301,6 +1298,8 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
err_out: err_out:
while (pdpe--) { while (pdpe--) {
unsigned long temp;
for_each_set_bit(temp, new_page_tables + pdpe * for_each_set_bit(temp, new_page_tables + pdpe *
BITS_TO_LONGS(I915_PDES), I915_PDES) BITS_TO_LONGS(I915_PDES), I915_PDES)
free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]); free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
@ -1323,7 +1322,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
struct i915_hw_ppgtt *ppgtt = struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base); container_of(vm, struct i915_hw_ppgtt, base);
struct i915_page_directory_pointer *pdp; struct i915_page_directory_pointer *pdp;
uint64_t temp, pml4e; uint64_t pml4e;
int ret = 0; int ret = 0;
/* Do the pml4 allocations first, so we don't need to track the newly /* Do the pml4 allocations first, so we don't need to track the newly
@ -1342,7 +1341,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
"The allocation has spanned more than 512GB. " "The allocation has spanned more than 512GB. "
"It is highly likely this is incorrect."); "It is highly likely this is incorrect.");
gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) { gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
WARN_ON(!pdp); WARN_ON(!pdp);
ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length); ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
@ -1382,10 +1381,9 @@ static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
struct seq_file *m) struct seq_file *m)
{ {
struct i915_page_directory *pd; struct i915_page_directory *pd;
uint64_t temp;
uint32_t pdpe; uint32_t pdpe;
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
struct i915_page_table *pt; struct i915_page_table *pt;
uint64_t pd_len = length; uint64_t pd_len = length;
uint64_t pd_start = start; uint64_t pd_start = start;
@ -1395,7 +1393,7 @@ static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
continue; continue;
seq_printf(m, "\tPDPE #%d\n", pdpe); seq_printf(m, "\tPDPE #%d\n", pdpe);
gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) { gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
uint32_t pte; uint32_t pte;
gen8_pte_t *pt_vaddr; gen8_pte_t *pt_vaddr;
@ -1445,11 +1443,11 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
if (!USES_FULL_48BIT_PPGTT(vm->dev)) { if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m); gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
} else { } else {
uint64_t templ4, pml4e; uint64_t pml4e;
struct i915_pml4 *pml4 = &ppgtt->pml4; struct i915_pml4 *pml4 = &ppgtt->pml4;
struct i915_page_directory_pointer *pdp; struct i915_page_directory_pointer *pdp;
gen8_for_each_pml4e(pdp, pml4, start, length, templ4, pml4e) { gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (!test_bit(pml4e, pml4->used_pml4es)) if (!test_bit(pml4e, pml4->used_pml4es))
continue; continue;
@ -1655,9 +1653,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
return ret; return ret;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit(ring, RING_PP_DIR_DCLV(ring)); intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
intel_ring_emit(ring, PP_DIR_DCLV_2G); intel_ring_emit(ring, PP_DIR_DCLV_2G);
intel_ring_emit(ring, RING_PP_DIR_BASE(ring)); intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
intel_ring_emit(ring, get_pd_offset(ppgtt)); intel_ring_emit(ring, get_pd_offset(ppgtt));
intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring); intel_ring_advance(ring);
@ -1692,9 +1690,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
return ret; return ret;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit(ring, RING_PP_DIR_DCLV(ring)); intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
intel_ring_emit(ring, PP_DIR_DCLV_2G); intel_ring_emit(ring, PP_DIR_DCLV_2G);
intel_ring_emit(ring, RING_PP_DIR_BASE(ring)); intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
intel_ring_emit(ring, get_pd_offset(ppgtt)); intel_ring_emit(ring, get_pd_offset(ppgtt));
intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring); intel_ring_advance(ring);
@ -2345,6 +2343,9 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
int i = 0; int i = 0;
struct sg_page_iter sg_iter; struct sg_page_iter sg_iter;
dma_addr_t addr = 0; /* shut up gcc */ dma_addr_t addr = 0; /* shut up gcc */
int rpm_atomic_seq;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_dma_address(sg_iter.sg) + addr = sg_dma_address(sg_iter.sg) +
@ -2371,6 +2372,34 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
*/ */
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6); POSTING_READ(GFX_FLSH_CNTL_GEN6);
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
struct insert_entries {
struct i915_address_space *vm;
struct sg_table *st;
uint64_t start;
enum i915_cache_level level;
u32 flags;
};
static int gen8_ggtt_insert_entries__cb(void *_arg)
{
struct insert_entries *arg = _arg;
gen8_ggtt_insert_entries(arg->vm, arg->st,
arg->start, arg->level, arg->flags);
return 0;
}
static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
enum i915_cache_level level,
u32 flags)
{
struct insert_entries arg = { vm, st, start, level, flags };
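/* Run the update with every other CPU quiesced (stop_machine(), from
 * <linux/stop_machine.h>): Braswell's GGTT is not coherent against
 * concurrent CPU access, hence the big-hammer __BKL variant. */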
stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
} }
/* /*
@ -2391,6 +2420,9 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
int i = 0; int i = 0;
struct sg_page_iter sg_iter; struct sg_page_iter sg_iter;
dma_addr_t addr = 0; dma_addr_t addr = 0;
int rpm_atomic_seq;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter); addr = sg_page_iter_dma_address(&sg_iter);
@ -2415,6 +2447,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
*/ */
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6); POSTING_READ(GFX_FLSH_CNTL_GEN6);
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
} }
static void gen8_ggtt_clear_range(struct i915_address_space *vm, static void gen8_ggtt_clear_range(struct i915_address_space *vm,
@ -2429,6 +2463,9 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
(gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i; int i;
int rpm_atomic_seq;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
if (WARN(num_entries > max_entries, if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n", "First entry = %d; Num entries = %d (max=%d)\n",
@ -2441,6 +2478,8 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
for (i = 0; i < num_entries; i++) for (i = 0; i < num_entries; i++)
gen8_set_pte(&gtt_base[i], scratch_pte); gen8_set_pte(&gtt_base[i], scratch_pte);
readl(gtt_base); readl(gtt_base);
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
} }
static void gen6_ggtt_clear_range(struct i915_address_space *vm, static void gen6_ggtt_clear_range(struct i915_address_space *vm,
@ -2455,6 +2494,9 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
(gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i; int i;
int rpm_atomic_seq;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
if (WARN(num_entries > max_entries, if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n", "First entry = %d; Num entries = %d (max=%d)\n",
@ -2467,6 +2509,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
for (i = 0; i < num_entries; i++) for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]); iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base); readl(gtt_base);
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
} }
static void i915_ggtt_insert_entries(struct i915_address_space *vm, static void i915_ggtt_insert_entries(struct i915_address_space *vm,
@ -2474,11 +2518,17 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
uint64_t start, uint64_t start,
enum i915_cache_level cache_level, u32 unused) enum i915_cache_level cache_level, u32 unused)
{ {
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned int flags = (cache_level == I915_CACHE_NONE) ? unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
int rpm_atomic_seq;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags); intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
} }
static void i915_ggtt_clear_range(struct i915_address_space *vm, static void i915_ggtt_clear_range(struct i915_address_space *vm,
@ -2486,9 +2536,16 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
uint64_t length, uint64_t length,
bool unused) bool unused)
{ {
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT; unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT;
int rpm_atomic_seq;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
intel_gtt_clear_range(first_entry, num_entries); intel_gtt_clear_range(first_entry, num_entries);
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
} }
static int ggtt_bind_vma(struct i915_vma *vma, static int ggtt_bind_vma(struct i915_vma *vma,
@ -2740,7 +2797,6 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
ppgtt->base.cleanup(&ppgtt->base); ppgtt->base.cleanup(&ppgtt->base);
kfree(ppgtt);
} }
if (drm_mm_initialized(&vm->mm)) { if (drm_mm_initialized(&vm->mm)) {
@ -2990,6 +3046,9 @@ static int gen8_gmch_probe(struct drm_device *dev,
dev_priv->gtt.base.bind_vma = ggtt_bind_vma; dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma; dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
if (IS_CHERRYVIEW(dev_priv))
dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;
return ret; return ret;
} }
@ -3298,7 +3357,7 @@ static struct sg_table *
intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
struct drm_i915_gem_object *obj) struct drm_i915_gem_object *obj)
{ {
struct intel_rotation_info *rot_info = &ggtt_view->rotation_info; struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info;
unsigned int size_pages = rot_info->size >> PAGE_SHIFT; unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
unsigned int size_pages_uv; unsigned int size_pages_uv;
struct sg_page_iter sg_iter; struct sg_page_iter sg_iter;
@ -3530,7 +3589,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
if (view->type == I915_GGTT_VIEW_NORMAL) { if (view->type == I915_GGTT_VIEW_NORMAL) {
return obj->base.size; return obj->base.size;
} else if (view->type == I915_GGTT_VIEW_ROTATED) { } else if (view->type == I915_GGTT_VIEW_ROTATED) {
return view->rotation_info.size; return view->params.rotation_info.size;
} else if (view->type == I915_GGTT_VIEW_PARTIAL) { } else if (view->type == I915_GGTT_VIEW_PARTIAL) {
return view->params.partial.size << PAGE_SHIFT; return view->params.partial.size << PAGE_SHIFT;
} else { } else {
View File
@ -156,13 +156,10 @@ struct i915_ggtt_view {
u64 offset; u64 offset;
unsigned int size; unsigned int size;
} partial; } partial;
struct intel_rotation_info rotation_info;
} params; } params;
struct sg_table *pages; struct sg_table *pages;
union {
struct intel_rotation_info rotation_info;
};
}; };
extern const struct i915_ggtt_view i915_ggtt_view_normal; extern const struct i915_ggtt_view i915_ggtt_view_normal;
@ -458,32 +455,29 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
* between from start until start + length. On gen8+ it simply iterates * between from start until start + length. On gen8+ it simply iterates
* over every page directory entry in a page directory. * over every page directory entry in a page directory.
*/ */
#define gen8_for_each_pde(pt, pd, start, length, temp, iter) \ #define gen8_for_each_pde(pt, pd, start, length, iter) \
for (iter = gen8_pde_index(start); \ for (iter = gen8_pde_index(start); \
length > 0 && iter < I915_PDES ? \ length > 0 && iter < I915_PDES && \
(pt = (pd)->page_table[iter]), 1 : 0; \ (pt = (pd)->page_table[iter], true); \
iter++, \ ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT); \
temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT) - start, \ temp = min(temp - start, length); \
temp = min(temp, length), \ start += temp, length -= temp; }), ++iter)
start += temp, length -= temp)
#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \ #define gen8_for_each_pdpe(pd, pdp, start, length, iter) \
for (iter = gen8_pdpe_index(start); \ for (iter = gen8_pdpe_index(start); \
length > 0 && (iter < I915_PDPES_PER_PDP(dev)) ? \ length > 0 && iter < I915_PDPES_PER_PDP(dev) && \
(pd = (pdp)->page_directory[iter]), 1 : 0; \ (pd = (pdp)->page_directory[iter], true); \
iter++, \ ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \
temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \ temp = min(temp - start, length); \
temp = min(temp, length), \ start += temp, length -= temp; }), ++iter)
start += temp, length -= temp)
#define gen8_for_each_pml4e(pdp, pml4, start, length, temp, iter) \ #define gen8_for_each_pml4e(pdp, pml4, start, length, iter) \
for (iter = gen8_pml4e_index(start); \ for (iter = gen8_pml4e_index(start); \
length > 0 && iter < GEN8_PML4ES_PER_PML4 ? \ length > 0 && iter < GEN8_PML4ES_PER_PML4 && \
(pdp = (pml4)->pdps[iter]), 1 : 0; \ (pdp = (pml4)->pdps[iter], true); \
iter++, \ ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT); \
temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT) - start, \ temp = min(temp - start, length); \
temp = min(temp, length), \ start += temp, length -= temp; }), ++iter)
start += temp, length -= temp)
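
The rewritten iterators drop the caller-supplied temp by folding the bookkeeping into a GCC statement expression in the loop's increment clause. The pattern in isolation (for_each_chunk is a made-up illustration; kernel u64 and min() assumed):

#define for_each_chunk(pos, start, length, step)			\
	for ((pos) = (start);						\
	     (length) > 0;						\
	     ({ u64 __t = min((u64)(step), (u64)(length));		\
		(pos) += __t; (length) -= __t; }))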
static inline uint32_t gen8_pte_index(uint64_t address) static inline uint32_t gen8_pte_index(uint64_t address)
{ {
@ -556,7 +550,7 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
if (a->type != b->type) if (a->type != b->type)
return false; return false;
if (a->type == I915_GGTT_VIEW_PARTIAL) if (a->type != I915_GGTT_VIEW_NORMAL)
return !memcmp(&a->params, &b->params, sizeof(a->params)); return !memcmp(&a->params, &b->params, sizeof(a->params));
return true; return true;
} }
View File
@ -103,7 +103,7 @@ static int render_state_setup(struct render_state *so)
if (ret) if (ret)
return ret; return ret;
page = sg_page(so->obj->pages->sgl); page = i915_gem_object_get_dirty_page(so->obj, 0);
d = kmap(page); d = kmap(page);
while (i < rodata->batch_items) { while (i < rodata->batch_items) {
View File
@ -367,7 +367,8 @@ int i915_gem_init_stolen(struct drm_device *dev)
&reserved_size); &reserved_size);
break; break;
default: default:
if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv)) if (IS_BROADWELL(dev_priv) ||
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
bdw_get_stolen_reserved(dev_priv, &reserved_base, bdw_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size); &reserved_size);
else else
View File
@ -176,6 +176,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
return -EINVAL; return -EINVAL;
} }
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
if (obj->pin_display || obj->framebuffer_references) { if (obj->pin_display || obj->framebuffer_references) {
ret = -EBUSY; ret = -EBUSY;
@ -269,6 +271,8 @@ err:
drm_gem_object_unreference(&obj->base); drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
intel_runtime_pm_put(dev_priv);
return ret; return ret;
} }
View File
@ -367,6 +367,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "Suspend count: %u\n", error->suspend_count); err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device); err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu); err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
if (HAS_CSR(dev)) {
struct intel_csr *csr = &dev_priv->csr;
err_printf(m, "DMC loaded: %s\n",
yesno(csr->dmc_payload != NULL));
err_printf(m, "DMC fw version: %d.%d\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
}
err_printf(m, "EIR: 0x%08x\n", error->eir); err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier); err_printf(m, "IER: 0x%08x\n", error->ier);
if (INTEL_INFO(dev)->gen >= 8) { if (INTEL_INFO(dev)->gen >= 8) {
@ -863,7 +874,7 @@ static void i915_record_ring_state(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen >= 6) { if (INTEL_INFO(dev)->gen >= 6) {
ering->rc_psmi = I915_READ(ring->mmio_base + 0x50); ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base));
ering->fault_reg = I915_READ(RING_FAULT_REG(ring)); ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
if (INTEL_INFO(dev)->gen >= 8) if (INTEL_INFO(dev)->gen >= 8)
gen8_record_semaphore_state(dev_priv, error, ring, ering); gen8_record_semaphore_state(dev_priv, error, ring, ering);
@ -900,7 +911,7 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->ctl = I915_READ_CTL(ring); ering->ctl = I915_READ_CTL(ring);
if (I915_NEED_GFX_HWS(dev)) { if (I915_NEED_GFX_HWS(dev)) {
int mmio; i915_reg_t mmio;
if (IS_GEN7(dev)) { if (IS_GEN7(dev)) {
switch (ring->id) { switch (ring->id) {
@ -1072,6 +1083,25 @@ static void i915_gem_record_rings(struct drm_device *dev,
list_for_each_entry(request, &ring->request_list, list) { list_for_each_entry(request, &ring->request_list, list) {
struct drm_i915_error_request *erq; struct drm_i915_error_request *erq;
if (count >= error->ring[i].num_requests) {
/*
* If the ring request list was changed in
* between the point where the error request
* list was created and dimensioned and this
* point then just exit early to avoid crashes.
*
* We don't need to communicate that the
* request list changed state during error
* state capture and that the error state is
* slightly incorrect as a consequence since we
* are typically only interested in the request
* list state at the point of error state
* capture, not in any changes happening during
* the capture.
*/
break;
}
erq = &error->ring[i].requests[count++]; erq = &error->ring[i].requests[count++];
erq->seqno = request->seqno; erq->seqno = request->seqno;
erq->jiffies = request->emitted_jiffies; erq->jiffies = request->emitted_jiffies;
@ -1182,7 +1212,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
if (IS_VALLEYVIEW(dev)) { if (IS_VALLEYVIEW(dev)) {
error->gtier[0] = I915_READ(GTIER); error->gtier[0] = I915_READ(GTIER);
error->ier = I915_READ(VLV_IER); error->ier = I915_READ(VLV_IER);
error->forcewake = I915_READ(FORCEWAKE_VLV); error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
} }
if (IS_GEN7(dev)) if (IS_GEN7(dev))
@ -1194,14 +1224,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
} }
if (IS_GEN6(dev)) { if (IS_GEN6(dev)) {
error->forcewake = I915_READ(FORCEWAKE); error->forcewake = I915_READ_FW(FORCEWAKE);
error->gab_ctl = I915_READ(GAB_CTL); error->gab_ctl = I915_READ(GAB_CTL);
error->gfx_mode = I915_READ(GFX_MODE); error->gfx_mode = I915_READ(GFX_MODE);
} }
/* 2: Registers which belong to multiple generations */ /* 2: Registers which belong to multiple generations */
if (INTEL_INFO(dev)->gen >= 7) if (INTEL_INFO(dev)->gen >= 7)
error->forcewake = I915_READ(FORCEWAKE_MT); error->forcewake = I915_READ_FW(FORCEWAKE_MT);
if (INTEL_INFO(dev)->gen >= 6) { if (INTEL_INFO(dev)->gen >= 6) {
error->derrmr = I915_READ(DERRMR); error->derrmr = I915_READ(DERRMR);
View File
@ -26,7 +26,7 @@
/* Definitions of GuC H/W registers, bits, etc */ /* Definitions of GuC H/W registers, bits, etc */
#define GUC_STATUS 0xc000 #define GUC_STATUS _MMIO(0xc000)
#define GS_BOOTROM_SHIFT 1 #define GS_BOOTROM_SHIFT 1
#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT) #define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT)
#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT) #define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT)
@ -39,40 +39,41 @@
#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT) #define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT) #define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT)
#define SOFT_SCRATCH(n) (0xc180 + ((n) * 4)) #define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4)
#define UOS_RSA_SCRATCH(i) (0xc200 + (i) * 4) #define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4)
#define DMA_ADDR_0_LOW 0xc300 #define UOS_RSA_SCRATCH_MAX_COUNT 64
#define DMA_ADDR_0_HIGH 0xc304 #define DMA_ADDR_0_LOW _MMIO(0xc300)
#define DMA_ADDR_1_LOW 0xc308 #define DMA_ADDR_0_HIGH _MMIO(0xc304)
#define DMA_ADDR_1_HIGH 0xc30c #define DMA_ADDR_1_LOW _MMIO(0xc308)
#define DMA_ADDR_1_HIGH _MMIO(0xc30c)
#define DMA_ADDRESS_SPACE_WOPCM (7 << 16) #define DMA_ADDRESS_SPACE_WOPCM (7 << 16)
#define DMA_ADDRESS_SPACE_GTT (8 << 16) #define DMA_ADDRESS_SPACE_GTT (8 << 16)
#define DMA_COPY_SIZE 0xc310 #define DMA_COPY_SIZE _MMIO(0xc310)
#define DMA_CTRL 0xc314 #define DMA_CTRL _MMIO(0xc314)
#define UOS_MOVE (1<<4) #define UOS_MOVE (1<<4)
#define START_DMA (1<<0) #define START_DMA (1<<0)
#define DMA_GUC_WOPCM_OFFSET 0xc340 #define DMA_GUC_WOPCM_OFFSET _MMIO(0xc340)
#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */ #define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
#define GUC_MAX_IDLE_COUNT 0xC3E4 #define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4)
#define GUC_WOPCM_SIZE 0xc050 #define GUC_WOPCM_SIZE _MMIO(0xc050)
#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */ #define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */
/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */ /* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE) #define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE)
#define GEN8_GT_PM_CONFIG 0x138140 #define GEN8_GT_PM_CONFIG _MMIO(0x138140)
#define GEN9LP_GT_PM_CONFIG 0x138140 #define GEN9LP_GT_PM_CONFIG _MMIO(0x138140)
#define GEN9_GT_PM_CONFIG 0x13816c #define GEN9_GT_PM_CONFIG _MMIO(0x13816c)
#define GT_DOORBELL_ENABLE (1<<0) #define GT_DOORBELL_ENABLE (1<<0)
#define GEN8_GTCR 0x4274 #define GEN8_GTCR _MMIO(0x4274)
#define GEN8_GTCR_INVALIDATE (1<<0) #define GEN8_GTCR_INVALIDATE (1<<0)
#define GUC_ARAT_C6DIS 0xA178 #define GUC_ARAT_C6DIS _MMIO(0xA178)
#define GUC_SHIM_CONTROL 0xc064 #define GUC_SHIM_CONTROL _MMIO(0xc064)
#define GUC_DISABLE_SRAM_INIT_TO_ZEROES (1<<0) #define GUC_DISABLE_SRAM_INIT_TO_ZEROES (1<<0)
#define GUC_ENABLE_READ_CACHE_LOGIC (1<<1) #define GUC_ENABLE_READ_CACHE_LOGIC (1<<1)
#define GUC_ENABLE_MIA_CACHING (1<<2) #define GUC_ENABLE_MIA_CACHING (1<<2)
@ -89,21 +90,21 @@
GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \ GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \
GUC_ENABLE_MIA_CLOCK_GATING) GUC_ENABLE_MIA_CLOCK_GATING)
#define HOST2GUC_INTERRUPT 0xc4c8 #define HOST2GUC_INTERRUPT _MMIO(0xc4c8)
#define HOST2GUC_TRIGGER (1<<0) #define HOST2GUC_TRIGGER (1<<0)
#define DRBMISC1 0x1984 #define DRBMISC1 0x1984
#define DOORBELL_ENABLE (1<<0) #define DOORBELL_ENABLE (1<<0)
#define GEN8_DRBREGL(x) (0x1000 + (x) * 8) #define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8)
#define GEN8_DRB_VALID (1<<0) #define GEN8_DRB_VALID (1<<0)
#define GEN8_DRBREGU(x) (GEN8_DRBREGL(x) + 4) #define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4)
#define DE_GUCRMR 0x44054 #define DE_GUCRMR _MMIO(0x44054)
#define GUC_BCS_RCS_IER 0xC550 #define GUC_BCS_RCS_IER _MMIO(0xC550)
#define GUC_VCS2_VCS1_IER 0xC554 #define GUC_VCS2_VCS1_IER _MMIO(0xC554)
#define GUC_WD_VECS_IER 0xC558 #define GUC_WD_VECS_IER _MMIO(0xC558)
#define GUC_PM_P24C_IER 0xC55C #define GUC_PM_P24C_IER _MMIO(0xC55C)
#endif #endif
View File
@ -23,11 +23,11 @@
*/ */
#include <linux/firmware.h> #include <linux/firmware.h>
#include <linux/circ_buf.h> #include <linux/circ_buf.h>
#include "intel_drv.h" #include "i915_drv.h"
#include "intel_guc.h" #include "intel_guc.h"
/** /**
* DOC: GuC Client * DOC: GuC-based command submission
* *
* i915_guc_client: * i915_guc_client:
* We use the term client to avoid confusion with contexts. A i915_guc_client is * We use the term client to avoid confusion with contexts. A i915_guc_client is
@ -86,7 +86,6 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
return -EINVAL; return -EINVAL;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
spin_lock(&dev_priv->guc.host2guc_lock);
dev_priv->guc.action_count += 1; dev_priv->guc.action_count += 1;
dev_priv->guc.action_cmd = data[0]; dev_priv->guc.action_cmd = data[0];
@ -119,7 +118,6 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
} }
dev_priv->guc.action_status = status; dev_priv->guc.action_status = status;
spin_unlock(&dev_priv->guc.host2guc_lock);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret; return ret;
@ -161,9 +159,9 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
/* WaRsDisableCoarsePowerGating:skl,bxt */ /* WaRsDisableCoarsePowerGating:skl,bxt */
if (!intel_enable_rc6(dev_priv->dev) || if (!intel_enable_rc6(dev_priv->dev) ||
(IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
(IS_SKL_GT3(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)) || (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
(IS_SKL_GT4(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0))) (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
data[1] = 0; data[1] = 0;
else else
/* bit 0 and 1 are for Render and Media domain separately */ /* bit 0 and 1 are for Render and Media domain separately */
@ -258,7 +256,7 @@ static void guc_disable_doorbell(struct intel_guc *guc,
struct drm_i915_private *dev_priv = guc_to_i915(guc); struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct guc_doorbell_info *doorbell; struct guc_doorbell_info *doorbell;
void *base; void *base;
int drbreg = GEN8_DRBREGL(client->doorbell_id); i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
int value; int value;
base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0)); base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
@ -292,16 +290,12 @@ static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
const uint32_t cacheline_size = cache_line_size(); const uint32_t cacheline_size = cache_line_size();
uint32_t offset; uint32_t offset;
spin_lock(&guc->host2guc_lock);
/* Doorbell uses a single cache line within a page */ /* Doorbell uses a single cache line within a page */
offset = offset_in_page(guc->db_cacheline); offset = offset_in_page(guc->db_cacheline);
/* Moving to next cache line to reduce contention */ /* Moving to next cache line to reduce contention */
guc->db_cacheline += cacheline_size; guc->db_cacheline += cacheline_size;
spin_unlock(&guc->host2guc_lock);
DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n", DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
offset, guc->db_cacheline, cacheline_size); offset, guc->db_cacheline, cacheline_size);
@ -322,13 +316,11 @@ static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
const uint16_t end = start + half; const uint16_t end = start + half;
uint16_t id; uint16_t id;
spin_lock(&guc->host2guc_lock);
id = find_next_zero_bit(guc->doorbell_bitmap, end, start); id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
if (id == end) if (id == end)
id = GUC_INVALID_DOORBELL_ID; id = GUC_INVALID_DOORBELL_ID;
else else
bitmap_set(guc->doorbell_bitmap, id, 1); bitmap_set(guc->doorbell_bitmap, id, 1);
spin_unlock(&guc->host2guc_lock);
DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n", DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
hi_pri ? "high" : "normal", id); hi_pri ? "high" : "normal", id);
@ -338,9 +330,7 @@ static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
static void release_doorbell(struct intel_guc *guc, uint16_t id) static void release_doorbell(struct intel_guc *guc, uint16_t id)
{ {
spin_lock(&guc->host2guc_lock);
bitmap_clear(guc->doorbell_bitmap, id, 1); bitmap_clear(guc->doorbell_bitmap, id, 1);
spin_unlock(&guc->host2guc_lock);
} }
/* /*
@ -487,16 +477,13 @@ static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
struct guc_process_desc *desc; struct guc_process_desc *desc;
void *base; void *base;
u32 size = sizeof(struct guc_wq_item); u32 size = sizeof(struct guc_wq_item);
int ret = 0, timeout_counter = 200; int ret = -ETIMEDOUT, timeout_counter = 200;
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0)); base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
desc = base + gc->proc_desc_offset; desc = base + gc->proc_desc_offset;
while (timeout_counter-- > 0) { while (timeout_counter-- > 0) {
ret = wait_for_atomic(CIRC_SPACE(gc->wq_tail, desc->head, if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
gc->wq_size) >= size, 1);
if (!ret) {
*offset = gc->wq_tail; *offset = gc->wq_tail;
/* advance the tail for next workqueue item */ /* advance the tail for next workqueue item */
@ -505,7 +492,11 @@ static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
/* this will break the loop */ /* this will break the loop */
timeout_counter = 0; timeout_counter = 0;
ret = 0;
} }
if (timeout_counter)
usleep_range(1000, 2000);
}; };
kunmap_atomic(base); kunmap_atomic(base);
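
The rework polls CIRC_SPACE() directly and sleeps between attempts instead of busy-waiting with wait_for_atomic(). For reference, CIRC_SPACE() from <linux/circ_buf.h> computes the producer's free space in a power-of-two ring, equivalent to:

#define CIRC_SPACE_EQUIV(head, tail, size) \
	(((tail) - ((head) + 1)) & ((size) - 1))

/* an empty ring (head == tail) reports size - 1 free slots: one slot
 * is sacrificed to distinguish full from empty */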
@ -577,7 +568,7 @@ static void lr_context_update(struct drm_i915_gem_request *rq)
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj)); WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
WARN_ON(!i915_gem_obj_is_pinned(rb_obj)); WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page); reg_state = kmap_atomic(page);
reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj); reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
@ -588,8 +579,7 @@ static void lr_context_update(struct drm_i915_gem_request *rq)
/** /**
* i915_guc_submit() - Submit commands through GuC * i915_guc_submit() - Submit commands through GuC
* @client: the guc client where commands will go through * @client: the guc client where commands will go through
* @ctx: LRC where commands come from * @rq: request associated with the commands
* @ring: HW engine that will excute the commands
* *
* Return: 0 if succeed * Return: 0 if succeed
*/ */
@ -598,15 +588,12 @@ int i915_guc_submit(struct i915_guc_client *client,
{ {
struct intel_guc *guc = client->guc; struct intel_guc *guc = client->guc;
enum intel_ring_id ring_id = rq->ring->id; enum intel_ring_id ring_id = rq->ring->id;
unsigned long flags;
int q_ret, b_ret; int q_ret, b_ret;
/* Need this because of the deferred pin ctx and ring */ /* Need this because of the deferred pin ctx and ring */
/* Shall we move this right after ring is pinned? */ /* Shall we move this right after ring is pinned? */
lr_context_update(rq); lr_context_update(rq);
spin_lock_irqsave(&client->wq_lock, flags);
q_ret = guc_add_workqueue_item(client, rq); q_ret = guc_add_workqueue_item(client, rq);
if (q_ret == 0) if (q_ret == 0)
b_ret = guc_ring_doorbell(client); b_ret = guc_ring_doorbell(client);
@ -621,12 +608,8 @@ int i915_guc_submit(struct i915_guc_client *client,
} else { } else {
client->retcode = 0; client->retcode = 0;
} }
spin_unlock_irqrestore(&client->wq_lock, flags);
spin_lock(&guc->host2guc_lock);
guc->submissions[ring_id] += 1; guc->submissions[ring_id] += 1;
guc->last_seqno[ring_id] = rq->seqno; guc->last_seqno[ring_id] = rq->seqno;
spin_unlock(&guc->host2guc_lock);
return q_ret; return q_ret;
} }
@ -731,7 +714,8 @@ static void guc_client_free(struct drm_device *dev,
* The kernel client to replace ExecList submission is created with * The kernel client to replace ExecList submission is created with
* NORMAL priority. Priority of a client for scheduler can be HIGH, * NORMAL priority. Priority of a client for scheduler can be HIGH,
* while a preemption context can use CRITICAL. * while a preemption context can use CRITICAL.
* @ctx the context to own the client (we use the default render context) * @ctx: the context that owns the client (we use the default render
* context)
* *
* Return: An i915_guc_client object if success. * Return: An i915_guc_client object if success.
*/ */
@ -768,7 +752,6 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
client->client_obj = obj; client->client_obj = obj;
client->wq_offset = GUC_DB_SIZE; client->wq_offset = GUC_DB_SIZE;
client->wq_size = GUC_WQ_SIZE; client->wq_size = GUC_WQ_SIZE;
spin_lock_init(&client->wq_lock);
client->doorbell_offset = select_doorbell_cacheline(guc); client->doorbell_offset = select_doorbell_cacheline(guc);
@ -871,8 +854,6 @@ int i915_guc_submission_init(struct drm_device *dev)
if (!guc->ctx_pool_obj) if (!guc->ctx_pool_obj)
return -ENOMEM; return -ENOMEM;
spin_lock_init(&dev_priv->guc.host2guc_lock);
ida_init(&guc->ctx_ids); ida_init(&guc->ctx_ids);
guc_create_log(guc); guc_create_log(guc);
View File
@ -139,7 +139,8 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
/* /*
* We should clear IMR at preinstall/uninstall, and just check at postinstall. * We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/ */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg) static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{ {
u32 val = I915_READ(reg); u32 val = I915_READ(reg);
@ -147,7 +148,7 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
return; return;
WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
reg, val); i915_mmio_reg_offset(reg), val);
I915_WRITE(reg, 0xffffffff); I915_WRITE(reg, 0xffffffff);
POSTING_READ(reg); POSTING_READ(reg);
I915_WRITE(reg, 0xffffffff); I915_WRITE(reg, 0xffffffff);
@ -214,7 +215,7 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
* @interrupt_mask: mask of interrupt bits to update * @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable * @enabled_irq_mask: mask of interrupt bits to enable
*/ */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv, void ilk_update_display_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask, uint32_t interrupt_mask,
uint32_t enabled_irq_mask) uint32_t enabled_irq_mask)
{ {
@ -238,18 +239,6 @@ static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
} }
} }
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
ilk_update_display_irq(dev_priv, mask, mask);
}
void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
ilk_update_display_irq(dev_priv, mask, 0);
}
/** /**
* ilk_update_gt_irq - update GTIMR * ilk_update_gt_irq - update GTIMR
* @dev_priv: driver private * @dev_priv: driver private
@ -283,17 +272,17 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
ilk_update_gt_irq(dev_priv, mask, 0); ilk_update_gt_irq(dev_priv, mask, 0);
} }
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv) static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{ {
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
} }
static u32 gen6_pm_imr(struct drm_i915_private *dev_priv) static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{ {
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
} }
static u32 gen6_pm_ier(struct drm_i915_private *dev_priv) static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{ {
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER; return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
} }
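Note the recurring type change in this commit: register variables move from u32 to i915_reg_t. Wrapping the offset in a one-member struct makes it a distinct type, so a register offset can no longer be confused with a register value or a plain integer at compile time. A rough userspace sketch of the idea; the real i915_reg_t and _MMIO() live in i915_reg.h, and the names and offset below are illustrative:

#include <stdint.h>
#include <stdio.h>

/* a register offset wrapped in a struct, so it is a distinct C type */
typedef struct { uint32_t reg; } demo_reg_t;

#define DEMO_MMIO(offset) ((demo_reg_t){ .reg = (offset) })

static uint32_t demo_reg_offset(demo_reg_t r)
{
        return r.reg;
}

static uint32_t demo_read(demo_reg_t r)
{
        /* a real driver would do readl(mmio_base + r.reg) here */
        return demo_reg_offset(r);
}

int main(void)
{
        demo_reg_t pmiir = DEMO_MMIO(0x4028);   /* illustrative offset */

        printf("read(0x%x) = 0x%x\n", demo_reg_offset(pmiir), demo_read(pmiir));
        /* demo_read(0x4028) would now fail to compile: wrong type */
        return 0;
}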
@ -350,7 +339,7 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
void gen6_reset_rps_interrupts(struct drm_device *dev) void gen6_reset_rps_interrupts(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t reg = gen6_pm_iir(dev_priv); i915_reg_t reg = gen6_pm_iir(dev_priv);
spin_lock_irq(&dev_priv->irq_lock); spin_lock_irq(&dev_priv->irq_lock);
I915_WRITE(reg, dev_priv->pm_rps_events); I915_WRITE(reg, dev_priv->pm_rps_events);
@ -401,7 +390,6 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
dev_priv->rps.interrupts_enabled = false; dev_priv->rps.interrupts_enabled = false;
spin_unlock_irq(&dev_priv->irq_lock); spin_unlock_irq(&dev_priv->irq_lock);
cancel_work_sync(&dev_priv->rps.work);
spin_lock_irq(&dev_priv->irq_lock); spin_lock_irq(&dev_priv->irq_lock);
@ -447,6 +435,38 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
} }
} }
/**
* bdw_update_pipe_irq - update DE pipe interrupt
* @dev_priv: driver private
* @pipe: pipe whose interrupt to update
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
enum pipe pipe,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
{
uint32_t new_val;
assert_spin_locked(&dev_priv->irq_lock);
WARN_ON(enabled_irq_mask & ~interrupt_mask);
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
new_val = dev_priv->de_irq_mask[pipe];
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != dev_priv->de_irq_mask[pipe]) {
dev_priv->de_irq_mask[pipe] = new_val;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}
}
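The mask arithmetic in bdw_update_pipe_irq() deserves a worked example: in an IMR register a set bit means masked (disabled), so the update clears every bit named in interrupt_mask and then re-sets only those not present in enabled_irq_mask. A self-contained check of that rule, with an illustrative bit position:

#include <stdint.h>
#include <stdio.h>

/* same update rule as bdw_update_pipe_irq(); a set IMR bit means masked */
static uint32_t update_imr(uint32_t cur, uint32_t interrupt_mask,
                           uint32_t enabled_irq_mask)
{
        uint32_t new_val = cur;

        new_val &= ~interrupt_mask;                     /* drop the touched bits */
        new_val |= ~enabled_irq_mask & interrupt_mask;  /* re-mask disabled ones */
        return new_val;
}

int main(void)
{
        uint32_t imr = 0xffffffff;      /* everything masked initially */
        uint32_t vblank = 1u << 0;      /* illustrative bit position */

        imr = update_imr(imr, vblank, vblank);  /* enable: bit cleared */
        printf("enable : %08x\n", imr);

        imr = update_imr(imr, vblank, 0);       /* disable: bit set again */
        printf("disable: %08x\n", imr);
        return 0;
}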
/** /**
* ibx_display_interrupt_update - update SDEIMR * ibx_display_interrupt_update - update SDEIMR
* @dev_priv: driver private * @dev_priv: driver private
@ -476,7 +496,7 @@ static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 enable_mask, u32 status_mask) u32 enable_mask, u32 status_mask)
{ {
u32 reg = PIPESTAT(pipe); i915_reg_t reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
assert_spin_locked(&dev_priv->irq_lock); assert_spin_locked(&dev_priv->irq_lock);
@ -503,7 +523,7 @@ static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 enable_mask, u32 status_mask) u32 enable_mask, u32 status_mask)
{ {
u32 reg = PIPESTAT(pipe); i915_reg_t reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
assert_spin_locked(&dev_priv->irq_lock); assert_spin_locked(&dev_priv->irq_lock);
@ -559,7 +579,7 @@ i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
{ {
u32 enable_mask; u32 enable_mask;
if (IS_VALLEYVIEW(dev_priv->dev)) if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
status_mask); status_mask);
else else
@ -573,7 +593,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
{ {
u32 enable_mask; u32 enable_mask;
if (IS_VALLEYVIEW(dev_priv->dev)) if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
status_mask); status_mask);
else else
@ -664,8 +684,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long high_frame; i915_reg_t high_frame, low_frame;
unsigned long low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
struct intel_crtc *intel_crtc = struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
@ -716,9 +735,7 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
} }
/* raw reads, only for fast reads of display block, no need for forcewake etc. */ /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
static int __intel_get_crtc_scanline(struct intel_crtc *crtc) static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{ {
struct drm_device *dev = crtc->base.dev; struct drm_device *dev = crtc->base.dev;
@ -732,9 +749,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
vtotal /= 2; vtotal /= 2;
if (IS_GEN2(dev)) if (IS_GEN2(dev))
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
else else
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
/* /*
* On HSW, the DSL reg (0x70000) appears to return 0 if we * On HSW, the DSL reg (0x70000) appears to return 0 if we
@ -826,7 +843,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
* We can split this into vertical and horizontal * We can split this into vertical and horizontal
* scanout position. * scanout position.
*/ */
position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
/* convert to pixel counts */ /* convert to pixel counts */
vbl_start *= htotal; vbl_start *= htotal;
@ -993,51 +1010,68 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
} }
static bool vlv_c0_above(struct drm_i915_private *dev_priv,
const struct intel_rps_ei *old,
const struct intel_rps_ei *now,
int threshold)
{
u64 time, c0;
unsigned int mul = 100;
if (old->cz_clock == 0)
return false;
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
mul <<= 8;
time = now->cz_clock - old->cz_clock;
time *= threshold * dev_priv->czclk_freq;
/* Workload can be split between render + media, e.g. SwapBuffers
* being blitted in X after being rendered in mesa. To account for
* this we need to combine both engines into our activity counter.
*/
c0 = now->render_c0 - old->render_c0;
c0 += now->media_c0 - old->media_c0;
c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
return c0 >= time;
}
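vlv_c0_above() reduces to one comparison: combined render plus media C0 residency against threshold percent of the elapsed interval, after both sides are put on a common scale. A loose arithmetic model of that comparison; the scale factors below are illustrative, not the real VLV_CZ_CLOCK_TO_MILLI_SEC or high-range multipliers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* loosely modelled on vlv_c0_above(): is combined render+media busyness
 * at least threshold percent of the elapsed interval? */
static bool c0_above(uint64_t dt_czclk, uint64_t d_render, uint64_t d_media,
                     unsigned threshold_pct, unsigned czclk_khz)
{
        uint64_t time = dt_czclk * threshold_pct * czclk_khz;
        uint64_t c0   = (d_render + d_media) * 100u * 1000u;

        return c0 >= time;
}

int main(void)
{
        /* 90% busy over the interval vs an 85% up-threshold: ramp up */
        printf("%d\n", c0_above(1000, 700, 200, 85, 1000));
        /* 20% busy vs a 45% down-threshold: not above, so ramp down */
        printf("%d\n", c0_above(1000, 100, 100, 45, 1000));
        return 0;
}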
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{ {
memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei)); vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
dev_priv->rps.up_ei = dev_priv->rps.down_ei;
} }
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{ {
const struct intel_rps_ei *prev = &dev_priv->rps.ei;
struct intel_rps_ei now; struct intel_rps_ei now;
u32 events = 0; u32 events = 0;
if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
return 0; return 0;
vlv_c0_read(dev_priv, &now); vlv_c0_read(dev_priv, &now);
if (now.cz_clock == 0) if (now.cz_clock == 0)
return 0; return 0;
if (prev->cz_clock) { if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
u64 time, c0; if (!vlv_c0_above(dev_priv,
unsigned int mul; &dev_priv->rps.down_ei, &now,
dev_priv->rps.down_threshold))
mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */ events |= GEN6_PM_RP_DOWN_THRESHOLD;
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) dev_priv->rps.down_ei = now;
mul <<= 8; }
time = now.cz_clock - prev->cz_clock; if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
time *= dev_priv->czclk_freq; if (vlv_c0_above(dev_priv,
&dev_priv->rps.up_ei, &now,
/* Workload can be split between render + media, dev_priv->rps.up_threshold))
* e.g. SwapBuffers being blitted in X after being rendered in events |= GEN6_PM_RP_UP_THRESHOLD;
* mesa. To account for this we need to combine both engines dev_priv->rps.up_ei = now;
* into our activity counter.
*/
c0 = now.render_c0 - prev->render_c0;
c0 += now.media_c0 - prev->media_c0;
c0 *= mul;
if (c0 > time * dev_priv->rps.up_threshold)
events = GEN6_PM_RP_UP_THRESHOLD;
else if (c0 < time * dev_priv->rps.down_threshold)
events = GEN6_PM_RP_DOWN_THRESHOLD;
} }
dev_priv->rps.ei = now;
return events; return events;
} }
@ -1067,6 +1101,14 @@ static void gen6_pm_rps_work(struct work_struct *work)
spin_unlock_irq(&dev_priv->irq_lock); spin_unlock_irq(&dev_priv->irq_lock);
return; return;
} }
/*
* The RPS work is synced during runtime suspend, we don't require a
* wakeref. TODO: instead of disabling the asserts make sure that we
* always hold an RPM reference while the work is running.
*/
DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
pm_iir = dev_priv->rps.pm_iir; pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0; dev_priv->rps.pm_iir = 0;
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
@ -1079,7 +1121,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
WARN_ON(pm_iir & ~dev_priv->pm_rps_events); WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
return; goto out;
mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->rps.hw_lock);
@ -1134,6 +1176,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
intel_set_rps(dev_priv->dev, new_delay); intel_set_rps(dev_priv->dev, new_delay);
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
out:
ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
} }
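This hunk shows a pattern repeated throughout the file: handlers that may run while runtime PM believes the device is suspended now bracket their body with DISABLE_RPM_WAKEREF_ASSERTS()/ENABLE_RPM_WAKEREF_ASSERTS() (or the lower-case variants), and early returns become goto out so the closing call always runs. Schematically, with placeholder helpers:

#include <stdio.h>

/* placeholder helpers, not the driver's real assert machinery */
static void disable_asserts(void) { printf("RPM wakeref asserts off\n"); }
static void enable_asserts(void)  { printf("RPM wakeref asserts on\n"); }
static int  work_pending(void)    { return 0; }

static void handler(void)
{
        disable_asserts();      /* the work is synced during runtime suspend */

        if (!work_pending())
                goto out;       /* a bare return here would skip the re-enable */

        /* ... the actual interrupt or work processing ... */

out:
        enable_asserts();
}

int main(void)
{
        handler();
        return 0;
}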
@ -1170,7 +1214,7 @@ static void ivybridge_parity_work(struct work_struct *work)
POSTING_READ(GEN7_MISCCPCTL); POSTING_READ(GEN7_MISCCPCTL);
while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
u32 reg; i915_reg_t reg;
slice--; slice--;
if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
@ -1178,7 +1222,7 @@ static void ivybridge_parity_work(struct work_struct *work)
dev_priv->l3_parity.which_slice &= ~(1<<slice); dev_priv->l3_parity.which_slice &= ~(1<<slice);
reg = GEN7_L3CDERRST1 + (slice * 0x200); reg = GEN7_L3CDERRST1(slice);
error_status = I915_READ(reg); error_status = I915_READ(reg);
row = GEN7_PARITY_ERROR_ROW(error_status); row = GEN7_PARITY_ERROR_ROW(error_status);
@ -1258,70 +1302,69 @@ static void snb_gt_irq_handler(struct drm_device *dev,
ivybridge_parity_error_irq_handler(dev, gt_iir); ivybridge_parity_error_irq_handler(dev, gt_iir);
} }
static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
{
if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
notify_ring(ring);
if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
intel_lrc_irq_handler(ring);
}
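gen8_cs_irq_handler() folds four copies of the same per-engine test into one helper: the user-interrupt and context-switch bits for each engine live at an engine-specific shift inside a shared IIR word. A small demonstration of that layout, with made-up shift values:

#include <stdint.h>
#include <stdio.h>

#define USER_INTERRUPT  (1u << 0)  /* stands in for GT_RENDER_USER_INTERRUPT */
#define CONTEXT_SWITCH  (1u << 3)  /* stands in for GT_CONTEXT_SWITCH_INTERRUPT */

enum { RCS_SHIFT = 0, BCS_SHIFT = 16 };  /* made-up per-engine shifts */

static void cs_irq_handler(const char *engine, uint32_t iir, int shift)
{
        if (iir & (USER_INTERRUPT << shift))
                printf("%s: notify ring\n", engine);
        if (iir & (CONTEXT_SWITCH << shift))
                printf("%s: context switch\n", engine);
}

int main(void)
{
        /* one IIR word carrying an RCS user interrupt and a BCS switch */
        uint32_t iir = (USER_INTERRUPT << RCS_SHIFT) |
                       (CONTEXT_SWITCH << BCS_SHIFT);

        cs_irq_handler("rcs", iir, RCS_SHIFT);
        cs_irq_handler("bcs", iir, BCS_SHIFT);
        return 0;
}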
static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 master_ctl) u32 master_ctl)
{ {
irqreturn_t ret = IRQ_NONE; irqreturn_t ret = IRQ_NONE;
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
u32 tmp = I915_READ_FW(GEN8_GT_IIR(0)); u32 iir = I915_READ_FW(GEN8_GT_IIR(0));
if (tmp) { if (iir) {
I915_WRITE_FW(GEN8_GT_IIR(0), tmp); I915_WRITE_FW(GEN8_GT_IIR(0), iir);
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT)) gen8_cs_irq_handler(&dev_priv->ring[RCS],
intel_lrc_irq_handler(&dev_priv->ring[RCS]); iir, GEN8_RCS_IRQ_SHIFT);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
notify_ring(&dev_priv->ring[RCS]);
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT)) gen8_cs_irq_handler(&dev_priv->ring[BCS],
intel_lrc_irq_handler(&dev_priv->ring[BCS]); iir, GEN8_BCS_IRQ_SHIFT);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
notify_ring(&dev_priv->ring[BCS]);
} else } else
DRM_ERROR("The master control interrupt lied (GT0)!\n"); DRM_ERROR("The master control interrupt lied (GT0)!\n");
} }
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
u32 tmp = I915_READ_FW(GEN8_GT_IIR(1)); u32 iir = I915_READ_FW(GEN8_GT_IIR(1));
if (tmp) { if (iir) {
I915_WRITE_FW(GEN8_GT_IIR(1), tmp); I915_WRITE_FW(GEN8_GT_IIR(1), iir);
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT)) gen8_cs_irq_handler(&dev_priv->ring[VCS],
intel_lrc_irq_handler(&dev_priv->ring[VCS]); iir, GEN8_VCS1_IRQ_SHIFT);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
notify_ring(&dev_priv->ring[VCS]);
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT)) gen8_cs_irq_handler(&dev_priv->ring[VCS2],
intel_lrc_irq_handler(&dev_priv->ring[VCS2]); iir, GEN8_VCS2_IRQ_SHIFT);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
notify_ring(&dev_priv->ring[VCS2]);
} else } else
DRM_ERROR("The master control interrupt lied (GT1)!\n"); DRM_ERROR("The master control interrupt lied (GT1)!\n");
} }
if (master_ctl & GEN8_GT_VECS_IRQ) { if (master_ctl & GEN8_GT_VECS_IRQ) {
u32 tmp = I915_READ_FW(GEN8_GT_IIR(3)); u32 iir = I915_READ_FW(GEN8_GT_IIR(3));
if (tmp) { if (iir) {
I915_WRITE_FW(GEN8_GT_IIR(3), tmp); I915_WRITE_FW(GEN8_GT_IIR(3), iir);
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)) gen8_cs_irq_handler(&dev_priv->ring[VECS],
intel_lrc_irq_handler(&dev_priv->ring[VECS]); iir, GEN8_VECS_IRQ_SHIFT);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
notify_ring(&dev_priv->ring[VECS]);
} else } else
DRM_ERROR("The master control interrupt lied (GT3)!\n"); DRM_ERROR("The master control interrupt lied (GT3)!\n");
} }
if (master_ctl & GEN8_GT_PM_IRQ) { if (master_ctl & GEN8_GT_PM_IRQ) {
u32 tmp = I915_READ_FW(GEN8_GT_IIR(2)); u32 iir = I915_READ_FW(GEN8_GT_IIR(2));
if (tmp & dev_priv->pm_rps_events) { if (iir & dev_priv->pm_rps_events) {
I915_WRITE_FW(GEN8_GT_IIR(2), I915_WRITE_FW(GEN8_GT_IIR(2),
tmp & dev_priv->pm_rps_events); iir & dev_priv->pm_rps_events);
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
gen6_rps_irq_handler(dev_priv, tmp); gen6_rps_irq_handler(dev_priv, iir);
} else } else
DRM_ERROR("The master control interrupt lied (PM)!\n"); DRM_ERROR("The master control interrupt lied (PM)!\n");
} }
@ -1593,7 +1636,7 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
spin_lock(&dev_priv->irq_lock); spin_lock(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) { for_each_pipe(dev_priv, pipe) {
int reg; i915_reg_t reg;
u32 mask, iir_bit = 0; u32 mask, iir_bit = 0;
/* /*
@ -1674,7 +1717,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
*/ */
POSTING_READ(PORT_HOTPLUG_STAT); POSTING_READ(PORT_HOTPLUG_STAT);
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
if (hotplug_trigger) { if (hotplug_trigger) {
@ -1709,6 +1752,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
if (!intel_irqs_enabled(dev_priv)) if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE; return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
while (true) { while (true) {
/* Find, clear, then process each source of interrupt */ /* Find, clear, then process each source of interrupt */
@ -1743,6 +1789,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
} }
out: out:
enable_rpm_wakeref_asserts(dev_priv);
return ret; return ret;
} }
@ -1756,6 +1804,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
if (!intel_irqs_enabled(dev_priv)) if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE; return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
for (;;) { for (;;) {
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
iir = I915_READ(VLV_IIR); iir = I915_READ(VLV_IIR);
@ -1786,6 +1837,8 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
POSTING_READ(GEN8_MASTER_IRQ); POSTING_READ(GEN8_MASTER_IRQ);
} }
enable_rpm_wakeref_asserts(dev_priv);
return ret; return ret;
} }
@ -1795,8 +1848,24 @@ static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_private *dev_priv = to_i915(dev);
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
/*
* Somehow the PCH doesn't seem to really ack the interrupt to the CPU
* unless we touch the hotplug register, even if hotplug_trigger is
* zero. Not acking leads to "The master control interrupt lied (SDE)!"
* errors.
*/
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
if (!hotplug_trigger) {
u32 mask = PORTA_HOTPLUG_STATUS_MASK |
PORTD_HOTPLUG_STATUS_MASK |
PORTC_HOTPLUG_STATUS_MASK |
PORTB_HOTPLUG_STATUS_MASK;
dig_hotplug_reg &= ~mask;
}
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
if (!hotplug_trigger)
return;
intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
dig_hotplug_reg, hpd, dig_hotplug_reg, hpd,
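The hunk above works around a hardware quirk: the PCH apparently only acks the interrupt once PCH_PORT_HOTPLUG has been written, even when hotplug_trigger is zero, so the handler always performs the read-modify-write but strips the write-one-to-clear status bits when there is nothing to service. A toy model of that ack path, with a fake register and a stand-in status mask:

#include <stdint.h>
#include <stdio.h>

#define PORT_STATUS_MASK 0x00ff00ffu    /* stand-in for the PORT[A-D] bits */

static uint32_t fake_hotplug_reg = 0x12345678u; /* fake PCH_PORT_HOTPLUG */

static void ack_hotplug(uint32_t hotplug_trigger)
{
        uint32_t val = fake_hotplug_reg;        /* read */

        /* with no trigger, avoid writing back (and thus clearing) the
         * write-one-to-clear status bits; the write itself still happens
         * so the PCH acks the interrupt */
        if (!hotplug_trigger)
                val &= ~PORT_STATUS_MASK;

        fake_hotplug_reg = val;                 /* write */
        printf("trigger=%08x wrote %08x\n", hotplug_trigger, val);
}

int main(void)
{
        ack_hotplug(0);         /* spurious: status bits stripped first */
        ack_hotplug(1u << 2);   /* real trigger: full value written back */
        return 0;
}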
@ -1811,7 +1880,6 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
int pipe; int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
if (hotplug_trigger)
ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
if (pch_iir & SDE_AUDIO_POWER_MASK) { if (pch_iir & SDE_AUDIO_POWER_MASK) {
@ -1905,7 +1973,6 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
int pipe; int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
if (hotplug_trigger)
ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
@ -2102,6 +2169,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (!intel_irqs_enabled(dev_priv)) if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE; return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
/* We get interrupts on unclaimed registers, so check for this before we /* We get interrupts on unclaimed registers, so check for this before we
* do any I915_{READ,WRITE}. */ * do any I915_{READ,WRITE}. */
intel_uncore_check_errors(dev); intel_uncore_check_errors(dev);
@ -2160,6 +2230,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
POSTING_READ(SDEIER); POSTING_READ(SDEIER);
} }
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
enable_rpm_wakeref_asserts(dev_priv);
return ret; return ret;
} }
@ -2192,6 +2265,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
if (!intel_irqs_enabled(dev_priv)) if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE; return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
if (INTEL_INFO(dev_priv)->gen >= 9) if (INTEL_INFO(dev_priv)->gen >= 9)
aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D; GEN9_AUX_CHANNEL_D;
@ -2199,7 +2275,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
if (!master_ctl) if (!master_ctl)
return IRQ_NONE; goto out;
I915_WRITE_FW(GEN8_MASTER_IRQ, 0); I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
@ -2334,6 +2410,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ_FW(GEN8_MASTER_IRQ); POSTING_READ_FW(GEN8_MASTER_IRQ);
out:
enable_rpm_wakeref_asserts(dev_priv);
return ret; return ret;
} }
@ -2393,6 +2472,13 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
*/ */
if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
DRM_DEBUG_DRIVER("resetting chip\n"); DRM_DEBUG_DRIVER("resetting chip\n");
/*
* In most cases it's guaranteed that we get here with an RPM
* reference held, for example because there is a pending GPU
* request that won't finish until the reset is done. This
* isn't the case at least when we get here by doing a
* simulated reset via debugs, so get an RPM reference.
*/
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(dev_priv);
/* /*
@ -2600,7 +2686,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
DE_PIPE_VBLANK(pipe); DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags); spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_display_irq(dev_priv, bit); ilk_enable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0; return 0;
@ -2625,10 +2711,9 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
unsigned long irqflags; unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags); spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0; return 0;
} }
@ -2655,7 +2740,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
DE_PIPE_VBLANK(pipe); DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags); spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_disable_display_irq(dev_priv, bit); ilk_disable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
} }
@ -2676,9 +2761,7 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
unsigned long irqflags; unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags); spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
} }
@ -2917,6 +3000,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (!i915.enable_hangcheck) if (!i915.enable_hangcheck)
return; return;
/*
* The hangcheck work is synced during runtime suspend, we don't
* require a wakeref. TODO: instead of disabling the asserts make
* sure that we hold a reference when this work is running.
*/
DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
for_each_ring(ring, dev_priv, i) { for_each_ring(ring, dev_priv, i) {
u64 acthd; u64 acthd;
u32 seqno; u32 seqno;
@ -3387,7 +3477,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
* setup is guaranteed to run in single-threaded context. But we * setup is guaranteed to run in single-threaded context. But we
* need it to make the assert_spin_locked happy. */ * need it to make the assert_spin_locked happy. */
spin_lock_irq(&dev_priv->irq_lock); spin_lock_irq(&dev_priv->irq_lock);
ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
spin_unlock_irq(&dev_priv->irq_lock); spin_unlock_irq(&dev_priv->irq_lock);
} }
@ -3787,13 +3877,18 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
u16 flip_mask = u16 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
irqreturn_t ret;
if (!intel_irqs_enabled(dev_priv)) if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE; return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
ret = IRQ_NONE;
iir = I915_READ16(IIR); iir = I915_READ16(IIR);
if (iir == 0) if (iir == 0)
return IRQ_NONE; goto out;
while (iir & ~flip_mask) { while (iir & ~flip_mask) {
/* Can't rely on pipestat interrupt bit in iir as it might /* Can't rely on pipestat interrupt bit in iir as it might
@ -3806,7 +3901,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
for_each_pipe(dev_priv, pipe) { for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe); i915_reg_t reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg); pipe_stats[pipe] = I915_READ(reg);
/* /*
@ -3842,8 +3937,12 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
iir = new_iir; iir = new_iir;
} }
ret = IRQ_HANDLED;
return IRQ_HANDLED; out:
enable_rpm_wakeref_asserts(dev_priv);
return ret;
} }
static void i8xx_irq_uninstall(struct drm_device * dev) static void i8xx_irq_uninstall(struct drm_device * dev)
@ -3974,6 +4073,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if (!intel_irqs_enabled(dev_priv)) if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE; return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
iir = I915_READ(IIR); iir = I915_READ(IIR);
do { do {
bool irq_received = (iir & ~flip_mask) != 0; bool irq_received = (iir & ~flip_mask) != 0;
@ -3989,7 +4091,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
for_each_pipe(dev_priv, pipe) { for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe); i915_reg_t reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg); pipe_stats[pipe] = I915_READ(reg);
/* Clear the PIPE*STAT regs before the IIR */ /* Clear the PIPE*STAT regs before the IIR */
@ -4056,6 +4158,8 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
iir = new_iir; iir = new_iir;
} while (iir & ~flip_mask); } while (iir & ~flip_mask);
enable_rpm_wakeref_asserts(dev_priv);
return ret; return ret;
} }
@ -4195,6 +4299,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (!intel_irqs_enabled(dev_priv)) if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE; return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
iir = I915_READ(IIR); iir = I915_READ(IIR);
for (;;) { for (;;) {
@ -4211,7 +4318,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
for_each_pipe(dev_priv, pipe) { for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe); i915_reg_t reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg); pipe_stats[pipe] = I915_READ(reg);
/* /*
@ -4280,6 +4387,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
iir = new_iir; iir = new_iir;
} }
enable_rpm_wakeref_asserts(dev_priv);
return ret; return ret;
} }
@ -4323,9 +4432,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
/* Let's track the enabled rps events */ /* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) if (IS_VALLEYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */ /* WaGsvRC0ResidencyMethod:vlv */
dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
else else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;


@ -31,7 +31,8 @@ struct i915_params i915 __read_mostly = {
.lvds_channel_mode = 0, .lvds_channel_mode = 0,
.panel_use_ssc = -1, .panel_use_ssc = -1,
.vbt_sdvo_panel_type = -1, .vbt_sdvo_panel_type = -1,
.enable_rc6 = 0, .enable_rc6 = 0,
.enable_dc = -1,
.enable_fbc = -1, .enable_fbc = -1,
.enable_execlists = -1, .enable_execlists = -1,
.enable_hangcheck = true, .enable_hangcheck = true,
@ -82,6 +83,11 @@ MODULE_PARM_DESC(enable_rc6,
"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. " "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
"default: -1 (use per-chip default)"); "default: -1 (use per-chip default)");
module_param_named_unsafe(enable_dc, i915.enable_dc, int, 0400);
MODULE_PARM_DESC(enable_dc,
"Enable power-saving display C-states. "
"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600); module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600);
MODULE_PARM_DESC(enable_fbc, MODULE_PARM_DESC(enable_fbc,
"Enable frame buffer compression for power savings " "Enable frame buffer compression for power savings "
@ -114,7 +120,7 @@ MODULE_PARM_DESC(enable_hangcheck,
module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400); module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400);
MODULE_PARM_DESC(enable_ppgtt, MODULE_PARM_DESC(enable_ppgtt,
"Override PPGTT usage. " "Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)"); "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400); module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400);
MODULE_PARM_DESC(enable_execlists, MODULE_PARM_DESC(enable_execlists,
@ -128,7 +134,7 @@ module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, i
MODULE_PARM_DESC(preliminary_hw_support, MODULE_PARM_DESC(preliminary_hw_support,
"Enable preliminary hardware support."); "Enable preliminary hardware support.");
module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0600); module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400);
MODULE_PARM_DESC(disable_power_well, MODULE_PARM_DESC(disable_power_well,
"Disable display power wells when possible " "Disable display power wells when possible "
"(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)"); "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");



@ -69,13 +69,13 @@ void i915_check_vgpu(struct drm_device *dev)
if (!IS_HASWELL(dev)) if (!IS_HASWELL(dev))
return; return;
magic = readq(dev_priv->regs + vgtif_reg(magic)); magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
if (magic != VGT_MAGIC) if (magic != VGT_MAGIC)
return; return;
version = INTEL_VGT_IF_VERSION_ENCODE( version = INTEL_VGT_IF_VERSION_ENCODE(
readw(dev_priv->regs + vgtif_reg(version_major)), __raw_i915_read16(dev_priv, vgtif_reg(version_major)),
readw(dev_priv->regs + vgtif_reg(version_minor))); __raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
if (version != INTEL_VGT_IF_VERSION) { if (version != INTEL_VGT_IF_VERSION) {
DRM_INFO("VGT interface version mismatch!\n"); DRM_INFO("VGT interface version mismatch!\n");
return; return;


@ -92,14 +92,10 @@ struct vgt_if {
uint32_t g2v_notify; uint32_t g2v_notify;
uint32_t rsv6[7]; uint32_t rsv6[7];
uint32_t pdp0_lo; struct {
uint32_t pdp0_hi; uint32_t lo;
uint32_t pdp1_lo; uint32_t hi;
uint32_t pdp1_hi; } pdp[4];
uint32_t pdp2_lo;
uint32_t pdp2_hi;
uint32_t pdp3_lo;
uint32_t pdp3_hi;
uint32_t execlist_context_descriptor_lo; uint32_t execlist_context_descriptor_lo;
uint32_t execlist_context_descriptor_hi; uint32_t execlist_context_descriptor_hi;
@ -108,7 +104,7 @@ struct vgt_if {
} __packed; } __packed;
#define vgtif_reg(x) \ #define vgtif_reg(x) \
(VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x) _MMIO((VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x))
/* vGPU display status to be used by the host side */ /* vGPU display status to be used by the host side */
#define VGT_DRV_DISPLAY_NOT_READY 0 #define VGT_DRV_DISPLAY_NOT_READY 0
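The rewritten vgtif_reg() keeps the classic offsetof() idiom, taking a member's address on a casted NULL pointer to compute its offset, and now wraps the result in _MMIO() so it yields an i915_reg_t. A userspace illustration of the offset computation, with VGT_PVINFO_PAGE replaced by a made-up base and a trimmed struct:

#include <stddef.h>
#include <stdio.h>

struct demo_if {
        unsigned int magic;
        unsigned int version_major;
        unsigned int version_minor;
};

#define DEMO_PVINFO_PAGE 0x78000ul      /* made-up base address */

/* same idiom as vgtif_reg(x): base + member offset via a casted NULL */
#define demo_reg(x) \
        (DEMO_PVINFO_PAGE + (long)&((struct demo_if *)NULL)->x)

int main(void)
{
        printf("magic         @ 0x%lx\n", demo_reg(magic));
        printf("version_major @ 0x%lx\n", demo_reg(version_major));
        /* the better-defined spelling of the same offset: */
        printf("offsetof      = %zu\n", offsetof(struct demo_if, version_major));
        return 0;
}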


@ -94,6 +94,10 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base); __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
crtc_state->update_pipe = false; crtc_state->update_pipe = false;
crtc_state->disable_lp_wm = false;
crtc_state->disable_cxsr = false;
crtc_state->update_wm_pre = false;
crtc_state->update_wm_post = false;
return &crtc_state->base; return &crtc_state->base;
} }
@ -205,8 +209,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
* but since this plane is unchanged just do the * but since this plane is unchanged just do the
* minimum required validation. * minimum required validation.
*/ */
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
intel_crtc->atomic.wait_for_flips = true;
crtc_state->base.planes_changed = true; crtc_state->base.planes_changed = true;
} }


@ -84,6 +84,7 @@ intel_plane_duplicate_state(struct drm_plane *plane)
state = &intel_state->base; state = &intel_state->base;
__drm_atomic_helper_plane_duplicate_state(plane, state); __drm_atomic_helper_plane_duplicate_state(plane, state);
intel_state->wait_req = NULL;
return state; return state;
} }
@ -100,6 +101,7 @@ void
intel_plane_destroy_state(struct drm_plane *plane, intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state) struct drm_plane_state *state)
{ {
WARN_ON(state && to_intel_plane_state(state)->wait_req);
drm_atomic_helper_plane_destroy_state(plane, state); drm_atomic_helper_plane_destroy_state(plane, state);
} }


@ -161,9 +161,9 @@ static bool audio_rate_need_prog(struct intel_crtc *crtc,
} }
static bool intel_eld_uptodate(struct drm_connector *connector, static bool intel_eld_uptodate(struct drm_connector *connector,
int reg_eldv, uint32_t bits_eldv, i915_reg_t reg_eldv, uint32_t bits_eldv,
int reg_elda, uint32_t bits_elda, i915_reg_t reg_elda, uint32_t bits_elda,
int reg_edid) i915_reg_t reg_edid)
{ {
struct drm_i915_private *dev_priv = connector->dev->dev_private; struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld; uint8_t *eld = connector->eld;
@ -364,8 +364,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
enum port port = intel_dig_port->port; enum port port = intel_dig_port->port;
enum pipe pipe = intel_crtc->pipe; enum pipe pipe = intel_crtc->pipe;
uint32_t tmp, eldv; uint32_t tmp, eldv;
int aud_config; i915_reg_t aud_config, aud_cntrl_st2;
int aud_cntrl_st2;
DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n", DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
port_name(port), pipe_name(pipe)); port_name(port), pipe_name(pipe));
@ -376,7 +375,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
if (HAS_PCH_IBX(dev_priv->dev)) { if (HAS_PCH_IBX(dev_priv->dev)) {
aud_config = IBX_AUD_CFG(pipe); aud_config = IBX_AUD_CFG(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2; aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else if (IS_VALLEYVIEW(dev_priv)) { } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
aud_config = VLV_AUD_CFG(pipe); aud_config = VLV_AUD_CFG(pipe);
aud_cntrl_st2 = VLV_AUD_CNTL_ST2; aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
} else { } else {
@ -416,10 +415,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
uint32_t eldv; uint32_t eldv;
uint32_t tmp; uint32_t tmp;
int len, i; int len, i;
int hdmiw_hdmiedid; i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
int aud_config;
int aud_cntl_st;
int aud_cntrl_st2;
DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n", DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
port_name(port), pipe_name(pipe), drm_eld_size(eld)); port_name(port), pipe_name(pipe), drm_eld_size(eld));
@ -439,7 +435,8 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
aud_config = IBX_AUD_CFG(pipe); aud_config = IBX_AUD_CFG(pipe);
aud_cntl_st = IBX_AUD_CNTL_ST(pipe); aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2; aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else if (IS_VALLEYVIEW(connector->dev)) { } else if (IS_VALLEYVIEW(connector->dev) ||
IS_CHERRYVIEW(connector->dev)) {
hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe); hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
aud_config = VLV_AUD_CFG(pipe); aud_config = VLV_AUD_CFG(pipe);
aud_cntl_st = VLV_AUD_CNTL_ST(pipe); aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
@ -525,6 +522,12 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
dev_priv->display.audio_codec_enable(connector, intel_encoder, dev_priv->display.audio_codec_enable(connector, intel_encoder,
adjusted_mode); adjusted_mode);
mutex_lock(&dev_priv->av_mutex);
intel_dig_port->audio_connector = connector;
/* referenced in audio callbacks */
dev_priv->dig_port_map[port] = intel_encoder;
mutex_unlock(&dev_priv->av_mutex);
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port); acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
} }
@ -548,6 +551,11 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
if (dev_priv->display.audio_codec_disable) if (dev_priv->display.audio_codec_disable)
dev_priv->display.audio_codec_disable(intel_encoder); dev_priv->display.audio_codec_disable(intel_encoder);
mutex_lock(&dev_priv->av_mutex);
intel_dig_port->audio_connector = NULL;
dev_priv->dig_port_map[port] = NULL;
mutex_unlock(&dev_priv->av_mutex);
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port); acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
} }
@ -591,7 +599,7 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
struct drm_i915_private *dev_priv = dev_to_i915(dev); struct drm_i915_private *dev_priv = dev_to_i915(dev);
u32 tmp; u32 tmp;
if (!IS_SKYLAKE(dev_priv)) if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
return; return;
/* /*
@ -632,44 +640,40 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
int port, int rate) int port, int rate)
{ {
struct drm_i915_private *dev_priv = dev_to_i915(dev); struct drm_i915_private *dev_priv = dev_to_i915(dev);
struct drm_device *drm_dev = dev_priv->dev;
struct intel_encoder *intel_encoder; struct intel_encoder *intel_encoder;
struct intel_digital_port *intel_dig_port;
struct intel_crtc *crtc; struct intel_crtc *crtc;
struct drm_display_mode *mode; struct drm_display_mode *mode;
struct i915_audio_component *acomp = dev_priv->audio_component; struct i915_audio_component *acomp = dev_priv->audio_component;
enum pipe pipe = -1; enum pipe pipe = INVALID_PIPE;
u32 tmp; u32 tmp;
int n; int n;
int err = 0;
/* HSW, BDW SKL need this fix */ /* HSW, BDW, SKL, KBL need this fix */
if (!IS_SKYLAKE(dev_priv) && if (!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv) &&
!IS_BROADWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
!IS_HASWELL(dev_priv)) !IS_HASWELL(dev_priv))
return 0; return 0;
mutex_lock(&dev_priv->av_mutex); mutex_lock(&dev_priv->av_mutex);
/* 1. get the pipe */ /* 1. get the pipe */
for_each_intel_encoder(drm_dev, intel_encoder) { intel_encoder = dev_priv->dig_port_map[port];
if (intel_encoder->type != INTEL_OUTPUT_HDMI) /* intel_encoder might be NULL for DP MST */
continue; if (!intel_encoder || !intel_encoder->base.crtc ||
intel_dig_port = enc_to_dig_port(&intel_encoder->base); intel_encoder->type != INTEL_OUTPUT_HDMI) {
if (port == intel_dig_port->port) { DRM_DEBUG_KMS("no valid port %c\n", port_name(port));
crtc = to_intel_crtc(intel_encoder->base.crtc); err = -ENODEV;
if (!crtc) { goto unlock;
DRM_DEBUG_KMS("%s: crtc is NULL\n", __func__);
continue;
}
pipe = crtc->pipe;
break;
}
} }
crtc = to_intel_crtc(intel_encoder->base.crtc);
pipe = crtc->pipe;
if (pipe == INVALID_PIPE) { if (pipe == INVALID_PIPE) {
DRM_DEBUG_KMS("no pipe for the port %c\n", port_name(port)); DRM_DEBUG_KMS("no pipe for the port %c\n", port_name(port));
mutex_unlock(&dev_priv->av_mutex); err = -ENODEV;
return -ENODEV; goto unlock;
} }
DRM_DEBUG_KMS("pipe %c connects port %c\n", DRM_DEBUG_KMS("pipe %c connects port %c\n",
pipe_name(pipe), port_name(port)); pipe_name(pipe), port_name(port));
mode = &crtc->config->base.adjusted_mode; mode = &crtc->config->base.adjusted_mode;
@ -682,8 +686,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
tmp = I915_READ(HSW_AUD_CFG(pipe)); tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_PROG_ENABLE; tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
I915_WRITE(HSW_AUD_CFG(pipe), tmp); I915_WRITE(HSW_AUD_CFG(pipe), tmp);
mutex_unlock(&dev_priv->av_mutex); goto unlock;
return 0;
} }
n = audio_config_get_n(mode, rate); n = audio_config_get_n(mode, rate);
@ -693,8 +696,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
tmp = I915_READ(HSW_AUD_CFG(pipe)); tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_PROG_ENABLE; tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
I915_WRITE(HSW_AUD_CFG(pipe), tmp); I915_WRITE(HSW_AUD_CFG(pipe), tmp);
mutex_unlock(&dev_priv->av_mutex); goto unlock;
return 0;
} }
/* 3. set the N/CTS/M */ /* 3. set the N/CTS/M */
@ -702,8 +704,37 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
tmp = audio_config_setup_n_reg(n, tmp); tmp = audio_config_setup_n_reg(n, tmp);
I915_WRITE(HSW_AUD_CFG(pipe), tmp); I915_WRITE(HSW_AUD_CFG(pipe), tmp);
unlock:
mutex_unlock(&dev_priv->av_mutex); mutex_unlock(&dev_priv->av_mutex);
return 0; return err;
}
static int i915_audio_component_get_eld(struct device *dev, int port,
bool *enabled,
unsigned char *buf, int max_bytes)
{
struct drm_i915_private *dev_priv = dev_to_i915(dev);
struct intel_encoder *intel_encoder;
struct intel_digital_port *intel_dig_port;
const u8 *eld;
int ret = -EINVAL;
mutex_lock(&dev_priv->av_mutex);
intel_encoder = dev_priv->dig_port_map[port];
/* intel_encoder might be NULL for DP MST */
if (intel_encoder) {
ret = 0;
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
*enabled = intel_dig_port->audio_connector != NULL;
if (*enabled) {
eld = intel_dig_port->audio_connector->eld;
ret = drm_eld_size(eld);
memcpy(buf, eld, min(max_bytes, ret));
}
}
mutex_unlock(&dev_priv->av_mutex);
return ret;
} }
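The new get_eld hook has a copy-out contract worth spelling out: it reports the true ELD size even when the caller's buffer is smaller, and copies only min(max_bytes, size), so a caller can detect truncation and re-query. A small userspace model of that contract; the function shape is simplified and the names are hypothetical:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* simplified model of the get_eld contract above */
static int get_eld(const unsigned char *eld, int eld_size,
                   bool *enabled, unsigned char *buf, int max_bytes)
{
        if (!eld) {
                *enabled = false;       /* e.g. no connector on this port */
                return 0;
        }

        *enabled = true;
        memcpy(buf, eld, max_bytes < eld_size ? max_bytes : eld_size);
        return eld_size;        /* may exceed max_bytes: caller can re-query */
}

int main(void)
{
        unsigned char eld[48] = "fake ELD payload";
        unsigned char buf[16];
        bool enabled = false;

        int n = get_eld(eld, sizeof(eld), &enabled, buf, sizeof(buf));
        printf("enabled=%d size=%d buffer=%zu\n", enabled, n, sizeof(buf));
        return 0;
}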
static const struct i915_audio_component_ops i915_audio_component_ops = { static const struct i915_audio_component_ops i915_audio_component_ops = {
@ -713,6 +744,7 @@ static const struct i915_audio_component_ops i915_audio_component_ops = {
.codec_wake_override = i915_audio_component_codec_wake_override, .codec_wake_override = i915_audio_component_codec_wake_override,
.get_cdclk_freq = i915_audio_component_get_cdclk_freq, .get_cdclk_freq = i915_audio_component_get_cdclk_freq,
.sync_audio_rate = i915_audio_component_sync_audio_rate, .sync_audio_rate = i915_audio_component_sync_audio_rate,
.get_eld = i915_audio_component_get_eld,
}; };
static int i915_audio_component_bind(struct device *i915_dev, static int i915_audio_component_bind(struct device *i915_dev,
@ -773,6 +805,16 @@ static const struct component_ops i915_audio_component_bind_ops = {
*/ */
void i915_audio_component_init(struct drm_i915_private *dev_priv) void i915_audio_component_init(struct drm_i915_private *dev_priv)
{ {
int ret;
// ret = component_add(dev_priv->dev->dev, &i915_audio_component_bind_ops);
// if (ret < 0) {
// DRM_ERROR("failed to add audio component (%d)\n", ret);
// /* continue with reduced functionality */
// return;
// }
dev_priv->audio_component_registered = true;
} }
/** /**
@ -784,4 +826,9 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
*/ */
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv) void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
{ {
if (!dev_priv->audio_component_registered)
return;
// component_del(dev_priv->dev->dev, &i915_audio_component_bind_ops);
dev_priv->audio_component_registered = false;
} }


@ -24,7 +24,7 @@
* Eric Anholt <eric@anholt.net> * Eric Anholt <eric@anholt.net>
* *
*/ */
#include <linux/dmi.h>
#include <drm/drm_dp_helper.h> #include <drm/drm_dp_helper.h>
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/i915_drm.h> #include <drm/i915_drm.h>
@ -332,10 +332,10 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
drm_mode_debug_printmodeline(panel_fixed_mode); drm_mode_debug_printmodeline(panel_fixed_mode);
} }
static int intel_bios_ssc_frequency(struct drm_device *dev, static int intel_bios_ssc_frequency(struct drm_i915_private *dev_priv,
bool alternate) bool alternate)
{ {
switch (INTEL_INFO(dev)->gen) { switch (INTEL_INFO(dev_priv)->gen) {
case 2: case 2:
return alternate ? 66667 : 48000; return alternate ? 66667 : 48000;
case 3: case 3:
@ -350,16 +350,20 @@ static void
parse_general_features(struct drm_i915_private *dev_priv, parse_general_features(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb) const struct bdb_header *bdb)
{ {
struct drm_device *dev = dev_priv->dev;
const struct bdb_general_features *general; const struct bdb_general_features *general;
general = find_section(bdb, BDB_GENERAL_FEATURES); general = find_section(bdb, BDB_GENERAL_FEATURES);
if (general) { if (!general)
return;
dev_priv->vbt.int_tv_support = general->int_tv_support; dev_priv->vbt.int_tv_support = general->int_tv_support;
/* int_crt_support can't be trusted on earlier platforms */
if (bdb->version >= 155 &&
(HAS_DDI(dev_priv) || IS_VALLEYVIEW(dev_priv)))
dev_priv->vbt.int_crt_support = general->int_crt_support; dev_priv->vbt.int_crt_support = general->int_crt_support;
dev_priv->vbt.lvds_use_ssc = general->enable_ssc; dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
dev_priv->vbt.lvds_ssc_freq = dev_priv->vbt.lvds_ssc_freq =
intel_bios_ssc_frequency(dev, general->ssc_freq); intel_bios_ssc_frequency(dev_priv, general->ssc_freq);
dev_priv->vbt.display_clock_mode = general->display_clock_mode; dev_priv->vbt.display_clock_mode = general->display_clock_mode;
dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
@ -370,7 +374,6 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->vbt.display_clock_mode, dev_priv->vbt.display_clock_mode,
dev_priv->vbt.fdi_rx_polarity_inverted); dev_priv->vbt.fdi_rx_polarity_inverted);
} }
}
static void static void
parse_general_definitions(struct drm_i915_private *dev_priv, parse_general_definitions(struct drm_i915_private *dev_priv,
@ -1054,10 +1057,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
static void parse_ddi_ports(struct drm_i915_private *dev_priv, static void parse_ddi_ports(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb) const struct bdb_header *bdb)
{ {
struct drm_device *dev = dev_priv->dev;
enum port port; enum port port;
if (!HAS_DDI(dev)) if (!HAS_DDI(dev_priv))
return; return;
if (!dev_priv->vbt.child_dev_num) if (!dev_priv->vbt.child_dev_num)
@ -1170,7 +1172,6 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
static void static void
init_vbt_defaults(struct drm_i915_private *dev_priv) init_vbt_defaults(struct drm_i915_private *dev_priv)
{ {
struct drm_device *dev = dev_priv->dev;
enum port port; enum port port;
dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC; dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
@ -1195,8 +1196,8 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
* Core/SandyBridge/IvyBridge use alternative (120MHz) reference * Core/SandyBridge/IvyBridge use alternative (120MHz) reference
* clock for LVDS. * clock for LVDS.
*/ */
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev_priv,
!HAS_PCH_SPLIT(dev)); !HAS_PCH_SPLIT(dev_priv));
DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq); DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
for (port = PORT_A; port < I915_MAX_PORTS; port++) { for (port = PORT_A; port < I915_MAX_PORTS; port++) {
@ -1211,88 +1212,79 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
} }
} }
static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id) static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
{ {
DRM_DEBUG_KMS("Falling back to manually reading VBT from " const void *_vbt = vbt;
"VBIOS ROM for %s\n",
id->ident); return _vbt + vbt->bdb_offset;
return 1;
} }
static const struct dmi_system_id intel_no_opregion_vbt[] = { /**
* intel_bios_is_valid_vbt - does the given buffer contain a valid VBT
* @buf: pointer to a buffer to validate
* @size: size of the buffer
*
* Returns true on valid VBT.
*/
bool intel_bios_is_valid_vbt(const void *buf, size_t size)
{ {
.callback = intel_no_opregion_vbt_callback, const struct vbt_header *vbt = buf;
.ident = "ThinkCentre A57",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
},
},
{ }
};
static const struct bdb_header *validate_vbt(const void *base,
size_t size,
const void *_vbt,
const char *source)
{
size_t offset = _vbt - base;
const struct vbt_header *vbt = _vbt;
const struct bdb_header *bdb; const struct bdb_header *bdb;
if (offset + sizeof(struct vbt_header) > size) { if (!vbt)
return false;
if (sizeof(struct vbt_header) > size) {
DRM_DEBUG_DRIVER("VBT header incomplete\n"); DRM_DEBUG_DRIVER("VBT header incomplete\n");
return NULL; return false;
} }
if (memcmp(vbt->signature, "$VBT", 4)) { if (memcmp(vbt->signature, "$VBT", 4)) {
DRM_DEBUG_DRIVER("VBT invalid signature\n"); DRM_DEBUG_DRIVER("VBT invalid signature\n");
return NULL; return false;
} }
offset += vbt->bdb_offset; if (vbt->bdb_offset + sizeof(struct bdb_header) > size) {
if (offset + sizeof(struct bdb_header) > size) {
DRM_DEBUG_DRIVER("BDB header incomplete\n"); DRM_DEBUG_DRIVER("BDB header incomplete\n");
return NULL; return false;
} }
bdb = base + offset; bdb = get_bdb_header(vbt);
if (offset + bdb->bdb_size > size) { if (vbt->bdb_offset + bdb->bdb_size > size) {
DRM_DEBUG_DRIVER("BDB incomplete\n"); DRM_DEBUG_DRIVER("BDB incomplete\n");
return NULL; return false;
} }
DRM_DEBUG_KMS("Using VBT from %s: %20s\n", return vbt;
source, vbt->signature);
return bdb;
} }
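intel_bios_is_valid_vbt() is essentially a bounds-checking ladder: no header field is dereferenced until the bytes it occupies are known to fit inside the buffer. A compact userspace version of the same checks, with the structs trimmed to the fields actually used:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* trimmed to the fields the checks use; the real structs carry more */
struct vbt_hdr { char signature[20]; uint32_t bdb_offset; };
struct bdb_hdr { char signature[16]; uint32_t bdb_size; };

static bool is_valid_vbt(const void *buf, size_t size)
{
        const struct vbt_hdr *vbt = buf;
        const struct bdb_hdr *bdb;

        if (!vbt || sizeof(*vbt) > size)
                return false;                   /* VBT header incomplete */
        if (memcmp(vbt->signature, "$VBT", 4))
                return false;                   /* invalid signature */
        if (vbt->bdb_offset + sizeof(*bdb) > size)
                return false;                   /* BDB header incomplete */

        bdb = (const void *)((const char *)buf + vbt->bdb_offset);
        if (vbt->bdb_offset + bdb->bdb_size > size)
                return false;                   /* BDB incomplete */

        return true;
}

int main(void)
{
        unsigned char blob[64] = "$VBT";

        printf("%d\n", is_valid_vbt(blob, 8));            /* 0: truncated */
        printf("%d\n", is_valid_vbt(blob, sizeof(blob))); /* 1: bounds hold */
        return 0;
}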
static const struct bdb_header *find_vbt(void __iomem *bios, size_t size) static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
{ {
const struct bdb_header *bdb = NULL;
size_t i; size_t i;
/* Scour memory looking for the VBT signature. */ /* Scour memory looking for the VBT signature. */
for (i = 0; i + 4 < size; i++) { for (i = 0; i + 4 < size; i++) {
if (ioread32(bios + i) == *((const u32 *) "$VBT")) { void *vbt;
/*
* This is the one place where we explicitly discard the if (ioread32(bios + i) != *((const u32 *) "$VBT"))
* address space (__iomem) of the BIOS/VBT. From now on continue;
* everything is based on 'base', and treated as regular
* memory. /*
*/ * This is the one place where we explicitly discard the address
void *_bios = (void __force *) bios; * space (__iomem) of the BIOS/VBT.
*/
vbt = (void __force *) bios + i;
if (intel_bios_is_valid_vbt(vbt, size - i))
return vbt;
bdb = validate_vbt(_bios, size, _bios + i, "PCI ROM");
break; break;
} }
}
return bdb; return NULL;
} }
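find_vbt() is a brute-force signature scan over the mapped ROM: every 32-bit word is compared against "$VBT", and the first hit is handed to the validator together with the remaining size; if that candidate fails validation the scan gives up rather than continuing. A userspace analogue of the loop, with a stand-in validator:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in validator: accept any candidate with 32 bytes of room */
static int looks_valid(const unsigned char *p, size_t remaining)
{
        (void)p;
        return remaining >= 32;
}

static const unsigned char *scan_for_vbt(const unsigned char *rom, size_t size)
{
        uint32_t magic, word;
        size_t i;

        memcpy(&magic, "$VBT", 4);
        for (i = 0; i + 4 < size; i++) {
                memcpy(&word, rom + i, 4);      /* like ioread32(bios + i) */
                if (word != magic)
                        continue;
                if (looks_valid(rom + i, size - i))
                        return rom + i;
                break;          /* first hit failed validation: give up */
        }
        return NULL;
}

int main(void)
{
        unsigned char rom[128] = { 0 };
        const unsigned char *hit;

        memcpy(rom + 40, "$VBT", 4);
        hit = scan_for_vbt(rom, sizeof(rom));
        printf("found at offset %ld\n", hit ? (long)(hit - rom) : -1L);
        return 0;
}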
@@ -1301,37 +1293,39 @@ static const struct bdb_header *find_vbt(void __iomem *bios, size_t size)
 /**
- * intel_parse_bios - find VBT and initialize settings from the BIOS
+ * intel_bios_init - find VBT and initialize settings from the BIOS
  * @dev: DRM device
  *
  * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
  * Returns 0 on success, nonzero on failure.
  */
 int
-intel_parse_bios(struct drm_device *dev)
+intel_bios_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct pci_dev *pdev = dev->pdev;
-	const struct bdb_header *bdb = NULL;
+	struct pci_dev *pdev = dev_priv->dev->pdev;
+	const struct vbt_header *vbt = dev_priv->opregion.vbt;
+	const struct bdb_header *bdb;
 	u8 __iomem *bios = NULL;

-	if (HAS_PCH_NOP(dev))
+	if (HAS_PCH_NOP(dev_priv))
 		return -ENODEV;

 	init_vbt_defaults(dev_priv);

-	/* XXX Should this validation be moved to intel_opregion.c? */
-	if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt)
-		bdb = validate_vbt(dev_priv->opregion.header, OPREGION_SIZE,
-				   dev_priv->opregion.vbt, "OpRegion");
-
-	if (bdb == NULL) {
+	if (!vbt) {
 		size_t size;

 		bios = pci_map_rom(pdev, &size);
 		if (!bios)
 			return -1;

-		bdb = find_vbt(bios, size);
-		if (!bdb) {
+		vbt = find_vbt(bios, size);
+		if (!vbt) {
 			pci_unmap_rom(pdev, bios);
 			return -1;
 		}
+
+		DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
 	}

+	bdb = get_bdb_header(vbt);
+
+	DRM_DEBUG_KMS("VBT signature \"%.*s\", BDB version %d\n",
+		      (int)sizeof(vbt->signature), vbt->signature, bdb->version);
+
 	/* Grab useful general definitions */
 	parse_general_features(dev_priv, bdb);
 	parse_general_definitions(dev_priv, bdb);
@@ -1351,42 +1345,3 @@ intel_parse_bios(struct drm_device *dev)
 	return 0;
 }
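A hypothetical call site, for illustration only (the actual caller sits in the driver load path, which is not part of this diff):

	/* Illustration: a failed intel_bios_init() is treated as non-fatal,
	 * the driver falls back to the VBT defaults set above. */
	if (intel_bios_init(dev_priv) != 0)
		DRM_INFO("failed to find VBIOS tables\n");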
/**
* intel_bios_is_port_present - is the specified digital port present
* @dev_priv: i915 device instance
* @port: port to check
*
* Return true if the device in %port is present.
*/
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
{
static const struct {
u16 dp, hdmi;
} port_mapping[] = {
[PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
[PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
[PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
};
int i;
/* FIXME maybe deal with port A as well? */
if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
return false;
if (!dev_priv->vbt.child_dev_num)
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
const union child_device_config *p_child =
&dev_priv->vbt.child_dev[i];
if ((p_child->common.dvo_port == port_mapping[port].dp ||
p_child->common.dvo_port == port_mapping[port].hdmi) &&
(p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
return true;
}
return false;
}
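Illustrative use of the new helper, assuming a caller that wants to skip initializing ports the VBT declares absent (such a caller is not shown in this diff):

	/* Illustration: consult the VBT child device list before DDI init */
	if (!intel_bios_is_port_present(dev_priv, PORT_E))
		DRM_DEBUG_KMS("VBT reports port E absent, skipping init\n");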
@@ -28,8 +28,6 @@
 #ifndef _I830_BIOS_H_
 #define _I830_BIOS_H_

-#include <drm/drmP.h>
-
 struct vbt_header {
 	u8 signature[20];		/**< Always starts with 'VBT$' */
 	u16 version;			/**< decimal */
@@ -588,8 +586,6 @@ struct bdb_psr {
 	struct psr_table psr_table[16];
 } __packed;

-int intel_parse_bios(struct drm_device *dev);
-
 /*
  * Driver<->VBIOS interaction occurs through scratch bits in
  * GR18 & SWF*.
@@ -50,7 +50,7 @@ struct intel_crt {
 	 * encoder's enable/disable callbacks */
 	struct intel_connector *connector;
 	bool force_hotplug_required;
-	u32 adpa_reg;
+	i915_reg_t adpa_reg;
 };

 static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
@@ -71,22 +71,29 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
 	struct intel_crt *crt = intel_encoder_to_crt(encoder);
 	enum intel_display_power_domain power_domain;
 	u32 tmp;
+	bool ret;

 	power_domain = intel_display_port_power_domain(encoder);
-	if (!intel_display_power_is_enabled(dev_priv, power_domain))
+	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 		return false;

+	ret = false;
+
 	tmp = I915_READ(crt->adpa_reg);

 	if (!(tmp & ADPA_DAC_ENABLE))
-		return false;
+		goto out;

 	if (HAS_PCH_CPT(dev))
 		*pipe = PORT_TO_PIPE_CPT(tmp);
 	else
 		*pipe = PORT_TO_PIPE(tmp);

-	return true;
+	ret = true;
+out:
+	intel_display_power_put(dev_priv, power_domain);
+
+	return ret;
 }
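The change above is the pattern this commit applies to every get_hw_state-style hook in these files: the racy intel_display_power_is_enabled() check is replaced by intel_display_power_get_if_enabled(), which takes a power reference only if the domain is already up, and every exit path must then drop that reference. Sketched generically (illustrative only; read_state() is a hypothetical stand-in for the register reads):

	bool ret = false;

	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;	/* domain was off: no reference was taken */

	ret = read_state();	/* hypothetical stand-in for the MMIO reads */

	intel_display_power_put(dev_priv, power_domain);	/* balance the ref */
	return ret;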
 static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
@@ -445,7 +452,6 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
 	struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
 	struct edid *edid;
 	struct i2c_adapter *i2c;
-	bool ret = false;

 	BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);

@@ -462,17 +468,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
 		 */
 		if (!is_digital) {
 			DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
-			ret = true;
-		} else {
-			DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+			return true;
 		}
+
+		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
 	} else {
 		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
 	}

 	kfree(edid);

-	return ret;
+	return false;
 }
 static enum drm_connector_status
@@ -487,12 +493,8 @@ intel_crt_load_detect(struct intel_crt *crt)
 	uint32_t vsample;
 	uint32_t vblank, vblank_start, vblank_end;
 	uint32_t dsl;
-	uint32_t bclrpat_reg;
-	uint32_t vtotal_reg;
-	uint32_t vblank_reg;
-	uint32_t vsync_reg;
-	uint32_t pipeconf_reg;
-	uint32_t pipe_dsl_reg;
+	i915_reg_t bclrpat_reg, vtotal_reg,
+		vblank_reg, vsync_reg, pipeconf_reg, pipe_dsl_reg;
 	uint8_t	st00;
 	enum drm_connector_status status;

@@ -525,7 +527,7 @@ intel_crt_load_detect(struct intel_crt *crt)
 		/* Wait for next Vblank to substitute
 		 * border color for Color info */
 		intel_wait_for_vblank(dev, pipe);
-		st00 = I915_READ8(VGA_MSR_WRITE);
+		st00 = I915_READ8(_VGA_MSR_WRITE);
 		status = ((st00 & (1 << 4)) != 0) ?
 			connector_status_connected :
 			connector_status_disconnected;
@@ -570,7 +572,7 @@ intel_crt_load_detect(struct intel_crt *crt)
 		do {
 			count++;
 			/* Read the ST00 VGA status register */
-			st00 = I915_READ8(VGA_MSR_WRITE);
+			st00 = I915_READ8(_VGA_MSR_WRITE);
 			if (st00 & (1 << 4))
 				detect++;
 		} while ((I915_READ(pipe_dsl_reg) == dsl));
@@ -788,11 +790,37 @@ void intel_crt_init(struct drm_device *dev)
 	struct intel_crt *crt;
 	struct intel_connector *intel_connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	i915_reg_t adpa_reg;
+	u32 adpa;

 	/* Skip machines without VGA that falsely report hotplug events */
 	if (dmi_check_system(intel_no_crt))
 		return;

+	if (HAS_PCH_SPLIT(dev))
+		adpa_reg = PCH_ADPA;
+	else if (IS_VALLEYVIEW(dev))
+		adpa_reg = VLV_ADPA;
+	else
+		adpa_reg = ADPA;
+
+	adpa = I915_READ(adpa_reg);
+	if ((adpa & ADPA_DAC_ENABLE) == 0) {
+		/*
+		 * On some machines (some IVB at least) CRT can be
+		 * fused off, but there's no known fuse bit to
+		 * indicate that. On these machines the ADPA register
+		 * works normally, except the DAC enable bit won't
+		 * take. So the only way to tell is attempt to enable
+		 * it and see what happens.
+		 */
+		I915_WRITE(adpa_reg, adpa | ADPA_DAC_ENABLE |
+			   ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+		if ((I915_READ(adpa_reg) & ADPA_DAC_ENABLE) == 0)
+			return;
+
+		I915_WRITE(adpa_reg, adpa);
+	}
+
 	crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
 	if (!crt)
 		return;
@@ -809,7 +837,7 @@ void intel_crt_init(struct drm_device *dev)
 			   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);

 	drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
-			 DRM_MODE_ENCODER_DAC);
+			 DRM_MODE_ENCODER_DAC, NULL);

 	intel_connector_attach_encoder(intel_connector, &crt->base);

@@ -826,15 +854,10 @@ void intel_crt_init(struct drm_device *dev)
 	connector->interlace_allowed = 1;
 	connector->doublescan_allowed = 0;

-	if (HAS_PCH_SPLIT(dev))
-		crt->adpa_reg = PCH_ADPA;
-	else if (IS_VALLEYVIEW(dev))
-		crt->adpa_reg = VLV_ADPA;
-	else
-		crt->adpa_reg = ADPA;
+	crt->adpa_reg = adpa_reg;

 	crt->base.compute_config = intel_crt_compute_config;
-	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
 		crt->base.disable = pch_disable_crt;
 		crt->base.post_disable = pch_post_disable_crt;
 	} else {
@@ -47,21 +47,10 @@
 MODULE_FIRMWARE(I915_CSR_SKL);
 MODULE_FIRMWARE(I915_CSR_BXT);

-/*
- * SKL CSR registers for DC5 and DC6
- */
-#define CSR_PROGRAM(i)			(0x80000 + (i) * 4)
-#define CSR_SSP_BASE_ADDR_GEN9	0x00002FC0
-#define CSR_HTP_ADDR_SKL	0x00500034
-#define CSR_SSP_BASE		0x8F074
-#define CSR_HTP_SKL		0x8F004
-#define CSR_LAST_WRITE		0x8F034
-#define CSR_LAST_WRITE_VALUE	0xc003b400
-/* MMIO address range for CSR program (0x80000 - 0x82FFF) */
+#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 23)
+
 #define CSR_MAX_FW_SIZE			0x2FFF
 #define CSR_DEFAULT_FW_OFFSET		0xFFFFFFFF
-#define CSR_MMIO_START_RANGE	0x80000
-#define CSR_MMIO_END_RANGE	0x8FFFF

 struct intel_css_header {
 	/* 0x09 for DMC */
@@ -177,6 +166,14 @@ struct stepping_info {
 	char substepping;
 };

+/*
+ * Kabylake derived from Skylake H0, so SKL H0
+ * is the right firmware for KBL A0 (revid 0).
+ */
+static const struct stepping_info kbl_stepping_info[] = {
+	{'H', '0'}, {'I', '0'}
+};
+
 static const struct stepping_info skl_stepping_info[] = {
 	{'A', '0'}, {'B', '0'}, {'C', '0'},
 	{'D', '0'}, {'E', '0'}, {'F', '0'},
@@ -184,98 +181,58 @@ static const struct stepping_info skl_stepping_info[] = {
 	{'J', '0'}, {'K', '0'}
 };

-static struct stepping_info bxt_stepping_info[] = {
+static const struct stepping_info bxt_stepping_info[] = {
 	{'A', '0'}, {'A', '1'}, {'A', '2'},
 	{'B', '0'}, {'B', '1'}, {'B', '2'}
 };
-static char intel_get_stepping(struct drm_device *dev)
+static const struct stepping_info *intel_get_stepping_info(struct drm_device *dev)
 {
-	if (IS_SKYLAKE(dev) && (dev->pdev->revision <
-			ARRAY_SIZE(skl_stepping_info)))
-		return skl_stepping_info[dev->pdev->revision].stepping;
-	else if (IS_BROXTON(dev) && (dev->pdev->revision <
-			ARRAY_SIZE(bxt_stepping_info)))
-		return bxt_stepping_info[dev->pdev->revision].stepping;
-	else
-		return -ENODATA;
-}
+	const struct stepping_info *si;
+	unsigned int size;

-static char intel_get_substepping(struct drm_device *dev)
-{
-	if (IS_SKYLAKE(dev) && (dev->pdev->revision <
-			ARRAY_SIZE(skl_stepping_info)))
-		return skl_stepping_info[dev->pdev->revision].substepping;
-	else if (IS_BROXTON(dev) && (dev->pdev->revision <
-			ARRAY_SIZE(bxt_stepping_info)))
-		return bxt_stepping_info[dev->pdev->revision].substepping;
-	else
-		return -ENODATA;
-}
+	if (IS_KABYLAKE(dev)) {
+		size = ARRAY_SIZE(kbl_stepping_info);
+		si = kbl_stepping_info;
+	} else if (IS_SKYLAKE(dev)) {
+		size = ARRAY_SIZE(skl_stepping_info);
+		si = skl_stepping_info;
+	} else if (IS_BROXTON(dev)) {
+		size = ARRAY_SIZE(bxt_stepping_info);
+		si = bxt_stepping_info;
+	} else {
+		return NULL;
+	}

-/**
- * intel_csr_load_status_get() - to get firmware loading status.
- * @dev_priv: i915 device.
- *
- * This function helps to get the firmware loading status.
- *
- * Return: Firmware loading status.
- */
-enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv)
-{
-	enum csr_state state;
+	if (INTEL_REVID(dev) < size)
+		return si + INTEL_REVID(dev);

-	mutex_lock(&dev_priv->csr_lock);
-	state = dev_priv->csr.state;
-	mutex_unlock(&dev_priv->csr_lock);
-
-	return state;
-}
-
-/**
- * intel_csr_load_status_set() - help to set firmware loading status.
- * @dev_priv: i915 device.
- * @state: enumeration of firmware loading status.
- *
- * Set the firmware loading status.
- */
-void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
-			enum csr_state state)
-{
-	mutex_lock(&dev_priv->csr_lock);
-	dev_priv->csr.state = state;
-	mutex_unlock(&dev_priv->csr_lock);
+	return NULL;
 }
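Illustrative lookup, mirroring how parse_csr_fw() below consumes the table:

	/* Illustration only: NULL means unknown platform or unlisted revid */
	const struct stepping_info *si = intel_get_stepping_info(dev);

	if (si)
		DRM_DEBUG_KMS("stepping %c%c\n", si->stepping, si->substepping);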
 /**
  * intel_csr_load_program() - write the firmware from memory to register.
- * @dev: drm device.
+ * @dev_priv: i915 drm device.
  *
  * CSR firmware is read from a .bin file and kept in internal memory one time.
  * Every time display comes back from low power state this function is called to
  * copy the firmware from internal memory to registers.
  */
-void intel_csr_load_program(struct drm_device *dev)
+void intel_csr_load_program(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 *payload = dev_priv->csr.dmc_payload;
 	uint32_t i, fw_size;

-	if (!IS_GEN9(dev)) {
+	if (!IS_GEN9(dev_priv)) {
 		DRM_ERROR("No CSR support available for this platform\n");
 		return;
 	}

-	/*
-	 * FIXME: Firmware gets lost on S3/S4, but not when entering system
-	 * standby or suspend-to-idle (which is just like forced runtime pm).
-	 * Unfortunately the ACPI subsystem doesn't yet give us a way to
-	 * differentiate this, hence figure it out with this hack.
-	 */
-	if (I915_READ(CSR_PROGRAM(0)))
+	if (!dev_priv->csr.dmc_payload) {
+		DRM_ERROR("Tried to program CSR with empty payload\n");
 		return;
+	}

-	mutex_lock(&dev_priv->csr_lock);
 	fw_size = dev_priv->csr.dmc_fw_size;
 	for (i = 0; i < fw_size; i++)
 		I915_WRITE(CSR_PROGRAM(i), payload[i]);
@@ -285,43 +242,56 @@ void intel_csr_load_program(struct drm_device *dev)
 			   dev_priv->csr.mmiodata[i]);
 	}

-	dev_priv->csr.state = FW_LOADED;
-	mutex_unlock(&dev_priv->csr_lock);
+	dev_priv->csr.dc_state = 0;
 }
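The CSR_PROGRAM() register macro was dropped from this file above; presumably it moved to i915_reg.h with the typed-register conversion this commit performs, along the lines of:

	/* Assumption: typed-register form of the old (0x80000 + (i) * 4) define,
	 * so the write loop above lands in the 0x80000..0x82FFF program window */
	#define CSR_PROGRAM(i)		_MMIO(0x80000 + (i) * 4)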
-static void finish_csr_load(const struct firmware *fw, void *context)
+static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
+			      const struct firmware *fw)
 {
-	struct drm_i915_private *dev_priv = context;
 	struct drm_device *dev = dev_priv->dev;
 	struct intel_css_header *css_header;
 	struct intel_package_header *package_header;
 	struct intel_dmc_header *dmc_header;
 	struct intel_csr *csr = &dev_priv->csr;
-	char stepping = intel_get_stepping(dev);
-	char substepping = intel_get_substepping(dev);
+	const struct stepping_info *stepping_info = intel_get_stepping_info(dev);
+	char stepping, substepping;
 	uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
 	uint32_t i;
 	uint32_t *dmc_payload;
-	bool fw_loaded = false;

-	if (!fw) {
-		i915_firmware_load_error_print(csr->fw_path, 0);
-		goto out;
-	}
+	if (!fw)
+		return NULL;

-	if ((stepping == -ENODATA) || (substepping == -ENODATA)) {
+	if (!stepping_info) {
 		DRM_ERROR("Unknown stepping info, firmware loading failed\n");
-		goto out;
+		return NULL;
 	}

+	stepping = stepping_info->stepping;
+	substepping = stepping_info->substepping;
+
 	/* Extract CSS Header information*/
 	css_header = (struct intel_css_header *)fw->data;
 	if (sizeof(struct intel_css_header) !=
 	    (css_header->header_len * 4)) {
 		DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
 			  (css_header->header_len * 4));
-		goto out;
+		return NULL;
 	}
+
+	csr->version = css_header->version;
+
+	if (IS_SKYLAKE(dev) && csr->version < SKL_CSR_VERSION_REQUIRED) {
+		DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
+			 " please upgrade to v%u.%u or later"
+			 " [https://01.org/linuxgraphics/intel-linux-graphics-firmwares].\n",
+			 CSR_VERSION_MAJOR(csr->version),
+			 CSR_VERSION_MINOR(csr->version),
+			 CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
+			 CSR_VERSION_MINOR(SKL_CSR_VERSION_REQUIRED));
+		return NULL;
+	}
+
 	readcount += sizeof(struct intel_css_header);

 	/* Extract Package Header information*/
@@ -331,7 +301,7 @@ static void finish_csr_load(const struct firmware *fw, void *context)
 	    (package_header->header_len * 4)) {
 		DRM_ERROR("Firmware has wrong package header length %u bytes\n",
 			  (package_header->header_len * 4));
-		goto out;
+		return NULL;
 	}
 	readcount += sizeof(struct intel_package_header);

@@ -351,7 +321,7 @@ static void finish_csr_load(const struct firmware *fw, void *context)
 	}
 	if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
 		DRM_ERROR("Firmware not supported for %c stepping\n", stepping);
-		goto out;
+		return NULL;
 	}
 	readcount += dmc_offset;

@@ -360,7 +330,7 @@ static void finish_csr_load(const struct firmware *fw, void *context)
 	if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
 		DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
 			  (dmc_header->header_len));
-		goto out;
+		return NULL;
 	}
 	readcount += sizeof(struct intel_dmc_header);

@@ -368,7 +338,7 @@ static void finish_csr_load(const struct firmware *fw, void *context)
 	if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
 		DRM_ERROR("Firmware has wrong mmio count %u\n",
 			  dmc_header->mmio_count);
-		goto out;
+		return NULL;
 	}
 	csr->mmio_count = dmc_header->mmio_count;
 	for (i = 0; i < dmc_header->mmio_count; i++) {
@@ -376,9 +346,9 @@ static void finish_csr_load(const struct firmware *fw, void *context)
 		    dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
 			DRM_ERROR(" Firmware has wrong mmio address 0x%x\n",
 				  dmc_header->mmioaddr[i]);
-			goto out;
+			return NULL;
 		}
-		csr->mmioaddr[i] = dmc_header->mmioaddr[i];
+		csr->mmioaddr[i] = _MMIO(dmc_header->mmioaddr[i]);
 		csr->mmiodata[i] = dmc_header->mmiodata[i];
 	}

@@ -386,103 +356,102 @@ static void finish_csr_load(const struct firmware *fw, void *context)
 	nbytes = dmc_header->fw_size * 4;
 	if (nbytes > CSR_MAX_FW_SIZE) {
 		DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
-		goto out;
+		return NULL;
 	}
 	csr->dmc_fw_size = dmc_header->fw_size;

-	csr->dmc_payload = kmalloc(nbytes, GFP_KERNEL);
-	if (!csr->dmc_payload) {
+	dmc_payload = kmalloc(nbytes, GFP_KERNEL);
+	if (!dmc_payload) {
 		DRM_ERROR("Memory allocation failed for dmc payload\n");
-		goto out;
+		return NULL;
 	}

-	dmc_payload = csr->dmc_payload;
 	memcpy(dmc_payload, &fw->data[readcount], nbytes);

-	/* load csr program during system boot, as needed for DC states */
-	intel_csr_load_program(dev);
-	fw_loaded = true;
+	return dmc_payload;
+}

-	DRM_DEBUG_KMS("Finished loading %s\n", dev_priv->csr.fw_path);
+static void csr_load_work_fn(struct drm_i915_private *dev_priv)
+{
+	struct intel_csr *csr;
+	const struct firmware *fw;
+	int ret;
+
+	csr = &dev_priv->csr;
+
+	ret = request_firmware(&fw, dev_priv->csr.fw_path,
+			       &dev_priv->dev->pdev->dev);
+	if (!fw)
+		goto out;
+
+	dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
+	if (!dev_priv->csr.dmc_payload)
+		goto out;
+
+	/* load csr program during system boot, as needed for DC states */
+	intel_csr_load_program(dev_priv);
+
 out:
-	if (fw_loaded)
-		intel_runtime_pm_put(dev_priv);
-	else
-		intel_csr_load_status_set(dev_priv, FW_FAILED);
+	if (dev_priv->csr.dmc_payload) {
+		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+		DRM_INFO("Finished loading %s (v%u.%u)\n",
+			 dev_priv->csr.fw_path,
+			 CSR_VERSION_MAJOR(csr->version),
+			 CSR_VERSION_MINOR(csr->version));
+	} else {
+		DRM_ERROR("Failed to load DMC firmware, disabling rpm\n");
+	}

 	release_firmware(fw);
 }
 /**
  * intel_csr_ucode_init() - initialize the firmware loading.
- * @dev: drm device.
+ * @dev_priv: i915 drm device.
  *
  * This function is called at the time of loading the display driver to read
  * firmware from a .bin file and copy it into internal memory.
  */
-void intel_csr_ucode_init(struct drm_device *dev)
+void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_csr *csr = &dev_priv->csr;
-	int ret;

-	if (!HAS_CSR(dev))
+	if (!HAS_CSR(dev_priv))
 		return;

-	if (IS_SKYLAKE(dev))
+	if (IS_SKYLAKE(dev_priv))
 		csr->fw_path = I915_CSR_SKL;
 	else if (IS_BROXTON(dev_priv))
 		csr->fw_path = I915_CSR_BXT;
 	else {
 		DRM_ERROR("Unexpected: no known CSR firmware for platform\n");
-		intel_csr_load_status_set(dev_priv, FW_FAILED);
 		return;
 	}

-#if 0
 	DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);

 	/*
 	 * Obtain a runtime pm reference, until CSR is loaded,
 	 * to avoid entering runtime-suspend.
 	 */
-	intel_runtime_pm_get(dev_priv);
+	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

-	/* CSR supported for platform, load firmware */
-	ret = request_firmware_nowait(THIS_MODULE, true, csr->fw_path,
-				      &dev_priv->dev->pdev->dev,
-				      GFP_KERNEL, dev_priv,
-				      finish_csr_load);
-	if (ret) {
-		i915_firmware_load_error_print(csr->fw_path, ret);
-		intel_csr_load_status_set(dev_priv, FW_FAILED);
-	}
-#endif
+	csr_load_work_fn(dev_priv);
 }
 /**
  * intel_csr_ucode_fini() - unload the CSR firmware.
- * @dev: drm device.
+ * @dev_priv: i915 drm device.
  *
  * Firmware unloading includes freeing the internal memory and resetting the
  * firmware loading status.
  */
-void intel_csr_ucode_fini(struct drm_device *dev)
+void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!HAS_CSR(dev))
+	if (!HAS_CSR(dev_priv))
 		return;

-	intel_csr_load_status_set(dev_priv, FW_FAILED);
-
 	kfree(dev_priv->csr.dmc_payload);
 }
-
-void assert_csr_loaded(struct drm_i915_private *dev_priv)
-{
-	WARN_ONCE(intel_csr_load_status_get(dev_priv) != FW_LOADED,
-		  "CSR is not loaded.\n");
-	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
-		  "CSR program storage start is NULL\n");
-	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
-	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
-}
@@ -133,12 +133,12 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
 	{ 0x00002016, 0x000000A0, 0x0 },
 	{ 0x00005012, 0x0000009B, 0x0 },
 	{ 0x00007011, 0x00000088, 0x0 },
-	{ 0x00009010, 0x000000C7, 0x0 },
+	{ 0x80009010, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
 	{ 0x00002016, 0x0000009B, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
-	{ 0x00007011, 0x000000C7, 0x0 },
+	{ 0x80007011, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
 	{ 0x00002016, 0x000000DF, 0x0 },
-	{ 0x00005012, 0x000000C7, 0x0 },
+	{ 0x80005012, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
 };

 /* Skylake U */
@@ -146,12 +146,12 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
 	{ 0x0000201B, 0x000000A2, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
 	{ 0x00007011, 0x00000087, 0x0 },
-	{ 0x80009010, 0x000000C7, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x80009010, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
 	{ 0x0000201B, 0x0000009D, 0x0 },
-	{ 0x00005012, 0x000000C7, 0x0 },
+	{ 0x80005012, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
-	{ 0x00007011, 0x000000C7, 0x0 },
+	{ 0x80007011, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
 	{ 0x00002016, 0x00000088, 0x0 },
-	{ 0x00005012, 0x000000C7, 0x0 },
+	{ 0x80005012, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
 };

 /* Skylake Y */
@@ -159,12 +159,12 @@ static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
 	{ 0x00000018, 0x000000A2, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
 	{ 0x00007011, 0x00000087, 0x0 },
-	{ 0x80009010, 0x000000C7, 0x3 },	/* Uses I_boost level 0x3 */
+	{ 0x80009010, 0x000000C0, 0x3 },	/* Uses I_boost level 0x3 */
 	{ 0x00000018, 0x0000009D, 0x0 },
-	{ 0x00005012, 0x000000C7, 0x0 },
+	{ 0x80005012, 0x000000C0, 0x3 },	/* Uses I_boost level 0x3 */
-	{ 0x00007011, 0x000000C7, 0x0 },
+	{ 0x80007011, 0x000000C0, 0x3 },	/* Uses I_boost level 0x3 */
 	{ 0x00000018, 0x00000088, 0x0 },
-	{ 0x00005012, 0x000000C7, 0x0 },
+	{ 0x80005012, 0x000000C0, 0x3 },	/* Uses I_boost level 0x3 */
 };
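For readability of the tables above: each entry is a struct ddi_buf_trans value. The struct is declared earlier in the file (not in this hunk), roughly as sketched below; field comments are assumptions based on the usage here:

	struct ddi_buf_trans {
		u32 trans1;	/* balance leg enable, de-emphasis level */
		u32 trans2;	/* vref sel, vswing */
		u8 i_boost;	/* SKL VccIO mask */
	};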
 /*
@@ -345,7 +345,7 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
 static bool
 intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
 {
-	return intel_dig_port->hdmi.hdmi_reg;
+	return i915_mmio_reg_valid(intel_dig_port->hdmi.hdmi_reg);
 }

 static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
@@ -353,10 +353,10 @@ static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
 {
 	const struct ddi_buf_trans *ddi_translations;

-	if (IS_SKL_ULX(dev)) {
+	if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
 		ddi_translations = skl_y_ddi_translations_dp;
 		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
-	} else if (IS_SKL_ULT(dev)) {
+	} else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
 		ddi_translations = skl_u_ddi_translations_dp;
 		*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
 	} else {
@@ -373,7 +373,7 @@ static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const struct ddi_buf_trans *ddi_translations;

-	if (IS_SKL_ULX(dev)) {
+	if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
 		if (dev_priv->edp_low_vswing) {
 			ddi_translations = skl_y_ddi_translations_edp;
 			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
@@ -381,7 +381,7 @@ static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
 			ddi_translations = skl_y_ddi_translations_dp;
 			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
 		}
-	} else if (IS_SKL_ULT(dev)) {
+	} else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
 		if (dev_priv->edp_low_vswing) {
 			ddi_translations = skl_u_ddi_translations_edp;
 			*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
@@ -408,7 +408,7 @@ skl_get_buf_trans_hdmi(struct drm_device *dev,
 {
 	const struct ddi_buf_trans *ddi_translations;

-	if (IS_SKL_ULX(dev)) {
+	if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
 		ddi_translations = skl_y_ddi_translations_hdmi;
 		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
 	} else {
@@ -448,7 +448,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
 		bxt_ddi_vswing_sequence(dev, hdmi_level, port,
 					INTEL_OUTPUT_HDMI);
 		return;
-	} else if (IS_SKYLAKE(dev)) {
+	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
 		ddi_translations_fdi = NULL;
 		ddi_translations_dp =
 				skl_get_buf_trans_dp(dev, &n_dp_entries);
@@ -584,7 +584,7 @@ void intel_prepare_ddi(struct drm_device *dev)
 static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
 				    enum port port)
 {
-	uint32_t reg = DDI_BUF_CTL(port);
+	i915_reg_t reg = DDI_BUF_CTL(port);
 	int i;

 	for (i = 0; i < 16; i++) {
@@ -683,15 +683,16 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
 		temp = I915_READ(DP_TP_STATUS(PORT_E));
 		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
 			DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
+			break;
+		}

-			/* Enable normal pixel sending for FDI */
-			I915_WRITE(DP_TP_CTL(PORT_E),
-				   DP_TP_CTL_FDI_AUTOTRAIN |
-				   DP_TP_CTL_LINK_TRAIN_NORMAL |
-				   DP_TP_CTL_ENHANCED_FRAME_ENABLE |
-				   DP_TP_CTL_ENABLE);
-
-			return;
+		/*
+		 * Leave things enabled even if we failed to train FDI.
+		 * Results in less fireworks from the state checker.
+		 */
+		if (i == ARRAY_SIZE(hsw_ddi_translations_fdi) * 2 - 1) {
+			DRM_ERROR("FDI link training failed!\n");
+			break;
 		}

 		temp = I915_READ(DDI_BUF_CTL(PORT_E));
@@ -720,7 +721,12 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
 		POSTING_READ(FDI_RX_MISC(PIPE_A));
 	}

-	DRM_ERROR("FDI link training failed!\n");
+	/* Enable normal pixel sending for FDI */
+	I915_WRITE(DP_TP_CTL(PORT_E),
+		   DP_TP_CTL_FDI_AUTOTRAIN |
+		   DP_TP_CTL_LINK_TRAIN_NORMAL |
+		   DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+		   DP_TP_CTL_ENABLE);
 }
 void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
@@ -939,7 +945,8 @@ static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
 	/* Otherwise a < c && b >= d, do nothing */
 }

-static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg)
+static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
+				   i915_reg_t reg)
 {
 	int refclk = LC_FREQ;
 	int n, p, r;
@@ -975,7 +982,7 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg)
 static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
 			       uint32_t dpll)
 {
-	uint32_t cfgcr1_reg, cfgcr2_reg;
+	i915_reg_t cfgcr1_reg, cfgcr2_reg;
 	uint32_t cfgcr1_val, cfgcr2_val;
 	uint32_t p0, p1, p2, dco_freq;
@@ -1120,10 +1127,10 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
 		link_clock = 270000;
 		break;
 	case PORT_CLK_SEL_WRPLL1:
-		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
+		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(0));
 		break;
 	case PORT_CLK_SEL_WRPLL2:
-		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
+		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1));
 		break;
 	case PORT_CLK_SEL_SPLL:
 		pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
@@ -1192,7 +1199,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,

 	if (INTEL_INFO(dev)->gen <= 8)
 		hsw_ddi_clock_get(encoder, pipe_config);
-	else if (IS_SKYLAKE(dev))
+	else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
 		skl_ddi_clock_get(encoder, pipe_config);
 	else if (IS_BROXTON(dev))
 		bxt_ddi_clock_get(encoder, pipe_config);
@@ -1789,7 +1796,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
 	struct intel_encoder *intel_encoder =
 		intel_ddi_get_crtc_new_encoder(crtc_state);

-	if (IS_SKYLAKE(dev))
+	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
 		return skl_ddi_pll_select(intel_crtc, crtc_state,
 					  intel_encoder);
 	else if (IS_BROXTON(dev))
@@ -1951,7 +1958,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
 void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
 				       enum transcoder cpu_transcoder)
 {
-	uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+	i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
 	uint32_t val = I915_READ(reg);

 	val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
@@ -1970,13 +1977,16 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
 	enum transcoder cpu_transcoder;
 	enum intel_display_power_domain power_domain;
 	uint32_t tmp;
+	bool ret;

 	power_domain = intel_display_port_power_domain(intel_encoder);
-	if (!intel_display_power_is_enabled(dev_priv, power_domain))
+	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 		return false;

-	if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
-		return false;
+	if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) {
+		ret = false;
+		goto out;
+	}

 	if (port == PORT_A)
 		cpu_transcoder = TRANSCODER_EDP;
@@ -1988,23 +1998,33 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
 	switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
 	case TRANS_DDI_MODE_SELECT_HDMI:
 	case TRANS_DDI_MODE_SELECT_DVI:
-		return (type == DRM_MODE_CONNECTOR_HDMIA);
+		ret = type == DRM_MODE_CONNECTOR_HDMIA;
+		break;

 	case TRANS_DDI_MODE_SELECT_DP_SST:
-		if (type == DRM_MODE_CONNECTOR_eDP)
-			return true;
-		return (type == DRM_MODE_CONNECTOR_DisplayPort);
+		ret = type == DRM_MODE_CONNECTOR_eDP ||
+		      type == DRM_MODE_CONNECTOR_DisplayPort;
+		break;
+
 	case TRANS_DDI_MODE_SELECT_DP_MST:
 		/* if the transcoder is in MST state then
 		 * connector isn't connected */
-		return false;
+		ret = false;
+		break;

 	case TRANS_DDI_MODE_SELECT_FDI:
-		return (type == DRM_MODE_CONNECTOR_VGA);
+		ret = type == DRM_MODE_CONNECTOR_VGA;
+		break;

 	default:
-		return false;
+		ret = false;
+		break;
 	}
+
+out:
+	intel_display_power_put(dev_priv, power_domain);
+
+	return ret;
 }
 bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -2016,15 +2036,18 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 	enum intel_display_power_domain power_domain;
 	u32 tmp;
 	int i;
+	bool ret;

 	power_domain = intel_display_port_power_domain(encoder);
-	if (!intel_display_power_is_enabled(dev_priv, power_domain))
+	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 		return false;

+	ret = false;
+
 	tmp = I915_READ(DDI_BUF_CTL(port));

 	if (!(tmp & DDI_BUF_CTL_ENABLE))
-		return false;
+		goto out;

 	if (port == PORT_A) {
 		tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -2042,25 +2065,32 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 			break;
 		}

-		return true;
-	}
+		ret = true;

-	for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
-		tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+		goto out;
+	}

-		if ((tmp & TRANS_DDI_PORT_MASK)
-		    == TRANS_DDI_SELECT_PORT(port)) {
-			if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
-				return false;
+	for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
+		tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));

+		if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
+			if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
+			    TRANS_DDI_MODE_SELECT_DP_MST)
+				goto out;
+
 			*pipe = i;
-			return true;
+			ret = true;
+
+			goto out;
 		}
 	}

 	DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));

-	return false;
+out:
+	intel_display_power_put(dev_priv, power_domain);
+
+	return ret;
 }
 void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
@@ -2106,21 +2136,21 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
 			iboost = dp_iboost;
 		} else {
 			ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
-			iboost = ddi_translations[port].i_boost;
+			iboost = ddi_translations[level].i_boost;
 		}
 	} else if (type == INTEL_OUTPUT_EDP) {
 		if (dp_iboost) {
 			iboost = dp_iboost;
 		} else {
 			ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
-			iboost = ddi_translations[port].i_boost;
+			iboost = ddi_translations[level].i_boost;
 		}
 	} else if (type == INTEL_OUTPUT_HDMI) {
 		if (hdmi_iboost) {
 			iboost = hdmi_iboost;
 		} else {
 			ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
-			iboost = ddi_translations[port].i_boost;
+			iboost = ddi_translations[level].i_boost;
 		}
 	} else {
 		return;
@@ -2272,7 +2302,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)

 	level = translate_signal_level(signal_levels);

-	if (IS_SKYLAKE(dev))
+	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
 		skl_ddi_set_iboost(dev, level, port, encoder->type);
 	else if (IS_BROXTON(dev))
 		bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
@@ -2280,6 +2310,50 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)

 	return DDI_BUF_TRANS_SELECT(level);
 }
void intel_ddi_clk_select(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
uint32_t dpll = pipe_config->ddi_pll_sel;
uint32_t val;
/*
* DPLL0 is used for eDP and is the only "private" DPLL (as
* opposed to shared) on SKL
*/
if (encoder->type == INTEL_OUTPUT_EDP) {
WARN_ON(dpll != SKL_DPLL0);
val = I915_READ(DPLL_CTRL1);
val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
DPLL_CTRL1_SSC(dpll) |
DPLL_CTRL1_LINK_RATE_MASK(dpll));
val |= pipe_config->dpll_hw_state.ctrl1 << (dpll * 6);
I915_WRITE(DPLL_CTRL1, val);
POSTING_READ(DPLL_CTRL1);
}
/* DDI -> PLL mapping */
val = I915_READ(DPLL_CTRL2);
val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
I915_WRITE(DPLL_CTRL2, val);
} else if (INTEL_INFO(dev_priv)->gen < 9) {
WARN_ON(pipe_config->ddi_pll_sel == PORT_CLK_SEL_NONE);
I915_WRITE(PORT_CLK_SEL(port), pipe_config->ddi_pll_sel);
}
}
 static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
@@ -2295,42 +2369,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 		intel_edp_panel_on(intel_dp);
 	}

-	if (IS_SKYLAKE(dev)) {
-		uint32_t dpll = crtc->config->ddi_pll_sel;
-		uint32_t val;
-
-		/*
-		 * DPLL0 is used for eDP and is the only "private" DPLL (as
-		 * opposed to shared) on SKL
-		 */
-		if (type == INTEL_OUTPUT_EDP) {
-			WARN_ON(dpll != SKL_DPLL0);
-
-			val = I915_READ(DPLL_CTRL1);
-
-			val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
-				 DPLL_CTRL1_SSC(dpll) |
-				 DPLL_CTRL1_LINK_RATE_MASK(dpll));
-			val |= crtc->config->dpll_hw_state.ctrl1 << (dpll * 6);
-
-			I915_WRITE(DPLL_CTRL1, val);
-			POSTING_READ(DPLL_CTRL1);
-		}
-
-		/* DDI -> PLL mapping */
-		val = I915_READ(DPLL_CTRL2);
-
-		val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
-			 DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
-		val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) |
-			DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
-
-		I915_WRITE(DPLL_CTRL2, val);
-	} else if (INTEL_INFO(dev)->gen < 9) {
-		WARN_ON(crtc->config->ddi_pll_sel == PORT_CLK_SEL_NONE);
-		I915_WRITE(PORT_CLK_SEL(port), crtc->config->ddi_pll_sel);
-	}
+	intel_ddi_clk_select(intel_encoder, crtc->config);

 	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2390,7 +2429,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
 		intel_edp_panel_off(intel_dp);
 	}

-	if (IS_SKYLAKE(dev))
+	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
 		I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
 					DPLL_CTRL2_DDI_CLK_OFF(port)));
 	else if (INTEL_INFO(dev)->gen < 9)
@@ -2500,12 +2539,14 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
 {
 	uint32_t val;

-	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
 		return false;

 	val = I915_READ(WRPLL_CTL(pll->id));
 	hw_state->wrpll = val;

+	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
 	return val & WRPLL_PLL_ENABLE;
 }

@@ -2515,12 +2556,14 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
 {
 	uint32_t val;

-	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
 		return false;

 	val = I915_READ(SPLL_CTL);
 	hw_state->spll = val;

+	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
 	return val & SPLL_PLL_ENABLE;
 }
@@ -2562,7 +2605,7 @@ static const char * const skl_ddi_pll_names[] = {
 };

 struct skl_dpll_regs {
-	u32 ctl, cfgcr1, cfgcr2;
+	i915_reg_t ctl, cfgcr1, cfgcr2;
 };

 /* this array is indexed by the *shared* pll id */
@@ -2575,13 +2618,13 @@ static const struct skl_dpll_regs skl_dpll_regs[3] = {
 	},
 	{
 		/* DPLL 2 */
-		.ctl = WRPLL_CTL1,
+		.ctl = WRPLL_CTL(0),
 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
 	},
 	{
 		/* DPLL 3 */
-		.ctl = WRPLL_CTL2,
+		.ctl = WRPLL_CTL(1),
 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
 	},
@@ -2637,16 +2680,19 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	uint32_t val;
 	unsigned int dpll;
 	const struct skl_dpll_regs *regs = skl_dpll_regs;
+	bool ret;

-	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
 		return false;

+	ret = false;
+
 	/* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
 	dpll = pll->id + 1;

 	val = I915_READ(regs[pll->id].ctl);
 	if (!(val & LCPLL_PLL_ENABLE))
-		return false;
+		goto out;

 	val = I915_READ(DPLL_CTRL1);
 	hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
@@ -2656,8 +2702,12 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 		hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
 		hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
 	}
+	ret = true;

-	return true;
+out:
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+	return ret;
 }
 static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -2924,13 +2974,16 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 {
 	enum port port = (enum port)pll->id;	/* 1:1 port->PLL mapping */
 	uint32_t val;
+	bool ret;

-	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
 		return false;

+	ret = false;
+
 	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
 	if (!(val & PORT_PLL_ENABLE))
-		return false;
+		goto out;

 	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
@@ -2977,7 +3030,12 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 		   I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

-	return true;
+	ret = true;
+
+out:
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+	return ret;
 }
 static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -3001,22 +3059,22 @@ void intel_ddi_pll_init(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t val = I915_READ(LCPLL_CTL);

-	if (IS_SKYLAKE(dev))
+	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
 		skl_shared_dplls_init(dev_priv);
 	else if (IS_BROXTON(dev))
 		bxt_shared_dplls_init(dev_priv);
 	else
 		hsw_shared_dplls_init(dev_priv);

-	if (IS_SKYLAKE(dev)) {
+	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
 		int cdclk_freq;

 		cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
 		dev_priv->skl_boot_cdclk = cdclk_freq;
+		if (skl_sanitize_cdclk(dev_priv))
+			DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
 		if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
 			DRM_ERROR("LCPLL1 is disabled\n");
-		else
-			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
 	} else if (IS_BROXTON(dev)) {
 		broxton_init_cdclk(dev);
 		broxton_ddi_phy_init(dev);
@@ -3035,11 +3093,11 @@ void intel_ddi_pll_init(struct drm_device *dev)
 	}
 }
-void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
+void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
-	struct intel_dp *intel_dp = &intel_dig_port->dp;
-	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_i915_private *dev_priv =
+		to_i915(intel_dig_port->base.base.dev);
 	enum port port = intel_dig_port->port;
 	uint32_t val;
 	bool wait = false;
@@ -3150,7 +3208,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 		pipe_config->has_hdmi_sink = true;
 		intel_hdmi = enc_to_intel_hdmi(&encoder->base);

-		if (intel_hdmi->infoframe_enabled(&encoder->base))
+		if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
 			pipe_config->has_infoframe = true;
 		break;
 	case TRANS_DDI_MODE_SELECT_DVI:
@@ -3278,7 +3336,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	encoder = &intel_encoder->base;

 	drm_encoder_init(dev, encoder, &intel_ddi_funcs,
-			 DRM_MODE_ENCODER_TMDS);
+			 DRM_MODE_ENCODER_TMDS, NULL);

 	intel_encoder->compute_config = intel_ddi_compute_config;
 	intel_encoder->enable = intel_enable_ddi;
@@ -3294,6 +3352,20 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 						  (DDI_BUF_PORT_REVERSAL |
 						   DDI_A_4_LANES);

+	/*
+	 * Bspec says that DDI_A_4_LANES is the only supported configuration
+	 * for Broxton. Yet some BIOS fail to set this bit on port A if eDP
+	 * wasn't lit up at boot. Force this bit on in our internal
+	 * configuration so that we use the proper lane count for our
+	 * calculations.
+	 */
+	if (IS_BROXTON(dev) && port == PORT_A) {
+		if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
+			DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
+			intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
+		}
+	}
+
 	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	intel_encoder->cloneable = 0;
@@ -3307,8 +3379,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	 * On BXT A0/A1, sw needs to activate DDIA HPD logic and
 	 * interrupts to check the external panel connection.
 	 */
-	if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0)
-	    && port == PORT_B)
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) && port == PORT_B)
 		dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
 	else
 		dev_priv->hotplug.irq_port[port] = intel_dig_port;
File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,342 @@
/*
* Copyright © 2008-2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "intel_drv.h"
static void
intel_get_adjust_train(struct intel_dp *intel_dp,
const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
uint8_t v = 0;
uint8_t p = 0;
int lane;
uint8_t voltage_max;
uint8_t preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
if (this_v > v)
v = this_v;
if (this_p > p)
p = this_p;
}
voltage_max = intel_dp_voltage_max(intel_dp);
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
if (p >= preemph_max)
p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
}
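Illustrative only: how a training loop refreshes train_set from the sink's requested drive levels. intel_dp_get_link_status() is the existing DPCD status-read helper used later in this file:

	uint8_t link_status[DP_LINK_STATUS_SIZE];

	/* Illustration: re-read the sink's adjust requests, then fold them
	 * into intel_dp->train_set for the next intel_dp_update_link_train() */
	if (intel_dp_get_link_status(intel_dp, link_status))
		intel_get_adjust_train(intel_dp, link_status);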
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint8_t dp_train_pat)
{
uint8_t buf[sizeof(intel_dp->train_set) + 1];
int ret, len;
intel_dp_program_link_training_pattern(intel_dp, dp_train_pat);
buf[0] = dp_train_pat;
if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
DP_TRAINING_PATTERN_DISABLE) {
/* don't write DP_TRAINING_LANEx_SET on disable */
len = 1;
} else {
/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
len = intel_dp->lane_count + 1;
}
ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
buf, len);
return ret == len;
}
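/*
 * Editor's sketch of the DPCD write above (not part of this commit): for a
 * 4-lane configuration, buf[] holds 5 bytes landing in consecutive DPCD
 * registers starting at DP_TRAINING_PATTERN_SET (0x102): buf[0] is the
 * pattern byte, buf[1]..buf[4] are the per-lane DP_TRAINING_LANE0_SET..
 * DP_TRAINING_LANE3_SET drive settings, written as one burst so the sink
 * sees the pattern and the drive levels change together.
 */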
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
uint8_t dp_train_pat)
{
if (!intel_dp->train_set_valid)
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp);
return intel_dp_set_link_train(intel_dp, dp_train_pat);
}
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp)
{
int ret;
intel_dp_set_signal_levels(intel_dp);
ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
intel_dp->train_set, intel_dp->lane_count);
return ret == intel_dp->lane_count;
}
/* Enable corresponding port and start training pattern 1 */
static void
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
int i;
uint8_t voltage;
int voltage_tries, loop_tries;
uint8_t link_config[2];
uint8_t link_bw, rate_select;
if (intel_dp->prepare_link_retrain)
intel_dp->prepare_link_retrain(intel_dp);
intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
&link_bw, &rate_select);
/* Write the link configuration data */
link_config[0] = link_bw;
link_config[1] = intel_dp->lane_count;
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
if (intel_dp->num_sink_rates)
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
&rate_select, 1);
link_config[0] = 0;
link_config[1] = DP_SET_ANSI_8B10B;
drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
intel_dp->DP |= DP_PORT_EN;
/* clock recovery */
if (!intel_dp_reset_link_train(intel_dp,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to enable link training\n");
return;
}
voltage = 0xff;
voltage_tries = 0;
loop_tries = 0;
for (;;) {
uint8_t link_status[DP_LINK_STATUS_SIZE];
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}
if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("clock recovery OK\n");
break;
}
/*
* if we used previously trained voltage and pre-emphasis values
* and we don't get clock recovery, reset link training values
*/
if (intel_dp->train_set_valid) {
DRM_DEBUG_KMS("clock recovery not ok, reset");
/* clear the flag as we are not reusing train set */
intel_dp->train_set_valid = false;
if (!intel_dp_reset_link_train(intel_dp,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to enable link training\n");
return;
}
continue;
}
/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == intel_dp->lane_count) {
++loop_tries;
if (loop_tries == 5) {
DRM_ERROR("too many full retries, give up\n");
break;
}
intel_dp_reset_link_train(intel_dp,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE);
voltage_tries = 0;
continue;
}
/* Check to see if we've tried the same voltage 5 times */
if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++voltage_tries;
if (voltage_tries == 5) {
DRM_ERROR("too many voltage retries, give up\n");
break;
}
} else
voltage_tries = 0;
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Update training set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
DRM_ERROR("failed to update link training\n");
break;
}
}
}
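/*
 * Editor's summary of the retry policy above (not part of this commit):
 * clock recovery aborts once the same voltage swing has been tried 5 times
 * without locking, and restarts training from scratch whenever every lane
 * reports DP_TRAIN_MAX_SWING_REACHED, aborting once that has happened 5
 * times; otherwise each pass folds the sink's requested adjustments back in
 * via intel_get_adjust_train() and intel_dp_update_link_train().
 */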
/*
* Pick training pattern for channel equalization. Training Pattern 3 for HBR2
* or 1.2 devices that support it, Training Pattern 2 otherwise.
*/
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
{
u32 training_pattern = DP_TRAINING_PATTERN_2;
bool source_tps3, sink_tps3;
/*
* Intel platforms that support HBR2 also support TPS3. TPS3 support is
* also mandatory for downstream devices that support HBR2. However, not
* all sinks follow the spec.
*
* Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
* supported in source but still not enabled.
*/
source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
if (source_tps3 && sink_tps3) {
training_pattern = DP_TRAINING_PATTERN_3;
} else if (intel_dp->link_rate == 540000) {
if (!source_tps3)
DRM_DEBUG_KMS("5.4 Gbps link rate without source HBR2/TPS3 support\n");
if (!sink_tps3)
DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n");
}
return training_pattern;
}
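/*
 * Editor's note (not part of this commit), concrete outcomes of the helper
 * above: HBR2-capable source plus TPS3-capable sink selects TPS3; a
 * 5.4 Gbps link where either side lacks TPS3 falls back to TPS2 with a
 * debug warning, since HBR2 formally requires TPS3; lower link rates
 * without TPS3 on both ends quietly stay on TPS2.
 */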
static void
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
bool channel_eq = false;
int tries, cr_tries;
u32 training_pattern;
training_pattern = intel_dp_training_pattern(intel_dp);
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to start channel equalization\n");
return;
}
tries = 0;
cr_tries = 0;
channel_eq = false;
for (;;) {
uint8_t link_status[DP_LINK_STATUS_SIZE];
if (cr_tries > 5) {
DRM_ERROR("failed to train DP, aborting\n");
break;
}
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status,
intel_dp->lane_count)) {
intel_dp->train_set_valid = false;
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
cr_tries++;
continue;
}
if (drm_dp_channel_eq_ok(link_status,
intel_dp->lane_count)) {
channel_eq = true;
break;
}
/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
intel_dp->train_set_valid = false;
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
tries = 0;
cr_tries++;
continue;
}
/* Update training set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
DRM_ERROR("failed to update link training\n");
break;
}
++tries;
}
intel_dp_set_idle_link_train(intel_dp);
if (channel_eq) {
intel_dp->train_set_valid = true;
DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
}
}
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
intel_dp_set_link_train(intel_dp,
DP_TRAINING_PATTERN_DISABLE);
}
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_link_training_channel_equalization(intel_dp);
}
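/*
 * Editor's sketch (not part of this commit): the calling sequence the two
 * entry points above expect, modeled on the MST pre-enable path later in
 * this commit — program link parameters and the DP buffer, wake the sink,
 * train, then drop back out of the training pattern.
 */
static void example_enable_dp_link(struct intel_dp *intel_dp)
{
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);	/* wake the sink */
	intel_dp_start_link_train(intel_dp);	/* CR + EQ phases above */
	intel_dp_stop_link_train(intel_dp);	/* DP_TRAINING_PATTERN_DISABLE */
}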

View File

@@ -173,20 +173,14 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
intel_mst->port = found->port;
if (intel_dp->active_mst_links == 0) {
- enum port port = intel_ddi_get_encoder_port(encoder);
+ intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
intel_dp_set_link_params(intel_dp, intel_crtc->config);
- /* FIXME: add support for SKL */
- if (INTEL_INFO(dev)->gen < 9)
- I915_WRITE(PORT_CLK_SEL(port),
- intel_crtc->config->ddi_pll_sel);
intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
@@ -414,7 +408,10 @@ static void intel_connector_add_to_fbdev(struct intel_connector *connector)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
+ if (dev_priv->fbdev)
+ drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper,
+ &connector->base);
#endif
}
@@ -422,7 +419,10 @@ static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
+ if (dev_priv->fbdev)
+ drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
+ &connector->base);
#endif
}
@@ -510,7 +510,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
drm_kms_helper_hotplug_event(dev);
}
- static struct drm_dp_mst_topology_cbs mst_cbs = {
+ static const struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
.register_connector = intel_dp_register_mst_connector,
.destroy_connector = intel_dp_destroy_mst_connector,
@@ -534,7 +534,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_mst->primary = intel_dig_port;
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
- DRM_MODE_ENCODER_DPMST);
+ DRM_MODE_ENCODER_DPMST, NULL);
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->crtc_mask = 0x7;

View File

@@ -124,8 +124,6 @@ struct intel_framebuffer {
struct intel_fbdev {
struct drm_fb_helper helper;
struct intel_framebuffer *fb;
- struct list_head fbdev_list;
- struct drm_display_mode *our_mode;
int preferred_bpp;
};
@@ -251,6 +249,7 @@ struct intel_atomic_state {
unsigned int cdclk;
bool dpll_set;
struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
+ struct intel_wm_config wm_config;
};
struct intel_plane_state {
@@ -281,6 +280,9 @@ struct intel_plane_state {
int scaler_id;
struct drm_intel_sprite_colorkey ckey;
+ /* async flip related structures */
+ struct drm_i915_gem_request *wait_req;
};
struct intel_initial_plane_config {
@@ -335,6 +337,21 @@ struct intel_crtc_scaler_state {
/* drm_mode->private_flags */
#define I915_MODE_FLAG_INHERITED 1
+ struct intel_pipe_wm {
+ struct intel_wm_level wm[5];
+ uint32_t linetime;
+ bool fbc_wm_enabled;
+ bool pipe_enabled;
+ bool sprites_enabled;
+ bool sprites_scaled;
+ };
+ struct skl_pipe_wm {
+ struct skl_wm_level wm[8];
+ struct skl_wm_level trans_wm;
+ uint32_t linetime;
+ };
struct intel_crtc_state {
struct drm_crtc_state base;
@@ -349,7 +366,9 @@ struct intel_crtc_state {
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
unsigned long quirks;
- bool update_pipe;
+ bool update_pipe; /* can a fast modeset be performed? */
+ bool disable_cxsr;
+ bool update_wm_pre, update_wm_post; /* watermarks are updated */
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
@@ -377,6 +396,9 @@ struct intel_crtc_state {
* accordingly. */
bool has_dp_encoder;
+ /* DSI has special cases */
+ bool has_dsi_encoder;
/* Whether we should send NULL infoframes. Required for audio. */
bool has_hdmi_sink;
@@ -469,6 +491,20 @@ struct intel_crtc_state {
/* w/a for waiting 2 vblanks during crtc enable */
enum pipe hsw_workaround_pipe;
+ /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */
+ bool disable_lp_wm;
+ struct {
+ /*
+ * optimal watermarks, programmed post-vblank when this state
+ * is committed
+ */
+ union {
+ struct intel_pipe_wm ilk;
+ struct skl_pipe_wm skl;
+ } optimal;
+ } wm;
};
struct vlv_wm_state {
@@ -480,26 +516,12 @@ struct vlv_wm_state {
bool cxsr;
};
- struct intel_pipe_wm {
- struct intel_wm_level wm[5];
- uint32_t linetime;
- bool fbc_wm_enabled;
- bool pipe_enabled;
- bool sprites_enabled;
- bool sprites_scaled;
- };
struct intel_mmio_flip {
struct work_struct work;
struct drm_i915_private *i915;
struct drm_i915_gem_request *req;
struct intel_crtc *crtc;
- };
- struct skl_pipe_wm {
- struct skl_wm_level wm[8];
- struct skl_wm_level trans_wm;
- uint32_t linetime;
+ unsigned int rotation;
};
/*
@@ -510,13 +532,9 @@ struct skl_pipe_wm {
*/
struct intel_crtc_atomic_commit {
/* Sleepable operations to perform before commit */
- bool wait_for_flips;
bool disable_fbc;
bool disable_ips;
- bool disable_cxsr;
bool pre_disable_primary;
- bool update_wm_pre, update_wm_post;
- unsigned disabled_planes;
/* Sleepable operations to perform after commit */
unsigned fb_bits;
@@ -568,9 +586,10 @@ struct intel_crtc {
/* per-pipe watermark state */
struct {
/* watermarks currently being used */
- struct intel_pipe_wm active;
- /* SKL wm values currently in use */
- struct skl_pipe_wm skl_active;
+ union {
+ struct intel_pipe_wm ilk;
+ struct skl_pipe_wm skl;
+ } active;
/* allow CxSR on this pipe */
bool cxsr_allowed;
} wm;
@@ -678,7 +697,7 @@ struct cxsr_latency {
#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
struct intel_hdmi {
- u32 hdmi_reg;
+ i915_reg_t hdmi_reg;
int ddc_bus;
bool limited_color_range;
bool color_range_auto;
@@ -694,7 +713,8 @@ struct intel_hdmi {
void (*set_infoframes)(struct drm_encoder *encoder,
bool enable,
const struct drm_display_mode *adjusted_mode);
- bool (*infoframe_enabled)(struct drm_encoder *encoder);
+ bool (*infoframe_enabled)(struct drm_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
};
struct intel_dp_mst_encoder;
@@ -720,15 +740,10 @@ enum link_m_n_set {
M2_N2
};
- struct sink_crc {
- bool started;
- u8 last_crc[6];
- int last_count;
- };
struct intel_dp {
- uint32_t output_reg;
- uint32_t aux_ch_ctl_reg;
+ i915_reg_t output_reg;
+ i915_reg_t aux_ch_ctl_reg;
+ i915_reg_t aux_ch_data_reg[5];
uint32_t DP;
int link_rate;
uint8_t lane_count;
@@ -742,7 +757,6 @@ struct intel_dp {
/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
uint8_t num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
- struct sink_crc sink_crc;
struct drm_dp_aux aux;
uint8_t train_set[4];
int panel_power_up_delay;
@@ -756,6 +770,8 @@ struct intel_dp {
unsigned long last_power_on;
unsigned long last_backlight_off;
+ struct notifier_block edp_notifier;
/*
* Pipe whose power sequencer is currently locked into
* this port. Only relevant on VLV/CHV.
@@ -783,6 +799,11 @@ struct intel_dp {
int send_bytes,
uint32_t aux_clock_divider);
+ /* This is called before a link training is started */
+ void (*prepare_link_retrain)(struct intel_dp *intel_dp);
+ bool train_set_valid;
/* Displayport compliance testing */
unsigned long compliance_test_type;
unsigned long compliance_test_data;
@@ -797,6 +818,8 @@ struct intel_digital_port {
struct intel_hdmi hdmi;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
+ /* for communication with audio component; protected by av_mutex */
+ const struct drm_connector *audio_connector;
};
struct intel_dp_mst_encoder {
@@ -940,7 +963,8 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe);
void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder);
- void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv);
+ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
+ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
@@ -971,6 +995,8 @@ void intel_crt_init(struct drm_device *dev);
/* intel_ddi.c */
+ void intel_ddi_clk_select(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
void intel_prepare_ddi(struct drm_device *dev);
void hsw_fdi_link_train(struct drm_crtc *crtc);
void intel_ddi_init(struct drm_device *dev, enum port port);
@@ -985,7 +1011,7 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
bool intel_ddi_pll_select(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
- void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_fdi_disable(struct drm_crtc *crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -1053,6 +1079,15 @@ intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
drm_wait_one_vblank(dev, pipe);
}
+ static inline void
+ intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
+ {
+ const struct intel_crtc *crtc =
+ to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ if (crtc->active)
+ intel_wait_for_vblank(dev, pipe);
+ }
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport,
@@ -1066,9 +1101,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx);
int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
- const struct drm_plane_state *plane_state,
- struct intel_engine_cs *pipelined,
- struct drm_i915_gem_request **pipelined_request);
+ const struct drm_plane_state *plane_state);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1149,7 +1182,10 @@ void broxton_ddi_phy_uninit(struct drm_device *dev);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
void skl_init_cdclk(struct drm_i915_private *dev_priv);
+ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
+ void skl_enable_dc6(struct drm_i915_private *dev_priv);
+ void skl_disable_dc6(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
@@ -1170,7 +1206,6 @@ enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
- void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
@@ -1185,16 +1220,12 @@ u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
u32 skl_plane_ctl_rotation(unsigned int rotation);
/* intel_csr.c */
- void intel_csr_ucode_init(struct drm_device *dev);
- enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv);
- void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
- enum csr_state state);
- void intel_csr_load_program(struct drm_device *dev);
- void intel_csr_ucode_fini(struct drm_device *dev);
- void assert_csr_loaded(struct drm_i915_private *dev_priv);
+ void intel_csr_ucode_init(struct drm_i915_private *);
+ void intel_csr_load_program(struct drm_i915_private *);
+ void intel_csr_ucode_fini(struct drm_i915_private *);
/* intel_dp.c */
- bool intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
+ void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
@@ -1230,8 +1261,26 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp);
void intel_edp_drrs_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
+ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port);
void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
+ void
+ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
+ uint8_t dp_train_pat);
+ void
+ intel_dp_set_signal_levels(struct intel_dp *intel_dp);
+ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
+ uint8_t
+ intel_dp_voltage_max(struct intel_dp *intel_dp);
+ uint8_t
+ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing);
+ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
+ uint8_t *link_bw, uint8_t *rate_select);
+ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
+ bool
+ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
@@ -1246,7 +1295,7 @@ void intel_dvo_init(struct drm_device *dev);
/* legacy fbdev emulation in intel_fbdev.c */
#ifdef CONFIG_DRM_FBDEV_EMULATION
extern int intel_fbdev_init(struct drm_device *dev);
- extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
+ extern void intel_fbdev_initial_config_async(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
@@ -1257,7 +1306,7 @@ static inline int intel_fbdev_init(struct drm_device *dev)
return 0;
}
- static inline void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
+ static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
{
}
@@ -1275,9 +1324,11 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
#endif
/* intel_fbc.c */
- bool intel_fbc_enabled(struct drm_i915_private *dev_priv);
- void intel_fbc_update(struct drm_i915_private *dev_priv);
+ bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
+ void intel_fbc_deactivate(struct intel_crtc *crtc);
+ void intel_fbc_update(struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
+ void intel_fbc_enable(struct intel_crtc *crtc);
void intel_fbc_disable(struct drm_i915_private *dev_priv);
void intel_fbc_disable_crtc(struct intel_crtc *crtc);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
@@ -1285,11 +1336,10 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
enum fb_op_origin origin);
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin);
- const char *intel_no_fbc_reason_str(enum no_fbc_reason reason);
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
/* intel_hdmi.c */
- void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
+ void intel_hdmi_init(struct drm_device *dev, i915_reg_t hdmi_reg, enum port port);
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
@@ -1365,8 +1415,13 @@ void intel_psr_single_frame_update(struct drm_device *dev,
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_fini(struct drm_i915_private *);
- void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
+ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
+ void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
+ void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv);
+ void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
+ const char *
+ intel_display_power_domain_str(enum intel_display_power_domain domain);
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
@@ -1374,9 +1429,95 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
+ bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
static inline void
assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
{
WARN_ONCE(dev_priv->pm.suspended,
"Device suspended during HW access\n");
}
static inline void
assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
{
assert_rpm_device_not_suspended(dev_priv);
/* FIXME: Needs to be converted back to WARN_ONCE, but currently causes
* too much noise. */
if (!atomic_read(&dev_priv->pm.wakeref_count))
DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
}
static inline int
assert_rpm_atomic_begin(struct drm_i915_private *dev_priv)
{
int seq = atomic_read(&dev_priv->pm.atomic_seq);
assert_rpm_wakelock_held(dev_priv);
return seq;
}
static inline void
assert_rpm_atomic_end(struct drm_i915_private *dev_priv, int begin_seq)
{
WARN_ONCE(atomic_read(&dev_priv->pm.atomic_seq) != begin_seq,
"HW access outside of RPM atomic section\n");
}
/**
* disable_rpm_wakeref_asserts - disable the RPM assert checks
* @dev_priv: i915 device instance
*
* This function disables asserts that check if we hold an RPM wakelock
* reference, while keeping the device-not-suspended checks still enabled.
* It's meant to be used only in special circumstances where our rule about
* the wakelock refcount wrt. the device power state doesn't hold. According
* to this rule at any point where we access the HW or want to keep the HW in
* an active state we must hold an RPM wakelock reference acquired via one of
* the intel_runtime_pm_get() helpers. Currently there are a few special spots
* where this rule doesn't hold: the IRQ and suspend/resume handlers, the
* forcewake release timer, and the GPU RPS and hangcheck works. All other
* users should avoid using this function.
*
* Any calls to this function must have a symmetric call to
* enable_rpm_wakeref_asserts().
*/
static inline void
disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
{
atomic_inc(&dev_priv->pm.wakeref_count);
}
/**
* enable_rpm_wakeref_asserts - re-enable the RPM assert checks
* @dev_priv: i915 device instance
*
* This function re-enables the RPM assert checks after disabling them with
* disable_rpm_wakeref_asserts. It's meant to be used only in special
* circumstances otherwise its use should be avoided.
*
* Any calls to this function must have a symmetric call to
* disable_rpm_wakeref_asserts().
*/
static inline void
enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
{
atomic_dec(&dev_priv->pm.wakeref_count);
}
/* TODO: convert users of these to rely instead on proper RPM refcounting */
#define DISABLE_RPM_WAKEREF_ASSERTS(dev_priv) \
disable_rpm_wakeref_asserts(dev_priv)
#define ENABLE_RPM_WAKEREF_ASSERTS(dev_priv) \
enable_rpm_wakeref_asserts(dev_priv)
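/*
 * Editor's sketch (not part of this commit): the pairing the kernel-doc
 * above prescribes, as a suspend handler would use it. The handler name is
 * hypothetical; the point is the symmetric disable/enable bracket around a
 * section that legitimately touches hardware without holding a wakeref.
 */
static int example_suspend_handler(struct drm_i915_private *dev_priv)
{
	disable_rpm_wakeref_asserts(dev_priv);
	/* ... hardware access that runs with no RPM wakeref held ... */
	enable_rpm_wakeref_asserts(dev_priv);
	return 0;
}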
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
+ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
@@ -1393,12 +1534,6 @@ void intel_init_clock_gating(struct drm_device *dev);
void intel_suspend_hw(struct drm_device *dev);
int ilk_wm_max_level(const struct drm_device *dev);
void intel_update_watermarks(struct drm_crtc *crtc);
- void intel_update_sprite_watermarks(struct drm_plane *plane,
- struct drm_crtc *crtc,
- uint32_t sprite_width,
- uint32_t sprite_height,
- int pixel_size,
- bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
void intel_pm_setup(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@@ -1426,7 +1561,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
/* intel_sdvo.c */
- bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
+ bool intel_sdvo_init(struct drm_device *dev,
+ i915_reg_t reg, enum port port);
/* intel_sprite.c */
@@ -1474,4 +1610,12 @@ void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+ int drm_core_init(void);
+ void set_fake_framebuffer();
+ int kolibri_framebuffer_init(void *param);
+ void shmem_file_delete(struct file *filep);
+ void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
+ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
+ struct drm_driver *driver);
#endif /* __INTEL_DRV_H__ */

View File

@@ -60,7 +60,8 @@ static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
DRM_ERROR("DPI FIFOs are not empty\n");
}
- static void write_data(struct drm_i915_private *dev_priv, u32 reg,
+ static void write_data(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
const u8 *data, u32 len)
{
u32 i, j;
@@ -75,7 +76,8 @@ static void write_data(struct drm_i915_private *dev_priv, u32 reg,
}
}
- static void read_data(struct drm_i915_private *dev_priv, u32 reg,
+ static void read_data(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
u8 *data, u32 len)
{
u32 i, j;
@@ -98,7 +100,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
struct mipi_dsi_packet packet;
ssize_t ret;
const u8 *header, *data;
- u32 data_reg, data_mask, ctrl_reg, ctrl_mask;
+ i915_reg_t data_reg, ctrl_reg;
+ u32 data_mask, ctrl_mask;
ret = mipi_dsi_create_packet(&packet, msg);
if (ret < 0)
@@ -263,16 +266,18 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
}
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- struct drm_display_mode *adjusted_mode = &config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
DRM_DEBUG_KMS("\n");
+ pipe_config->has_dsi_encoder = true;
if (fixed_mode)
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
@@ -364,7 +369,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_dsi_device_ready(encoder);
else if (IS_BROXTON(dev))
bxt_dsi_device_ready(encoder);
@@ -377,10 +382,10 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
- u32 temp;
- u32 port_ctrl;
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+ u32 temp;
temp = I915_READ(VLV_CHICKEN_3);
temp &= ~PIXEL_OVERLAP_CNT_MASK |
intel_dsi->pixel_overlap <<
@@ -389,8 +394,9 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
}
for_each_dsi_port(port, intel_dsi->ports) {
- port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
- MIPI_PORT_CTRL(port);
+ i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+ BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+ u32 temp;
temp = I915_READ(port_ctrl);
@@ -416,13 +422,13 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
- u32 temp;
- u32 port_ctrl;
for_each_dsi_port(port, intel_dsi->ports) {
+ i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+ BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+ u32 temp;
/* de-assert ip_tg_enable signal */
- port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
- MIPI_PORT_CTRL(port);
temp = I915_READ(port_ctrl);
I915_WRITE(port_ctrl, temp & ~DPI_ENABLE);
POSTING_READ(port_ctrl);
@@ -458,6 +464,8 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
intel_panel_enable_backlight(intel_dsi->attached_connector);
}
+ static void intel_dsi_prepare(struct intel_encoder *intel_encoder);
static void intel_dsi_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
@@ -470,13 +478,16 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
+ intel_dsi_prepare(encoder);
+ intel_enable_dsi_pll(encoder);
/* Panel Enable over CRC PMIC */
if (intel_dsi->gpio_panel)
gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
msleep(intel_dsi->panel_on_delay);
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
/*
* Disable DPOunit clock gating, can stall pipe
* and we need DPLL REFA always enabled
@@ -580,11 +591,13 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
- u32 val;
- u32 port_ctrl = 0;
DRM_DEBUG_KMS("\n");
for_each_dsi_port(port, intel_dsi->ports) {
+ /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
+ i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+ BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
+ u32 val;
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
ULPS_STATE_ENTER);
@@ -598,12 +611,6 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
ULPS_STATE_ENTER);
usleep_range(2000, 2500);
- if (IS_BROXTON(dev))
- port_ctrl = BXT_MIPI_PORT_CTRL(port);
- else if (IS_VALLEYVIEW(dev))
- /* Common bit for both MIPI Port A & MIPI Port C */
- port_ctrl = MIPI_PORT_CTRL(PORT_A);
/* Wait till Clock lanes are in LP-00 state for MIPI Port A
* only. MIPI Port C has no similar bit for checking
*/
@@ -656,40 +663,47 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
- u32 dpi_enabled, func, ctrl_reg;
enum port port;
+ bool ret;
DRM_DEBUG_KMS("\n");
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
+ i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
+ BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+ u32 dpi_enabled, func;
func = I915_READ(MIPI_DSI_FUNC_PRG(port));
- ctrl_reg = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
- MIPI_PORT_CTRL(port);
dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
/* Due to some hardware limitations on BYT, MIPI Port C DPI
* Enable bit does not get set. To check whether DSI Port C
* was enabled in BIOS, check the Pipe B enable bit
*/
- if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
- (port == PORT_C))
+ if (IS_VALLEYVIEW(dev) && port == PORT_C)
dpi_enabled = I915_READ(PIPECONF(PIPE_B)) &
PIPECONF_ENABLE;
if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
*pipe = port == PORT_A ? PIPE_A : PIPE_B;
- return true;
+ ret = true;
+ goto out;
}
}
}
+ out:
+ intel_display_power_put(dev_priv, power_domain);
- return false;
+ return ret;
}
static void intel_dsi_get_config(struct intel_encoder *encoder,
@@ -698,6 +712,8 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
u32 pclk = 0;
DRM_DEBUG_KMS("\n");
+ pipe_config->has_dsi_encoder = true;
/*
* DPLL_MD is not used in case of DSI, reading will get some default value
* set dpll_md = 0
@@ -706,7 +722,8 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
if (IS_BROXTON(encoder->base.dev))
pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
- else if (IS_VALLEYVIEW(encoder->base.dev))
+ else if (IS_VALLEYVIEW(encoder->base.dev) ||
+ IS_CHERRYVIEW(encoder->base.dev))
pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
if (!pclk)
@@ -859,7 +876,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
}
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
/*
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
@@ -875,21 +892,12 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
I915_WRITE(MIPI_CTRL(port), tmp |
READ_REQUEST_PRIORITY_HIGH);
} else if (IS_BROXTON(dev)) {
- /*
- * FIXME:
- * BXT can connect any PIPE to any MIPI port.
- * Select the pipe based on the MIPI port read from
- * VBT for now. Pick PIPE A for MIPI port A and C
- * for port C.
- */
+ enum pipe pipe = intel_crtc->pipe;
tmp = I915_READ(MIPI_CTRL(port));
tmp &= ~BXT_PIPE_SELECT_MASK;
- if (port == PORT_A)
- tmp |= BXT_PIPE_SELECT_A;
- else if (port == PORT_C)
- tmp |= BXT_PIPE_SELECT_C;
+ tmp |= BXT_PIPE_SELECT(pipe);
I915_WRITE(MIPI_CTRL(port), tmp);
}
@@ -1025,15 +1033,6 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
}
}
- static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
- {
- DRM_DEBUG_KMS("\n");
- intel_dsi_prepare(encoder);
- intel_enable_dsi_pll(encoder);
- }
static enum drm_connector_status
intel_dsi_detect(struct drm_connector *connector, bool force)
{
@@ -1128,7 +1127,7 @@ void intel_dsi_init(struct drm_device *dev)
if (!dev_priv->vbt.has_mipi)
return;
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
} else {
DRM_ERROR("Unsupported Mipi device to reg base");
@@ -1151,11 +1150,10 @@ void intel_dsi_init(struct drm_device *dev)
connector = &intel_connector->base;
- drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
+ drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
+ NULL);
- /* XXX: very likely not all of these are needed */
intel_encoder->compute_config = intel_dsi_compute_config;
- intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
intel_encoder->pre_enable = intel_dsi_pre_enable;
intel_encoder->enable = intel_dsi_enable_nop;
intel_encoder->disable = intel_dsi_pre_disable;

View File

@@ -117,7 +117,7 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
#define for_each_dsi_port(__port, __ports_mask) \
for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
- if ((__ports_mask) & (1 << (__port)))
+ for_each_if ((__ports_mask) & (1 << (__port)))
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
{

View File

@@ -1,437 +0,0 @@
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Jani Nikula <jani.nikula@intel.com>
*/
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <video/mipi_display.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_dsi_cmd.h"
/*
* XXX: MIPI_DATA_ADDRESS, MIPI_DATA_LENGTH, MIPI_COMMAND_LENGTH, and
* MIPI_COMMAND_ADDRESS registers.
*
* Apparently these registers provide a MIPI adapter level way to send (lots of)
* commands and data to the receiver, without having to write the commands and
* data to MIPI_{HS,LP}_GEN_{CTRL,DATA} registers word by word.
*
* Presumably for anything other than MIPI_DCS_WRITE_MEMORY_START and
* MIPI_DCS_WRITE_MEMORY_CONTINUE (which are used to update the external
* framebuffer in command mode displays) these are just an optimization that can
* come later.
*
* For memory writes, these should probably be used for performance.
*/
static void print_stat(struct intel_dsi *intel_dsi)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 val;
val = I915_READ(MIPI_INTR_STAT(pipe));
#define STAT_BIT(val, bit) (val) & (bit) ? " " #bit : ""
DRM_DEBUG_KMS("MIPI_INTR_STAT(%d) = %08x"
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
"\n", pipe, val,
STAT_BIT(val, TEARING_EFFECT),
STAT_BIT(val, SPL_PKT_SENT_INTERRUPT),
STAT_BIT(val, GEN_READ_DATA_AVAIL),
STAT_BIT(val, LP_GENERIC_WR_FIFO_FULL),
STAT_BIT(val, HS_GENERIC_WR_FIFO_FULL),
STAT_BIT(val, RX_PROT_VIOLATION),
STAT_BIT(val, RX_INVALID_TX_LENGTH),
STAT_BIT(val, ACK_WITH_NO_ERROR),
STAT_BIT(val, TURN_AROUND_ACK_TIMEOUT),
STAT_BIT(val, LP_RX_TIMEOUT),
STAT_BIT(val, HS_TX_TIMEOUT),
STAT_BIT(val, DPI_FIFO_UNDERRUN),
STAT_BIT(val, LOW_CONTENTION),
STAT_BIT(val, HIGH_CONTENTION),
STAT_BIT(val, TXDSI_VC_ID_INVALID),
STAT_BIT(val, TXDSI_DATA_TYPE_NOT_RECOGNISED),
STAT_BIT(val, TXCHECKSUM_ERROR),
STAT_BIT(val, TXECC_MULTIBIT_ERROR),
STAT_BIT(val, TXECC_SINGLE_BIT_ERROR),
STAT_BIT(val, TXFALSE_CONTROL_ERROR),
STAT_BIT(val, RXDSI_VC_ID_INVALID),
STAT_BIT(val, RXDSI_DATA_TYPE_NOT_REGOGNISED),
STAT_BIT(val, RXCHECKSUM_ERROR),
STAT_BIT(val, RXECC_MULTIBIT_ERROR),
STAT_BIT(val, RXECC_SINGLE_BIT_ERROR),
STAT_BIT(val, RXFALSE_CONTROL_ERROR),
STAT_BIT(val, RXHS_RECEIVE_TIMEOUT_ERROR),
STAT_BIT(val, RX_LP_TX_SYNC_ERROR),
STAT_BIT(val, RXEXCAPE_MODE_ENTRY_ERROR),
STAT_BIT(val, RXEOT_SYNC_ERROR),
STAT_BIT(val, RXSOT_SYNC_ERROR),
STAT_BIT(val, RXSOT_ERROR));
#undef STAT_BIT
}
enum dsi_type {
DSI_DCS,
DSI_GENERIC,
};
/* enable or disable command mode hs transmissions */
void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 temp;
u32 mask = DBI_FIFO_EMPTY;
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(pipe));
temp &= DBI_HS_LP_MODE_MASK;
I915_WRITE(MIPI_HS_LP_DBI_ENABLE(pipe), enable ? DBI_HS_MODE : DBI_LP_MODE);
intel_dsi->hs = enable;
}
static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
u8 data_type, u16 data)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 ctrl_reg;
u32 ctrl;
u32 mask;
DRM_DEBUG_KMS("channel %d, data_type %d, data %04x\n",
channel, data_type, data);
if (intel_dsi->hs) {
ctrl_reg = MIPI_HS_GEN_CTRL(pipe);
mask = HS_CTRL_FIFO_FULL;
} else {
ctrl_reg = MIPI_LP_GEN_CTRL(pipe);
mask = LP_CTRL_FIFO_FULL;
}
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) {
DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
print_stat(intel_dsi);
}
/*
* Note: This function is also used for long packets, with length passed
* as data, since SHORT_PACKET_PARAM_SHIFT ==
* LONG_PACKET_WORD_COUNT_SHIFT.
*/
ctrl = data << SHORT_PACKET_PARAM_SHIFT |
channel << VIRTUAL_CHANNEL_SHIFT |
data_type << DATA_TYPE_SHIFT;
I915_WRITE(ctrl_reg, ctrl);
return 0;
}
static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
u8 data_type, const u8 *data, int len)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 data_reg;
int i, j, n;
u32 mask;
DRM_DEBUG_KMS("channel %d, data_type %d, len %04x\n",
channel, data_type, len);
if (intel_dsi->hs) {
data_reg = MIPI_HS_GEN_DATA(pipe);
mask = HS_DATA_FIFO_FULL;
} else {
data_reg = MIPI_LP_GEN_DATA(pipe);
mask = LP_DATA_FIFO_FULL;
}
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50))
DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
for (i = 0; i < len; i += n) {
u32 val = 0;
n = min_t(int, len - i, 4);
for (j = 0; j < n; j++)
val |= *data++ << 8 * j;
I915_WRITE(data_reg, val);
/* XXX: check for data fifo full, once that is set, write 4
* dwords, then wait for not set, then continue. */
}
return dsi_vc_send_short(intel_dsi, channel, data_type, len);
}
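/*
 * Worked example (editor's note, illustrative only): for data = { 0xAA, 0xBB,
 * 0xCC, 0xDD, 0xEE } and len = 5, the loop above packs bytes little-endian
 * into dwords and emits two writes to the gen data FIFO:
 *
 *	val = 0xDDCCBBAA	(bytes 0-3)
 *	val = 0x000000EE	(byte 4)
 *
 * It then reuses dsi_vc_send_short() to emit the long-packet header, with
 * len = 5 landing in the word-count field (SHORT_PACKET_PARAM_SHIFT ==
 * LONG_PACKET_WORD_COUNT_SHIFT, as noted in that function).
 */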
static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
int channel, const u8 *data, int len,
enum dsi_type type)
{
int ret;
if (len == 0) {
BUG_ON(type == DSI_GENERIC);
ret = dsi_vc_send_short(intel_dsi, channel,
MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM,
0);
} else if (len == 1) {
ret = dsi_vc_send_short(intel_dsi, channel,
type == DSI_GENERIC ?
MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
MIPI_DSI_DCS_SHORT_WRITE, data[0]);
} else if (len == 2) {
ret = dsi_vc_send_short(intel_dsi, channel,
type == DSI_GENERIC ?
MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
MIPI_DSI_DCS_SHORT_WRITE_PARAM,
(data[1] << 8) | data[0]);
} else {
ret = dsi_vc_send_long(intel_dsi, channel,
type == DSI_GENERIC ?
MIPI_DSI_GENERIC_LONG_WRITE :
MIPI_DSI_DCS_LONG_WRITE, data, len);
}
return ret;
}
int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
const u8 *data, int len)
{
return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS);
}
int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
const u8 *data, int len)
{
return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC);
}
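/*
 * Usage sketch (editor's note, illustrative only -- not part of this file):
 * how a panel sequence might drive the write helpers above, using the
 * dsi_vc_dcs_write_0/_1 wrappers from intel_dsi_cmd.h and DCS opcodes from
 * <video/mipi_display.h>. Virtual channel 0 and the gamma payload are
 * assumptions for the example.
 */
static void example_panel_init(struct intel_dsi *intel_dsi)
{
	static const u8 gamma[] = { 0xe0, 0x00, 0x04, 0x0e, 0x29, 0x2d };

	/* 1-byte payload -> MIPI_DSI_DCS_SHORT_WRITE */
	dsi_vc_dcs_write_0(intel_dsi, 0, MIPI_DCS_EXIT_SLEEP_MODE);

	/* 2-byte payload -> MIPI_DSI_DCS_SHORT_WRITE_PARAM */
	dsi_vc_dcs_write_1(intel_dsi, 0, MIPI_DCS_SET_PIXEL_FORMAT, 0x77);

	/* longer payloads -> MIPI_DSI_DCS_LONG_WRITE via dsi_vc_send_long() */
	dsi_vc_dcs_write(intel_dsi, 0, gamma, sizeof(gamma));
}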
static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi,
int channel, u8 dcs_cmd)
{
return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ,
dcs_cmd);
}
static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
int channel, u8 *reqdata,
int reqlen)
{
u16 data;
u8 data_type;
switch (reqlen) {
case 0:
data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
data = 0;
break;
case 1:
data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
data = reqdata[0];
break;
case 2:
data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
data = (reqdata[1] << 8) | reqdata[0];
break;
default:
BUG();
}
return dsi_vc_send_short(intel_dsi, channel, data_type, data);
}
static int dsi_read_data_return(struct intel_dsi *intel_dsi,
u8 *buf, int buflen)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
int i, len = 0;
u32 data_reg, val;
if (intel_dsi->hs) {
data_reg = MIPI_HS_GEN_DATA(pipe);
} else {
data_reg = MIPI_LP_GEN_DATA(pipe);
}
while (len < buflen) {
val = I915_READ(data_reg);
for (i = 0; i < 4 && len < buflen; i++, len++)
buf[len] = val >> 8 * i;
}
return len;
}
int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
u8 *buf, int buflen)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 mask;
int ret;
/*
* XXX: should issue multiple read requests and reads if request is
* longer than MIPI_MAX_RETURN_PKT_SIZE
*/
I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd);
if (ret)
return ret;
mask = GEN_READ_DATA_AVAIL;
if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
DRM_ERROR("Timeout waiting for read data.\n");
ret = dsi_read_data_return(intel_dsi, buf, buflen);
if (ret < 0)
return ret;
if (ret != buflen)
return -EIO;
return 0;
}
int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
u8 *reqdata, int reqlen, u8 *buf, int buflen)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 mask;
int ret;
/*
* XXX: should issue multiple read requests and reads if request is
* longer than MIPI_MAX_RETURN_PKT_SIZE
*/
I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata,
reqlen);
if (ret)
return ret;
mask = GEN_READ_DATA_AVAIL;
if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
DRM_ERROR("Timeout waiting for read data.\n");
ret = dsi_read_data_return(intel_dsi, buf, buflen);
if (ret < 0)
return ret;
if (ret != buflen)
return -EIO;
return 0;
}
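/*
 * Usage sketch (editor's note, illustrative only): reading one byte back from
 * the panel through the DCS read path above. MIPI_DCS_GET_POWER_MODE comes
 * from <video/mipi_display.h>; virtual channel 0 is an assumption. Note that
 * the read helpers return 0 on success, not the byte count.
 */
static void example_query_power_mode(struct intel_dsi *intel_dsi)
{
	u8 mode;

	if (dsi_vc_dcs_read(intel_dsi, 0, MIPI_DCS_GET_POWER_MODE,
			    &mode, sizeof(mode)) == 0)
		DRM_DEBUG_KMS("panel power mode: 0x%02x\n", mode);
}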
/*
* send a video mode command
*
* XXX: commands with data in MIPI_DPI_DATA?
*/
int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 mask;
/* XXX: pipe, hs */
if (hs)
cmd &= ~DPI_LP_MODE;
else
cmd |= DPI_LP_MODE;
/* clear bit */
I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
/* XXX: old code skips write if control unchanged */
if (cmd == I915_READ(MIPI_DPI_CONTROL(pipe)))
DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
I915_WRITE(MIPI_DPI_CONTROL(pipe), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 100))
DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
return 0;
}
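/*
 * Usage sketch (editor's note, illustrative only): the panel enable/disable
 * paths send DPI special packets through this helper. TURN_ON and SHUTDOWN
 * are MIPI_DPI_CONTROL bits from i915_reg.h; DPI_HS_MODE_EN and
 * DPI_LP_MODE_EN come from intel_dsi_cmd.h:
 *
 *	dpi_send_cmd(intel_dsi, TURN_ON, DPI_HS_MODE_EN);	// power up
 *	...
 *	dpi_send_cmd(intel_dsi, SHUTDOWN, DPI_LP_MODE_EN);	// power down
 */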
void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 mask;
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 100))
DRM_ERROR("DPI FIFOs are not empty\n");
}

View File

@ -1,113 +0,0 @@
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Jani Nikula <jani.nikula@intel.com>
*/
#ifndef _INTEL_DSI_DSI_H
#define _INTEL_DSI_DSI_H
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <video/mipi_display.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#define DPI_LP_MODE_EN false
#define DPI_HS_MODE_EN true
void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
const u8 *data, int len);
int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
const u8 *data, int len);
int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
u8 *buf, int buflen);
int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
u8 *reqdata, int reqlen, u8 *buf, int buflen);
int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs);
void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi);
/* XXX: questionable write helpers */
static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
int channel, u8 dcs_cmd)
{
return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1);
}
static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
int channel, u8 dcs_cmd, u8 param)
{
u8 buf[2] = { dcs_cmd, param };
return dsi_vc_dcs_write(intel_dsi, channel, buf, 2);
}
static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
int channel)
{
return dsi_vc_generic_write(intel_dsi, channel, NULL, 0);
}
static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
int channel, u8 param)
{
return dsi_vc_generic_write(intel_dsi, channel, &param, 1);
}
static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
int channel, u8 param1, u8 param2)
{
u8 buf[2] = { param1, param2 };
return dsi_vc_generic_write(intel_dsi, channel, buf, 2);
}
/* XXX: questionable read helpers */
static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
int channel, u8 *buf, int buflen)
{
return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen);
}
static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
int channel, u8 param, u8 *buf,
int buflen)
{
return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen);
}
static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
int channel, u8 param1, u8 param2,
u8 *buf, int buflen)
{
u8 req[2] = { param1, param2 };
return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen);
}
#endif /* _INTEL_DSI_DSI_H */

View File

@ -204,6 +204,9 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
struct drm_device *dev = intel_dsi->base.base.dev; struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->vbt.dsi.seq_version >= 3)
data++;
gpio = *data++; gpio = *data++;
/* pull up/down */ /* pull up/down */
@ -214,6 +217,16 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
goto out; goto out;
} }
if (!IS_VALLEYVIEW(dev_priv)) {
DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
goto out;
}
if (dev_priv->vbt.dsi.seq_version >= 3) {
DRM_DEBUG_KMS("GPIO element v3 not supported\n");
goto out;
}
function = gtable[gpio].function_reg; function = gtable[gpio].function_reg;
pad = gtable[gpio].pad_reg; pad = gtable[gpio].pad_reg;

View File

@ -561,7 +561,7 @@ void intel_enable_dsi_pll(struct intel_encoder *encoder)
{ {
struct drm_device *dev = encoder->base.dev; struct drm_device *dev = encoder->base.dev;
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_enable_dsi_pll(encoder); vlv_enable_dsi_pll(encoder);
else if (IS_BROXTON(dev)) else if (IS_BROXTON(dev))
bxt_enable_dsi_pll(encoder); bxt_enable_dsi_pll(encoder);
@ -571,7 +571,7 @@ void intel_disable_dsi_pll(struct intel_encoder *encoder)
{ {
struct drm_device *dev = encoder->base.dev; struct drm_device *dev = encoder->base.dev;
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_disable_dsi_pll(encoder); vlv_disable_dsi_pll(encoder);
else if (IS_BROXTON(dev)) else if (IS_BROXTON(dev))
bxt_disable_dsi_pll(encoder); bxt_disable_dsi_pll(encoder);
@ -599,6 +599,6 @@ void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
if (IS_BROXTON(dev)) if (IS_BROXTON(dev))
bxt_dsi_reset_clocks(encoder, port); bxt_dsi_reset_clocks(encoder, port);
else if (IS_VALLEYVIEW(dev)) else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_dsi_reset_clocks(encoder, port); vlv_dsi_reset_clocks(encoder, port);
} }

View File

@ -44,6 +44,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.type = INTEL_DVO_CHIP_TMDS, .type = INTEL_DVO_CHIP_TMDS,
.name = "sil164", .name = "sil164",
.dvo_reg = DVOC, .dvo_reg = DVOC,
.dvo_srcdim_reg = DVOC_SRCDIM,
.slave_addr = SIL164_ADDR, .slave_addr = SIL164_ADDR,
.dev_ops = &sil164_ops, .dev_ops = &sil164_ops,
}, },
@ -51,6 +52,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.type = INTEL_DVO_CHIP_TMDS, .type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx", .name = "ch7xxx",
.dvo_reg = DVOC, .dvo_reg = DVOC,
.dvo_srcdim_reg = DVOC_SRCDIM,
.slave_addr = CH7xxx_ADDR, .slave_addr = CH7xxx_ADDR,
.dev_ops = &ch7xxx_ops, .dev_ops = &ch7xxx_ops,
}, },
@ -58,6 +60,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.type = INTEL_DVO_CHIP_TMDS, .type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx", .name = "ch7xxx",
.dvo_reg = DVOC, .dvo_reg = DVOC,
.dvo_srcdim_reg = DVOC_SRCDIM,
.slave_addr = 0x75, /* For some ch7010 */ .slave_addr = 0x75, /* For some ch7010 */
.dev_ops = &ch7xxx_ops, .dev_ops = &ch7xxx_ops,
}, },
@ -65,6 +68,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.type = INTEL_DVO_CHIP_LVDS, .type = INTEL_DVO_CHIP_LVDS,
.name = "ivch", .name = "ivch",
.dvo_reg = DVOA, .dvo_reg = DVOA,
.dvo_srcdim_reg = DVOA_SRCDIM,
.slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */ .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
.dev_ops = &ivch_ops, .dev_ops = &ivch_ops,
}, },
@ -72,6 +76,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.type = INTEL_DVO_CHIP_TMDS, .type = INTEL_DVO_CHIP_TMDS,
.name = "tfp410", .name = "tfp410",
.dvo_reg = DVOC, .dvo_reg = DVOC,
.dvo_srcdim_reg = DVOC_SRCDIM,
.slave_addr = TFP410_ADDR, .slave_addr = TFP410_ADDR,
.dev_ops = &tfp410_ops, .dev_ops = &tfp410_ops,
}, },
@ -79,6 +84,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.type = INTEL_DVO_CHIP_LVDS, .type = INTEL_DVO_CHIP_LVDS,
.name = "ch7017", .name = "ch7017",
.dvo_reg = DVOC, .dvo_reg = DVOC,
.dvo_srcdim_reg = DVOC_SRCDIM,
.slave_addr = 0x75, .slave_addr = 0x75,
.gpio = GMBUS_PIN_DPB, .gpio = GMBUS_PIN_DPB,
.dev_ops = &ch7017_ops, .dev_ops = &ch7017_ops,
@ -87,6 +93,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.type = INTEL_DVO_CHIP_TMDS, .type = INTEL_DVO_CHIP_TMDS,
.name = "ns2501", .name = "ns2501",
.dvo_reg = DVOB, .dvo_reg = DVOB,
.dvo_srcdim_reg = DVOB_SRCDIM,
.slave_addr = NS2501_ADDR, .slave_addr = NS2501_ADDR,
.dev_ops = &ns2501_ops, .dev_ops = &ns2501_ops,
} }
@ -171,7 +178,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
{ {
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder); struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 dvo_reg = intel_dvo->dev.dvo_reg; i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg); u32 temp = I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
@ -184,7 +191,7 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder); struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 dvo_reg = intel_dvo->dev.dvo_reg; i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg); u32 temp = I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
@ -255,20 +262,8 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder)
struct intel_dvo *intel_dvo = enc_to_dvo(encoder); struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
int pipe = crtc->pipe; int pipe = crtc->pipe;
u32 dvo_val; u32 dvo_val;
u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg;
switch (dvo_reg) {
case DVOA:
default:
dvo_srcdim_reg = DVOA_SRCDIM;
break;
case DVOB:
dvo_srcdim_reg = DVOB_SRCDIM;
break;
case DVOC:
dvo_srcdim_reg = DVOC_SRCDIM;
break;
}
/* Save the data order, since I don't know what it should be set to. */ /* Save the data order, since I don't know what it should be set to. */
dvo_val = I915_READ(dvo_reg) & dvo_val = I915_READ(dvo_reg) &
@ -434,7 +429,7 @@ void intel_dvo_init(struct drm_device *dev)
intel_encoder = &intel_dvo->base; intel_encoder = &intel_dvo->base;
drm_encoder_init(dev, &intel_encoder->base, drm_encoder_init(dev, &intel_encoder->base,
&intel_dvo_enc_funcs, encoder_type); &intel_dvo_enc_funcs, encoder_type, NULL);
intel_encoder->disable = intel_disable_dvo; intel_encoder->disable = intel_disable_dvo;
intel_encoder->enable = intel_enable_dvo; intel_encoder->enable = intel_enable_dvo;

File diff suppressed because it is too large

View File

@ -118,7 +118,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
{ {
struct intel_fbdev *ifbdev = struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper); container_of(helper, struct intel_fbdev, helper);
struct drm_framebuffer *fb; struct drm_framebuffer *fb = NULL;
struct drm_device *dev = helper->dev; struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = {}; struct drm_mode_fb_cmd2 mode_cmd = {};
@ -137,6 +137,8 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth); sizes->surface_depth);
mutex_lock(&dev->struct_mutex);
size = mode_cmd.pitches[0] * mode_cmd.height; size = mode_cmd.pitches[0] * mode_cmd.height;
size = PAGE_ALIGN(size); size = PAGE_ALIGN(size);
@ -155,26 +157,21 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
fb = __intel_framebuffer_create(dev, &mode_cmd, obj); fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb)) { if (IS_ERR(fb)) {
drm_gem_object_unreference(&obj->base);
ret = PTR_ERR(fb); ret = PTR_ERR(fb);
goto out_unref; goto out;
} }
/* Flush everything out, we'll be doing GTT only from now on */ mutex_unlock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL, NULL);
if (ret) {
DRM_ERROR("failed to pin obj: %d\n", ret);
goto out_fb;
}
ifbdev->fb = to_intel_framebuffer(fb); ifbdev->fb = to_intel_framebuffer(fb);
return 0; return 0;
out_fb:
drm_framebuffer_remove(fb);
out_unref:
drm_gem_object_unreference(&obj->base);
out: out:
mutex_unlock(&dev->struct_mutex);
if (!IS_ERR_OR_NULL(fb))
drm_framebuffer_unreference(fb);
return ret; return ret;
} }
@ -192,8 +189,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
int size, ret; int size, ret;
bool prealloc = false; bool prealloc = false;
mutex_lock(&dev->struct_mutex);
if (intel_fb && if (intel_fb &&
(sizes->fb_width > intel_fb->base.width || (sizes->fb_width > intel_fb->base.width ||
sizes->fb_height > intel_fb->base.height)) { sizes->fb_height > intel_fb->base.height)) {
@ -208,7 +203,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes); ret = intelfb_alloc(helper, sizes);
if (ret) if (ret)
goto out_unlock; return ret;
intel_fb = ifbdev->fb; intel_fb = ifbdev->fb;
} else { } else {
DRM_DEBUG_KMS("re-using BIOS fb\n"); DRM_DEBUG_KMS("re-using BIOS fb\n");
@ -220,8 +215,19 @@ static int intelfb_create(struct drm_fb_helper *helper,
obj = intel_fb->obj; obj = intel_fb->obj;
size = obj->base.size; size = obj->base.size;
mutex_lock(&dev->struct_mutex);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
* BIOS is suitable for own access.
*/
ret = intel_pin_and_fence_fb_obj(NULL, &ifbdev->fb->base, NULL);
if (ret)
goto out_unlock;
info = drm_fb_helper_alloc_fbi(helper); info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) { if (IS_ERR(info)) {
DRM_ERROR("Failed to allocate fb_info\n");
ret = PTR_ERR(info); ret = PTR_ERR(info);
goto out_unpin; goto out_unpin;
} }
@ -266,7 +272,6 @@ out_destroy_fbi:
drm_fb_helper_release_fbi(helper); drm_fb_helper_release_fbi(helper);
out_unpin: out_unpin:
i915_gem_object_ggtt_unpin(obj); i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
out_unlock: out_unlock:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return ret; return ret;
@ -505,15 +510,21 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
static void intel_fbdev_destroy(struct drm_device *dev, static void intel_fbdev_destroy(struct drm_device *dev,
struct intel_fbdev *ifbdev) struct intel_fbdev *ifbdev)
{ {
/* We rely on the object-free to release the VMA pinning for
* the info->screen_base mmapping. Leaking the VMA is simpler than * the info->screen_base mmapping. Leaking the VMA is simpler than
* trying to rectify all the possible error paths leading here.
*/
drm_fb_helper_unregister_fbi(&ifbdev->helper); drm_fb_helper_unregister_fbi(&ifbdev->helper);
drm_fb_helper_release_fbi(&ifbdev->helper); drm_fb_helper_release_fbi(&ifbdev->helper);
drm_fb_helper_fini(&ifbdev->helper); drm_fb_helper_fini(&ifbdev->helper);
if (ifbdev->fb) {
drm_framebuffer_unregister_private(&ifbdev->fb->base); drm_framebuffer_unregister_private(&ifbdev->fb->base);
drm_framebuffer_remove(&ifbdev->fb->base); drm_framebuffer_remove(&ifbdev->fb->base);
} }
}
/* /*
* Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible. * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.

View File

@ -84,30 +84,16 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
return true; return true;
} }
/** static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
* i9xx_check_fifo_underruns - check for fifo underruns
* @dev_priv: i915 device instance
*
* This function checks for fifo underruns on GMCH platforms. This needs to be
* done manually on modeset to make sure that we catch all underruns since they
* do not generate an interrupt by themselves on these platforms.
*/
void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv)
{ {
struct intel_crtc *crtc; struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
i915_reg_t reg = PIPESTAT(crtc->pipe);
u32 pipestat = I915_READ(reg) & 0xffff0000;
spin_lock_irq(&dev_priv->irq_lock); assert_spin_locked(&dev_priv->irq_lock);
for_each_intel_crtc(dev_priv->dev, crtc) {
u32 reg = PIPESTAT(crtc->pipe);
u32 pipestat;
if (crtc->cpu_fifo_underrun_disabled)
continue;
pipestat = I915_READ(reg) & 0xffff0000;
if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0) if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
continue; return;
I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
POSTING_READ(reg); POSTING_READ(reg);
@ -115,15 +101,12 @@ void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv)
DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe)); DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
} }
spin_unlock_irq(&dev_priv->irq_lock);
}
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, enum pipe pipe,
bool enable, bool old) bool enable, bool old)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg = PIPESTAT(pipe); i915_reg_t reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0xffff0000; u32 pipestat = I915_READ(reg) & 0xffff0000;
assert_spin_locked(&dev_priv->irq_lock); assert_spin_locked(&dev_priv->irq_lock);
@ -145,9 +128,26 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
DE_PIPEB_FIFO_UNDERRUN; DE_PIPEB_FIFO_UNDERRUN;
if (enable) if (enable)
ironlake_enable_display_irq(dev_priv, bit); ilk_enable_display_irq(dev_priv, bit);
else else
ironlake_disable_display_irq(dev_priv, bit); ilk_disable_display_irq(dev_priv, bit);
}
static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
uint32_t err_int = I915_READ(GEN7_ERR_INT);
assert_spin_locked(&dev_priv->irq_lock);
if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
return;
I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
POSTING_READ(GEN7_ERR_INT);
DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
} }
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
@ -161,9 +161,9 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
if (!ivb_can_enable_err_int(dev)) if (!ivb_can_enable_err_int(dev))
return; return;
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
} else { } else {
ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
if (old && if (old &&
I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) { I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
@ -178,14 +178,10 @@ static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
assert_spin_locked(&dev_priv->irq_lock);
if (enable) if (enable)
dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN; bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
else else
dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN; bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
} }
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
@ -202,6 +198,24 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
ibx_disable_display_interrupt(dev_priv, bit); ibx_disable_display_interrupt(dev_priv, bit);
} }
static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder pch_transcoder = (enum transcoder) crtc->pipe;
uint32_t serr_int = I915_READ(SERR_INT);
assert_spin_locked(&dev_priv->irq_lock);
if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
return;
I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
POSTING_READ(SERR_INT);
DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
transcoder_name(pch_transcoder));
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder, enum transcoder pch_transcoder,
bool enable, bool old) bool enable, bool old)
@ -375,3 +389,56 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
DRM_ERROR("PCH transcoder %c FIFO underrun\n", DRM_ERROR("PCH transcoder %c FIFO underrun\n",
transcoder_name(pch_transcoder)); transcoder_name(pch_transcoder));
} }
/**
* intel_check_cpu_fifo_underruns - check for CPU fifo underruns immediately
* @dev_priv: i915 device instance
*
* Check for CPU fifo underruns immediately. Useful on IVB/HSW where the shared
* error interrupt may have been disabled, and so CPU fifo underruns won't
* necessarily raise an interrupt, and on GMCH platforms where underruns never
* raise an interrupt.
*/
void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;
spin_lock_irq(&dev_priv->irq_lock);
for_each_intel_crtc(dev_priv->dev, crtc) {
if (crtc->cpu_fifo_underrun_disabled)
continue;
if (HAS_GMCH_DISPLAY(dev_priv))
i9xx_check_fifo_underruns(crtc);
else if (IS_GEN7(dev_priv))
ivybridge_check_fifo_underruns(crtc);
}
spin_unlock_irq(&dev_priv->irq_lock);
}
/**
* intel_check_pch_fifo_underruns - check for PCH fifo underruns immediately
* @dev_priv: i915 device instance
*
* Check for PCH fifo underruns immediately. Useful on CPT/PPT where the shared
* error interrupt may have been disabled, and so PCH fifo underruns won't
* necessarily raise an interrupt.
*/
void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;
spin_lock_irq(&dev_priv->irq_lock);
for_each_intel_crtc(dev_priv->dev, crtc) {
if (crtc->pch_fifo_underrun_disabled)
continue;
if (HAS_PCH_CPT(dev_priv))
cpt_check_pch_fifo_underruns(crtc);
}
spin_unlock_irq(&dev_priv->irq_lock);
}

View File

@ -42,8 +42,6 @@ struct i915_guc_client {
uint32_t wq_offset; uint32_t wq_offset;
uint32_t wq_size; uint32_t wq_size;
spinlock_t wq_lock; /* Protects all data below */
uint32_t wq_tail; uint32_t wq_tail;
/* GuC submission statistics & status */ /* GuC submission statistics & status */
@ -76,11 +74,17 @@ struct intel_guc_fw {
uint16_t guc_fw_minor_wanted; uint16_t guc_fw_minor_wanted;
uint16_t guc_fw_major_found; uint16_t guc_fw_major_found;
uint16_t guc_fw_minor_found; uint16_t guc_fw_minor_found;
uint32_t header_size;
uint32_t header_offset;
uint32_t rsa_size;
uint32_t rsa_offset;
uint32_t ucode_size;
uint32_t ucode_offset;
}; };
struct intel_guc { struct intel_guc {
struct intel_guc_fw guc_fw; struct intel_guc_fw guc_fw;
uint32_t log_flags; uint32_t log_flags;
struct drm_i915_gem_object *log_obj; struct drm_i915_gem_object *log_obj;
@ -89,8 +93,6 @@ struct intel_guc {
struct i915_guc_client *execbuf_client; struct i915_guc_client *execbuf_client;
spinlock_t host2guc_lock; /* Protects all data below */
DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS); DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS);
uint32_t db_cacheline; /* Cyclic counter mod pagesize */ uint32_t db_cacheline; /* Cyclic counter mod pagesize */

View File

@ -122,6 +122,78 @@
#define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1) #define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1)
/**
* DOC: GuC Firmware Layout
*
* The GuC firmware layout looks like this:
*
* +-------------------------------+
* | guc_css_header |
* | contains major/minor version |
* +-------------------------------+
* | uCode |
* +-------------------------------+
* | RSA signature |
* +-------------------------------+
* | modulus key |
* +-------------------------------+
* | exponent val |
* +-------------------------------+
*
* The firmware may or may not contain the modulus key and exponent data. The
* header, uCode and RSA signature are mandatory components used by the
* driver. The length of each component, in dwords, is recorded in the header.
* If the modulus and exponent are absent from the firmware (a truncated
* image), their length values still appear in the header.
*
* The driver performs basic firmware size validation based on these rules:
*
* 1. The header, uCode and RSA signature are mandatory components.
* 2. All firmware components, if present, appear in the sequence shown in
* the layout table above.
* 3. The length of each component, in dwords, is recorded in the header.
* 4. The modulus and exponent key are not required by the driver; they may
* be absent from the firmware, in which case the driver loads a truncated
* image.
*/
struct guc_css_header {
uint32_t module_type;
/* header_size includes all non-uCode bits, including css_header, rsa
* key, modulus key and exponent data. */
uint32_t header_size_dw;
uint32_t header_version;
uint32_t module_id;
uint32_t module_vendor;
union {
struct {
uint8_t day;
uint8_t month;
uint16_t year;
};
uint32_t date;
};
uint32_t size_dw; /* uCode plus header_size_dw */
uint32_t key_size_dw;
uint32_t modulus_size_dw;
uint32_t exponent_size_dw;
union {
struct {
uint8_t hour;
uint8_t min;
uint16_t sec;
};
uint32_t time;
};
char username[8];
char buildnumber[12];
uint32_t device_id;
uint32_t guc_sw_version;
uint32_t prod_preprod_fw;
uint32_t reserved[12];
uint32_t header_info;
} __packed;
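/*
 * Layout arithmetic sketch (editor's note): how the loader derives component
 * offsets and sizes from a guc_css_header, mirroring the validation code in
 * guc_fw_fetch() later in this commit. All *_dw fields count dwords; the
 * function name is invented for illustration.
 */
static void example_guc_fw_layout(const struct guc_css_header *css,
				  struct intel_guc_fw *guc_fw)
{
	/* the CSS header always starts the image */
	guc_fw->header_offset = 0;
	guc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
			       css->key_size_dw - css->exponent_size_dw) *
			      sizeof(u32);

	/* uCode follows the header; size_dw covers header plus uCode */
	guc_fw->ucode_offset = guc_fw->header_offset + guc_fw->header_size;
	guc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);

	/* the RSA signature follows the uCode */
	guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
	guc_fw->rsa_size = css->key_size_dw * sizeof(u32);
}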
struct guc_doorbell_info { struct guc_doorbell_info {
u32 db_status; u32 db_status;
u32 cookie; u32 cookie;

View File

@ -27,12 +27,11 @@
* Alex Dai <yu.dai@intel.com> * Alex Dai <yu.dai@intel.com>
*/ */
#include <linux/firmware.h> #include <linux/firmware.h>
#include "intel_drv.h"
#include "i915_drv.h" #include "i915_drv.h"
#include "intel_guc.h" #include "intel_guc.h"
/** /**
* DOC: GuC * DOC: GuC-specific firmware loader
* *
* intel_guc: * intel_guc:
* Top level structure of guc. It handles firmware loading and manages client * Top level structure of guc. It handles firmware loading and manages client
@ -209,16 +208,6 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
/* /*
* Transfer the firmware image to RAM for execution by the microcontroller. * Transfer the firmware image to RAM for execution by the microcontroller.
* *
* GuC Firmware layout:
* +-------------------------------+ ----
* | CSS header | 128B
* | contains major/minor version |
* +-------------------------------+ ----
* | uCode |
* +-------------------------------+ ----
* | RSA signature | 256B
* +-------------------------------+ ----
*
* Architecturally, the DMA engine is bidirectional, and can potentially even * Architecturally, the DMA engine is bidirectional, and can potentially even
* transfer between GTT locations. This functionality is left out of the API * transfer between GTT locations. This functionality is left out of the API
* for now as there is no need for it. * for now as there is no need for it.
@ -226,33 +215,29 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
* Note that GuC needs the CSS header plus uKernel code to be copied by the * Note that GuC needs the CSS header plus uKernel code to be copied by the
* DMA engine in one operation, whereas the RSA signature is loaded via MMIO. * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
*/ */
#define UOS_CSS_HEADER_OFFSET 0
#define UOS_VER_MINOR_OFFSET 0x44
#define UOS_VER_MAJOR_OFFSET 0x46
#define UOS_CSS_HEADER_SIZE 0x80
#define UOS_RSA_SIG_SIZE 0x100
static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv) static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
{ {
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj; struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
unsigned long offset; unsigned long offset;
struct sg_table *sg = fw_obj->pages; struct sg_table *sg = fw_obj->pages;
u32 status, ucode_size, rsa[UOS_RSA_SIG_SIZE / sizeof(u32)]; u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
int i, ret = 0; int i, ret = 0;
/* uCode size, also is where RSA signature starts */ /* where RSA signature starts */
offset = ucode_size = guc_fw->guc_fw_size - UOS_RSA_SIG_SIZE; offset = guc_fw->rsa_offset;
I915_WRITE(DMA_COPY_SIZE, ucode_size);
/* Copy RSA signature from the fw image to HW for verification */ /* Copy RSA signature from the fw image to HW for verification */
sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, UOS_RSA_SIG_SIZE, offset); sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), offset);
for (i = 0; i < UOS_RSA_SIG_SIZE / sizeof(u32); i++) for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]); I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
/* The header plus uCode will be copied to WOPCM via DMA, excluding any
* other components */
I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
/* Set the source address for the new blob */ /* Set the source address for the new blob */
offset = i915_gem_obj_ggtt_offset(fw_obj); offset = i915_gem_obj_ggtt_offset(fw_obj) + guc_fw->header_offset;
I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
@ -323,8 +308,8 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE); I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
/* WaDisableMinuteIaClockGating:skl,bxt */ /* WaDisableMinuteIaClockGating:skl,bxt */
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) || if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) { IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) & I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
~GUC_ENABLE_MIA_CLOCK_GATING)); ~GUC_ENABLE_MIA_CLOCK_GATING));
} }
@ -379,6 +364,9 @@ int intel_guc_ucode_load(struct drm_device *dev)
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
int err = 0; int err = 0;
if (!i915.enable_guc_submission)
return 0;
DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n", DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
@ -458,10 +446,8 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
{ {
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
const struct firmware *fw; const struct firmware *fw;
const u8 *css_header; struct guc_css_header *css;
const size_t minsize = UOS_CSS_HEADER_SIZE + UOS_RSA_SIG_SIZE; size_t size;
const size_t maxsize = GUC_WOPCM_SIZE_VALUE + UOS_RSA_SIG_SIZE
- 0x8000; /* 32k reserved (8K stack + 24k context) */
int err; int err;
DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n", DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
@ -475,12 +461,52 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n", DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
guc_fw->guc_fw_path, fw); guc_fw->guc_fw_path, fw);
DRM_DEBUG_DRIVER("firmware file size %zu (minimum %zu, maximum %zu)\n",
fw->size, minsize, maxsize);
/* Check the size of the blob befoe examining buffer contents */ /* Check the size of the blob before examining buffer contents */
if (fw->size < minsize || fw->size > maxsize) if (fw->size < sizeof(struct guc_css_header)) {
DRM_ERROR("Firmware header is missing\n");
goto fail; goto fail;
}
css = (struct guc_css_header *)fw->data;
/* Firmware bits always start from header */
guc_fw->header_offset = 0;
guc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
if (guc_fw->header_size != sizeof(struct guc_css_header)) {
DRM_ERROR("CSS header definition mismatch\n");
goto fail;
}
/* then, uCode */
guc_fw->ucode_offset = guc_fw->header_offset + guc_fw->header_size;
guc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
/* now RSA */
if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
DRM_ERROR("RSA key size is bad\n");
goto fail;
}
guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
guc_fw->rsa_size = css->key_size_dw * sizeof(u32);
/* The image must contain at least the header, uCode and RSA; check their combined size. */
size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
if (fw->size < size) {
DRM_ERROR("Missing firmware components\n");
goto fail;
}
/* The header and uCode are loaded into WOPCM; check their combined size. */
size = guc_fw->header_size + guc_fw->ucode_size;
/* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) {
DRM_ERROR("Firmware is too large to fit in WOPCM\n");
goto fail;
}
/* /*
* The GuC firmware image has the version number embedded at a well-known * The GuC firmware image has the version number embedded at a well-known
@ -488,9 +514,8 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
* TWO bytes each (i.e. u16), although all pointers and offsets are defined * TWO bytes each (i.e. u16), although all pointers and offsets are defined
* in terms of bytes (u8). * in terms of bytes (u8).
*/ */
css_header = fw->data + UOS_CSS_HEADER_OFFSET; guc_fw->guc_fw_major_found = css->guc_sw_version >> 16;
guc_fw->guc_fw_major_found = *(u16 *)(css_header + UOS_VER_MAJOR_OFFSET); guc_fw->guc_fw_minor_found = css->guc_sw_version & 0xFFFF;
guc_fw->guc_fw_minor_found = *(u16 *)(css_header + UOS_VER_MINOR_OFFSET);
if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted || if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) { guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
@ -567,6 +592,9 @@ void intel_guc_ucode_init(struct drm_device *dev)
fw_path = ""; /* unknown device */ fw_path = ""; /* unknown device */
} }
if (!i915.enable_guc_submission)
return;
guc_fw->guc_dev = dev; guc_fw->guc_dev = dev;
guc_fw->guc_fw_path = fw_path; guc_fw->guc_fw_path = fw_path;
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;

View File

@ -78,7 +78,7 @@ static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
case HDMI_INFOFRAME_TYPE_VENDOR: case HDMI_INFOFRAME_TYPE_VENDOR:
return VIDEO_DIP_SELECT_VENDOR; return VIDEO_DIP_SELECT_VENDOR;
default: default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); MISSING_CASE(type);
return 0; return 0;
} }
} }
@ -93,7 +93,7 @@ static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
case HDMI_INFOFRAME_TYPE_VENDOR: case HDMI_INFOFRAME_TYPE_VENDOR:
return VIDEO_DIP_ENABLE_VENDOR; return VIDEO_DIP_ENABLE_VENDOR;
default: default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); MISSING_CASE(type);
return 0; return 0;
} }
} }
@ -108,12 +108,13 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
case HDMI_INFOFRAME_TYPE_VENDOR: case HDMI_INFOFRAME_TYPE_VENDOR:
return VIDEO_DIP_ENABLE_VS_HSW; return VIDEO_DIP_ENABLE_VS_HSW;
default: default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); MISSING_CASE(type);
return 0; return 0;
} }
} }
static u32 hsw_dip_data_reg(struct drm_i915_private *dev_priv, static i915_reg_t
hsw_dip_data_reg(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum transcoder cpu_transcoder,
enum hdmi_infoframe_type type, enum hdmi_infoframe_type type,
int i) int i)
@ -126,8 +127,8 @@ static u32 hsw_dip_data_reg(struct drm_i915_private *dev_priv,
case HDMI_INFOFRAME_TYPE_VENDOR: case HDMI_INFOFRAME_TYPE_VENDOR:
return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i); return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i);
default: default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); MISSING_CASE(type);
return 0; return INVALID_MMIO_REG;
} }
} }
@ -168,10 +169,10 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(VIDEO_DIP_CTL); POSTING_READ(VIDEO_DIP_CTL);
} }
static bool g4x_infoframe_enabled(struct drm_encoder *encoder) static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{ {
struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
u32 val = I915_READ(VIDEO_DIP_CTL); u32 val = I915_READ(VIDEO_DIP_CTL);
@ -193,8 +194,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev; struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg); u32 val = I915_READ(reg);
int i;
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
@ -223,13 +225,13 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg); POSTING_READ(reg);
} }
static bool ibx_infoframe_enabled(struct drm_encoder *encoder) static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{ {
struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
u32 val = I915_READ(reg); u32 val = I915_READ(reg);
if ((val & VIDEO_DIP_ENABLE) == 0) if ((val & VIDEO_DIP_ENABLE) == 0)
@ -251,8 +253,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev; struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg); u32 val = I915_READ(reg);
int i;
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
@ -284,13 +287,12 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg); POSTING_READ(reg);
} }
static bool cpt_infoframe_enabled(struct drm_encoder *encoder) static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{ {
struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct drm_i915_private *dev_priv = dev->dev_private; enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
if ((val & VIDEO_DIP_ENABLE) == 0) if ((val & VIDEO_DIP_ENABLE) == 0)
return false; return false;
@ -308,8 +310,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev; struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg); u32 val = I915_READ(reg);
int i;
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
@ -338,14 +341,13 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg); POSTING_READ(reg);
} }
static bool vlv_infoframe_enabled(struct drm_encoder *encoder) static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{ {
struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
u32 val = I915_READ(reg); u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0) if ((val & VIDEO_DIP_ENABLE) == 0)
return false; return false;
@ -367,14 +369,12 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
u32 data_reg; i915_reg_t data_reg;
int i; int i;
u32 val = I915_READ(ctl_reg); u32 val = I915_READ(ctl_reg);
data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0); data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0);
if (data_reg == 0)
return;
val &= ~hsw_infoframe_enable(type); val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val); I915_WRITE(ctl_reg, val);
@ -396,13 +396,11 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(ctl_reg); POSTING_READ(ctl_reg);
} }
static bool hsw_infoframe_enabled(struct drm_encoder *encoder) static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{ {
struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct drm_i915_private *dev_priv = dev->dev_private; u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 val = I915_READ(ctl_reg);
return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
@ -513,7 +511,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->dev->dev_private; struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = VIDEO_DIP_CTL; i915_reg_t reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg); u32 val = I915_READ(reg);
u32 port = VIDEO_DIP_PORT(intel_dig_port->port); u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
@ -633,11 +631,12 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
{ {
struct drm_i915_private *dev_priv = encoder->dev->dev_private; struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
u32 reg, val = 0; i915_reg_t reg;
u32 val = 0;
if (HAS_DDI(dev_priv)) if (HAS_DDI(dev_priv))
reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder); reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
else if (IS_VALLEYVIEW(dev_priv)) else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe); reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
else if (HAS_PCH_SPLIT(dev_priv->dev)) else if (HAS_PCH_SPLIT(dev_priv->dev))
reg = TVIDEO_DIP_GCP(crtc->pipe); reg = TVIDEO_DIP_GCP(crtc->pipe);
@ -666,7 +665,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg); u32 val = I915_READ(reg);
u32 port = VIDEO_DIP_PORT(intel_dig_port->port); u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
@ -717,7 +716,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->dev->dev_private; struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg); u32 val = I915_READ(reg);
assert_hdmi_port_disabled(intel_hdmi); assert_hdmi_port_disabled(intel_hdmi);
@ -760,7 +759,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg); u32 val = I915_READ(reg);
u32 port = VIDEO_DIP_PORT(intel_dig_port->port); u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
@ -811,7 +810,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->dev->dev_private; struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder); i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 val = I915_READ(reg); u32 val = I915_READ(reg);
assert_hdmi_port_disabled(intel_hdmi); assert_hdmi_port_disabled(intel_hdmi);
@ -881,15 +880,18 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
enum intel_display_power_domain power_domain; enum intel_display_power_domain power_domain;
u32 tmp; u32 tmp;
bool ret;
power_domain = intel_display_port_power_domain(encoder); power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_is_enabled(dev_priv, power_domain)) if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false; return false;
ret = false;
tmp = I915_READ(intel_hdmi->hdmi_reg); tmp = I915_READ(intel_hdmi->hdmi_reg);
if (!(tmp & SDVO_ENABLE)) if (!(tmp & SDVO_ENABLE))
return false; goto out;
if (HAS_PCH_CPT(dev)) if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp); *pipe = PORT_TO_PIPE_CPT(tmp);
@ -898,7 +900,12 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
else else
*pipe = PORT_TO_PIPE(tmp); *pipe = PORT_TO_PIPE(tmp);
return true; ret = true;
out:
intel_display_power_put(dev_priv, power_domain);
return ret;
} }
static void intel_hdmi_get_config(struct intel_encoder *encoder, static void intel_hdmi_get_config(struct intel_encoder *encoder,
@ -925,7 +932,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_MODE_SELECT_HDMI) if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true; pipe_config->has_hdmi_sink = true;
if (intel_hdmi->infoframe_enabled(&encoder->base)) if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
pipe_config->has_infoframe = true; pipe_config->has_infoframe = true;
if (tmp & SDVO_AUDIO_ENABLE) if (tmp & SDVO_AUDIO_ENABLE)
@ -1108,6 +1115,13 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
* matching DP port to be enabled on transcoder A. * matching DP port to be enabled on transcoder A.
*/ */
if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) { if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
*/
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
temp &= ~SDVO_PIPE_B_SELECT; temp &= ~SDVO_PIPE_B_SELECT;
temp |= SDVO_ENABLE; temp |= SDVO_ENABLE;
/* /*
@ -1122,6 +1136,10 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
temp &= ~SDVO_ENABLE; temp &= ~SDVO_ENABLE;
I915_WRITE(intel_hdmi->hdmi_reg, temp); I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg); POSTING_READ(intel_hdmi->hdmi_reg);
intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
} }
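/*
* Pattern note (illustrative): the workaround above is bracketed by
* turning off CPU/PCH FIFO underrun reporting on pipe A, waiting out a
* vblank after the final register write, and then re-enabling the
* reporting, so the expected spurious underruns never hit the logs.
*/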
intel_hdmi->set_infoframes(&encoder->base, false, NULL); intel_hdmi->set_infoframes(&encoder->base, false, NULL);
@ -1331,13 +1349,14 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
} }
static bool static bool
intel_hdmi_set_edid(struct drm_connector *connector) intel_hdmi_set_edid(struct drm_connector *connector, bool force)
{ {
struct drm_i915_private *dev_priv = to_i915(connector->dev); struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct edid *edid; struct edid *edid = NULL;
bool connected = false; bool connected = false;
if (force) {
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
edid = drm_get_edid(connector, edid = drm_get_edid(connector,
@ -1345,6 +1364,7 @@ intel_hdmi_set_edid(struct drm_connector *connector)
intel_hdmi->ddc_bus)); intel_hdmi->ddc_bus));
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
}
to_intel_connector(connector)->detect_edid = edid; to_intel_connector(connector)->detect_edid = edid;
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
@ -1370,16 +1390,37 @@ static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force) intel_hdmi_detect(struct drm_connector *connector, bool force)
{ {
enum drm_connector_status status; enum drm_connector_status status;
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct drm_i915_private *dev_priv = to_i915(connector->dev); struct drm_i915_private *dev_priv = to_i915(connector->dev);
bool live_status = false;
unsigned int try;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name); connector->base.id, connector->name);
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
for (try = 0; !live_status && try < 9; try++) {
if (try)
msleep(10);
live_status = intel_digital_port_connected(dev_priv,
hdmi_to_dig_port(intel_hdmi));
}
if (!live_status) {
DRM_DEBUG_KMS("HDMI live status down\n");
/*
* Live status register is not reliable on all Intel platforms.
* So consider live_status only on platforms where it is reliable;
* on the others, read the EDID to determine whether a sink is present.
*/
if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
live_status = true;
}
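/*
* Hypothetical helper, not in this diff, expressing the same platform
* predicate as the fallback above: trust the live-status register only
* on gen7+ parts other than Ivybridge; elsewhere let the EDID read
* decide whether a sink is present.
*/
static bool example_hdmi_live_status_reliable(struct drm_i915_private *dev_priv)
{
return INTEL_INFO(dev_priv)->gen >= 7 && !IS_IVYBRIDGE(dev_priv);
}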
intel_hdmi_unset_edid(connector); intel_hdmi_unset_edid(connector);
if (intel_hdmi_set_edid(connector)) { if (intel_hdmi_set_edid(connector, live_status)) {
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI; hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
@ -1405,7 +1446,7 @@ intel_hdmi_force(struct drm_connector *connector)
if (connector->status != connector_status_connected) if (connector->status != connector_status_connected)
return; return;
intel_hdmi_set_edid(connector); intel_hdmi_set_edid(connector, true);
hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI; hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
} }
@ -1997,50 +2038,6 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE; intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
} }
static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
enum port port)
{
const struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
u8 ddc_pin;
if (info->alternate_ddc_pin) {
DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
info->alternate_ddc_pin, port_name(port));
return info->alternate_ddc_pin;
}
switch (port) {
case PORT_B:
if (IS_BROXTON(dev_priv))
ddc_pin = GMBUS_PIN_1_BXT;
else
ddc_pin = GMBUS_PIN_DPB;
break;
case PORT_C:
if (IS_BROXTON(dev_priv))
ddc_pin = GMBUS_PIN_2_BXT;
else
ddc_pin = GMBUS_PIN_DPC;
break;
case PORT_D:
if (IS_CHERRYVIEW(dev_priv))
ddc_pin = GMBUS_PIN_DPD_CHV;
else
ddc_pin = GMBUS_PIN_DPD;
break;
default:
MISSING_CASE(port);
ddc_pin = GMBUS_PIN_DPB;
break;
}
DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
ddc_pin, port_name(port));
return ddc_pin;
}
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector) struct intel_connector *intel_connector)
{ {
@ -2050,9 +2047,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev; struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port; enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;
DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
port_name(port));
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA); DRM_MODE_CONNECTOR_HDMIA);
@ -2062,34 +2057,65 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
connector->doublescan_allowed = 0; connector->doublescan_allowed = 0;
connector->stereo_allowed = 1; connector->stereo_allowed = 1;
intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
switch (port) { switch (port) {
case PORT_B: case PORT_B:
if (IS_BROXTON(dev_priv))
intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
else
intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
/* /*
* On BXT A0/A1, sw needs to activate DDIA HPD logic and * On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection. * interrupts to check the external panel connection.
*/ */
if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0)) if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
intel_encoder->hpd_pin = HPD_PORT_A; intel_encoder->hpd_pin = HPD_PORT_A;
else else
intel_encoder->hpd_pin = HPD_PORT_B; intel_encoder->hpd_pin = HPD_PORT_B;
break; break;
case PORT_C: case PORT_C:
if (IS_BROXTON(dev_priv))
intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT;
else
intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
intel_encoder->hpd_pin = HPD_PORT_C; intel_encoder->hpd_pin = HPD_PORT_C;
break; break;
case PORT_D: case PORT_D:
if (WARN_ON(IS_BROXTON(dev_priv)))
intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED;
else if (IS_CHERRYVIEW(dev_priv))
intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV;
else
intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
intel_encoder->hpd_pin = HPD_PORT_D; intel_encoder->hpd_pin = HPD_PORT_D;
break; break;
case PORT_E: case PORT_E:
/* On SKL PORT E doesn't have a separate GMBUS pin.
* We rely on VBT to set a proper alternate GMBUS pin. */
alternate_ddc_pin =
dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
switch (alternate_ddc_pin) {
case DDC_PIN_B:
intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
break;
case DDC_PIN_C:
intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
break;
case DDC_PIN_D:
intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
break;
default:
MISSING_CASE(alternate_ddc_pin);
}
intel_encoder->hpd_pin = HPD_PORT_E; intel_encoder->hpd_pin = HPD_PORT_E;
break; break;
case PORT_A:
intel_encoder->hpd_pin = HPD_PORT_A;
/* Internal port only for eDP. */
default: default:
MISSING_CASE(port); BUG();
return;
} }
if (IS_VALLEYVIEW(dev)) { if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe; intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes; intel_hdmi->set_infoframes = vlv_set_infoframes;
intel_hdmi->infoframe_enabled = vlv_infoframe_enabled; intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
@ -2133,7 +2159,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
} }
} }
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) void intel_hdmi_init(struct drm_device *dev,
i915_reg_t hdmi_reg, enum port port)
{ {
struct intel_digital_port *intel_dig_port; struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder; struct intel_encoder *intel_encoder;
@ -2152,7 +2179,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
intel_encoder = &intel_dig_port->base; intel_encoder = &intel_dig_port->base;
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS); DRM_MODE_ENCODER_TMDS, NULL);
intel_encoder->compute_config = intel_hdmi_compute_config; intel_encoder->compute_config = intel_hdmi_compute_config;
if (HAS_PCH_SPLIT(dev)) { if (HAS_PCH_SPLIT(dev)) {
@ -2204,7 +2231,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
intel_dig_port->port = port; intel_dig_port->port = port;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg; intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
intel_dig_port->dp.output_reg = 0; intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_hdmi_init_connector(intel_dig_port, intel_connector); intel_hdmi_init_connector(intel_dig_port, intel_connector);
} }
View File
@ -407,7 +407,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
* hotplug bits itself. So only WARN about unexpected * hotplug bits itself. So only WARN about unexpected
* interrupts on saner platforms. * interrupts on saner platforms.
*/ */
WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), WARN_ONCE(!HAS_GMCH_DISPLAY(dev),
"Received HPD interrupt on pin %d although disabled\n", i); "Received HPD interrupt on pin %d although disabled\n", i);
continue; continue;
} }
View File
@ -36,7 +36,7 @@
struct gmbus_pin { struct gmbus_pin {
const char *name; const char *name;
int reg; i915_reg_t reg;
}; };
/* Map gmbus pin pairs to names and registers. */ /* Map gmbus pin pairs to names and registers. */
@ -63,9 +63,9 @@ static const struct gmbus_pin gmbus_pins_skl[] = {
}; };
static const struct gmbus_pin gmbus_pins_bxt[] = { static const struct gmbus_pin gmbus_pins_bxt[] = {
[GMBUS_PIN_1_BXT] = { "dpb", PCH_GPIOB }, [GMBUS_PIN_1_BXT] = { "dpb", GPIOB },
[GMBUS_PIN_2_BXT] = { "dpc", PCH_GPIOC }, [GMBUS_PIN_2_BXT] = { "dpc", GPIOC },
[GMBUS_PIN_3_BXT] = { "misc", PCH_GPIOD }, [GMBUS_PIN_3_BXT] = { "misc", GPIOD },
}; };
/* pin is expected to be valid */ /* pin is expected to be valid */
@ -74,7 +74,7 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
{ {
if (IS_BROXTON(dev_priv)) if (IS_BROXTON(dev_priv))
return &gmbus_pins_bxt[pin]; return &gmbus_pins_bxt[pin];
else if (IS_SKYLAKE(dev_priv)) else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return &gmbus_pins_skl[pin]; return &gmbus_pins_skl[pin];
else if (IS_BROADWELL(dev_priv)) else if (IS_BROADWELL(dev_priv))
return &gmbus_pins_bdw[pin]; return &gmbus_pins_bdw[pin];
@ -89,14 +89,15 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
if (IS_BROXTON(dev_priv)) if (IS_BROXTON(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bxt); size = ARRAY_SIZE(gmbus_pins_bxt);
else if (IS_SKYLAKE(dev_priv)) else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
size = ARRAY_SIZE(gmbus_pins_skl); size = ARRAY_SIZE(gmbus_pins_skl);
else if (IS_BROADWELL(dev_priv)) else if (IS_BROADWELL(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bdw); size = ARRAY_SIZE(gmbus_pins_bdw);
else else
size = ARRAY_SIZE(gmbus_pins); size = ARRAY_SIZE(gmbus_pins);
return pin < size && get_gmbus_pin(dev_priv, pin)->reg; return pin < size &&
i915_mmio_reg_valid(get_gmbus_pin(dev_priv, pin)->reg);
} }
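/*
* Background sketch (an assumption about the i915_reg_t helpers this
* commit adopts, simplified from i915_reg.h): wrapping the offset in a
* one-member struct makes it a distinct type, so a bare integer can no
* longer be passed where a register is expected. The example_* names
* are stand-ins for the real i915_reg_t/_MMIO definitions.
*/
typedef struct { u32 reg; } example_reg_t;
#define EXAMPLE_MMIO(r) ((example_reg_t){ .reg = (r) })
static inline u32 example_mmio_reg_offset(example_reg_t r)
{
return r.reg;
}
static inline bool example_mmio_reg_valid(example_reg_t r)
{
return r.reg != 0; /* INVALID_MMIO_REG is _MMIO(0) */
}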
/* Intel GPIO access functions */ /* Intel GPIO access functions */
@ -240,9 +241,8 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)
algo = &bus->bit_algo; algo = &bus->bit_algo;
bus->gpio_reg = dev_priv->gpio_mmio_base + bus->gpio_reg = _MMIO(dev_priv->gpio_mmio_base +
get_gmbus_pin(dev_priv, pin)->reg; i915_mmio_reg_offset(get_gmbus_pin(dev_priv, pin)->reg));
bus->adapter.algo_data = algo; bus->adapter.algo_data = algo;
algo->setsda = set_data; algo->setsda = set_data;
algo->setscl = set_clock; algo->setscl = set_clock;
@ -472,9 +472,7 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
} }
static int static int
gmbus_xfer(struct i2c_adapter *adapter, do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
struct i2c_msg *msgs,
int num)
{ {
struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus, struct intel_gmbus,
@ -483,14 +481,6 @@ gmbus_xfer(struct i2c_adapter *adapter,
int i = 0, inc, try = 0; int i = 0, inc, try = 0;
int ret = 0; int ret = 0;
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
mutex_lock(&dev_priv->gmbus_mutex);
if (bus->force_bit) {
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
goto out;
}
retry: retry:
I915_WRITE(GMBUS0, bus->reg0); I915_WRITE(GMBUS0, bus->reg0);
@ -505,17 +495,13 @@ retry:
ret = gmbus_xfer_write(dev_priv, &msgs[i]); ret = gmbus_xfer_write(dev_priv, &msgs[i]);
} }
if (!ret)
ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
GMBUS_HW_WAIT_EN);
if (ret == -ETIMEDOUT) if (ret == -ETIMEDOUT)
goto timeout; goto timeout;
if (ret == -ENXIO) else if (ret)
goto clear_err; goto clear_err;
ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
GMBUS_HW_WAIT_EN);
if (ret == -ENXIO)
goto clear_err;
if (ret)
goto timeout;
} }
/* Generate a STOP condition on the bus. Note that gmbus can't generate /* Generate a STOP condition on the bus. Note that gmbus can't generate
@ -589,13 +575,34 @@ timeout:
bus->adapter.name, bus->reg0 & 0xff); bus->adapter.name, bus->reg0 & 0xff);
I915_WRITE(GMBUS0, 0); I915_WRITE(GMBUS0, 0);
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ /*
* Hardware may not support GMBUS over these pins? Try GPIO bitbanging
* instead. Use EAGAIN to have i2c core retry.
*/
bus->force_bit = 1; bus->force_bit = 1;
ret = i2c_bit_algo.master_xfer(adapter, msgs, num); ret = -EAGAIN;
out: out:
mutex_unlock(&dev_priv->gmbus_mutex); return ret;
}
static int
gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
{
struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
int ret;
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
mutex_lock(&dev_priv->gmbus_mutex);
if (bus->force_bit)
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
else
ret = do_gmbus_xfer(adapter, msgs, num);
mutex_unlock(&dev_priv->gmbus_mutex);
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
return ret; return ret;
@ -628,12 +635,13 @@ int intel_setup_gmbus(struct drm_device *dev)
if (HAS_PCH_NOP(dev)) if (HAS_PCH_NOP(dev))
return 0; return 0;
else if (HAS_PCH_SPLIT(dev))
dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA; if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
else if (IS_VALLEYVIEW(dev))
dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
else else if (!HAS_GMCH_DISPLAY(dev_priv))
dev_priv->gpio_mmio_base = 0; dev_priv->gpio_mmio_base =
i915_mmio_reg_offset(PCH_GPIOA) -
i915_mmio_reg_offset(GPIOA);
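/*
* Worked example (register values are an assumption from i915_reg.h):
* with PCH_GPIOA at 0xc5010 and GPIOA at 0x5010 this yields
* gpio_mmio_base = 0xc0000, the PCH display offset added to every GPIO
* register on PCH-split platforms.
*/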
mutex_init(&dev_priv->gmbus_mutex); mutex_init(&dev_priv->gmbus_mutex);
init_waitqueue_head(&dev_priv->gmbus_wait_queue); init_waitqueue_head(&dev_priv->gmbus_wait_queue);
@ -656,6 +664,12 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->adapter.algo = &gmbus_algorithm; bus->adapter.algo = &gmbus_algorithm;
/*
* We wish to retry with bit-banging
* after a timed-out GMBUS attempt.
*/
bus->adapter.retries = 1;
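/*
* Flow note (illustrative): on a GMBUS timeout do_gmbus_xfer() sets
* bus->force_bit and returns -EAGAIN; with adapter.retries == 1 the
* i2c core calls gmbus_xfer() once more, and that second pass takes
* the i2c_bit_algo.master_xfer() bit-banging path under gmbus_mutex.
*/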
/* By default use a conservative clock rate */ /* By default use a conservative clock rate */
bus->reg0 = pin | GMBUS_RATE_100KHZ; bus->reg0 = pin | GMBUS_RATE_100KHZ;
View File
@ -134,7 +134,7 @@
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/i915_drm.h> #include <drm/i915_drm.h>
#include "intel_drv.h" #include "i915_drv.h"
#include "intel_mocs.h" #include "intel_mocs.h"
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE) #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
@ -190,16 +190,21 @@
#define GEN8_CTX_L3LLC_COHERENT (1<<5) #define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8) #define GEN8_CTX_PRIVILEGE (1<<8)
#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \ #define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
(reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
(reg_state)[(pos)+1] = (val); \
} while (0)
#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \ const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \ reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \ reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
} } while (0)
#define ASSIGN_CTX_PML4(ppgtt, reg_state) { \ #define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \ reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \ reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
} } while (0)
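/*
* Illustrative example of what the do { ... } while (0) wrapping above
* buys: with a bare brace block, the ';' after the macro call would
* detach a trailing else, whereas the do/while form is one statement:
*
* if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
* ASSIGN_CTX_PML4(ppgtt, reg_state);
* else
* ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
*/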
enum { enum {
ADVANCED_CONTEXT = 0, ADVANCED_CONTEXT = 0,
@ -284,8 +289,8 @@ static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
{ {
struct drm_device *dev = ring->dev; struct drm_device *dev = ring->dev;
return ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) || return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) && IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
(ring->id == VCS || ring->id == VCS2); (ring->id == VCS || ring->id == VCS2);
} }
@ -367,7 +372,7 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj)); WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
WARN_ON(!i915_gem_obj_is_pinned(rb_obj)); WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page); reg_state = kmap_atomic(page);
reg_state[CTX_RING_TAIL+1] = rq->tail; reg_state[CTX_RING_TAIL+1] = rq->tail;
@ -921,7 +926,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
intel_logical_ring_emit(ringbuf, MI_NOOP); intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1)); intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
intel_logical_ring_emit(ringbuf, INSTPM); intel_logical_ring_emit_reg(ringbuf, INSTPM);
intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode); intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
intel_logical_ring_advance(ringbuf); intel_logical_ring_advance(ringbuf);
@ -1096,7 +1101,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count)); intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) { for (i = 0; i < w->count; i++) {
intel_logical_ring_emit(ringbuf, w->reg[i].addr); intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
intel_logical_ring_emit(ringbuf, w->reg[i].value); intel_logical_ring_emit(ringbuf, w->reg[i].value);
} }
intel_logical_ring_emit(ringbuf, MI_NOOP); intel_logical_ring_emit(ringbuf, MI_NOOP);
@ -1120,6 +1125,8 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
batch[__index] = (cmd); \ batch[__index] = (cmd); \
} while (0) } while (0)
#define wa_ctx_emit_reg(batch, index, reg) \
wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
/* /*
* In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
@ -1149,17 +1156,17 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
* this batch updates GEN8_L3SQCREG4 with default value we need to * this batch updates GEN8_L3SQCREG4 with default value we need to
* set this bit here to retain the WA during flush. * set this bit here to retain the WA during flush.
*/ */
if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0) if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0))
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT)); MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit(batch, index, GEN8_L3SQCREG4); wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
wa_ctx_emit(batch, index, 0); wa_ctx_emit(batch, index, 0);
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
wa_ctx_emit(batch, index, GEN8_L3SQCREG4); wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
wa_ctx_emit(batch, index, l3sqc4_flush); wa_ctx_emit(batch, index, l3sqc4_flush);
wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
@ -1172,7 +1179,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 | wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT)); MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit(batch, index, GEN8_L3SQCREG4); wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
wa_ctx_emit(batch, index, 0); wa_ctx_emit(batch, index, 0);
@ -1314,8 +1321,8 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaDisableCtxRestoreArbitration:skl,bxt */ /* WaDisableCtxRestoreArbitration:skl,bxt */
if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) || if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
(IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) IS_BXT_REVID(dev, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */ /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@ -1340,18 +1347,18 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_B0)) || if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
(IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) { IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
wa_ctx_emit(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0); wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
wa_ctx_emit(batch, index, wa_ctx_emit(batch, index,
_MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING)); _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
wa_ctx_emit(batch, index, MI_NOOP); wa_ctx_emit(batch, index, MI_NOOP);
} }
/* WaDisableCtxRestoreArbitration:skl,bxt */ /* WaDisableCtxRestoreArbitration:skl,bxt */
if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) || if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
(IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) IS_BXT_REVID(dev, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@ -1418,7 +1425,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring)
return ret; return ret;
} }
page = i915_gem_object_get_page(wa_ctx->obj, 0); page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
batch = kmap_atomic(page); batch = kmap_atomic(page);
offset = 0; offset = 0;
@ -1472,12 +1479,6 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
if (ring->status_page.obj) {
I915_WRITE(RING_HWS_PGA(ring->mmio_base),
(u32)ring->status_page.gfx_addr);
POSTING_READ(RING_HWS_PGA(ring->mmio_base));
}
I915_WRITE(RING_MODE_GEN7(ring), I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
@ -1562,9 +1563,9 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) { for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i)); intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i));
intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr)); intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i)); intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i));
intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr)); intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
} }
@ -1894,8 +1895,10 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
dev_priv = ring->dev->dev_private; dev_priv = ring->dev->dev_private;
if (ring->buffer) {
intel_logical_ring_stop(ring); intel_logical_ring_stop(ring);
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
}
if (ring->cleanup) if (ring->cleanup)
ring->cleanup(ring); ring->cleanup(ring);
@ -1909,6 +1912,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
} }
lrc_destroy_wa_ctx_obj(ring); lrc_destroy_wa_ctx_obj(ring);
ring->dev = NULL;
} }
static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
@ -1924,17 +1928,18 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
i915_gem_batch_pool_init(dev, &ring->batch_pool); i915_gem_batch_pool_init(dev, &ring->batch_pool);
init_waitqueue_head(&ring->irq_queue); init_waitqueue_head(&ring->irq_queue);
INIT_LIST_HEAD(&ring->buffers);
INIT_LIST_HEAD(&ring->execlist_queue); INIT_LIST_HEAD(&ring->execlist_queue);
INIT_LIST_HEAD(&ring->execlist_retired_req_list); INIT_LIST_HEAD(&ring->execlist_retired_req_list);
spin_lock_init(&ring->execlist_lock); spin_lock_init(&ring->execlist_lock);
ret = i915_cmd_parser_init_ring(ring); ret = i915_cmd_parser_init_ring(ring);
if (ret) if (ret)
return ret; goto error;
ret = intel_lr_context_deferred_alloc(ring->default_context, ring); ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
if (ret) if (ret)
return ret; goto error;
/* As this is the default context, always pin it */ /* As this is the default context, always pin it */
ret = intel_lr_context_do_pin( ret = intel_lr_context_do_pin(
@ -1945,9 +1950,13 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
DRM_ERROR( DRM_ERROR(
"Failed to pin and map ringbuffer %s: %d\n", "Failed to pin and map ringbuffer %s: %d\n",
ring->name, ret); ring->name, ret);
return ret; goto error;
} }
return 0;
error:
intel_logical_ring_cleanup(ring);
return ret; return ret;
} }
@ -1973,7 +1982,7 @@ static int logical_render_ring_init(struct drm_device *dev)
ring->init_hw = gen8_init_render_ring; ring->init_hw = gen8_init_render_ring;
ring->init_context = gen8_init_rcs_context; ring->init_context = gen8_init_rcs_context;
ring->cleanup = intel_fini_pipe_control; ring->cleanup = intel_fini_pipe_control;
if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) { if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno; ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno; ring->set_seqno = bxt_a_set_seqno;
} else { } else {
@ -2025,7 +2034,7 @@ static int logical_bsd_ring_init(struct drm_device *dev)
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
ring->init_hw = gen8_init_common_ring; ring->init_hw = gen8_init_common_ring;
if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) { if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno; ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno; ring->set_seqno = bxt_a_set_seqno;
} else { } else {
@ -2080,7 +2089,7 @@ static int logical_blt_ring_init(struct drm_device *dev)
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT; GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
ring->init_hw = gen8_init_common_ring; ring->init_hw = gen8_init_common_ring;
if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) { if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno; ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno; ring->set_seqno = bxt_a_set_seqno;
} else { } else {
@ -2110,7 +2119,7 @@ static int logical_vebox_ring_init(struct drm_device *dev)
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT; GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
ring->init_hw = gen8_init_common_ring; ring->init_hw = gen8_init_common_ring;
if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) { if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno; ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno; ring->set_seqno = bxt_a_set_seqno;
} else { } else {
@ -2256,7 +2265,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
/* The second page of the context object contains some fields which must /* The second page of the context object contains some fields which must
* be set up prior to the first execution. */ * be set up prior to the first execution. */
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page); reg_state = kmap_atomic(page);
/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
@ -2264,46 +2273,31 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
* only for the first context restore: on a subsequent save, the GPU will * only for the first context restore: on a subsequent save, the GPU will
* recreate this batchbuffer with new values (including all the missing * recreate this batchbuffer with new values (including all the missing
* MI_LOAD_REGISTER_IMM commands that we are not initializing here). */ * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
if (ring->id == RCS) reg_state[CTX_LRI_HEADER_0] =
reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14); MI_LOAD_REGISTER_IMM(ring->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
else ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
reg_state[CTX_CONTEXT_CONTROL+1] =
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
CTX_CTRL_RS_CTX_ENABLE); CTX_CTRL_RS_CTX_ENABLE));
reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base); ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
reg_state[CTX_RING_HEAD+1] = 0; ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
reg_state[CTX_RING_TAIL+1] = 0;
reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
/* Ring buffer start address is not known until the buffer is pinned. /* Ring buffer start address is not known until the buffer is pinned.
* It is written to the context image in execlists_update_context() * It is written to the context image in execlists_update_context()
*/ */
reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base); ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, RING_START(ring->mmio_base), 0);
reg_state[CTX_RING_BUFFER_CONTROL+1] = ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, RING_CTL(ring->mmio_base),
((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID; ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168; ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, RING_BBADDR_UDW(ring->mmio_base), 0);
reg_state[CTX_BB_HEAD_U+1] = 0; ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, RING_BBADDR(ring->mmio_base), 0);
reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140; ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, RING_BBSTATE(ring->mmio_base),
reg_state[CTX_BB_HEAD_L+1] = 0; RING_BB_PPGTT);
reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110; ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(ring->mmio_base), 0);
reg_state[CTX_BB_STATE+1] = (1<<5); ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(ring->mmio_base), 0);
reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c; ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, RING_SBBSTATE(ring->mmio_base), 0);
reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
reg_state[CTX_SECOND_BB_STATE+1] = 0;
if (ring->id == RCS) { if (ring->id == RCS) {
reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0; ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(ring->mmio_base), 0);
reg_state[CTX_BB_PER_CTX_PTR+1] = 0; ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(ring->mmio_base), 0);
reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4; ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(ring->mmio_base), 0);
reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
if (ring->wa_ctx.obj) { if (ring->wa_ctx.obj) {
struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx; struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj); uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
@ -2320,18 +2314,17 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
0x01; 0x01;
} }
} }
reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9); reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED; ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(ring->mmio_base), 0);
reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8; /* PDP values will be assigned later if needed */
reg_state[CTX_CTX_TIMESTAMP+1] = 0; ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(ring, 3), 0);
reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3); ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(ring, 3), 0);
reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3); ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(ring, 2), 0);
reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2); ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(ring, 2), 0);
reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2); ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(ring, 1), 0);
reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1); ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(ring, 1), 0);
reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1); ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0);
reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0); ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0);
reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
/* 64b PPGTT (48bit canonical) /* 64b PPGTT (48bit canonical)
@ -2353,14 +2346,11 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
if (ring->id == RCS) { if (ring->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE; ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
reg_state[CTX_R_PWR_CLK_STATE+1] = make_rpcs(dev); make_rpcs(dev));
} }
kunmap_atomic(reg_state); kunmap_atomic(reg_state);
ctx_obj->dirty = 1;
set_page_dirty(page);
i915_gem_object_unpin_pages(ctx_obj); i915_gem_object_unpin_pages(ctx_obj);
return 0; return 0;
@ -2544,7 +2534,7 @@ void intel_lr_context_reset(struct drm_device *dev,
WARN(1, "Failed get_pages for context obj\n"); WARN(1, "Failed get_pages for context obj\n");
continue; continue;
} }
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page); reg_state = kmap_atomic(page);
reg_state[CTX_RING_HEAD+1] = 0; reg_state[CTX_RING_HEAD+1] = 0;
View File
@ -29,16 +29,16 @@
#define GEN8_CSB_PTR_MASK 0x07 #define GEN8_CSB_PTR_MASK 0x07
/* Execlists regs */ /* Execlists regs */
#define RING_ELSP(ring) ((ring)->mmio_base+0x230) #define RING_ELSP(ring) _MMIO((ring)->mmio_base + 0x230)
#define RING_EXECLIST_STATUS_LO(ring) ((ring)->mmio_base+0x234) #define RING_EXECLIST_STATUS_LO(ring) _MMIO((ring)->mmio_base + 0x234)
#define RING_EXECLIST_STATUS_HI(ring) ((ring)->mmio_base+0x234 + 4) #define RING_EXECLIST_STATUS_HI(ring) _MMIO((ring)->mmio_base + 0x234 + 4)
#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244) #define RING_CONTEXT_CONTROL(ring) _MMIO((ring)->mmio_base + 0x244)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3) #define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0) #define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
#define CTX_CTRL_RS_CTX_ENABLE (1 << 1) #define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
#define RING_CONTEXT_STATUS_BUF_LO(ring, i) ((ring)->mmio_base+0x370 + (i) * 8) #define RING_CONTEXT_STATUS_BUF_LO(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8)
#define RING_CONTEXT_STATUS_BUF_HI(ring, i) ((ring)->mmio_base+0x370 + (i) * 8 + 4) #define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0) #define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0)
/* Logical Rings */ /* Logical Rings */
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request); int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
@ -70,6 +70,11 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
iowrite32(data, ringbuf->virtual_start + ringbuf->tail); iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4; ringbuf->tail += 4;
} }
static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
i915_reg_t reg)
{
intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
}
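/*
* Usage sketch, mirroring the call sites converted elsewhere in this
* commit: register operands go through the typed helper, immediate
* values through the plain emit.
*
* intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
* intel_logical_ring_emit_reg(ringbuf, INSTPM);
* intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
* intel_logical_ring_advance(ringbuf);
*/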
/* Logical Ring Contexts */ /* Logical Ring Contexts */
View File
@ -44,14 +44,14 @@
struct intel_lvds_connector { struct intel_lvds_connector {
struct intel_connector base; struct intel_connector base;
// struct notifier_block lid_notifier; struct notifier_block lid_notifier;
}; };
struct intel_lvds_encoder { struct intel_lvds_encoder {
struct intel_encoder base; struct intel_encoder base;
bool is_dual_link; bool is_dual_link;
u32 reg; i915_reg_t reg;
u32 a3_power; u32 a3_power;
struct intel_lvds_connector *attached_connector; struct intel_lvds_connector *attached_connector;
@ -75,22 +75,30 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
enum intel_display_power_domain power_domain; enum intel_display_power_domain power_domain;
u32 tmp; u32 tmp;
bool ret;
power_domain = intel_display_port_power_domain(encoder); power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_is_enabled(dev_priv, power_domain)) if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false; return false;
ret = false;
tmp = I915_READ(lvds_encoder->reg); tmp = I915_READ(lvds_encoder->reg);
if (!(tmp & LVDS_PORT_EN)) if (!(tmp & LVDS_PORT_EN))
return false; goto out;
if (HAS_PCH_CPT(dev)) if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp); *pipe = PORT_TO_PIPE_CPT(tmp);
else else
*pipe = PORT_TO_PIPE(tmp); *pipe = PORT_TO_PIPE(tmp);
return true; ret = true;
out:
intel_display_power_put(dev_priv, power_domain);
return ret;
} }
static void intel_lvds_get_config(struct intel_encoder *encoder, static void intel_lvds_get_config(struct intel_encoder *encoder,
@ -210,7 +218,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
struct intel_connector *intel_connector = struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base; &lvds_encoder->attached_connector->base;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, stat_reg; i915_reg_t ctl_reg, stat_reg;
if (HAS_PCH_SPLIT(dev)) { if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL; ctl_reg = PCH_PP_CONTROL;
@ -235,7 +243,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev; struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, stat_reg; i915_reg_t ctl_reg, stat_reg;
if (HAS_PCH_SPLIT(dev)) { if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL; ctl_reg = PCH_PP_CONTROL;
@ -939,7 +947,7 @@ void intel_lvds_init(struct drm_device *dev)
struct drm_display_mode *downclock_mode = NULL; struct drm_display_mode *downclock_mode = NULL;
struct edid *edid; struct edid *edid;
struct drm_crtc *crtc; struct drm_crtc *crtc;
u32 lvds_reg; i915_reg_t lvds_reg;
u32 lvds; u32 lvds;
int pipe; int pipe;
u8 pin; u8 pin;
@ -1025,7 +1033,7 @@ void intel_lvds_init(struct drm_device *dev)
DRM_MODE_CONNECTOR_LVDS); DRM_MODE_CONNECTOR_LVDS);
drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS); DRM_MODE_ENCODER_LVDS, NULL);
intel_encoder->enable = intel_enable_lvds; intel_encoder->enable = intel_enable_lvds;
intel_encoder->pre_enable = intel_pre_enable_lvds; intel_encoder->pre_enable = intel_pre_enable_lvds;
@ -1164,8 +1172,7 @@ out:
DRM_DEBUG_KMS("detected %s-link lvds configuration\n", DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single"); lvds_encoder->is_dual_link ? "dual" : "single");
lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) & lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
LVDS_A3_POWER_MASK;
drm_connector_register(connector); drm_connector_register(connector);
View File
@ -143,7 +143,7 @@ static bool get_mocs_settings(struct drm_device *dev,
{ {
bool result = false; bool result = false;
if (IS_SKYLAKE(dev)) { if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
table->size = ARRAY_SIZE(skylake_mocs_table); table->size = ARRAY_SIZE(skylake_mocs_table);
table->table = skylake_mocs_table; table->table = skylake_mocs_table;
result = true; result = true;
@ -159,11 +159,30 @@ static bool get_mocs_settings(struct drm_device *dev,
return result; return result;
} }
static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
{
switch (ring) {
case RCS:
return GEN9_GFX_MOCS(index);
case VCS:
return GEN9_MFX0_MOCS(index);
case BCS:
return GEN9_BLT_MOCS(index);
case VECS:
return GEN9_VEBOX_MOCS(index);
case VCS2:
return GEN9_MFX1_MOCS(index);
default:
MISSING_CASE(ring);
return INVALID_MMIO_REG;
}
}
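/*
* Usage note (illustrative): together with the for_each_ring() loop
* introduced below, this mapping lets one request program the MOCS
* control table on every engine instead of open-coding the five
* per-engine register bases.
*/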
/** /**
* emit_mocs_control_table() - emit the mocs control table * emit_mocs_control_table() - emit the mocs control table
* @req: Request to set up the MOCS table for. * @req: Request to set up the MOCS table for.
* @table: The values to program into the control regs. * @table: The values to program into the control regs.
* @reg_base: The base for the engine that needs to be programmed. * @ring: The engine for which to emit the registers.
* *
* This function simply emits a MI_LOAD_REGISTER_IMM command for the * This function simply emits a MI_LOAD_REGISTER_IMM command for the
* given table starting at the given address. * given table starting at the given address.
@ -172,7 +191,7 @@ static bool get_mocs_settings(struct drm_device *dev,
*/ */
static int emit_mocs_control_table(struct drm_i915_gem_request *req, static int emit_mocs_control_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table, const struct drm_i915_mocs_table *table,
u32 reg_base) enum intel_ring_id ring)
{ {
struct intel_ringbuffer *ringbuf = req->ringbuf; struct intel_ringbuffer *ringbuf = req->ringbuf;
unsigned int index; unsigned int index;
@ -191,7 +210,7 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
for (index = 0; index < table->size; index++) { for (index = 0; index < table->size; index++) {
intel_logical_ring_emit(ringbuf, reg_base + index * 4); intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
intel_logical_ring_emit(ringbuf, intel_logical_ring_emit(ringbuf,
table->table[index].control_value); table->table[index].control_value);
} }
@ -205,7 +224,7 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
* that value to all the used entries. * that value to all the used entries.
*/ */
for (; index < GEN9_NUM_MOCS_ENTRIES; index++) { for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
intel_logical_ring_emit(ringbuf, reg_base + index * 4); intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
intel_logical_ring_emit(ringbuf, table->table[0].control_value); intel_logical_ring_emit(ringbuf, table->table[0].control_value);
} }
@ -253,7 +272,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
value = (table->table[count].l3cc_value & 0xffff) | value = (table->table[count].l3cc_value & 0xffff) |
((table->table[count + 1].l3cc_value & 0xffff) << 16); ((table->table[count + 1].l3cc_value & 0xffff) << 16);
intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4); intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
intel_logical_ring_emit(ringbuf, value); intel_logical_ring_emit(ringbuf, value);
} }
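/*
* Packing note (illustrative): each GEN9_LNCFCMOCS register carries two
* table entries, entry 2*i in bits 15:0 and entry 2*i+1 in bits 31:16,
* which is exactly how "value" is assembled above.
*/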
@ -270,7 +289,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
* they are reserved by the hardware. * they are reserved by the hardware.
*/ */
for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) { for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4); intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
intel_logical_ring_emit(ringbuf, value); intel_logical_ring_emit(ringbuf, value);
value = filler; value = filler;
@ -304,26 +323,16 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
int ret; int ret;
if (get_mocs_settings(req->ring->dev, &t)) { if (get_mocs_settings(req->ring->dev, &t)) {
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *ring;
enum intel_ring_id ring_id;
/* Program the control registers */ /* Program the control registers */
ret = emit_mocs_control_table(req, &t, GEN9_GFX_MOCS_0); for_each_ring(ring, dev_priv, ring_id) {
if (ret) ret = emit_mocs_control_table(req, &t, ring_id);
return ret;
ret = emit_mocs_control_table(req, &t, GEN9_MFX0_MOCS_0);
if (ret)
return ret;
ret = emit_mocs_control_table(req, &t, GEN9_MFX1_MOCS_0);
if (ret)
return ret;
ret = emit_mocs_control_table(req, &t, GEN9_VEBOX_MOCS_0);
if (ret)
return ret;
ret = emit_mocs_control_table(req, &t, GEN9_BLT_MOCS_0);
if (ret) if (ret)
return ret; return ret;
}
/* Now program the l3cc registers */ /* Now program the l3cc registers */
ret = emit_mocs_l3cc_table(req, &t); ret = emit_mocs_l3cc_table(req, &t);
View File
@ -26,6 +26,7 @@
*/ */
#include <linux/acpi.h> #include <linux/acpi.h>
#include <linux/dmi.h>
#include <acpi/video.h> #include <acpi/video.h>
#include <drm/drmP.h> #include <drm/drmP.h>
@ -46,6 +47,7 @@
#define OPREGION_SWSCI_OFFSET 0x200 #define OPREGION_SWSCI_OFFSET 0x200
#define OPREGION_ASLE_OFFSET 0x300 #define OPREGION_ASLE_OFFSET 0x300
#define OPREGION_VBT_OFFSET 0x400 #define OPREGION_VBT_OFFSET 0x400
#define OPREGION_ASLE_EXT_OFFSET 0x1C00
#define OPREGION_SIGNATURE "IntelGraphicsMem" #define OPREGION_SIGNATURE "IntelGraphicsMem"
#define MBOX_ACPI (1<<0) #define MBOX_ACPI (1<<0)
@ -120,7 +122,16 @@ struct opregion_asle {
u64 fdss; u64 fdss;
u32 fdsp; u32 fdsp;
u32 stat; u32 stat;
u8 rsvd[70]; u64 rvda; /* Physical address of raw vbt data */
u32 rvds; /* Size of raw vbt data */
u8 rsvd[58];
} __packed;
/* OpRegion mailbox #5: ASLE ext */
struct opregion_asle_ext {
u32 phed; /* Panel Header */
u8 bddc[256]; /* Panel EDID */
u8 rsvd[764];
} __packed; } __packed;
/* Driver readiness indicator */ /* Driver readiness indicator */
@ -411,7 +422,7 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_connector *intel_connector; struct intel_connector *connector;
struct opregion_asle *asle = dev_priv->opregion.asle; struct opregion_asle *asle = dev_priv->opregion.asle;
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
@ -435,8 +446,8 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
* only one). * only one).
*/ */
DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) for_each_intel_connector(dev, connector)
intel_panel_set_backlight_acpi(intel_connector, bclp, 255); intel_panel_set_backlight_acpi(connector, bclp, 255);
asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID; asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;
drm_modeset_unlock(&dev->mode_config.connection_mutex); drm_modeset_unlock(&dev->mode_config.connection_mutex);
@ -826,6 +837,10 @@ void intel_opregion_fini(struct drm_device *dev)
/* just clear all opregion memory pointers now */ /* just clear all opregion memory pointers now */
memunmap(opregion->header); memunmap(opregion->header);
if (opregion->rvda) {
memunmap(opregion->rvda);
opregion->rvda = NULL;
}
opregion->header = NULL; opregion->header = NULL;
opregion->acpi = NULL; opregion->acpi = NULL;
opregion->swsci = NULL; opregion->swsci = NULL;
@ -894,6 +909,25 @@ static void swsci_setup(struct drm_device *dev)
static inline void swsci_setup(struct drm_device *dev) {} static inline void swsci_setup(struct drm_device *dev) {}
#endif /* CONFIG_ACPI */ #endif /* CONFIG_ACPI */
static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
{
DRM_DEBUG_KMS("Falling back to manually reading VBT from "
"VBIOS ROM for %s\n", id->ident);
return 1;
}
static const struct dmi_system_id intel_no_opregion_vbt[] = {
{
.callback = intel_no_opregion_vbt_callback,
.ident = "ThinkCentre A57",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
},
},
{ }
};
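/*
* Note: dmi_check_system() returns the number of matching entries and
* runs each match's callback, so a non-zero return below means the
* quirk fires and the OpRegion VBT is skipped for that machine.
*/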
int intel_opregion_setup(struct drm_device *dev) int intel_opregion_setup(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
@ -907,6 +941,7 @@ int intel_opregion_setup(struct drm_device *dev)
BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100); BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100); BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100); BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
@ -931,8 +966,6 @@ int intel_opregion_setup(struct drm_device *dev)
goto err_out; goto err_out;
} }
opregion->header = base; opregion->header = base;
opregion->vbt = base + OPREGION_VBT_OFFSET;
opregion->lid_state = base + ACPI_CLID; opregion->lid_state = base + ACPI_CLID;
mboxes = opregion->header->mboxes; mboxes = opregion->header->mboxes;
@ -946,6 +979,7 @@ int intel_opregion_setup(struct drm_device *dev)
opregion->swsci = base + OPREGION_SWSCI_OFFSET; opregion->swsci = base + OPREGION_SWSCI_OFFSET;
swsci_setup(dev); swsci_setup(dev);
} }
if (mboxes & MBOX_ASLE) { if (mboxes & MBOX_ASLE) {
DRM_DEBUG_DRIVER("ASLE supported\n"); DRM_DEBUG_DRIVER("ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET; opregion->asle = base + OPREGION_ASLE_OFFSET;
@ -953,6 +987,37 @@ int intel_opregion_setup(struct drm_device *dev)
opregion->asle->ardy = ASLE_ARDY_NOT_READY; opregion->asle->ardy = ASLE_ARDY_NOT_READY;
} }
if (mboxes & MBOX_ASLE_EXT)
DRM_DEBUG_DRIVER("ASLE extension supported\n");
if (!dmi_check_system(intel_no_opregion_vbt)) {
const void *vbt = NULL;
u32 vbt_size = 0;
if (opregion->header->opregion_ver >= 2 && opregion->asle &&
opregion->asle->rvda && opregion->asle->rvds) {
opregion->rvda = memremap(opregion->asle->rvda,
opregion->asle->rvds,
MEMREMAP_WB);
vbt = opregion->rvda;
vbt_size = opregion->asle->rvds;
}
if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (RVDA)\n");
opregion->vbt = vbt;
opregion->vbt_size = vbt_size;
} else {
vbt = base + OPREGION_VBT_OFFSET;
vbt_size = OPREGION_ASLE_EXT_OFFSET - OPREGION_VBT_OFFSET;
if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
opregion->vbt = vbt;
opregion->vbt_size = vbt_size;
}
}
}
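/*
* Lookup order, summarized: a v2.0+ OpRegion may publish the raw VBT
* through RVDA/RVDS (mapped above); if that is absent or fails
* intel_bios_is_valid_vbt(), fall back to mailbox #4 between
* OPREGION_VBT_OFFSET and the ASLE-ext mailbox.
*/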
return 0; return 0;
err_out: err_out:
View File
@ -461,8 +461,7 @@ static inline u32 scale_hw_to_user(struct intel_connector *connector,
static u32 intel_panel_compute_brightness(struct intel_connector *connector, static u32 intel_panel_compute_brightness(struct intel_connector *connector,
u32 val) u32 val)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
WARN_ON(panel->backlight.max == 0); WARN_ON(panel->backlight.max == 0);
@ -480,45 +479,40 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
static u32 lpt_get_backlight(struct intel_connector *connector) static u32 lpt_get_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK; return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
} }
static u32 pch_get_backlight(struct intel_connector *connector) static u32 pch_get_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
} }
static u32 i9xx_get_backlight(struct intel_connector *connector) static u32 i9xx_get_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
u32 val; u32 val;
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
if (INTEL_INFO(dev)->gen < 4) if (INTEL_INFO(dev_priv)->gen < 4)
val >>= 1; val >>= 1;
if (panel->backlight.combination_mode) { if (panel->backlight.combination_mode) {
u8 lbpc; u8 lbpc;
pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); pci_read_config_byte(dev_priv->dev->pdev, PCI_LBPC, &lbpc);
val *= lbpc; val *= lbpc;
} }
return val; return val;
} }
static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe) static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return 0; return 0;
@ -527,17 +521,16 @@ static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
static u32 vlv_get_backlight(struct intel_connector *connector) static u32 vlv_get_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum pipe pipe = intel_get_pipe_from_connector(connector); enum pipe pipe = intel_get_pipe_from_connector(connector);
return _vlv_get_backlight(dev, pipe); return _vlv_get_backlight(dev_priv, pipe);
} }
static u32 bxt_get_backlight(struct intel_connector *connector) static u32 bxt_get_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(BXT_BLC_PWM_DUTY(panel->backlight.controller)); return I915_READ(BXT_BLC_PWM_DUTY(panel->backlight.controller));
} }
@ -553,8 +546,7 @@ static u32 pwm_get_backlight(struct intel_connector *connector)
static u32 intel_panel_get_backlight(struct intel_connector *connector) static u32 intel_panel_get_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
u32 val = 0; u32 val = 0;
@ -573,16 +565,14 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
static void lpt_set_backlight(struct intel_connector *connector, u32 level) static void lpt_set_backlight(struct intel_connector *connector, u32 level)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK; u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_PCH_CTL2, val | level); I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
} }
static void pch_set_backlight(struct intel_connector *connector, u32 level) static void pch_set_backlight(struct intel_connector *connector, u32 level)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp; u32 tmp;
tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
@ -591,8 +581,7 @@ static void pch_set_backlight(struct intel_connector *connector, u32 level)
static void i9xx_set_backlight(struct intel_connector *connector, u32 level) static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
u32 tmp, mask; u32 tmp, mask;
@ -603,10 +592,10 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
lbpc = level * 0xfe / panel->backlight.max + 1; lbpc = level * 0xfe / panel->backlight.max + 1;
level /= lbpc; level /= lbpc;
pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc); pci_write_config_byte(dev_priv->dev->pdev, PCI_LBPC, lbpc);
} }
if (IS_GEN4(dev)) { if (IS_GEN4(dev_priv)) {
mask = BACKLIGHT_DUTY_CYCLE_MASK; mask = BACKLIGHT_DUTY_CYCLE_MASK;
} else { } else {
level <<= 1; level <<= 1;
@ -619,8 +608,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
static void vlv_set_backlight(struct intel_connector *connector, u32 level) static void vlv_set_backlight(struct intel_connector *connector, u32 level)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_get_pipe_from_connector(connector); enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 tmp; u32 tmp;
@ -633,8 +621,7 @@ static void vlv_set_backlight(struct intel_connector *connector, u32 level)
static void bxt_set_backlight(struct intel_connector *connector, u32 level) static void bxt_set_backlight(struct intel_connector *connector, u32 level)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level); I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
@ -663,8 +650,7 @@ intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
static void intel_panel_set_backlight(struct intel_connector *connector, static void intel_panel_set_backlight(struct intel_connector *connector,
u32 user_level, u32 user_max) u32 user_level, u32 user_max)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
u32 hw_level; u32 hw_level;
@ -690,8 +676,7 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
void intel_panel_set_backlight_acpi(struct intel_connector *connector, void intel_panel_set_backlight_acpi(struct intel_connector *connector,
u32 user_level, u32 user_max) u32 user_level, u32 user_max)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector); enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 hw_level; u32 hw_level;
@ -721,8 +706,7 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
static void lpt_disable_backlight(struct intel_connector *connector) static void lpt_disable_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp; u32 tmp;
intel_panel_actually_set_backlight(connector, 0); intel_panel_actually_set_backlight(connector, 0);
@ -747,8 +731,7 @@ static void lpt_disable_backlight(struct intel_connector *connector)
static void pch_disable_backlight(struct intel_connector *connector) static void pch_disable_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp; u32 tmp;
intel_panel_actually_set_backlight(connector, 0); intel_panel_actually_set_backlight(connector, 0);
@ -767,8 +750,7 @@ static void i9xx_disable_backlight(struct intel_connector *connector)
static void i965_disable_backlight(struct intel_connector *connector) static void i965_disable_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp; u32 tmp;
intel_panel_actually_set_backlight(connector, 0); intel_panel_actually_set_backlight(connector, 0);
@ -779,8 +761,7 @@ static void i965_disable_backlight(struct intel_connector *connector)
static void vlv_disable_backlight(struct intel_connector *connector) static void vlv_disable_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_get_pipe_from_connector(connector); enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 tmp; u32 tmp;
@ -795,8 +776,7 @@ static void vlv_disable_backlight(struct intel_connector *connector)
static void bxt_disable_backlight(struct intel_connector *connector) static void bxt_disable_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
u32 tmp, val; u32 tmp, val;
@ -825,8 +805,7 @@ static void pwm_disable_backlight(struct intel_connector *connector)
void intel_panel_disable_backlight(struct intel_connector *connector) void intel_panel_disable_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
if (!panel->backlight.present) if (!panel->backlight.present)
@ -838,7 +817,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
* backlight. This will leave the backlight on unnecessarily when * backlight. This will leave the backlight on unnecessarily when
* another client is not activated. * another client is not activated.
*/ */
if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) { if (dev_priv->dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n"); DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
return; return;
} }
@ -853,8 +832,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
static void lpt_enable_backlight(struct intel_connector *connector) static void lpt_enable_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2; u32 pch_ctl1, pch_ctl2;
@ -886,8 +864,7 @@ static void lpt_enable_backlight(struct intel_connector *connector)
static void pch_enable_backlight(struct intel_connector *connector) static void pch_enable_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector); enum pipe pipe = intel_get_pipe_from_connector(connector);
enum transcoder cpu_transcoder = enum transcoder cpu_transcoder =
@ -933,8 +910,7 @@ static void pch_enable_backlight(struct intel_connector *connector)
static void i9xx_enable_backlight(struct intel_connector *connector) static void i9xx_enable_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
u32 ctl, freq; u32 ctl, freq;
@ -951,7 +927,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
ctl = freq << 17; ctl = freq << 17;
if (panel->backlight.combination_mode) if (panel->backlight.combination_mode)
ctl |= BLM_LEGACY_MODE; ctl |= BLM_LEGACY_MODE;
if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm) if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm)
ctl |= BLM_POLARITY_PNV; ctl |= BLM_POLARITY_PNV;
I915_WRITE(BLC_PWM_CTL, ctl); I915_WRITE(BLC_PWM_CTL, ctl);
@ -965,14 +941,13 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
* 855gm only, but checking for gen2 is safe, as 855gm is the only gen2 * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
* that has backlight. * that has backlight.
*/ */
if (IS_GEN2(dev)) if (IS_GEN2(dev_priv))
I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE); I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
} }
static void i965_enable_backlight(struct intel_connector *connector) static void i965_enable_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector); enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 ctl, ctl2, freq; u32 ctl, ctl2, freq;
@ -1005,8 +980,7 @@ static void i965_enable_backlight(struct intel_connector *connector)
static void vlv_enable_backlight(struct intel_connector *connector) static void vlv_enable_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector); enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 ctl, ctl2; u32 ctl, ctl2;
@ -1037,8 +1011,7 @@ static void vlv_enable_backlight(struct intel_connector *connector)
static void bxt_enable_backlight(struct intel_connector *connector) static void bxt_enable_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector); enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 pwm_ctl, val; u32 pwm_ctl, val;
@ -1095,8 +1068,7 @@ static void pwm_enable_backlight(struct intel_connector *connector)
void intel_panel_enable_backlight(struct intel_connector *connector) void intel_panel_enable_backlight(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector); enum pipe pipe = intel_get_pipe_from_connector(connector);
@ -1249,6 +1221,14 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
} }
#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
/*
* BXT: PWM clock frequency = 19.2 MHz.
*/
static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
return KHz(19200) / pwm_freq_hz;
}
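
The new helper gives Broxton a VBT-frequency conversion for the first time: the PWM period is the fixed 19.2 MHz PWM clock divided by the requested backlight frequency. A compilable sketch of the arithmetic, with an illustrative 200 Hz panel:

#include <stdio.h>

#define KHz(x) ((x) * 1000)     /* matches the driver's KHz() helper */

/* BXT: PWM period in 19.2 MHz clock ticks for a target backlight frequency. */
static unsigned int bxt_pwm_period(unsigned int pwm_freq_hz)
{
        return KHz(19200) / pwm_freq_hz;
}

int main(void)
{
        printf("%u\n", bxt_pwm_period(200));    /* 19200000 / 200 = 96000 ticks */
        return 0;
}
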
/* /*
* SPT: This value represents the period of the PWM stream in clock periods * SPT: This value represents the period of the PWM stream in clock periods
* multiplied by 16 (default increment) or 128 (alternate increment selected in * multiplied by 16 (default increment) or 128 (alternate increment selected in
@ -1256,8 +1236,7 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
*/ */
static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 mul, clock; u32 mul, clock;
if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY) if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY)
@ -1277,8 +1256,7 @@ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/ */
static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 mul, clock; u32 mul, clock;
if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY) if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY)
@ -1286,7 +1264,7 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
else else
mul = 128; mul = 128;
if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) if (HAS_PCH_LPT_H(dev_priv))
clock = MHz(135); /* LPT:H */ clock = MHz(135); /* LPT:H */
else else
clock = MHz(24); /* LPT:LP */ clock = MHz(24); /* LPT:LP */
@ -1321,22 +1299,28 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
int clock; int clock;
if (IS_PINEVIEW(dev)) if (IS_PINEVIEW(dev))
clock = intel_hrawclk(dev); clock = MHz(intel_hrawclk(dev));
else else
clock = 1000 * dev_priv->display.get_display_clock_speed(dev); clock = 1000 * dev_priv->cdclk_freq;
return clock / (pwm_freq_hz * 32); return clock / (pwm_freq_hz * 32);
} }
/* /*
* Gen4: This value represents the period of the PWM stream in display core * Gen4: This value represents the period of the PWM stream in display core
* clocks multiplied by 128. * clocks ([DevCTG] HRAW clocks) multiplied by 128.
*
*/ */
static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{ {
struct drm_device *dev = connector->base.dev; struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int clock = 1000 * dev_priv->display.get_display_clock_speed(dev); int clock;
if (IS_G4X(dev_priv))
clock = MHz(intel_hrawclk(dev));
else
clock = 1000 * dev_priv->cdclk_freq;
return clock / (pwm_freq_hz * 128); return clock / (pwm_freq_hz * 128);
} }
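
Besides the mechanical to_i915() conversions, these two hunks fix the clock units feeding the divisions: intel_hrawclk() reports MHz and is now wrapped in MHz(), while the other path switches to the cached cdclk_freq, which is kept in kHz (hence the * 1000), so both paths hand the divider a value in Hz; G4X also joins Pineview in using the raw clock. A sketch of the shared arithmetic under an assumed 200 MHz display clock; gen2/3 count the PWM period in units of 32 core clocks and gen4 in units of 128:

#include <stdio.h>

#define MHz(x) ((x) * 1000000UL)

/* PWM period = clock / (frequency * unit), with a per-generation unit. */
static unsigned long hz_to_pwm(unsigned long clock_hz, unsigned int freq_hz,
                               unsigned int unit)
{
        return clock_hz / (freq_hz * unit);
}

int main(void)
{
        unsigned long clock_hz = MHz(200);      /* assumed display clock */

        printf("gen3: %lu\n", hz_to_pwm(clock_hz, 200, 32));    /* 31250 */
        printf("gen4: %lu\n", hz_to_pwm(clock_hz, 200, 128));   /* 7812 */
        return 0;
}
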
@ -1365,20 +1349,23 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
static u32 get_backlight_max_vbt(struct intel_connector *connector) static u32 get_backlight_max_vbt(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz; u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
u32 pwm; u32 pwm;
if (!pwm_freq_hz) { if (!panel->backlight.hz_to_pwm) {
DRM_DEBUG_KMS("backlight frequency not specified in VBT\n"); DRM_DEBUG_KMS("backlight frequency conversion not supported\n");
return 0; return 0;
} }
if (!panel->backlight.hz_to_pwm) { if (pwm_freq_hz) {
DRM_DEBUG_KMS("backlight frequency setting from VBT currently not supported on this platform\n"); DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n",
return 0; pwm_freq_hz);
} else {
pwm_freq_hz = 200;
DRM_DEBUG_KMS("default backlight frequency %u Hz\n",
pwm_freq_hz);
} }
pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz); pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
@ -1387,8 +1374,6 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
return 0; return 0;
} }
DRM_DEBUG_KMS("backlight frequency %u Hz from VBT\n", pwm_freq_hz);
return pwm; return pwm;
} }
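
The control flow of get_backlight_max_vbt() is inverted by this hunk: a platform without a hz_to_pwm callback now bails out first, while a missing VBT frequency no longer aborts and instead falls back to a 200 Hz default. A minimal sketch of the resulting policy, with illustrative names:

/* Sketch only: the hz_to_pwm callback is now the hard requirement, and the
 * VBT frequency merely overrides the 200 Hz default from the hunk above. */
static unsigned int max_from_vbt(unsigned int vbt_freq_hz,
                                 unsigned int (*hz_to_pwm)(unsigned int))
{
        if (!hz_to_pwm)
                return 0;                   /* platform cannot convert at all */

        return hz_to_pwm(vbt_freq_hz ? vbt_freq_hz : 200);
}
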
@ -1397,8 +1382,7 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
*/ */
static u32 get_backlight_min_vbt(struct intel_connector *connector) static u32 get_backlight_min_vbt(struct intel_connector *connector)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
int min; int min;
@ -1423,8 +1407,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused) static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2, val; u32 pch_ctl1, pch_ctl2, val;
@ -1453,8 +1436,7 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused) static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
u32 cpu_ctl2, pch_ctl1, pch_ctl2, val; u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
@ -1484,17 +1466,16 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused) static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
u32 ctl, val; u32 ctl, val;
ctl = I915_READ(BLC_PWM_CTL); ctl = I915_READ(BLC_PWM_CTL);
if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev)) if (IS_GEN2(dev_priv) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE; panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
if (IS_PINEVIEW(dev)) if (IS_PINEVIEW(dev_priv))
panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV; panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
panel->backlight.max = ctl >> 17; panel->backlight.max = ctl >> 17;
@ -1522,8 +1503,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused) static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2, val; u32 ctl, ctl2, val;
@ -1556,8 +1536,7 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe) static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2, val; u32 ctl, ctl2, val;
@ -1578,7 +1557,7 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
panel->backlight.min = get_backlight_min_vbt(connector); panel->backlight.min = get_backlight_min_vbt(connector);
val = _vlv_get_backlight(dev, pipe); val = _vlv_get_backlight(dev_priv, pipe);
panel->backlight.level = intel_panel_compute_brightness(connector, val); panel->backlight.level = intel_panel_compute_brightness(connector, val);
panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) && panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
@ -1590,8 +1569,7 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
static int static int
bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{ {
struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
u32 pwm_ctl, val; u32 pwm_ctl, val;
@ -1669,8 +1647,7 @@ static int pwm_setup_backlight(struct intel_connector *connector,
int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe) int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
{ {
struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_panel *panel = &intel_connector->panel; struct intel_panel *panel = &intel_connector->panel;
int ret; int ret;
@ -1725,35 +1702,35 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
static void static void
intel_panel_init_backlight_funcs(struct intel_panel *panel) intel_panel_init_backlight_funcs(struct intel_panel *panel)
{ {
struct intel_connector *intel_connector = struct intel_connector *connector =
container_of(panel, struct intel_connector, panel); container_of(panel, struct intel_connector, panel);
struct drm_device *dev = intel_connector->base.dev; struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_BROXTON(dev)) { if (IS_BROXTON(dev_priv)) {
panel->backlight.setup = bxt_setup_backlight; panel->backlight.setup = bxt_setup_backlight;
panel->backlight.enable = bxt_enable_backlight; panel->backlight.enable = bxt_enable_backlight;
panel->backlight.disable = bxt_disable_backlight; panel->backlight.disable = bxt_disable_backlight;
panel->backlight.set = bxt_set_backlight; panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight; panel->backlight.get = bxt_get_backlight;
} else if (HAS_PCH_LPT(dev) || HAS_PCH_SPT(dev)) { panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
} else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv)) {
panel->backlight.setup = lpt_setup_backlight; panel->backlight.setup = lpt_setup_backlight;
panel->backlight.enable = lpt_enable_backlight; panel->backlight.enable = lpt_enable_backlight;
panel->backlight.disable = lpt_disable_backlight; panel->backlight.disable = lpt_disable_backlight;
panel->backlight.set = lpt_set_backlight; panel->backlight.set = lpt_set_backlight;
panel->backlight.get = lpt_get_backlight; panel->backlight.get = lpt_get_backlight;
if (HAS_PCH_LPT(dev)) if (HAS_PCH_LPT(dev_priv))
panel->backlight.hz_to_pwm = lpt_hz_to_pwm; panel->backlight.hz_to_pwm = lpt_hz_to_pwm;
else else
panel->backlight.hz_to_pwm = spt_hz_to_pwm; panel->backlight.hz_to_pwm = spt_hz_to_pwm;
} else if (HAS_PCH_SPLIT(dev)) { } else if (HAS_PCH_SPLIT(dev_priv)) {
panel->backlight.setup = pch_setup_backlight; panel->backlight.setup = pch_setup_backlight;
panel->backlight.enable = pch_enable_backlight; panel->backlight.enable = pch_enable_backlight;
panel->backlight.disable = pch_disable_backlight; panel->backlight.disable = pch_disable_backlight;
panel->backlight.set = pch_set_backlight; panel->backlight.set = pch_set_backlight;
panel->backlight.get = pch_get_backlight; panel->backlight.get = pch_get_backlight;
panel->backlight.hz_to_pwm = pch_hz_to_pwm; panel->backlight.hz_to_pwm = pch_hz_to_pwm;
} else if (IS_VALLEYVIEW(dev)) { } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
if (dev_priv->vbt.has_mipi) { if (dev_priv->vbt.has_mipi) {
panel->backlight.setup = pwm_setup_backlight; panel->backlight.setup = pwm_setup_backlight;
panel->backlight.enable = pwm_enable_backlight; panel->backlight.enable = pwm_enable_backlight;
@ -1768,7 +1745,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
panel->backlight.get = vlv_get_backlight; panel->backlight.get = vlv_get_backlight;
panel->backlight.hz_to_pwm = vlv_hz_to_pwm; panel->backlight.hz_to_pwm = vlv_hz_to_pwm;
} }
} else if (IS_GEN4(dev)) { } else if (IS_GEN4(dev_priv)) {
panel->backlight.setup = i965_setup_backlight; panel->backlight.setup = i965_setup_backlight;
panel->backlight.enable = i965_enable_backlight; panel->backlight.enable = i965_enable_backlight;
panel->backlight.disable = i965_disable_backlight; panel->backlight.disable = i965_disable_backlight;
@ -1814,7 +1791,7 @@ void intel_backlight_register(struct drm_device *dev)
{ {
struct intel_connector *connector; struct intel_connector *connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) for_each_intel_connector(dev, connector)
intel_backlight_device_register(connector); intel_backlight_device_register(connector);
} }
@ -1822,6 +1799,6 @@ void intel_backlight_unregister(struct drm_device *dev)
{ {
struct intel_connector *connector; struct intel_connector *connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) for_each_intel_connector(dev, connector)
intel_backlight_device_unregister(connector); intel_backlight_device_unregister(connector);
} }

File diff suppressed because it is too large


@ -80,7 +80,7 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
uint32_t *data = (uint32_t *) vsc_psr; uint32_t *data = (uint32_t *) vsc_psr;
unsigned int i; unsigned int i;
@ -151,13 +151,31 @@ static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
} }
static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return DP_AUX_CH_CTL(port);
else
return EDP_PSR_AUX_CTL;
}
static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return DP_AUX_CH_DATA(port, index);
else
return EDP_PSR_AUX_DATA(index);
}
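
These helpers capture the gen9 change of venue: Skylake-class hardware drives the PSR handshake through the ordinary per-port DP AUX channel registers, while older DDI platforms keep the dedicated EDP_PSR_AUX_* block. The setup loop further down passes i >> 2 because each 32-bit data register carries four message bytes, packed most significant byte first. A standalone sketch of that packing in the style of intel_dp_pack_aux(); both the function and the message bytes are illustrative, not the driver's own:

#include <stdint.h>
#include <stdio.h>

/* Pack up to four message bytes, MSB first, into one 32-bit AUX data word. */
static uint32_t pack_aux(const uint8_t *src, int src_bytes)
{
        uint32_t v = 0;
        int i;

        for (i = 0; i < src_bytes && i < 4; i++)
                v |= (uint32_t)src[i] << ((3 - i) * 8);
        return v;
}

int main(void)
{
        static const uint8_t msg[5] = { 0x80, 0x06, 0x00, 0x00, 0x01 };
        int i;

        /* byte offset i selects data register index i >> 2, as in the loop */
        for (i = 0; i < (int)sizeof(msg); i += 4)
                printf("AUX_DATA[%d] = 0x%08x\n", i >> 2,
                       (unsigned)pack_aux(&msg[i], (int)sizeof(msg) - i));
        return 0;
}
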
static void hsw_psr_enable_sink(struct intel_dp *intel_dp) static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{ {
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev; struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider; uint32_t aux_clock_divider;
uint32_t aux_data_reg, aux_ctl_reg; i915_reg_t aux_ctl_reg;
int precharge = 0x3; int precharge = 0x3;
static const uint8_t aux_msg[] = { static const uint8_t aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4, [0] = DP_AUX_NATIVE_WRITE << 4,
@ -166,29 +184,24 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
[3] = 1 - 1, [3] = 1 - 1,
[4] = DP_SET_POWER_D0, [4] = DP_SET_POWER_D0,
}; };
enum port port = dig_port->port;
int i; int i;
BUILD_BUG_ON(sizeof(aux_msg) > 20); BUILD_BUG_ON(sizeof(aux_msg) > 20);
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
/* Enable AUX frame sync at sink */ /* Enable AUX frame sync at sink */
if (dev_priv->psr.aux_frame_sync) if (dev_priv->psr.aux_frame_sync)
drm_dp_dpcd_writeb(&intel_dp->aux, drm_dp_dpcd_writeb(&intel_dp->aux,
DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF, DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
DP_AUX_FRAME_SYNC_ENABLE); DP_AUX_FRAME_SYNC_ENABLE);
aux_data_reg = (INTEL_INFO(dev)->gen >= 9) ? aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
DPA_AUX_CH_DATA1 : EDP_PSR_AUX_DATA1(dev);
aux_ctl_reg = (INTEL_INFO(dev)->gen >= 9) ?
DPA_AUX_CH_CTL : EDP_PSR_AUX_CTL(dev);
/* Setup AUX registers */ /* Setup AUX registers */
for (i = 0; i < sizeof(aux_msg); i += 4) for (i = 0; i < sizeof(aux_msg); i += 4)
I915_WRITE(aux_data_reg + i, I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
if (INTEL_INFO(dev)->gen >= 9) { if (INTEL_INFO(dev)->gen >= 9) {
@ -254,37 +267,66 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t max_sleep_time = 0x1f; uint32_t max_sleep_time = 0x1f;
/* Lately it was identified that depending on panel idle frame count /*
* calculated at HW can be off by 1. So let's use what came * Let's respect VBT in case VBT asks a higher idle_frame value.
* from VBT + 1. * Let's use 6 as the minimum to cover all known cases including
* There are also other cases where panel demands at least 4 * the off-by-one issue that HW has in some cases. Also there are
* but VBT is not being set. To cover these 2 cases let's use * with the 5 or 6 idle patterns.
* at least 5 when VBT isn't set to be on the safest side. * with the 5 or 6 idle patterns.
*/ */
uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ? uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
dev_priv->vbt.psr.idle_frames + 1 : 5; uint32_t val = EDP_PSR_ENABLE;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
/* It doesn't mean we shouldn't send TPS patterns, so let's val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
send the minimal TP1 possible and skip TP2. */
if (IS_HASWELL(dev))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
val |= EDP_PSR_TP1_TIME_2500us;
else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
val |= EDP_PSR_TP1_TIME_500us;
else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
val |= EDP_PSR_TP1_TIME_100us; val |= EDP_PSR_TP1_TIME_100us;
else
val |= EDP_PSR_TP1_TIME_0us;
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
val |= EDP_PSR_TP2_TP3_TIME_2500us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
val |= EDP_PSR_TP2_TP3_TIME_500us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
val |= EDP_PSR_TP2_TP3_TIME_100us;
else
val |= EDP_PSR_TP2_TP3_TIME_0us; val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
/* Sink should be able to train with the 5 or 6 idle patterns */
idle_frames += 4;
}
I915_WRITE(EDP_PSR_CTL(dev), val | if (intel_dp_source_supports_hbr2(intel_dp) &&
(IS_BROADWELL(dev) ? 0 : link_entry_time) | drm_dp_tps3_supported(intel_dp->dpcd))
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | val |= EDP_PSR_TP1_TP3_SEL;
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | else
EDP_PSR_ENABLE); val |= EDP_PSR_TP1_TP2_SEL;
if (dev_priv->psr.psr2_support) I915_WRITE(EDP_PSR_CTL, val);
I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100); if (!dev_priv->psr.psr2_support)
return;
/* FIXME: selective update is probably totally broken because it doesn't
* mesh at all with our frontbuffer tracking. And the hw alone isn't
* good enough. */
val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
val |= EDP_PSR2_TP2_TIME_2500;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
val |= EDP_PSR2_TP2_TIME_500;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
val |= EDP_PSR2_TP2_TIME_100;
else
val |= EDP_PSR2_TP2_TIME_50;
I915_WRITE(EDP_PSR2_CTL, val);
} }
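
The rewritten source enable derives the TP1 and TP2/TP3 wakeup times from VBT instead of hardcoding them, picks TP1->TP3 link training only when the source supports HBR2 and the sink advertises TPS3, and applies the same VBT bucketing to the new PSR2 path. A sketch of the threshold mapping shared by those if/else ladders; the enum values merely stand in for the EDP_PSR*_TP*_TIME_* field encodings:

/* Map a VBT wakeup-time value onto the nearest register-supported TP time. */
enum tp_time { TP_0US, TP_100US, TP_500US, TP_2500US };

static enum tp_time vbt_to_tp_time(unsigned int vbt_wakeup_time)
{
        if (vbt_wakeup_time > 5)
                return TP_2500US;
        if (vbt_wakeup_time > 1)
                return TP_500US;
        if (vbt_wakeup_time > 0)
                return TP_100US;
        return TP_0US;
}
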
static bool intel_psr_match_conditions(struct intel_dp *intel_dp) static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
@ -324,8 +366,8 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return false; return false;
} }
if (!IS_VALLEYVIEW(dev) && ((dev_priv->vbt.psr.full_link) || if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
(dig_port->port != PORT_A))) { ((dev_priv->vbt.psr.full_link) || (dig_port->port != PORT_A))) {
DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n"); DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
return false; return false;
} }
@ -340,7 +382,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE); WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active); WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock); lockdep_assert_held(&dev_priv->psr.lock);
@ -403,9 +445,14 @@ void intel_psr_enable(struct intel_dp *intel_dp)
skl_psr_setup_su_vsc(intel_dp); skl_psr_setup_su_vsc(intel_dp);
} }
/* Avoid continuous PSR exit by masking memup and hpd */ /*
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD.
EDP_PSR_DEBUG_MASK_HPD); * Also mask LPSP to avoid dependency on other drivers that
* might block runtime_pm besides preventing other hw tracking
* issues now we can rely on frontbuffer tracking.
*/
I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
/* Enable PSR on the panel */ /* Enable PSR on the panel */
hsw_psr_enable_sink(intel_dp); hsw_psr_enable_sink(intel_dp);
@ -427,6 +474,19 @@ void intel_psr_enable(struct intel_dp *intel_dp)
vlv_psr_enable_source(intel_dp); vlv_psr_enable_source(intel_dp);
} }
/*
* FIXME: Activation should happen immediately since this function
* is just called after pipe is fully trained and enabled.
* However on every platform we face issues when first activation
* follows a modeset so quickly.
* - On VLV/CHV we get a blank screen on first activation
* - On HSW/BDW we get a recoverable frozen screen until next
* exit-activate sequence.
*/
if (INTEL_INFO(dev)->gen < 9)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
dev_priv->psr.enabled = intel_dp; dev_priv->psr.enabled = intel_dp;
unlock: unlock:
mutex_unlock(&dev_priv->psr.lock); mutex_unlock(&dev_priv->psr.lock);
@ -466,17 +526,17 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->psr.active) { if (dev_priv->psr.active) {
I915_WRITE(EDP_PSR_CTL(dev), I915_WRITE(EDP_PSR_CTL,
I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE); I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
/* Wait till PSR is idle */ /* Wait till PSR is idle */
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) & if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
DRM_ERROR("Timed out waiting for PSR Idle State\n"); DRM_ERROR("Timed out waiting for PSR Idle State\n");
dev_priv->psr.active = false; dev_priv->psr.active = false;
} else { } else {
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE); WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
} }
} }
@ -498,11 +558,15 @@ void intel_psr_disable(struct intel_dp *intel_dp)
return; return;
} }
/* Disable PSR on Source */
if (HAS_DDI(dev)) if (HAS_DDI(dev))
hsw_psr_disable(intel_dp); hsw_psr_disable(intel_dp);
else else
vlv_psr_disable(intel_dp); vlv_psr_disable(intel_dp);
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
dev_priv->psr.enabled = NULL; dev_priv->psr.enabled = NULL;
mutex_unlock(&dev_priv->psr.lock); mutex_unlock(&dev_priv->psr.lock);
@ -523,7 +587,7 @@ static void intel_psr_work(struct work_struct *work)
* and be ready for re-enable. * and be ready for re-enable.
*/ */
if (HAS_DDI(dev_priv->dev)) { if (HAS_DDI(dev_priv->dev)) {
if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) & if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
EDP_PSR_STATUS_STATE_MASK) == 0, 50)) { EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
return; return;
@ -566,11 +630,11 @@ static void intel_psr_exit(struct drm_device *dev)
return; return;
if (HAS_DDI(dev)) { if (HAS_DDI(dev)) {
val = I915_READ(EDP_PSR_CTL(dev)); val = I915_READ(EDP_PSR_CTL);
WARN_ON(!(val & EDP_PSR_ENABLE)); WARN_ON(!(val & EDP_PSR_ENABLE));
I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE); I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
} else { } else {
val = I915_READ(VLV_PSRCTL(pipe)); val = I915_READ(VLV_PSRCTL(pipe));
@ -620,7 +684,7 @@ void intel_psr_single_frame_update(struct drm_device *dev,
* Single frame update is already supported on BDW+ but it requires * Single frame update is already supported on BDW+ but it requires
* many W/A and it isn't really needed. * many W/A and it isn't really needed.
*/ */
if (!IS_VALLEYVIEW(dev)) if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
return; return;
mutex_lock(&dev_priv->psr.lock); mutex_lock(&dev_priv->psr.lock);
@ -700,7 +764,6 @@ void intel_psr_flush(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc; struct drm_crtc *crtc;
enum pipe pipe; enum pipe pipe;
int delay_ms = HAS_DDI(dev) ? 100 : 500;
mutex_lock(&dev_priv->psr.lock); mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) { if (!dev_priv->psr.enabled) {
@ -714,29 +777,14 @@ void intel_psr_flush(struct drm_device *dev,
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
if (HAS_DDI(dev)) { /* By definition flush = invalidate + flush */
/*
* By definition every flush should mean invalidate + flush,
* however on core platforms let's minimize the
* disable/re-enable so we can avoid the invalidate when flip
* originated the flush.
*/
if (frontbuffer_bits && origin != ORIGIN_FLIP)
intel_psr_exit(dev);
} else {
/*
* On Valleyview and Cherryview we don't use hardware tracking
* so any plane updates or cursor moves don't result in a PSR
* invalidating. Which means we need to manually fake this in
* software for all flushes.
*/
if (frontbuffer_bits) if (frontbuffer_bits)
intel_psr_exit(dev); intel_psr_exit(dev);
}
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) // if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->psr.work, // if (!work_busy(&dev_priv->psr.work.work))
msecs_to_jiffies(delay_ms)); // schedule_delayed_work(&dev_priv->psr.work,
// msecs_to_jiffies(100));
mutex_unlock(&dev_priv->psr.lock); mutex_unlock(&dev_priv->psr.lock);
} }
@ -751,6 +799,9 @@ void intel_psr_init(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work); INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
mutex_init(&dev_priv->psr.lock); mutex_init(&dev_priv->psr.lock);
} }
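
intel_psr_init() now latches the PSR register block base once, which is what lets EDP_PSR_CTL and friends drop their (dev) argument throughout this file: the register macros can resolve against dev_priv->psr_mmio_base instead of re-deciding Haswell versus Broadwell at every use. A sketch of that resolution; the base values and the status offset are assumptions of this sketch, not taken from the diff:

#include <stdint.h>

#define HSW_EDP_PSR_BASE 0x64800u   /* assumed */
#define BDW_EDP_PSR_BASE 0x6f800u   /* assumed */

struct psr_priv { uint32_t psr_mmio_base; };

static uint32_t edp_psr_ctl(const struct psr_priv *p)
{
        return p->psr_mmio_base + 0x0;      /* EDP_PSR_CTL                  */
}

static uint32_t edp_psr_status_ctl(const struct psr_priv *p)
{
        return p->psr_mmio_base + 0x40;     /* EDP_PSR_STATUS_CTL (assumed) */
}
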


@ -27,29 +27,13 @@
* *
*/ */
#include <linux/log2.h>
#include <drm/drmP.h> #include <drm/drmP.h>
#include "i915_drv.h" #include "i915_drv.h"
#include <drm/i915_drm.h> #include <drm/i915_drm.h>
#include "i915_trace.h" #include "i915_trace.h"
#include "intel_drv.h" #include "intel_drv.h"
bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
if (!dev)
return false;
if (i915.enable_execlists) {
struct intel_context *dctx = ring->default_context;
struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
return ringbuf->obj;
} else
return ring->buffer && ring->buffer->obj;
}
int __intel_ring_space(int head, int tail, int size) int __intel_ring_space(int head, int tail, int size)
{ {
int space = head - tail; int space = head - tail;
@ -483,7 +467,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
{ {
struct drm_device *dev = ring->dev; struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = ring->dev->dev_private; struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 mmio = 0; i915_reg_t mmio;
/* The ring status page addresses are no longer next to the rest of /* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7. * the ring registers as of gen7.
@ -526,7 +510,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
* invalidating the TLB? * invalidating the TLB?
*/ */
if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
u32 reg = RING_INSTPM(ring->mmio_base); i915_reg_t reg = RING_INSTPM(ring->mmio_base);
/* ring should be idle before issuing a sync flush*/ /* ring should be idle before issuing a sync flush*/
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
@ -735,7 +719,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count)); intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) { for (i = 0; i < w->count; i++) {
intel_ring_emit(ring, w->reg[i].addr); intel_ring_emit_reg(ring, w->reg[i].addr);
intel_ring_emit(ring, w->reg[i].value); intel_ring_emit(ring, w->reg[i].value);
} }
intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_NOOP);
@ -768,7 +752,8 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
} }
static int wa_add(struct drm_i915_private *dev_priv, static int wa_add(struct drm_i915_private *dev_priv,
const u32 addr, const u32 mask, const u32 val) i915_reg_t addr,
const u32 mask, const u32 val)
{ {
const u32 idx = dev_priv->workarounds.count; const u32 idx = dev_priv->workarounds.count;
@ -926,17 +911,15 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 ||
INTEL_REVID(dev) == SKL_REVID_B0)) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */ /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1))
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_DG_MIRROR_FIX_ENABLE); GEN9_DG_MIRROR_FIX_ENABLE);
}
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1, WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE); GEN9_RHWO_OPTIMIZATION_DISABLE);
/* /*
@ -946,12 +929,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
*/ */
} }
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
IS_BROXTON(dev)) {
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */ /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
GEN9_ENABLE_YV12_BUGFIX); GEN9_ENABLE_YV12_BUGFIX);
}
/* Wa4x4STCOptimizationDisable:skl,bxt */ /* Wa4x4STCOptimizationDisable:skl,bxt */
/* WaDisablePartialResolveInVc:skl,bxt */ /* WaDisablePartialResolveInVc:skl,bxt */
@ -963,24 +944,22 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
GEN9_CCS_TLB_PREFETCH_ENABLE); GEN9_CCS_TLB_PREFETCH_ENABLE);
/* WaDisableMaskBasedCammingInRCC:skl,bxt */ /* WaDisableMaskBasedCammingInRCC:skl,bxt */
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_C0) || if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) IS_BXT_REVID(dev, 0, BXT_REVID_A1))
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE); PIXEL_MASK_CAMMING_DISABLE);
/* WaForceContextSaveRestoreNonCoherent:skl,bxt */ /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT; tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_F0) || if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) >= BXT_REVID_B0)) IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */ /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
if (IS_SKYLAKE(dev) || if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
(IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_B0)) {
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS); GEN8_SAMPLER_POWER_BYPASS_DIS);
}
/* WaDisableSTUnitPowerOptimization:skl,bxt */ /* WaDisableSTUnitPowerOptimization:skl,bxt */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
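
Throughout these workaround hunks, open-coded INTEL_REVID() comparisons give way to IS_SKL_REVID()/IS_BXT_REVID(), which name an inclusive [since, until] stepping range and read unambiguously at both ends: 0 for "from the first stepping" and REVID_FOREVER for "no upper bound". A sketch of the underlying range test, with the platform gating omitted:

#define REVID_FOREVER 0xff   /* assumed open upper bound */

/* Inclusive stepping-range check behind the new IS_*_REVID() macros;
 * the real macros also require IS_SKYLAKE()/IS_BROXTON() to hold. */
static int revid_in_range(unsigned int revid,
                          unsigned int since, unsigned int until)
{
        return revid >= since && revid <= until;
}
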
@ -1002,7 +981,7 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
* Only consider slices where one, and only one, subslice has 7 * Only consider slices where one, and only one, subslice has 7
* EUs * EUs
*/ */
if (hweight8(dev_priv->info.subslice_7eu[i]) != 1) if (!is_power_of_2(dev_priv->info.subslice_7eu[i]))
continue; continue;
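
The rewritten predicate is behavior-preserving: for a u8 mask, hweight8(x) == 1 holds exactly when x is a nonzero power of two, and is_power_of_2(0) is false, so the all-zero mask still takes the continue. A small demonstration:

#include <stdio.h>

/* Nonzero power of two <=> exactly one bit set (popcount == 1). */
static int is_pow2_nonzero(unsigned char x)
{
        return x && !(x & (x - 1));
}

int main(void)
{
        printf("%d %d %d\n", is_pow2_nonzero(0x00),
               is_pow2_nonzero(0x10), is_pow2_nonzero(0x30));   /* 0 1 0 */
        return 0;
}
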
/* /*
@ -1040,11 +1019,7 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
if (ret) if (ret)
return ret; return ret;
if (INTEL_REVID(dev) <= SKL_REVID_D0) { if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
/* WaDisableHDCInvalidation:skl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
BDW_DISABLE_HDC_INVALIDATION);
/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
I915_WRITE(FF_SLICE_CS_CHICKEN2, I915_WRITE(FF_SLICE_CS_CHICKEN2,
_MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
@ -1053,23 +1028,24 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
* involving this register should also be added to WA batch as required. * involving this register should also be added to WA batch as required.
*/ */
if (INTEL_REVID(dev) <= SKL_REVID_E0) if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
/* WaDisableLSQCROPERFforOCL:skl */ /* WaDisableLSQCROPERFforOCL:skl */
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_RO_PERF_DIS); GEN8_LQSC_RO_PERF_DIS);
/* WaEnableGapsTsvCreditFix:skl */ /* WaEnableGapsTsvCreditFix:skl */
if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) { if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
GEN9_GAPS_TSV_CREDIT_DISABLE)); GEN9_GAPS_TSV_CREDIT_DISABLE));
} }
/* WaDisablePowerCompilerClockGating:skl */ /* WaDisablePowerCompilerClockGating:skl */
if (INTEL_REVID(dev) == SKL_REVID_B0) if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
WA_SET_BIT_MASKED(HIZ_CHICKEN, WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
if (INTEL_REVID(dev) <= SKL_REVID_D0) { /* This is tied to WaForceContextSaveRestoreNonCoherent */
if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
/* /*
*Use Force Non-Coherent whenever executing a 3D context. This *Use Force Non-Coherent whenever executing a 3D context. This
* is a workaround for a possible hang in the unlikely event * is a workaround for a possible hang in the unlikely event
@ -1078,21 +1054,23 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
/* WaForceEnableNonCoherent:skl */ /* WaForceEnableNonCoherent:skl */
WA_SET_BIT_MASKED(HDC_CHICKEN0, WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT); HDC_FORCE_NON_COHERENT);
/* WaDisableHDCInvalidation:skl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
BDW_DISABLE_HDC_INVALIDATION);
} }
if (INTEL_REVID(dev) == SKL_REVID_C0 ||
INTEL_REVID(dev) == SKL_REVID_D0)
/* WaBarrierPerformanceFixDisable:skl */ /* WaBarrierPerformanceFixDisable:skl */
if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
WA_SET_BIT_MASKED(HDC_CHICKEN0, WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FENCE_DEST_SLM_DISABLE | HDC_FENCE_DEST_SLM_DISABLE |
HDC_BARRIER_PERFORMANCE_DISABLE); HDC_BARRIER_PERFORMANCE_DISABLE);
/* WaDisableSbeCacheDispatchPortSharing:skl */ /* WaDisableSbeCacheDispatchPortSharing:skl */
if (INTEL_REVID(dev) <= SKL_REVID_F0) { if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
WA_SET_BIT_MASKED( WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1, GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
return skl_tune_iz_hashing(ring); return skl_tune_iz_hashing(ring);
} }
@ -1109,11 +1087,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
/* WaStoreMultiplePTEenable:bxt */ /* WaStoreMultiplePTEenable:bxt */
/* This is a requirement according to Hardware specification */ /* This is a requirement according to Hardware specification */
if (INTEL_REVID(dev) == BXT_REVID_A0) if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
/* WaSetClckGatingDisableMedia:bxt */ /* WaSetClckGatingDisableMedia:bxt */
if (INTEL_REVID(dev) == BXT_REVID_A0) { if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE)); ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
} }
@ -1123,7 +1101,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
STALL_DOP_GATING_DISABLE); STALL_DOP_GATING_DISABLE);
/* WaDisableSbeCacheDispatchPortSharing:bxt */ /* WaDisableSbeCacheDispatchPortSharing:bxt */
if (INTEL_REVID(dev) <= BXT_REVID_B0) { if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
WA_SET_BIT_MASKED( WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1, GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1321,11 +1299,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
return ret; return ret;
for_each_ring(useless, dev_priv, i) { for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = signaller->semaphore.mbox.signal[i]; i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[i];
if (mbox_reg != GEN6_NOSYNC) {
if (i915_mmio_reg_valid(mbox_reg)) {
u32 seqno = i915_gem_request_get_seqno(signaller_req); u32 seqno = i915_gem_request_get_seqno(signaller_req);
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(signaller, mbox_reg); intel_ring_emit_reg(signaller, mbox_reg);
intel_ring_emit(signaller, seqno); intel_ring_emit(signaller, seqno);
} }
} }
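
The mailbox registers now travel as i915_reg_t, and a validity test replaces the old comparison against the GEN6_NOSYNC sentinel. A minimal sketch of the wrapper type this series introduces (treat the exact definitions as assumptions; the real ones live in i915_drv.h):

typedef struct {
	u32 reg;
} i915_reg_t;

/* Wrapping the offset in a struct turns u32/register mix-ups into
 * compile errors while costing nothing at runtime. */
#define _MMIO(r)	((const i915_reg_t){ .reg = (r) })

static inline u32 i915_mmio_reg_offset(i915_reg_t reg)
{
	return reg.reg;
}

/* Offset 0 doubles as the "no register" sentinel, formerly GEN6_NOSYNC. */
static inline bool i915_mmio_reg_valid(i915_reg_t reg)
{
	return i915_mmio_reg_offset(reg) != 0;
}
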
@@ -2027,6 +2007,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
{ {
struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = ringbuf->obj; struct drm_i915_gem_object *obj = ringbuf->obj;
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
unsigned flags = PIN_OFFSET_BIAS | 4096;
int ret; int ret;
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
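
The new flags value asks the pinning code to keep the ring away from GGTT offset 0: PIN_OFFSET_BIAS flags the request, and the page-aligned remainder of the word (4096 here) is the minimum acceptable offset. A sketch of how the pin side can honour that, with the flag names following i915_drv.h and the helper itself hypothetical:

#define PIN_OFFSET_BIAS	(1 << 3)
#define PIN_OFFSET_MASK	(~4095)

/* Hypothetical check: does an existing binding sit below the bias? */
static bool binding_satisfies_bias(u64 offset, unsigned flags)
{
	if ((flags & PIN_OFFSET_BIAS) &&
	    offset < (flags & PIN_OFFSET_MASK))
		return false;	/* bound too low: rebind above the bias */
	return true;
}
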
@@ -2083,10 +2065,14 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
int ret; int ret;
ring = kzalloc(sizeof(*ring), GFP_KERNEL); ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (ring == NULL) if (ring == NULL) {
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
engine->name);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
}
ring->ring = engine; ring->ring = engine;
list_add(&ring->link, &engine->buffers);
ring->size = size; ring->size = size;
/* Workaround an erratum on the i830 which causes a hang if /* Workaround an erratum on the i830 which causes a hang if
@@ -2102,8 +2088,9 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
ret = intel_alloc_ringbuffer_obj(engine->dev, ring); ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
if (ret) { if (ret) {
DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
engine->name, ret); engine->name, ret);
list_del(&ring->link);
kfree(ring); kfree(ring);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
@@ -2115,6 +2102,7 @@ void
intel_ringbuffer_free(struct intel_ringbuffer *ring) intel_ringbuffer_free(struct intel_ringbuffer *ring)
{ {
intel_destroy_ringbuffer_obj(ring); intel_destroy_ringbuffer_obj(ring);
list_del(&ring->link);
kfree(ring); kfree(ring);
} }
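
Together with the link/buffers fields added in the header hunks below, this gives each engine a list of every ringbuffer it owns: buffers are added on allocation and unlinked here before the kfree(). A hypothetical walker over that list:

/* Hypothetical debug helper: sum the ring space an engine owns. */
static u32 engine_ring_bytes(struct intel_engine_cs *engine)
{
	struct intel_ringbuffer *ring;
	u32 bytes = 0;

	list_for_each_entry(ring, &engine->buffers, link)
		bytes += ring->size;

	return bytes;
}
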
@@ -2130,14 +2118,17 @@ static int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list); INIT_LIST_HEAD(&ring->request_list);
INIT_LIST_HEAD(&ring->execlist_queue); INIT_LIST_HEAD(&ring->execlist_queue);
INIT_LIST_HEAD(&ring->buffers);
i915_gem_batch_pool_init(dev, &ring->batch_pool); i915_gem_batch_pool_init(dev, &ring->batch_pool);
memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno)); memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
init_waitqueue_head(&ring->irq_queue); init_waitqueue_head(&ring->irq_queue);
ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE); ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
if (IS_ERR(ringbuf)) if (IS_ERR(ringbuf)) {
return PTR_ERR(ringbuf); ret = PTR_ERR(ringbuf);
goto error;
}
ring->buffer = ringbuf; ring->buffer = ringbuf;
if (I915_NEED_GFX_HWS(dev)) { if (I915_NEED_GFX_HWS(dev)) {
@@ -2166,8 +2157,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
return 0; return 0;
error: error:
intel_ringbuffer_free(ringbuf); intel_cleanup_ring_buffer(ring);
ring->buffer = NULL;
return ret; return ret;
} }
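
The IS_ERR()/PTR_ERR() conversion above works because intel_engine_create_ringbuffer() reports failure through the kernel's ERR_PTR convention. For reference, a simplified sketch of that convention (from linux/err.h; details trimmed):

#define MAX_ERRNO	4095

/* Encode a -errno in an otherwise impossible pointer value. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

/* The top MAX_ERRNO addresses are reserved, so this is a range check. */
static inline bool IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
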
@@ -2180,12 +2170,14 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
dev_priv = to_i915(ring->dev); dev_priv = to_i915(ring->dev);
if (ring->buffer) {
intel_stop_ring_buffer(ring); intel_stop_ring_buffer(ring);
WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
intel_unpin_ringbuffer_obj(ring->buffer); intel_unpin_ringbuffer_obj(ring->buffer);
intel_ringbuffer_free(ring->buffer); intel_ringbuffer_free(ring->buffer);
ring->buffer = NULL; ring->buffer = NULL;
}
if (ring->cleanup) if (ring->cleanup)
ring->cleanup(ring); ring->cleanup(ring);
@@ -2199,6 +2191,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
i915_cmd_parser_fini_ring(ring); i915_cmd_parser_fini_ring(ring);
i915_gem_batch_pool_fini(&ring->batch_pool); i915_gem_batch_pool_fini(&ring->batch_pool);
ring->dev = NULL;
} }
static int ring_wait_for_space(struct intel_engine_cs *ring, int n) static int ring_wait_for_space(struct intel_engine_cs *ring, int n)


@@ -100,6 +100,7 @@ struct intel_ringbuffer {
void __iomem *virtual_start; void __iomem *virtual_start;
struct intel_engine_cs *ring; struct intel_engine_cs *ring;
struct list_head link;
u32 head; u32 head;
u32 tail; u32 tail;
@@ -157,6 +158,7 @@ struct intel_engine_cs {
u32 mmio_base; u32 mmio_base;
struct drm_device *dev; struct drm_device *dev;
struct intel_ringbuffer *buffer; struct intel_ringbuffer *buffer;
struct list_head buffers;
/* /*
* A pool of objects to use as shadow copies of client batch buffers * A pool of objects to use as shadow copies of client batch buffers
@@ -247,7 +249,7 @@ struct intel_engine_cs {
/* our mbox written by others */ /* our mbox written by others */
u32 wait[I915_NUM_RINGS]; u32 wait[I915_NUM_RINGS];
/* mboxes this ring signals to */ /* mboxes this ring signals to */
u32 signal[I915_NUM_RINGS]; i915_reg_t signal[I915_NUM_RINGS];
} mbox; } mbox;
u64 signal_ggtt[I915_NUM_RINGS]; u64 signal_ggtt[I915_NUM_RINGS];
}; };
@@ -348,7 +350,11 @@ struct intel_engine_cs {
u32 (*get_cmd_length_mask)(u32 cmd_header); u32 (*get_cmd_length_mask)(u32 cmd_header);
}; };
bool intel_ring_initialized(struct intel_engine_cs *ring); static inline bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
return ring->dev != NULL;
}
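
Paired with the ring->dev = NULL added at the end of intel_cleanup_ring_buffer() above, "initialized" becomes a plain pointer test. Its main consumer is the for_each_ring() iterator in i915_drv.h; a simplified sketch of its shape (the real macro wraps the embedded if to avoid dangling-else surprises):

/* Simplified sketch: iterate engines, skipping uninitialized ones. */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if ((((ring__) = &(dev_priv__)->ring[(i__)]), \
		     intel_ring_initialized((ring__))))
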
static inline unsigned static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring) intel_ring_flag(struct intel_engine_cs *ring)
@@ -441,6 +447,11 @@ static inline void intel_ring_emit(struct intel_engine_cs *ring,
iowrite32(data, ringbuf->virtual_start + ringbuf->tail); iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4; ringbuf->tail += 4;
} }
static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
i915_reg_t reg)
{
intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}
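
Usage mirrors the gen6_signal() hunk earlier in this diff; the typed register is unwrapped only as the dword enters the ring:

/* Usage sketch: an MI_LOAD_REGISTER_IMM write via the new helper. */
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(signaller, mbox_reg);	/* i915_reg_t, not raw u32 */
intel_ring_emit(signaller, seqno);
intel_ring_advance(signaller);
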
static inline void intel_ring_advance(struct intel_engine_cs *ring) static inline void intel_ring_advance(struct intel_engine_cs *ring)
{ {
struct intel_ringbuffer *ringbuf = ring->buffer; struct intel_ringbuffer *ringbuf = ring->buffer;

File diff suppressed because it is too large.


@@ -74,7 +74,7 @@ struct intel_sdvo {
struct i2c_adapter ddc; struct i2c_adapter ddc;
/* Register for the SDVO device: SDVOB or SDVOC */ /* Register for the SDVO device: SDVOB or SDVOC */
uint32_t sdvo_reg; i915_reg_t sdvo_reg;
/* Active outputs controlled by this SDVO output */ /* Active outputs controlled by this SDVO output */
uint16_t controlled_output; uint16_t controlled_output;
@@ -120,8 +120,7 @@ struct intel_sdvo {
*/ */
bool is_tv; bool is_tv;
/* On different gens SDVOB is at different places. */ enum port port;
bool is_sdvob;
/* This is for current tv format name */ /* This is for current tv format name */
int tv_format_index; int tv_format_index;
@@ -245,7 +244,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
u32 bval = val, cval = val; u32 bval = val, cval = val;
int i; int i;
if (intel_sdvo->sdvo_reg == PCH_SDVOB) { if (HAS_PCH_SPLIT(dev_priv)) {
I915_WRITE(intel_sdvo->sdvo_reg, val); I915_WRITE(intel_sdvo->sdvo_reg, val);
POSTING_READ(intel_sdvo->sdvo_reg); POSTING_READ(intel_sdvo->sdvo_reg);
/* /*
@@ -259,7 +258,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
return; return;
} }
if (intel_sdvo->sdvo_reg == GEN3_SDVOB) if (intel_sdvo->port == PORT_B)
cval = I915_READ(GEN3_SDVOC); cval = I915_READ(GEN3_SDVOC);
else else
bval = I915_READ(GEN3_SDVOB); bval = I915_READ(GEN3_SDVOB);
@@ -422,7 +421,7 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
}; };
#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? "SDVOB" : "SDVOC") #define SDVO_NAME(svdo) ((svdo)->port == PORT_B ? "SDVOB" : "SDVOC")
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len) const void *args, int args_len)
@@ -1282,14 +1281,10 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
sdvox |= SDVO_BORDER_ENABLE; sdvox |= SDVO_BORDER_ENABLE;
} else { } else {
sdvox = I915_READ(intel_sdvo->sdvo_reg); sdvox = I915_READ(intel_sdvo->sdvo_reg);
switch (intel_sdvo->sdvo_reg) { if (intel_sdvo->port == PORT_B)
case GEN3_SDVOB:
sdvox &= SDVOB_PRESERVE_MASK; sdvox &= SDVOB_PRESERVE_MASK;
break; else
case GEN3_SDVOC:
sdvox &= SDVOC_PRESERVE_MASK; sdvox &= SDVOC_PRESERVE_MASK;
break;
}
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
} }
@@ -1464,12 +1459,23 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
* matching DP port to be enabled on transcoder A. * matching DP port to be enabled on transcoder A.
*/ */
if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) { if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
*/
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
temp &= ~SDVO_PIPE_B_SELECT; temp &= ~SDVO_PIPE_B_SELECT;
temp |= SDVO_ENABLE; temp |= SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp); intel_sdvo_write_sdvox(intel_sdvo, temp);
temp &= ~SDVO_ENABLE; temp &= ~SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp); intel_sdvo_write_sdvox(intel_sdvo, temp);
intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
} }
} }
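
The shape of the workaround: suppress CPU and PCH underrun reporting on pipe A, perform the double SDVO_ENABLE write that provokes the glitch, let a vblank pass, then re-arm reporting. The vblank helper it leans on is roughly the following (a sketch; the real inline lives in intel_drv.h):

static inline void
intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
{
	const struct intel_crtc *crtc =
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));

	if (crtc->active)
		intel_wait_for_vblank(dev, pipe);
}
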
@@ -2251,7 +2257,7 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
{ {
struct sdvo_device_mapping *mapping; struct sdvo_device_mapping *mapping;
if (sdvo->is_sdvob) if (sdvo->port == PORT_B)
mapping = &(dev_priv->sdvo_mappings[0]); mapping = &(dev_priv->sdvo_mappings[0]);
else else
mapping = &(dev_priv->sdvo_mappings[1]); mapping = &(dev_priv->sdvo_mappings[1]);
@@ -2269,7 +2275,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
struct sdvo_device_mapping *mapping; struct sdvo_device_mapping *mapping;
u8 pin; u8 pin;
if (sdvo->is_sdvob) if (sdvo->port == PORT_B)
mapping = &dev_priv->sdvo_mappings[0]; mapping = &dev_priv->sdvo_mappings[0];
else else
mapping = &dev_priv->sdvo_mappings[1]; mapping = &dev_priv->sdvo_mappings[1];
@@ -2307,7 +2313,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct sdvo_device_mapping *my_mapping, *other_mapping; struct sdvo_device_mapping *my_mapping, *other_mapping;
if (sdvo->is_sdvob) { if (sdvo->port == PORT_B) {
my_mapping = &dev_priv->sdvo_mappings[0]; my_mapping = &dev_priv->sdvo_mappings[0];
other_mapping = &dev_priv->sdvo_mappings[1]; other_mapping = &dev_priv->sdvo_mappings[1];
} else { } else {
@@ -2332,7 +2338,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
/* No SDVO device info was found for the other DVO port, /* No SDVO device info was found for the other DVO port,
* so use the mapping assumption we had before BIOS parsing. * so use the mapping assumption we had before BIOS parsing.
*/ */
if (sdvo->is_sdvob) if (sdvo->port == PORT_B)
return 0x70; return 0x70;
else else
return 0x72; return 0x72;
@@ -2939,18 +2945,31 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
return i2c_add_adapter(&sdvo->ddc) == 0; return i2c_add_adapter(&sdvo->ddc) == 0;
} }
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
enum port port)
{
if (HAS_PCH_SPLIT(dev_priv))
WARN_ON(port != PORT_B);
else
WARN_ON(port != PORT_B && port != PORT_C);
}
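
The assertion encodes the hardware layout: PCH platforms route their one SDVO encoder through PORT_B, while pre-PCH gens may have both SDVOB and SDVOC. Hypothetical call sites, mirroring how intel_setup_outputs() would register the outputs under the new signature:

/* Hypothetical registration after the signature change: */
if (HAS_PCH_SPLIT(dev_priv)) {
	intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
} else {
	intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
	intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
}
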
bool intel_sdvo_init(struct drm_device *dev,
i915_reg_t sdvo_reg, enum port port)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder; struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo; struct intel_sdvo *intel_sdvo;
int i; int i;
assert_sdvo_port_valid(dev_priv, port);
intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL); intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
if (!intel_sdvo) if (!intel_sdvo)
return false; return false;
intel_sdvo->sdvo_reg = sdvo_reg; intel_sdvo->sdvo_reg = sdvo_reg;
intel_sdvo->is_sdvob = is_sdvob; intel_sdvo->port = port;
intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1; intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo); intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo);
if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
@@ -2959,7 +2978,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
/* encoder type will be decided later */ /* encoder type will be decided later */
intel_encoder = &intel_sdvo->base; intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO; intel_encoder->type = INTEL_OUTPUT_SDVO;
drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0); drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
NULL);
/* Read the regs to test if we can talk to the device */ /* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) { for (i = 0; i < 0x40; i++) {
@@ -3000,8 +3020,10 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
* hotplug lines. * hotplug lines.
*/ */
if (intel_sdvo->hotplug_active) { if (intel_sdvo->hotplug_active) {
intel_encoder->hpd_pin = if (intel_sdvo->port == PORT_B)
intel_sdvo->is_sdvob ? HPD_SDVO_B : HPD_SDVO_C; intel_encoder->hpd_pin = HPD_SDVO_B;
else
intel_encoder->hpd_pin = HPD_SDVO_C;
} }
/* /*

Some files were not shown because too many files have changed in this diff.